diff --git a/calico_versioned_docs/version-3.25/_includes/components/AutoHostendpointsMigrate.js b/calico_versioned_docs/version-3.25/_includes/components/AutoHostendpointsMigrate.js
deleted file mode 100644
index 2fa83d4362..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/components/AutoHostendpointsMigrate.js
+++ /dev/null
@@ -1,65 +0,0 @@
-import React from 'react';
-
-import Admonition from '@theme/Admonition';
-import Link from '@docusaurus/Link';
-import CodeBlock from '@theme/CodeBlock';
-import Heading from '@theme/Heading';
-
-import { prodname, baseUrl } from '../../variables';
-
-export default function AutoHostendpointsMigrate(props) {
- return (
- <>
-
- Migrating to auto host endpoints
-
-
- Auto host endpoints have an allow-all profile attached which allows all traffic in the absence of network
- policy. This may result in unexpected behavior and data.
-
-
In order to migrate existing all-interfaces host endpoints to {prodname}-managed auto host endpoints:
-
-
-
- Add any labels on existing all-interfaces host endpoints to their corresponding {props.orch} nodes.{' '}
- {prodname} manages labels on automatic host endpoints by syncing labels from their nodes. Any labels on
- existing all-interfaces host endpoints should be added to their respective nodes. For example, if your
- existing all-interface host endpoint for node node1 has the label{' '}
- environment: dev, then you must add that same label to its node:
-
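- For example, a minimal sketch for a Kubernetes node named node1 (kubectl access is assumed; adjust for your orchestrator):
-
-```bash
-kubectl label nodes node1 environment=dev
-```
-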
- Enable auto host endpoints by following the{' '}
-
- enable automatic host endpoints how-to guide
-
- . Note that automatic host endpoints are created with a profile attached that allows all traffic in the
- absence of network policy.
-
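- As a minimal sketch, enabling the feature typically comes down to patching the default KubeControllersConfiguration (the linked how-to guide is authoritative; calicoctl access to the datastore is assumed):
-
-```bash
-calicoctl patch kubecontrollersconfiguration default --patch='{"spec": {"controllers": {"node": {"hostEndpoint": {"autoCreate": "Enabled"}}}}}'
-```
-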
- Delete old all-interfaces host endpoints. You can distinguish host endpoints managed by {prodname} from
- others in several ways. First, automatic host endpoints have the label{' '}
- projectcalico.org/created-by: calico-kube-controllers. Secondly, automatic host
- endpoints' names have the suffix -auto-hep.
-
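- For example, one way to list the host endpoints that were not created by the controller, then delete them by name (a sketch; review the list before deleting anything):
-
-```bash
-calicoctl get hep -owide | grep -v -- '-auto-hep'
-# Replace <old-hep-name> with a name from the list above:
-calicoctl delete hostendpoint <old-hep-name>
-```
-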
- Get the cluster's Kubernetes API server host and port, which will be used to update the {prodnameWindows}{' '}
- config map. The API server host and port is required so that the {prodnameWindows} installation script can
- create a kubeconfig file for {prodname} services. If your Windows nodes already have {prodnameWindows}{' '}
- installed manually, skip this step. The installation script will use the API server host and port from your
- node's existing kubeconfig file if the KUBERNETES_SERVICE_HOST and{' '}
- KUBERNETES_SERVICE_PORT variables are not provided in the calico-windows-config{' '}
- ConfigMap.
-
-
First, make a note of the address of the API server:
-
-
-
- If you have a single API server with a static IP address, you can use its IP address and port. The IP can
- be found by running:
-
- kubectl get endpoints kubernetes -o wide
-
The output should look like the following, with a single IP address and port under "ENDPOINTS":
-
- {`NAME ENDPOINTS AGE
-kubernetes 172.16.101.157:6443 40m`}
-
-
- If there are multiple entries under "ENDPOINTS", then your cluster must have more than one API server. In
- this case, use the appropriate load balancing option below for your cluster.
-
-
-
-
- If using DNS load balancing (as used by kops), use the FQDN and port of the API server{' '}
-
- api.internal.{'<'}clustername{'>'}
-
- .
-
-
-
-
- If you have multiple API servers with a load balancer in front, you should use the IP and port of the load
- balancer.
-
-
-
-
- If your cluster uses a ConfigMap to configure kube-proxy, you can find the "right" way to
- reach the API server by examining the config map. For example:
-
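- A sketch of one way to inspect it (this assumes the ConfigMap is named kube-proxy in the kube-system namespace):
-
-```bash
-kubectl get configmap -n kube-system kube-proxy -o yaml | grep server
-```
-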
- In this case, the server is d881b853aea312e00302a84f1e346a77.gr7.us-west-2.eks.amazonaws.com{' '}
- and the port is 443 (the standard HTTPS port).
-
-
-
-
-
-
- Edit the calico-windows-config ConfigMap in the downloaded manifest and ensure the required
- variables are correct for your cluster.
-
-
-
- {props.networkingType === 'vxlan' ? (
- <>
- CALICO_NETWORKING_BACKEND: This should be set to vxlan.
- >
- ) : (
- <>
- CALICO_NETWORKING_BACKEND: This should be set to windows-bgp.
- >
- )}
-
-
- KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT: The Kubernetes API server
- host and port (discovered in the previous step) used to create a kubeconfig file for {prodname} services. If
- your node already has an existing kubeconfig file, leave these variables blank.
-
-
- K8S_SERVICE_CIDR: The Kubernetes service clusterIP range configured in your cluster. This must
- match the service-cluster-ip-range used by kube-apiserver.
-
-
- CNI_BIN_DIR: Path where {prodname} CNI binaries will be installed. This must match the CNI bin
- value in the ContainerD service configuration. If you used the provided Install-Containerd.ps1 script, you
- should use the CNI bin path value you provided to that script.
-
-
- CNI_CONF_DIR: Path where {prodname} CNI configuration will be installed. This must match the
- CNI conf value in the ContainerD service configuration. If you used the provided Install-Containerd.ps1
- script, you should use the CNI conf path value you provided to that script.
-
-
- DNS_NAME_SERVERS: The DNS nameservers that will be used in the CNI configuration.
-
-
- FELIX_HEALTHENABLED: The Felix health check server must be enabled.
-
-
-
-
-
Apply the {prodnameWindows} installation manifest.
- After the log Calico for Windows installed appears, installation is complete. Next, the{' '}
- {prodnameWindows} services are started in separate containers:
-
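- For example, a sketch of applying the manifest and confirming the Windows pods come up (the manifest file name and the calico-system namespace are assumptions for an operator-managed cluster; adjust to match your download and install method):
-
-```bash
-kubectl apply -f calico-windows-vxlan.yaml
-# Look for the calico-node-windows pods scheduled on your Windows nodes:
-kubectl get pods -n calico-system -o wide
-```
-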
- Depending on your platform, you may already have kube-proxy running on your Windows nodes. If kube-proxy is
- already running on your Windows nodes, skip this step. If kube-proxy is not running, you must install and run
- kube-proxy on each of the Windows nodes in your cluster. Note: The provided manifest depends on the kubeconfig
- provided by the kube-proxy ConfigMap in the kube-system namespace.
-
-
- );
-}
diff --git a/calico_versioned_docs/version-3.25/_includes/components/EnvironmentFile.js b/calico_versioned_docs/version-3.25/_includes/components/EnvironmentFile.js
deleted file mode 100644
index cdeecc20ee..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/components/EnvironmentFile.js
+++ /dev/null
@@ -1,201 +0,0 @@
-import React from 'react';
-
-import Admonition from '@theme/Admonition';
-import CodeBlock from '@theme/CodeBlock';
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-import Link from '@docusaurus/Link';
-
-import { prodname, baseUrl } from '../../variables';
-
-export default function EnvironmentFile(props) {
- if (props.target === 'felix') {
- var etcd_endpoints = 'FELIX_ETCDENDPOINTS';
- var etcd_cert_file = 'FELIX_ETCDCERTFILE';
- var etcd_key_file = 'FELIX_ETCDKEYFILE';
- var etcd_ca_file = 'FELIX_ETCDCAFILE';
- var datastore_type = 'FELIX_DATASTORETYPE';
- } else {
- var etcd_endpoints = 'ETCD_ENDPOINTS';
- var etcd_cert_file = 'ETCD_CERT_FILE';
- var etcd_key_file = 'ETCD_KEY_FILE';
- var etcd_ca_file = 'ETCD_CA_CERT_FILE';
- var datastore_type = 'DATASTORE_TYPE';
- }
-
- return (
- <>
-
-
- Use the following guidelines and sample file to define the environment variables for starting Calico on the
- host. For more help, see the{' '}
-
- {props.install === 'container' ? (
- {props.nodecontainer} configuration reference
- ) : (
- Felix configuration reference
- )}
-
-
-
-
For a Kubernetes datastore (default) set the following:
-
-
-
-
Variable
-
Configuration guidance
-
-
-
-
-
FELIX_DATASTORETYPE
-
- Set to kubernetes
-
-
-
-
KUBECONFIG
-
Path to kubeconfig file to access the Kubernetes API Server
-
-
-
- {props.install === 'container' && (
-
- You will need to volume mount the kubeconfig file into the container at the location specified by the
- paths mentioned above.
-
- )}
-
-
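- For example, a minimal EnvironmentFile sketch for the Kubernetes datastore (the kubeconfig path is an assumption; point it at wherever your kubeconfig actually lives):
-
-```bash
-FELIX_DATASTORETYPE=kubernetes
-KUBECONFIG=/etc/kubernetes/kubeconfig
-```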
-
For an etcdv3 datastore set the following:
-
-
-
-
Variable
-
Configuration guidance
-
-
-
-
-
{datastore_type}
-
- Set to etcdv3
-
-
-
-
{etcd_endpoints}
-
Comma separated list of etcdv3 cluster URLs, e.g. https://calico-datastore.example.com:2379
-
-
-
{etcd_ca_file}
-
- Path to CA certificate to validate etcd’s server cert. Required if using TLS and not using a public
- CA.
-
-
-
-
- {etcd_cert_file}
-
- {etcd_key_file}
-
-
Paths to certificate and keys used for client authentication to the etcd cluster, if enabled.
-
-
-
- {props.install === 'container' && (
-
- If using certificates and keys, you will need to volume mount them into the container at the location
- specified by the paths mentioned above.
-
- )}
-
-
-
For either datastore set the following:
-
-
-
-
Variable
-
Configuration guidance
-
-
-
-
-
CALICO_NODENAME
-
- Identifies the node. If a value is not specified, the compute server hostname is used to identify the
- Calico node.
-
-
-
-
CALICO_IP or CALICO_IP6
-
- If values are not specified for both, {prodname} uses the currently-configured values for the next hop
- IP addresses for this node—these can be configured through the Node resource. If no next hop addresses
- are configured, {prodname} automatically determines an IPv4 next hop address by querying the host
- interfaces (and configures this value in the Node resource). You can set CALICO_IP to{' '}
- autodetect to force auto-detection of the IP address every time the node starts. If you set
- IP addresses through these environment variables, it reconfigures any values currently set through the
- Node resource.
-
-
-
-
CALICO_AS
-
- If not specified, {prodname} uses the currently configured value for the AS Number for the node BGP
- client—this can be configured through the Node resource. If the Node resource value is not set, Calico
- inherits the AS Number from the global default value. If you set a value through this environment
- variable, it reconfigures any value currently set through the Node resource.
-
-
-
-
NO_DEFAULT_POOLS
-
- Set to true to prevent {prodname} from creating a default pool if one does not exist. Pools are used
- for workload endpoints and not required for non-cluster hosts.
-
-
-
-
CALICO_NETWORKING_BACKEND
-
- The networking backend to use. In bird mode, Calico will provide BGP networking using the
- BIRD BGP daemon; VXLAN networking can also be used. In vxlan mode, only VXLAN networking
- is provided; BIRD and BGP are disabled. If you want to run Calico for policy only, set to{' '}
- none.
-
-
-
-
-
- Sample EnvironmentFile - save to /etc/calico/calico.env
-
-
- {`${datastore_type}=etcdv3
-${etcd_endpoints}=https://calico-datastore.example.com:2379
-${etcd_ca_file}="/pki/ca.pem"
-${etcd_cert_file}="/pki/client-cert.pem"
-${etcd_key_file}="/pki/client-key.pem"`}
- {props.install === 'container'
- ? `
-CALICO_NODENAME=""
-NO_DEFAULT_POOLS="true"
-CALICO_IP=""
-CALICO_IP6=""
-CALICO_AS=""
-CALICO_NETWORKING_BACKEND=bird`
- : ''}
-
-
-
- >
- );
-}
diff --git a/calico_versioned_docs/version-3.25/_includes/components/HostEndpointsUpgrade.js b/calico_versioned_docs/version-3.25/_includes/components/HostEndpointsUpgrade.js
deleted file mode 100644
index ebacc53bb8..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/components/HostEndpointsUpgrade.js
+++ /dev/null
@@ -1,84 +0,0 @@
-import React from 'react';
-import Admonition from '@theme/Admonition';
-import CodeBlock from '@theme/CodeBlock';
-import Heading from '@theme/Heading';
-
-import { prodname, version } from '../../variables';
-
-export default function HostEndpointsUpgrade(props) {
- return (
- <>
-
- Host Endpoints
-
-
- If your cluster has host endpoints with interfaceName: * you must prepare your cluster before
- upgrading. Failure to do so will result in an outage.
-
-
- In versions of {prodname} prior to v3.14, all-interfaces host endpoints (host endpoints with{' '}
- interfaceName: *) only supported pre-DNAT policy. The default behavior of all-interfaces host
- endpoints, in the absence of any policy, was to allow all traffic.
-
-
- Beginning from v3.14, all-interfaces host endpoints support normal policy in addition to pre-DNAT policy. The
- support for normal policy includes a change in default behavior for all-interfaces host endpoints: in the
- absence of policy the default behavior is to drop traffic. This default behavior is consistent
- with "named" host endpoints (which specify a named interface such as "eth0"); named host
- endpoints drop traffic in the absence of policy.
-
-
- Before upgrading to {version}, you must ensure that global network policies are in place that select existing
- all-interfaces host endpoints and explicitly allow existing traffic flows. As a starting point, you can create
- an allow-all policy that selects existing all-interfaces host endpoints. First, we'll add a label to the
- existing host endpoints. Get a list of the nodes that have an all-interfaces host endpoint:
-
- calicoctl get hep -owide | grep '*' | awk '{print $1}'
-
- With the names of the all-interfaces host endpoints, we can label each host endpoint with a new label (for
- example, host-endpoint-upgrade: ""):
-
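- For example, a sketch that labels each of those host endpoints in a loop (this assumes the names printed by the previous command are the host endpoint names):
-
-```bash
-for HEP in $(calicoctl get hep -owide | grep '*' | awk '{print $1}'); do
-  calicoctl label hostendpoint "$HEP" host-endpoint-upgrade= --overwrite
-done
-```
-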
- Now that the nodes with an all-interfaces host endpoint are labeled with host-endpoint-upgrade,
- we can create a policy to log and allow all traffic going into or out of the host endpoints temporarily:
-
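- A sketch of such a temporary log-and-allow policy (the policy name is illustrative; it selects the host endpoints labeled above):
-
-```bash
-calicoctl apply -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-all-upgrade
-spec:
-  selector: has(host-endpoint-upgrade)
-  types:
-    - Ingress
-    - Egress
-  ingress:
-    - action: Log
-    - action: Allow
-  egress:
-    - action: Log
-    - action: Allow
-EOF
-```
-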
- After applying this policy, all-interfaces host endpoints will log and allow all traffic through them. This
- policy will allow all traffic not accounted for by other policies. After upgrading, please review syslog logs
- for traffic going through the host endpoints and update the policy as needed to secure traffic to the host
- endpoints.
-
-
- Release archive
- {' '}
- with Kubernetes manifests, Docker images and binaries.
-
- )}
- {release.note}
-
-
-
-
Component
-
Version
-
-
-
- {Object.keys(release.components).map((comp) => {
- // Use the imageName for the component, if it has one, for better readability
- const componentName = imageNames[comp] || comp;
-
- return (
-
If you are using one of the recommended distributions, you will already satisfy these.
-
-
- Due to the large number of distributions and kernel versions out there, it’s hard to be precise about the names
- of the particular kernel modules that are required to run {prodname}. However, in general, you’ll need the following (a rough way to check for some of these is sketched after the list):
-
-
-
-
- The iptables modules (both the “legacy” and “nft” variants are supported). These are typically
- broken up into many small modules, one for each type of match criteria and one for each type of action.{' '}
- {prodname} requires:
-
-
-
The “base” modules (including the IPv6 versions if IPv6 is enabled in your cluster).
-
- At least the following match criteria: set, rpfilter, addrtype,{' '}
- comment, conntrack, icmp, tcp, udp,{' '}
- icmpv6 (if IPv6 is enabled in your kernel), mark,{' '}
- multiport, sctp, and ipvs (if using
- kube-proxy in IPVS mode).
-
-
- At least the following actions: REJECT,ACCEPT, DROP,{' '}
- LOG.
-
-
-
-
-
IP sets support.
-
-
-
Netfilter Conntrack support compiled in (with SCTP support if using SCTP).
-
-
-
- IPVS support if using kube-proxy in IPVS mode.
-
-
-
-
- IPIP, VXLAN, or WireGuard support, if using {prodname}
- networking in one of those modes.
-
-
-
-
- eBPF (including the tc hook support) and XDP (if you want to use the eBPF dataplane).
-
-
-
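- A rough way to check for some of the modules above (a sketch; it assumes they are built as loadable modules rather than compiled into the kernel):
-
-```bash
-for mod in ip_set xt_set xt_addrtype xt_comment xt_mark xt_multiport nf_conntrack; do
-  # modprobe -n is a dry run: it succeeds if the module can be resolved.
-  modprobe -n "$mod" && echo "$mod: ok" || echo "$mod: missing"
-done
-```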
- >
- );
-}
diff --git a/calico_versioned_docs/version-3.25/_includes/components/ReqsSys.js b/calico_versioned_docs/version-3.25/_includes/components/ReqsSys.js
deleted file mode 100644
index 521f31bdbc..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/components/ReqsSys.js
+++ /dev/null
@@ -1,330 +0,0 @@
-import React from 'react';
-
-import Admonition from '@theme/Admonition';
-import Link from '@docusaurus/Link';
-import Heading from '@theme/Heading';
-
-import { orchestrators } from '@site/variables';
-import { prodname, baseUrl } from '../../variables';
-
-function NodeRequirementsOSS(props) {
- return (
- <>
-
- Node requirements
-
-
-
-
x86-64, arm64, ppc64le, or s390x processor
-
-
-
- {prodname} must be able to manage cali* interfaces on the host. When IPIP is enabled (the
- default), {prodname} also needs to be able to manage tunl* interfaces. When VXLAN is enabled,{' '}
- {prodname} also needs to be able to manage the
- vxlan.calico interface.
-
-
-
-
- Linux kernel 3.10 or later with required dependencies. The
- following distributions have the required kernel, its dependencies, and are known to work well with{' '}
- {prodname} and {props.orch}.
-
- Many Linux distributions, such as most of the above, include NetworkManager. By default, NetworkManager does
- not allow {prodname} to manage interfaces. If your nodes have NetworkManager, complete the steps in{' '}
-
- Preventing NetworkManager from controlling {prodname} interfaces
- {' '}
- before installing {prodname}.
-
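- The linked steps amount to telling NetworkManager to leave {prodname}'s interfaces alone; a sketch of the drop-in configuration (the file name is arbitrary, and the interface patterns match those listed above):
-
-```bash
-cat > /etc/NetworkManager/conf.d/calico.conf <<'EOF'
-[keyfile]
-unmanaged-devices=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico
-EOF
-```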
-
-
-
-
- If your Linux distribution comes with Firewalld or another iptables manager installed, it should be disabled.
- These may interfere with rules added by {prodname} and result in unexpected behavior.
-
-
-
-
-
- If a host firewall is needed, it can be configured using {prodname} HostEndpoint and GlobalNetworkPolicy resources.
- For more information, see Security for host.
-
- {prodname} requires a key/value store accessible by all {prodname} components.
- {
- {
- OpenShift: With OpenShift, the Kubernetes API datastore is used for the key/value store.,
- Kubernetes: (
-
- On Kubernetes, you can configure {prodname} to access an etcdv3 cluster directly or to use the
- Kubernetes API datastore.
-
- ),
- OpenStack: (
-
- For production you will likely want multiple nodes for greater performance and reliability. If you don’t
- already have an etcdv3 cluster to connect to, please refer to{' '}
- the upstream etcd docs for detailed advice and setup.
-
- ),
- 'host protection': The key/value store must be etcdv3.,
- }[props.orch]
- }
-
- *{' '}
-
- The value passed to kube-apiserver using the --secure-port flag. If you cannot locate this,
- check the targetPort value returned by
- kubectl get svc kubernetes -o yaml.
-
-
- When installed as a Kubernetes daemon set, {prodname} meets this requirement by running as a privileged
- container. This requires that the kubelet be allowed to run privileged containers. There are two ways this
- can be achieved.
-
-
-
- Specify --allow-privileged on the kubelet (deprecated).
-
-
- Use a{' '}
- pod security policy.
-
-
- >
- )}
- >
- );
-}
-
-export default function ReqsSys(props) {
- return (
- <>
-
-
-
-
-
- >
- );
-}
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_calicoctl-version.mdx b/calico_versioned_docs/version-3.25/_includes/content/_calicoctl-version.mdx
deleted file mode 100644
index fe02003d55..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_calicoctl-version.mdx
+++ /dev/null
@@ -1,7 +0,0 @@
-| Field | Value |
-| ------------------- | ------------------------------------------------------ |
-| `Client Version` | Version of `calicoctl` |
-| `Build date` | Time and date of `calicoctl` build |
-| `Git commit` | Git commit number of `calicoctl` |
-| `Cluster Version`\* | Version number of `{{nodecontainer}}` and {{prodname}} |
-| `Cluster Type`\* | Other details about the cluster |
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_determine-ipam.mdx b/calico_versioned_docs/version-3.25/_includes/content/_determine-ipam.mdx
deleted file mode 100644
index a81831bd4f..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_determine-ipam.mdx
+++ /dev/null
@@ -1,37 +0,0 @@
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-If you are not sure which IPAM plugin your cluster is using, the way to tell depends on the install method.
-
-
-
-
-The IPAM plugin can be queried on the default Installation resource.
-
-```
-kubectl get installation default -o go-template --template {{.spec.cni.ipam.type}}
-```
-
-If your cluster is using Calico IPAM, the above command should return a result of `Calico`.
-
-
-
-
-SSH to one of your Kubernetes nodes and examine the CNI configuration.
-
-```
-cat /etc/cni/net.d/10-calico.conflist
-```
-
-Look for the entry:
-
-```
- "ipam": {
- "type": "calico-ipam"
- },
-```
-
-If it is present, you are using the {{prodname}} IPAM. If the IPAM is not {{prodname}}, or the 10-calico.conflist file does not exist, you cannot use these features in your cluster.
-
-
-
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_docker-container-service.mdx b/calico_versioned_docs/version-3.25/_includes/content/_docker-container-service.mdx
deleted file mode 100644
index f072233f84..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_docker-container-service.mdx
+++ /dev/null
@@ -1,80 +0,0 @@
-This section describes how to run `{{nodecontainer}}` as a Docker container.
-
-:::note
-
-We include examples for systemd, but the commands can be
-applied to other init daemons such as upstart.
-
-:::
-
-### Step 1: Create environment file
-
-
-
-### Step 2: Configure the init system
-
-Use an init daemon (like systemd or upstart) to start the {{nodecontainer}} image as a service using the EnvironmentFile values.
-
-Sample systemd service file: `{{noderunning}}.service`
-
-```shell
-[Unit]
-Description={{noderunning}}
-After=docker.service
-Requires=docker.service
-
-[Service]
-EnvironmentFile=/etc/calico/calico.env
-ExecStartPre=-/usr/bin/docker rm -f {{noderunning}}
-ExecStart=/usr/bin/docker run --net=host --privileged \
- --name={{noderunning}} \
- -e NODENAME=${CALICO_NODENAME} \
- -e IP=${CALICO_IP} \
- -e IP6=${CALICO_IP6} \
- -e CALICO_NETWORKING_BACKEND=${CALICO_NETWORKING_BACKEND} \
- -e AS=${CALICO_AS} \
- -e NO_DEFAULT_POOLS=${NO_DEFAULT_POOLS} \
- -e DATASTORE_TYPE=${DATASTORE_TYPE} \
- -e ETCD_ENDPOINTS=${ETCD_ENDPOINTS} \
- -e ETCD_CA_CERT_FILE=${ETCD_CA_CERT_FILE} \
- -e ETCD_CERT_FILE=${ETCD_CERT_FILE} \
- -e ETCD_KEY_FILE=${ETCD_KEY_FILE} \
- -e KUBECONFIG=${KUBECONFIG} \
- -v /var/log/calico:/var/log/calico \
- -v /var/lib/calico:/var/lib/calico \
- -v /var/run/calico:/var/run/calico \
- -v /run/docker/plugins:/run/docker/plugins \
- -v /lib/modules:/lib/modules \
- -v /etc/pki:/pki \
- {{registry}}{{imageNames.calico/node}}:{{releaseTitle}} /bin/calico-node -felix
-
-ExecStop=-/usr/bin/docker stop {{noderunning}}
-
-Restart=on-failure
-StartLimitBurst=3
-StartLimitInterval=60s
-
-[Install]
-WantedBy=multi-user.target
-```
-
-Upon start, the systemd service:
-
-- Confirms Docker is installed under the `[Unit]` section
-- Gets environment variables from the environment file above
-- Removes existing `{{nodecontainer}}` container (if it exists)
-- Starts `{{nodecontainer}}`
-
-The script also stops the `{{nodecontainer}}` container when the service is stopped.
-
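-To wire this up, a sketch of enabling the unit (this assumes the file above was saved as `/etc/systemd/system/calico-node.service`; substitute the actual unit name for your install):
-
-```bash
-sudo systemctl daemon-reload
-sudo systemctl enable --now calico-node.service
-sudo journalctl -u calico-node.service -f   # follow the service logs
-```
-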
-:::note
-
-Depending on how you've installed Docker, the name of the Docker service
-under the `[Unit]` section may be different (such as `docker-engine.service`).
-Be sure to check this before starting the service.
-
-:::
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_ebpf-value.mdx b/calico_versioned_docs/version-3.25/_includes/content/_ebpf-value.mdx
deleted file mode 100644
index ca2baf3587..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_ebpf-value.mdx
+++ /dev/null
@@ -1,12 +0,0 @@
-The eBPF dataplane mode has several advantages over standard Linux networking pipeline mode:
-
-- It scales to higher throughput.
-- It uses less CPU per GBit.
-- It has native support for Kubernetes services (without needing kube-proxy) that:
-
- - Reduces first packet latency for packets to services.
- - Preserves external client source IP addresses all the way to the pod.
- - Supports DSR (Direct Server Return) for more efficient service routing.
- - Uses less CPU than kube-proxy to keep the dataplane in sync.
-
-To learn more and see performance metrics from our test environment, see the blog, [Introducing the Calico eBPF dataplane](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/).
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_endpointport.mdx b/calico_versioned_docs/version-3.25/_includes/content/_endpointport.mdx
deleted file mode 100644
index 05dde33612..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_endpointport.mdx
+++ /dev/null
@@ -1,15 +0,0 @@
-An EndpointPort associates a name with a particular TCP/UDP/SCTP port of the endpoint, allowing it to
-be referenced as a named port in [policy rules](../../reference/resources/networkpolicy.mdx#entityrule).
-
-| Field | Description | Accepted Values | Schema | Default |
-| -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------- | ------ | ------- |
-| name | The name to attach to this port, allowing it to be referred to in [policy rules](../../reference/resources/networkpolicy.mdx#entityrule). Names must be unique within an endpoint. | | string | |
-| protocol | The protocol of this named port. | `TCP`, `UDP`, `SCTP` | string | |
-| port | The workload port number. | `1`-`65535` | int | |
-
-:::note
-
-On their own, EndpointPort entries don't result in any change to the connectivity of the port.
-They only have an effect if they are referred to in policy.
-
-:::
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_entityrule.mdx b/calico_versioned_docs/version-3.25/_includes/content/_entityrule.mdx
deleted file mode 100644
index e013a5e9f4..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_entityrule.mdx
+++ /dev/null
@@ -1,34 +0,0 @@
-Entity rules specify the attributes of the source or destination of a packet that must match for the rule as a whole
-to match. Packets are matched on their IPs and ports. If the rule contains multiple match criteria (for example, an
-IP and a port) then all match criteria must match for the rule as a whole to match.
-
-[Selectors](#selectors) offer a powerful way to select the source or destination to match based on labels.
-Selectors can match [workload endpoints](../../reference/resources/workloadendpoint.mdx), host endpoints, and
-([namespaced](../../reference/resources/networkset.mdx) or
-[global](../../reference/resources/globalnetworkset.mdx)) network sets.
-
-| Field | Description | Accepted Values | Schema | Default |
-| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | ------------------------------------------- | ------- |
-| nets | Match packets with IP in any of the listed CIDRs. | List of valid IPv4 CIDRs or list of valid IPv6 CIDRs (IPv4 and IPv6 CIDRs shouldn't be mixed in one rule) | list of cidrs |
-| notNets | Negative match on CIDRs. Match packets with IP not in any of the listed CIDRs. | List of valid IPv4 CIDRs or list of valid IPv6 CIDRs (IPv4 and IPv6 CIDRs shouldn't be mixed in one rule) | list of cidrs |
-| selector | Positive match on selected endpoints. If a `namespaceSelector` is also defined, the set of endpoints this applies to is limited to the endpoints in the selected namespaces. | Valid selector | [selector](#selectors) | |
-| notSelector | Negative match on selected endpoints. If a `namespaceSelector` is also defined, the set of endpoints this applies to is limited to the endpoints in the selected namespaces. | Valid selector | [selector](#selectors) | |
-| namespaceSelector | Positive match on selected namespaces. If specified, only workload endpoints in the selected Kubernetes namespaces are matched. Matches namespaces based on the labels that have been applied to the namespaces. Defines the scope that selectors will apply to, if not defined then selectors apply to the NetworkPolicy's namespace. Match a specific namespace by name using the `projectcalico.org/name` label. Select the non-namespaced resources like GlobalNetworkSet(s), host endpoints to which this policy applies by using `global()` selector. | Valid selector | [selector](#selectors) | |
-| ports | Positive match on the specified ports | | list of [ports](#ports) | |
-| notPorts | Negative match on the specified ports | | list of [ports](#ports) | |
-| serviceAccounts | Match endpoints running under service accounts. If a `namespaceSelector` is also defined, the set of service accounts this applies to is limited to the service accounts in the selected namespaces. | | [ServiceAccountMatch](#serviceaccountmatch) | |
-| services | Match the specified service(s). If specified on egress rule destinations, no other selection criteria can be set. If specified on ingress rule sources, only positive or negative matches on ports can be specified. | | [ServiceMatch](#servicematch) | |
-
-When using selectors in network policy, remember that selectors only match (known) resources, but _rules_ match
-packets. A rule with a selector `all()` won't match "all packets", it will match "packets from all in-scope
-endpoints and network sets". To match all packets, do not include a selector in the rule at all.
-
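-For illustration, a sketch of a policy whose ingress rule combines a `namespaceSelector`, a `selector`, and destination ports (the names and labels are hypothetical):
-
-```bash
-calicoctl apply -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: allow-frontend-to-db
-  namespace: default
-spec:
-  selector: role == 'database'
-  ingress:
-    - action: Allow
-      protocol: TCP
-      source:
-        namespaceSelector: env == 'prod'
-        selector: role == 'frontend'
-      destination:
-        ports: [5432]
-EOF
-```
-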
-:::note
-
-`notSelector` is somewhat subtle because the `not` in `notSelector` negates the packet match
-rather than the selector:
-
-- `selector: !has(foo)` matches packets from/to endpoints and network sets that do not have the label "foo".
-- `notSelector: has(foo)` matches packets from/to **anywhere** (including outside the cluster), **except** traffic from/to endpoints and network sets that have the label "foo".
-
-:::
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_felix-init-datastore.mdx b/calico_versioned_docs/version-3.25/_includes/content/_felix-init-datastore.mdx
deleted file mode 100644
index 9439c58242..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_felix-init-datastore.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
-You should configure a `node` resource for each
-host running Felix. In this case, the database is initialized after
-creating the first `node` resource. For a deployment that does not include
-the {{prodname}}/BGP integration, the specification of a node resource just
-requires the name of the node; for most deployments this will be the same as the
-hostname.
-
-```bash
-calicoctl create -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: Node
-metadata:
-  name: <node name or hostname>
-EOF
-```
-
-The Felix logs should transition from periodic notifications
-that Felix is in the state `wait-for-ready` to a stream of initialization
-messages.
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_httpmatch.mdx b/calico_versioned_docs/version-3.25/_includes/content/_httpmatch.mdx
deleted file mode 100644
index 613bb97c3d..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_httpmatch.mdx
+++ /dev/null
@@ -1,23 +0,0 @@
-An HTTPMatch matches attributes of an HTTP request. The presence of an HTTPMatch clause on a Rule will cause that rule to only match HTTP traffic. Other application layer protocols will not match the rule.
-
-Example:
-
-```yaml
-http:
- methods: ['GET', 'PUT']
- paths:
- - exact: '/projects/calico'
- - prefix: '/users'
-```
-
-| Field | Description | Schema |
-| ------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------- |
-| methods | Match HTTP methods. Case sensitive. [Standard HTTP method descriptions.](https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html) | list of strings |
-| paths | Match HTTP paths. Case sensitive. | list of [HTTPPathMatch](#httppathmatch) |
-
-### HTTPPathMatch
-
-| Syntax | Example | Description |
-| ------ | ------------------- | ------------------------------------------------------------------------------- |
-| exact | `exact: "/foo/bar"` | Matches the exact path as written, not including the query string or fragments. |
-| prefix | `prefix: "/keys"` | Matches any path that begins with the given prefix. |
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_icmp.mdx b/calico_versioned_docs/version-3.25/_includes/content/_icmp.mdx
deleted file mode 100644
index 1adb456472..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_icmp.mdx
+++ /dev/null
@@ -1,4 +0,0 @@
-| Field | Description | Accepted Values | Schema | Default |
-| ----- | ------------------- | -------------------- | ------- | ------- |
-| type | Match on ICMP type. | Can be integer 0-254 | integer |
-| code | Match on ICMP code. | Can be integer 0-255 | integer |
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_ipnat.mdx b/calico_versioned_docs/version-3.25/_includes/content/_ipnat.mdx
deleted file mode 100644
index 9cfa2fb904..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_ipnat.mdx
+++ /dev/null
@@ -1,6 +0,0 @@
-IPNAT contains a single NAT mapping for a WorkloadEndpoint resource.
-
-| Field | Description | Accepted Values | Schema | Default |
-| ---------- | ------------------------------------------- | ------------------ | ------ | ------- |
-| internalIP | The internal IP address of the NAT mapping. | A valid IP address | string | |
-| externalIP | The external IP address. | A valid IP address | string | |
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_openstack-etcd-auth.mdx b/calico_versioned_docs/version-3.25/_includes/content/_openstack-etcd-auth.mdx
deleted file mode 100644
index 6dcce7f8fe..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_openstack-etcd-auth.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
-## Configuration for etcd authentication
-
-If your etcd cluster has authentication enabled, you must also configure the
-relevant {{prodname}} components with an etcd user name and password. You
-can create a single etcd user for {{prodname}} that has permission to read
-and write any key beginning with `/calico/`, or you can create specific etcd
-users for each component, with more precise permissions.
-
-This table sets out where to configure each component of {{prodname}} for
-OpenStack, and the detailed access permissions that each component needs:
-
-| Component | Configuration | Access |
-| -------------- | -------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- |
-| Felix | `CALICO_ETCD_USERNAME` and `CALICO_ETCD_PASSWORD` variables in Felix's environment on each compute node. | [See here](../../reference/etcd-rbac/calico-etcdv3-paths.mdx#felix-as-a-stand-alone-process) |
-| Neutron driver | `etcd_username` and `etcd_password` in `[calico]` section of `/etc/neutron/neutron.conf` on each control node. | [See here](../../reference/etcd-rbac/calico-etcdv3-paths.mdx#openstack-calico-driver-for-neutron) |
-| DHCP agent | `etcd_username` and `etcd_password` in `[calico]` section of `/etc/neutron/neutron.conf` on each compute node. | [See here](../../reference/etcd-rbac/calico-etcdv3-paths.mdx#openstack-calico-dhcp-agent) |
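-
-For example, on a compute node you might provide Felix's credentials through its environment (a sketch; the user name is illustrative, and how the environment is set depends on how Felix is run, such as a systemd unit or EnvironmentFile):
-
-```bash
-export CALICO_ETCD_USERNAME=calico
-export CALICO_ETCD_PASSWORD='<password>'
-```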
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_ports.mdx b/calico_versioned_docs/version-3.25/_includes/content/_ports.mdx
deleted file mode 100644
index 82f6bc3365..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_ports.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
-{{prodname}} supports the following syntaxes for expressing ports.
-
-| Syntax | Example | Description |
-| --------- | ---------- | ------------------------------------------------------------------- |
-| int | 80 | The exact (numeric) port specified |
-| start:end | 6040:6050 | All (numeric) ports within the range start ≤ x ≤ end |
-| string | named-port | A named port, as defined in the ports list of one or more endpoints |
-
-An individual numeric port may be specified as a YAML/JSON integer. A port range or
-named port must be represented as a string. For example, this would be a valid list of ports:
-
-```yaml
-ports: [8080, '1234:5678', 'named-port']
-```
-
-#### Named ports
-
-Using a named port in an `EntityRule`, instead of a numeric port, gives a layer of indirection,
-allowing for the named port to map to different numeric values for each endpoint.
-
-For example, suppose you have multiple HTTP servers running as workloads; some exposing their HTTP
-port on port 80 and others on port 8080. In each workload, you could create a named port called
-`http-port` that maps to the correct local port. Then, in a rule, you could refer to the name
-`http-port` instead of writing a different rule for each type of server.
-
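-A sketch of a rule destination that uses such a named port (the policy name and selector are hypothetical):
-
-```bash
-calicoctl apply -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-http-named-port
-spec:
-  selector: role == 'webserver'
-  ingress:
-    - action: Allow
-      protocol: TCP
-      destination:
-        ports: ['http-port']
-EOF
-```
-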
-:::note
-
-Since each named port may refer to many endpoints (and {{prodname}} has to expand a named port into
-a set of endpoint/port combinations), using a named port is considerably more expensive in terms
-of CPU than using a simple numeric port. We recommend that they are used sparingly, only where
-the extra indirection is required.
-
-:::
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_rule.mdx b/calico_versioned_docs/version-3.25/_includes/content/_rule.mdx
deleted file mode 100644
index fdbdc317fb..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_rule.mdx
+++ /dev/null
@@ -1,46 +0,0 @@
-A single rule matches a set of packets and applies some action to them. When multiple rules are specified, they
-are executed in order.
-
-| Field | Description | Accepted Values | Schema | Default |
-| ----------- | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------ | ----------------------------- | ------- |
-| metadata | Per-rule metadata. | | [RuleMetadata](#rulemetadata) | |
-| action | Action to perform when matching this rule. | `Allow`, `Deny`, `Log`, `Pass` | string | |
-| protocol | Positive protocol match. | `TCP`, `UDP`, `ICMP`, `ICMPv6`, `SCTP`, `UDPLite`, `1`-`255` | string \| integer | |
-| notProtocol | Negative protocol match. | `TCP`, `UDP`, `ICMP`, `ICMPv6`, `SCTP`, `UDPLite`, `1`-`255` | string \| integer | |
-| icmp | ICMP match criteria. | | [ICMP](#icmp) | |
-| notICMP | Negative match on ICMP. | | [ICMP](#icmp) | |
-| ipVersion | Positive IP version match. | `4`, `6` | integer | |
-| source | Source match parameters. | | [EntityRule](#entityrule) | |
-| destination | Destination match parameters. | | [EntityRule](#entityrule) | |
-| http | Match HTTP request parameters. Application layer policy must be enabled to use this field. | | [HTTPMatch](#httpmatch) | |
-
-After a `Log` action, processing continues with the next rule; `Allow` and `Deny` are immediate
-and final and no further rules are processed.
-
-An `action` of `Pass` in a `NetworkPolicy` or `GlobalNetworkPolicy` will skip over the remaining policies and jump to the
-first [profile](../../reference/resources/profile.mdx) assigned to the endpoint, applying the policy configured in the
-profile; if there are no Profiles configured for the endpoint the default applied action is `Deny`.
-
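-To illustrate the ordering behavior above, a hypothetical policy whose first rule logs a packet and whose second rule passes it on to the endpoint's profiles (the name, order, and selector are illustrative):
-
-```bash
-calicoctl apply -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: log-then-pass
-spec:
-  order: 1000
-  selector: all()
-  types:
-    - Ingress
-  ingress:
-    - action: Log
-    - action: Pass
-EOF
-```
-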
-### RuleMetadata
-
-Metadata associated with a specific rule (rather than the policy as a whole). The contents of the metadata do not affect how a rule is interpreted or enforced; it is
-simply a way to store additional information for use by operators or applications that interact with {{prodname}}.
-
-| Field | Description | Schema | Default |
-| ----------- | ----------------------------------- | ----------------------- | ------- |
-| annotations | Arbitrary non-identifying metadata. | map of string to string | |
-
-Example:
-
-```yaml
-metadata:
- annotations:
- app: database
- owner: devops
-```
-
-Annotations follow the
-[same rules as Kubernetes for valid syntax and character set](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set).
-
-On Linux with the iptables dataplane, rule annotations are rendered as comments in the form `-m comment --comment "<key>=<value>"` on the iptables rule(s) that correspond
-to the {{prodname}} rule.
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_selector-scopes.mdx b/calico_versioned_docs/version-3.25/_includes/content/_selector-scopes.mdx
deleted file mode 100644
index 9d9fbc8c54..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_selector-scopes.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
-Understanding scopes and the `all()` and `global()` operators: selectors have a scope of resources
-that they are matched against, which depends on the context in which they are used. For example:
-
-- The `nodeSelector` in an `IPPool` selects over `Node` resources.
-
-- The top-level selector in a `NetworkPolicy` selects over the workloads _in the same namespace_ as the
- `NetworkPolicy`.
-- The top-level selector in a `GlobalNetworkPolicy` doesn't have the same restriction, it selects over all endpoints
- including namespaced `WorkloadEndpoint`s and non-namespaced `HostEndpoint`s.
-
-- The `namespaceSelector` in a `NetworkPolicy` (or `GlobalNetworkPolicy`) _rule_ selects over the labels on namespaces
- rather than workloads.
-
-- The `namespaceSelector` determines the scope of the accompanying `selector` in the entity rule. If no `namespaceSelector`
- is present then the rule's `selector` matches the default scope for that type of policy. (This is the same namespace
- for `NetworkPolicy` and all endpoints/network sets for `GlobalNetworkPolicy`)
-- The `global()` operator can be used (only) in a `namespaceSelector` to change the scope of the main `selector` to
- include non-namespaced resources such as [GlobalNetworkSet](../../reference/resources/globalnetworkset.mdx).
- This allows namespaced `NetworkPolicy` resources to refer to global non-namespaced resources, which would otherwise
- be impossible.
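-
-For example, a sketch of a namespaced `NetworkPolicy` that allows egress to a `GlobalNetworkSet` by using `global()` (the names and labels are hypothetical):
-
-```bash
-calicoctl apply -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: allow-egress-to-partners
-  namespace: default
-spec:
-  selector: role == 'api'
-  egress:
-    - action: Allow
-      destination:
-        namespaceSelector: global()
-        selector: partner == 'true'
-EOF
-```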
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_selectors.mdx b/calico_versioned_docs/version-3.25/_includes/content/_selectors.mdx
deleted file mode 100644
index fca343f628..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_selectors.mdx
+++ /dev/null
@@ -1,50 +0,0 @@
-A label selector is an expression which either matches or does not match a resource based on its labels.
-
-{{prodname}} label selectors support a number of operators, which can be combined into larger expressions
-using the boolean operators and parentheses.
-
-| Expression | Meaning |
-| ------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| **Logical operators** |
-| `( <expression> )` | Matches if and only if `<expression>` matches. (Parentheses are used for grouping expressions.) |
-| `! <expression>` | Matches if and only if `<expression>` does not match. **Tip:** `!` is a special character at the start of a YAML string; if you need to start a YAML string with `!`, enclose the string in quotes. |
-| `<expression 1> && <expression 2>` | "And": matches if and only if both `<expression 1>` and `<expression 2>` match. |
-| `<expression 1> \|\| <expression 2>` | "Or": matches if and only if either `<expression 1>` or `<expression 2>` matches. |
-| **Match operators** |
-| `all()` | Match all in-scope resources. To match _no_ resources, combine this operator with `!` to form `!all()`. |
-| `global()` | Match all non-namespaced resources. Useful in a `namespaceSelector` to select global resources such as global network sets. |
-| `k == 'v'` | Matches resources with the label 'k' and value 'v'. |
-| `k != 'v'` | Matches resources without label 'k' or with label 'k' and value _not_ equal to `v` |
-| `has(k)` | Matches resources with label 'k', independent of value. To match pods that do not have label `k`, combine this operator with `!` to form `!has(k)` |
-| `k in { 'v1', 'v2' }` | Matches resources with label 'k' and value in the given set |
-| `k not in { 'v1', 'v2' }` | Matches resources without label 'k' or with label 'k' and value _not_ in the given set |
-| `k contains 's'` | Matches resources with label 'k' and value containing the substring 's' |
-| `k starts with 's'` | Matches resources with label 'k' and value starting with the substring 's' |
-| `k ends with 's'` | Matches resources with label 'k' and value ending with the substring 's' |
-
-Operators have the following precedence:
-
-- **Highest**: all the match operators
-- Parentheses `( ... )`
-- Negation with `!`
-- Conjunction with `&&`
-- **Lowest**: Disjunction with `||`
-
-For example, the expression
-
-```
-! has(my-label) || my-label starts with 'prod' && role in {'frontend','business'}
-```
-
-Would be "bracketed" like this:
-
-```
-(!(has(my-label))) || ((my-label starts with 'prod') && (role in {'frontend','business'}))
-```
-
-It would match:
-
-- Any resource that did not have label "my-label".
-- Any resource that both:
- - Has a value for `my-label` that starts with "prod", and,
- - Has a role label with value either "frontend", or "business".
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_serviceaccountmatch.mdx b/calico_versioned_docs/version-3.25/_includes/content/_serviceaccountmatch.mdx
deleted file mode 100644
index 66cd92ee7c..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_serviceaccountmatch.mdx
+++ /dev/null
@@ -1,6 +0,0 @@
-A ServiceAccountMatch matches service accounts in an EntityRule.
-
-| Field | Description | Schema |
-| -------- | ------------------------------- | ---------------------- |
-| names | Match service accounts by name | list of strings |
-| selector | Match service accounts by label | [selector](#selectors) |
diff --git a/calico_versioned_docs/version-3.25/_includes/content/_servicematch.mdx b/calico_versioned_docs/version-3.25/_includes/content/_servicematch.mdx
deleted file mode 100644
index 2d47fed02c..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/content/_servicematch.mdx
+++ /dev/null
@@ -1,6 +0,0 @@
-A ServiceMatch matches a service in an EntityRule.
-
-| Field | Description | Schema |
-| --------- | ------------------------ | ------ |
-| name | The service's name. | string |
-| namespace | The service's namespace. | string |
diff --git a/calico_versioned_docs/version-3.25/_includes/release-notes/.gitkeep b/calico_versioned_docs/version-3.25/_includes/release-notes/.gitkeep
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.0-release-notes.mdx b/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.0-release-notes.mdx
deleted file mode 100644
index b35cb7284c..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.0-release-notes.mdx
+++ /dev/null
@@ -1,140 +0,0 @@
-09 Jan 2023
-
-### eBPF Dataplane Stability: Connect Time Load Balancing (CTLB)
-
-In certain scenarios, Calico would not properly update rapidly changing pods and IPs. We have made
-substantial changes to the eBPF dataplane to ensure that connect time load balancing works
-in larger, rapidly changing environments.
-
-Pull Requests:
-
-- ebpf: ipv4 and ipv6 code separated into different object files so the v6 code is never loaded outside tests. [calico #7093](https://github.com/projectcalico/calico/pull/7093) (@tomastigera)
-- ebpf: CTLB resolves service when ipv4 is masked as ipv6. Commonly happens with grpc. [calico #7087](https://github.com/projectcalico/calico/pull/7087) (@tomastigera)
-- ebpf: we can apply the CTLB-turned-off workaround just to UDP [calico #6783](https://github.com/projectcalico/calico/pull/6783) (@tomastigera)
-- ebpf: host can access services without CTLB - gated feature [calico #6527](https://github.com/projectcalico/calico/pull/6527) (@tomastigera)
-
-### Improvements for clusters running at high scale
-
-This release includes several enhancements to Typha, Calico’s caching API server proxy, targeting high-scale clusters.
-Typha acts as a proxy for the components within calico-node (such as Felix) when they watch resources in the
-Kubernetes API server. This both reduces load on the API server and (by filtering out updates that Calico doesn't care about)
-reduces load in calico-node.
-
-- Typha now supports graceful shutdown. Rather than disconnecting all clients and shutting down immediately, it will
- observe the `terminationGracePeriodSeconds` (which can now be set via the operator’s `Installation` resource). Typha
- will disconnect clients gradually over the graceful shutdown window. This reduces disruption by avoiding restarting
- many calico-node components at once.
-
- Since this is the first release with this feature, the benefit will only be seen when doing an upgrade _from_ this release.
-
-- Typha now supports compression on its protocol; this gives a 5:1 reduction in bandwidth use and snapshot data size.
- Compression is automatically enabled when a supporting client (i.e. clients from this release onward) connects.
-- Typha now shares computed (and compressed) snapshots between clients that connect at approximately the same time.
- This significantly reduces CPU usage and the time to service all clients when many clients connect at once. In a
- cluster with 150k Pods, generating a snapshot can take 2-3s of CPU time so, if 100 clients connect at once, there
- can be a 200-300 second saving in CPU used, and a corresponding increase in throughput. Typha has prometheus metrics
- to monitor the size of snapshots (typha_snapshot_raw_bytes / typha_snapshot_compressed_bytes) and the number of
- snapshots that are reused for more than one client (typha_snapshots_reused).
-- Typha now exports a Prometheus metric (typha_cache_size) for the size of its internal cache.
-- Typha's Prometheus metrics have been improved and split by client type. Previously the metrics would mix
- "high traffic" clients (such as Felix), with "low traffic" clients, making the metrics much less useful.
-
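-If Typha's Prometheus endpoint is enabled, a quick way to eyeball the new snapshot metrics (the 9093 port and the calico-system namespace are assumptions; adjust to your metrics configuration):
-
-```bash
-kubectl port-forward -n calico-system deployment/calico-typha 9093:9093 &
-curl -s localhost:9093/metrics | grep -E 'typha_snapshot|typha_cache_size'
-```
-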
-### Bug fixes
-
-#### General
-
-- Fix incorrect cleanup in the service policy index after having both ingress and egress rules that reference the same service, resulting in missed IP set updates after one rule was deactivated. [calico #7148](https://github.com/projectcalico/calico/pull/7148) (@fasaxc)
-- Fix panic in calico-node when invalid spoofed IP range provided on a pod. [calico #7076](https://github.com/projectcalico/calico/pull/7076) (@caseydavenport)
-- fixed felix docs for bpf config options [calico #7065](https://github.com/projectcalico/calico/pull/7065) (@tomastigera)
-- Fix missing nsswitch files in Typha causing localhost lookup fails [calico #6971](https://github.com/projectcalico/calico/pull/6971) (@wdoekes)
-- Fix that Calico would try to use the IPV6 VXLAN or Wireguard tunnel devices for its BGP connections. [calico #6929](https://github.com/projectcalico/calico/pull/6929) (@coutinhop)
-- Fix that Calico would try to use the VXLAN tunnel device for its BGP connections. [calico #6902](https://github.com/projectcalico/calico/pull/6902) (@caseydavenport)
-- Add missing Auto option for IptablesBackend FelixConfiguration field [calico #6871](https://github.com/projectcalico/calico/pull/6871) (@huiyizzz)
-- Fix an issue that caused annotations and labels to be overwritten during a calicoctl patch command [calico #6791](https://github.com/projectcalico/calico/pull/6791) (@mgleung)
-- Fixed SyncLabels validation for Kubernetes datastore. [calico #6786](https://github.com/projectcalico/calico/pull/6786) (@huiyizzz)
-- Fix issues with OCP installs using the wrong operator manifest. [calico #6724](https://github.com/projectcalico/calico/pull/6724) (@mgleung)
-- Fix bug in IPv6 router ID calculation on IPv6 single-stack clusters that resulted in invalid router IDs being calculated. Note that this change will result in new router IDs being used for some IPv6 single-stack nodes. [calico #6674](https://github.com/projectcalico/calico/pull/6674) (@ramanujadasu)
-- Fix that `calicoctl ipam release` could only release IPAM handles when running in etcd mode. [calico #6650](https://github.com/projectcalico/calico/pull/6650) (@fasaxc)
-- Fix issue in L3RouteResolver CIDRTrie which could result in crashes when the IPv6 trie had a node with a /63 prefix. [calico #6532](https://github.com/projectcalico/calico/pull/6532) (@coutinhop)
-- Fix nil error logged from kube-controllers health reporter [calico #6513](https://github.com/projectcalico/calico/pull/6513) (@caseydavenport)
-- Fix that kube-controllers health checks didn't include a timeout on HTTP calls [calico #6513](https://github.com/projectcalico/calico/pull/6513) (@caseydavenport)
-- Set IPIPMode and VXLANMode to the default "Never" if they are empty strings in IPPools. [calico #6498](https://github.com/projectcalico/calico/pull/6498) (@coutinhop)
-- Fix that single-IP entries on BGPConfiguration LoadBalancerIPs were not advertised according to external traffic policy. [calico #6282](https://github.com/projectcalico/calico/pull/6282) (@mtryfoss)
-- fix: ErrorActionPreference must continue for kubectl commands Issue #6127 [calico #6257](https://github.com/projectcalico/calico/pull/6257) (@chrisjohnson00)
-
-#### eBPF
-
-- ebpf: fix error setting accept_local - device may get stuck dirty [calico #7071](https://github.com/projectcalico/calico/pull/7071) (@tomastigera)
-- ebpf: no src fixup on host iface for traffic returning from pod to the nodeport tunnel [calico #7039](https://github.com/projectcalico/calico/pull/7039) (@tomastigera)
-- ebpf: XDP (notrack) policy debug output is removed/cleaned up when XDP program is removed (fix) [calico #6994](https://github.com/projectcalico/calico/pull/6994) (@tomastigera)
-- ebpf: fixes ifstate leak when devices go down [calico #6946](https://github.com/projectcalico/calico/pull/6946) (@tomastigera)
-
-#### Windows
-
-- Fixed issue when Calico Windows hostprocess installation would fail to clean up a previous manual install of Calico Windows. [calico #6952](https://github.com/projectcalico/calico/pull/6952) (@coutinhop)
-- Fix issues with the windows node names in GCE [calico #6470](https://github.com/projectcalico/calico/pull/6470) (@lmm)
-
-#### Wireguard
-
-- Limit rate of logging 'Wireguard is not supported' to fix log spam issues. [calico #6534](https://github.com/projectcalico/calico/pull/6534) (@coutinhop)
-
-### Other changes
-
-#### General
-
-- Felix now supports overriding the timeouts of its internal readiness/liveness watchdog. This is useful for dealing with issues "in prod" without needing a new release. The timeouts have also been tuned to reduce false positives. [calico #7061](https://github.com/projectcalico/calico/pull/7061) (@fasaxc)
-- Typha now shares snapshots between clients that connect at roughly the same time. This dramatically reduces load when many clients connect at once. [calico #7047](https://github.com/projectcalico/calico/pull/7047) (@fasaxc)
-- By default, skip bridge interface created by `docker network create` command in IP auto-detection [calico #7045](https://github.com/projectcalico/calico/pull/7045) (@masap)
-- The Typha protocol now supports compression. This is enabled automatically if client and server both support it. [calico #7043](https://github.com/projectcalico/calico/pull/7043) (@fasaxc)
-- Add ignorable interfaces via the BGPConfiguration API [calico #7006](https://github.com/projectcalico/calico/pull/7006) (@huiyizzz)
-- Typha now supports graceful shut down, disconnecting calico-node pods at a configured rate instead of all at once. [calico #6973](https://github.com/projectcalico/calico/pull/6973) (@fasaxc)
-- Update installation documentation for AWS to include information regarding and links for CSI driver installation [calico #6967](https://github.com/projectcalico/calico/pull/6967) (@Josh-Tigera)
-- Update golang from 1.18.7 to 1.18.8 to avoid CVEs. [calico #6961](https://github.com/projectcalico/calico/pull/6961) (@Behnam-Shobiri)
-- By default, skip 'podman' interface in IP auto-detection [calico #6950](https://github.com/projectcalico/calico/pull/6950) (@OrvilleQ)
-- By default, skip 'nodelocaldns' interface in IP auto-detection [calico #6942](https://github.com/projectcalico/calico/pull/6942) (@cyclinder)
-- ebpf: faster program loading for workload endpoint - unused programs not loaded. [calico #6933](https://github.com/projectcalico/calico/pull/6933) (@tomastigera)
-- Remove problematic terminology from the codebase. [calico #6912](https://github.com/projectcalico/calico/pull/6912) (@fasaxc)
-- Update Istio support to include Istio v1.15.2 [calico #6890](https://github.com/projectcalico/calico/pull/6890) (@frozenprocess)
-- Add generalized TTL security mechanism (GTSM) via BGPPeer API [calico #6862](https://github.com/projectcalico/calico/pull/6862) (@Josh-Tigera)
-- Retain OpenSSL FIPS dependent files in calico-node image. [calico #6852](https://github.com/projectcalico/calico/pull/6852) (@hjiawei)
-- Disable VXLAN checksum offload by default for all kernels. If this was fixed, it has since regressed. [calico #6842](https://github.com/projectcalico/calico/pull/6842) (@fasaxc)
-- Improve formatting of logged-out health reports from components such as Felix. [calico #6833](https://github.com/projectcalico/calico/pull/6833) (@fasaxc)
-- Update golang to 1.18.7 to avoid new CVEs. [calico #6824](https://github.com/projectcalico/calico/pull/6824) (@Behnam-Shobiri)
-- Updated documentation list of images to pull for deploying from private registry (now includes node-driver-registrar) [calico #6812](https://github.com/projectcalico/calico/pull/6812) (@Josh-Tigera)
-- Match full interface names in IP auto-detection default exclude list. [calico #6760](https://github.com/projectcalico/calico/pull/6760) (@neoaggelos)
-- Update multiple golang dependencies. [calico #6719](https://github.com/projectcalico/calico/pull/6719) (@Behnam-Shobiri)
-- Update the go version used to build the binaries from 1.18.5 to 1.18.6 [calico #6717](https://github.com/projectcalico/calico/pull/6717) (@Behnam-Shobiri)
-- Calico now uses a faster JSON parsing library; this reduces CPU load and improves start-up latency. [calico #6705](https://github.com/projectcalico/calico/pull/6705) (@fasaxc)
-- Reduce parsing overhead when parsing key/value pairs from Typha. [calico #6703](https://github.com/projectcalico/calico/pull/6703) (@fasaxc)
-- Many of Typha's Prometheus metrics are now split by syncer (client) type, represented by a label "syncer" on the metrics. This prevents cross-talk where the syncers would all share the same metrics and the last writer to the metric would "win". [calico #6675](https://github.com/projectcalico/calico/pull/6675) (@fasaxc)
-- The vxlanEnabled attribute from FelixConfiguration is now ignored for IPv6 VXLAN pools, allowing VXLAN to have IPv4 enabled independently from IPv6. [calico #6671](https://github.com/projectcalico/calico/pull/6671) (@muff1nman)
-- Typha now uses a B-tree for its internal cache, which allows it to export a Prometheus metric, typha_snapshot_size, that gives the total size of its current snapshot of the Calico datastore. [calico #6666](https://github.com/projectcalico/calico/pull/6666) (@fasaxc)
-- Use exponential backoff for kube-controllers health check timeout, retry sooner if failed. [calico #6610](https://github.com/projectcalico/calico/pull/6610) (@caseydavenport)
-- Bump K8S_VERSION and KUBECTL_VERSION to v1.24.3 in metadata.mk [calico #6606](https://github.com/projectcalico/calico/pull/6606) (@coutinhop)
-- Update Installation CRD to include new CSI changes introduced by recent operator API changes. [calico #6596](https://github.com/projectcalico/calico/pull/6596) (@Josh-Tigera)
-- Helm: imagePullSecrets now also applied to tigera-operator serviceaccount [calico #6591](https://github.com/projectcalico/calico/pull/6591) (@tamcore)
-- Retry kube-controllers initialization on failure [calico #6566](https://github.com/projectcalico/calico/pull/6566) (@tmjd)
-- Update the base images to alpine 3.16 for the flexvolume and CSI driver [calico #6559](https://github.com/projectcalico/calico/pull/6559) (@mgleung)
-- Windows quickstart install script creates calico service account token secret if missing [calico #6464](https://github.com/projectcalico/calico/pull/6464) (@lmm)
-- Update the dependencies to avoid indirect vulnerability (CVE) detections from scanners. [calico #6452](https://github.com/projectcalico/calico/pull/6452) (@Behnam-Shobiri)
-- Added FeatureGates to Felix [calico #6381](https://github.com/projectcalico/calico/pull/6381) (@tomastigera)
-- eBPF: Add BPF counters to XDP programs, and also load XDP programs using Libbpf instead of iproute2. [calico #6371](https://github.com/projectcalico/calico/pull/6371) (@mazdakn)
-- The arm64 image of calico-kube-controllers now runs as non-root by default (similar to the amd64 image). [calico #6346](https://github.com/projectcalico/calico/pull/6346) (@ialidzhikov)
-
-#### eBPF
-
-- ebpf: Include enPxxxxxx in the default BPFDataIfacePattern [calico #7077](https://github.com/projectcalico/calico/pull/7077) (@TrevorTaoARM)
-- ebpf: cleanup previously attached programs when BPFDataIfacePattern changes. [calico #7008](https://github.com/projectcalico/calico/pull/7008) (@tomastigera)
-- ebpf: BPFDisableLinuxConntrack added to the FelixConfiguration resource. [calico #6641](https://github.com/projectcalico/calico/pull/6641) (@mazdakn)
-- ebpf: New Felix config bpfL3IfacePattern allows specifying non-Calico L3 devices such as wireguard and vxlan. [calico #6612](https://github.com/projectcalico/calico/pull/6612) (@sridhartigera)
-
-#### Windows
-
-- Update Windows NSSM version [calico #6861](https://github.com/projectcalico/calico/pull/6861) (@song-jiang)
-- windows: ensure calico-managed kubelet starts after the calico network has been initialized [calico #6656](https://github.com/projectcalico/calico/pull/6656) (@vitaliy-leschenko)
-
-#### OpenStack
-
-- Calico for OpenStack: remove iptables programming by the DHCP agent that is no longer needed and that was increasing the need for Felix to resync Calico's iptables programming. Existing users will see issues (for example, a VM failing to learn its IP address at boot time) if their VM OS is old enough to have unfixed DHCP client software. In that case the remedy is to update the VM OS; for example, in Tigera's own testing, we updated from CirrOS 0.3.4 to CirrOS 0.6.0. [calico #6857](https://github.com/projectcalico/calico/pull/6857) (@tj90241)
-- Calico for OpenStack: prime the project (aka tenant) data cache on Neutron server startup [calico #6839](https://github.com/projectcalico/calico/pull/6839) (@tj90241)
-- Allow Calico to set MTU in OpenStack [calico #6725](https://github.com/projectcalico/calico/pull/6725) (@nelljerram)
diff --git a/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.1-release-notes.mdx b/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.1-release-notes.mdx
deleted file mode 100644
index 6d2341fc0d..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.1-release-notes.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
-31 Mar 2023
-
-### Bug fixes
-
- - Prevents Node kube-controller's internal pod cache from getting out-of-sync thus leaking memory. [calico #7503](https://github.com/projectcalico/calico/pull/7503) (@dilyevsky)
- - Fix a panic in BPF mode when iterating over a per-CPU map with Debug enabled. [calico #7379](https://github.com/projectcalico/calico/pull/7379) (@fasaxc)
- - Fix that the tunnel IP allocator did not respond to changes in the IP pool's allowedUses field. [calico #7360](https://github.com/projectcalico/calico/pull/7360) (@fasaxc)
- - s390x: fix image mislabel in cni, typha and kube-controllers [calico #7315](https://github.com/projectcalico/calico/pull/7315) (@huoqifeng)
- - Fix generation of `operator-crds.yaml` manifest. [calico #7217](https://github.com/projectcalico/calico/pull/7217) (@caseydavenport)
-
-### Other changes
-
- - ebpf: Jumpmap version incremented to prevent failures when upgrading from earlier calico versions [calico #7487](https://github.com/projectcalico/calico/pull/7487) (@tomastigera)
- - Performance: on kernel 4.10+, use kernel-side route filtering when listing routes. Dramatically reduces CPU usage (and garbage collection) on systems with many interfaces and/or routes. [calico #7381](https://github.com/projectcalico/calico/pull/7381) (@fasaxc)
- - ocp.tgz now hosted on GitHub [calico #7214](https://github.com/projectcalico/calico/pull/7214) (@caseydavenport)
- - Enable s390x architecture support in 3.25 [calico #7210](https://github.com/projectcalico/calico/pull/7210) (@huoqifeng)
diff --git a/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.2-release-notes.mdx b/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.2-release-notes.mdx
deleted file mode 100644
index 2269ba29a1..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.2-release-notes.mdx
+++ /dev/null
@@ -1,24 +0,0 @@
-6 September 2023
-
-#### Bug fixes
-
- - Typha: move TLS handshake to per-connection goroutine to prevent main goroutine from stalling on an unclean handshake. [calico #7976](https://github.com/projectcalico/calico/pull/7976) (@fasaxc)
- - Fix panic when running 'calicoctl get nodes' when ASNumber was not present in the default BGPConfiguration. [calico #7861](https://github.com/projectcalico/calico/pull/7861) (@coutinhop)
- - ebpf: fixes felix panic upon restart in debug mode when there are existing policy counters [calico #7798](https://github.com/projectcalico/calico/pull/7798) (@tomastigera)
- - ebpf: fix applyOnforward=false in global policies [calico #7725](https://github.com/projectcalico/calico/pull/7725) (@tomastigera)
- - Update pin to use fixed calico/bird image to fix node ST failures. [calico #7563](https://github.com/projectcalico/calico/pull/7563) (@coutinhop)
- - Prevents Node kube-controller's internal pod cache from getting out-of-sync thus leaking memory. [calico #7503](https://github.com/projectcalico/calico/pull/7503) (@dilyevsky)
- - Fix the auto iptables detection if ip_tables.ko preloaded on RHEL/CentOS 8 [calico #7460](https://github.com/projectcalico/calico/pull/7460) (@yankay)
-
-#### Other changes
-
- - Update Calico VPP to v3.25.1 [calico #7535](https://github.com/projectcalico/calico/pull/7535) (@sknat)
- - Remove usage of deprecated '--logtostderr' command line flag. [calico #7515](https://github.com/projectcalico/calico/pull/7515) (@coutinhop)
-
-#### Known issues
-
-- Calico panics if kube-proxy or other components are using native `nftables` rules instead of the `iptables-nft` compatibility shim.
-
- Until Calico supports native nftables mode, we recommend that you continue to use the iptables-nft compatibility layer for all components. (The compatibility layer was the only option before Kubernetes v1.29 added alpha-level `nftables` support.)
-
- Do not run Calico in "legacy" iptables mode on a system that is also using `nftables`. Although this combination does not panic or fail (at least on kernels that support both), the interaction between `iptables` "legacy" mode and `nftables` is confusing: both `iptables` and `nftables` rules can be executed on the same packet, leading to policy verdicts being "overturned". Note that this issue applies to all previous versions of {{prodname}}.
\ No newline at end of file
diff --git a/calico_versioned_docs/version-3.25/about/about-ebpf.mdx b/calico_versioned_docs/version-3.25/about/about-ebpf.mdx
deleted file mode 100644
index e764291b4b..0000000000
--- a/calico_versioned_docs/version-3.25/about/about-ebpf.mdx
+++ /dev/null
@@ -1,163 +0,0 @@
----
-description: Learn about eBPF!
----
-
-# About eBPF
-
-
-
-:::note
-
-This guide provides optional background education, including
-education that is not specific to {{prodname}}.
-
-:::
-
-eBPF is a Linux kernel feature that allows fast yet safe mini-programs to be loaded into the kernel to
-customise its operation.
-
-In this guide you will learn:
-
-- General background on eBPF.
-- Various uses of eBPF.
-- How {{prodname}} uses eBPF in the eBPF dataplane.
-
-## What is eBPF?
-
-eBPF is a virtual machine embedded within the Linux kernel. It allows small programs to be loaded into the kernel,
-and attached to hooks, which are triggered when some event occurs. This allows the behaviour of the kernel to be
-(sometimes heavily) customised. While the eBPF virtual machine is the same for each type of hook, the capabilities
-of the hooks vary considerably. Since loading programs into the kernel could be dangerous, the kernel runs all
-programs through a very strict static verifier. The verifier sandboxes the program, ensuring that it can only access
-allowed parts of memory and that it terminates quickly.
-
-## Why is it called eBPF?
-
-eBPF stands for "extended Berkeley Packet Filter". The Berkeley Packet Filter was an earlier, more specialised
-virtual machine that was tailored for filtering packets. Tools such as `tcpdump` use this "classic" BPF VM to select
-packets that should be sent to userspace for analysis. eBPF is a considerably extended version of BPF that
-is suitable for general purpose use inside the kernel. While the name has stuck, eBPF can be used for a lot more
-than just packet filtering.
-
-## What can eBPF do?
-
-### Types of eBPF program
-
-There are several classes of hooks to which eBPF programs can be attached within the kernel. The capabilities of an
-eBPF program depend hugely on the hook to which it is attached:
-
-- **Tracing** programs can be attached to a significant proportion of the functions in the kernel. Tracing
- programs are useful for collecting statistics and deep-dive debugging of the kernel. _Most_ tracing hooks only allow
- read-only access to the data that the function is processing but there are some that allow data to be modified.
- The {{prodname}} team use tracing programs to help debug {{prodname}} during development; for example,
- to figure out why the kernel unexpectedly dropped a packet.
-
-- **Traffic Control** (`tc`) programs can be attached at ingress and egress to a given network device. The kernel
- executes the programs once for each packet. Since the hooks are for packet processing, the kernel allows
- the programs to modify or extend the packet, drop the packet, mark it for queueing, or redirect the packet to
- another interface. {{prodname}}'s eBPF dataplane is based on this type of hook; we use tc programs to load
- balance Kubernetes services, to implement network policy, and to create a fast-path for traffic on established
- connections.
-
-- **XDP**, or "eXpress Data Path", is actually the name of an eBPF hook. Each network device has an XDP ingress hook
- that is triggered once for each incoming packet before the kernel allocates a socket buffer for the packet. XDP
- can give outstanding performance for use cases such as DoS protection (as supported in {{prodname}}'s standard Linux
- dataplane) and ingress load balancing (as used in facebook's Katran). The downside of XDP is that it requires
- network device driver support to get good performance. XDP isn't sufficient on its own to implement all of the logic
- needed for Kubernetes pod networking, but a combination of XDP and Traffic Control hooks works well.
-
-- Several types of **socket** programs hook into various operations on sockets, allowing the eBPF program to, for
- example, change the destination IP of a newly-created socket, or force a socket to bind to the "correct" source
- IP address. {{prodname}} uses such programs to do connect-time load balancing of Kubernetes Services; this
- reduces overhead because there is no [DNAT](about-networking.mdx#NAT) on the packet processing path.
-
-- There are various security-related hooks that allow program behaviour to be policed. For
- example, the **seccomp** hooks allow syscalls to be policed in fine-grained ways.
-
-- And... probably a few more hooks by the time you read this; eBPF is under heavy development in the kernel.
-
-The kernel exposes the capabilities of each hook via "helper functions". For example, the `tc` hook has a helper
-function to resize the packet, but that helper would not be available in a tracing hook. One of the challenges of
-working with eBPF is that different kernel versions support different helpers and lack of a helper can make it
-impossible to implement a particular feature.
-
-### BPF maps
-
-Programs attached to eBPF hooks are able to access BPF "maps". BPF maps have two main uses:
-
-- They allow BPF programs to store and retrieve long-lived data.
-
-- They allow communication between BPF programs and user-space programs. BPF programs can read data that was written
- by userspace and vice versa.
-
-There are many types of BPF maps, including some special types that allow jumping between programs, and some that act
-as queues and stacks rather than strictly as key/value maps. {{prodname}} uses maps to keep track of active
-connections and to configure the BPF programs with policy and service NAT information. Since map accesses can be
-relatively expensive, {{prodname}} aims to do only a single map lookup for each packet on an established flow.
-
-The contents of BPF maps can be inspected using the command-line tool `bpftool`, which is provided with the kernel.
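-
-For example, on a node you can list the loaded maps and dump one of them (the map ID shown here is illustrative; IDs vary per node):
-
-```bash
-# List all BPF maps currently loaded on this node (run as root on the node)
-bpftool map list
-
-# Dump the contents of one map, using an ID taken from the list above
-bpftool map dump id 42
-```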
-
-## {{prodname}}'s eBPF dataplane
-
-{{prodname}}'s eBPF dataplane is an alternative to our standard Linux dataplane (which is iptables based).
-While the standard dataplane focuses on compatibility by inter-working with kube-proxy and your own iptables rules,
-the eBPF dataplane focuses on performance, latency, and improving user experience with features that aren't possible
-in the standard dataplane. As part of that, the eBPF dataplane replaces kube-proxy with an eBPF implementation.
-The main "user experience" feature is to preserve the source IP of traffic from outside the cluster when traffic hits a
-NodePort; this makes your server-side logs and network policy much more useful on that path.
-
-### Feature comparison
-
-While the eBPF dataplane has some features that the standard Linux dataplane lacks, the reverse is also true:
-
-| Factor | Standard Linux Dataplane | eBPF dataplane |
-| ------------------------------------ | ----------------------------------------------------- | -------------------------------------------------------------- |
-| Throughput | Designed for 10GBit+ | Designed for 40GBit+ |
-| First packet latency | Low (kube-proxy service latency is bigger factor) | Lower |
-| Subsequent packet latency | Low | Lower |
-| Preserves source IP within cluster | Yes | Yes |
-| Preserves external source IP | Only with `externalTrafficPolicy: Local` | Yes |
-| Direct Server Return | Not supported | Supported (requires compatible underlying network) |
-| Connection tracking | Linux kernel's conntrack table (size can be adjusted) | BPF map (fixed size) |
-| Policy rules | Mapped to iptables rules | Mapped to BPF instructions |
-| Policy selectors | Mapped to IP sets | Mapped to BPF maps |
-| Kubernetes services | kube-proxy iptables or IPVS mode | BPF program and maps |
-| IPIP | Supported | Supported (no performance advantage due to kernel limitations) |
-| VXLAN | Supported | Supported |
-| Wireguard | Supported (IPv4 and IPv6) | Supported (IPv4) |
-| Other routing | Supported | Supported |
-| Supports third party CNI plugins | Yes (compatible plugins only) | Yes (compatible plugins only) |
-| Compatible with other iptables rules | Yes (can write rules above or below other rules) | Partial; iptables bypassed for workload traffic |
-| Host endpoint policy | Supported | Supported |
-| Enterprise version | Available | Available |
-| XDP DoS Protection | Supported | Supported |
-| IPv6 | Supported | Not supported (yet) |
-
-### Architecture overview
-
-{{prodname}}'s eBPF dataplane attaches eBPF programs to the `tc` hooks on each {{prodname}} interface as
-well as your data and tunnel interfaces. This allows {{prodname}} to spot workload packets early and handle them
-through a fast-path that bypasses iptables and other packet processing that the kernel would normally do.
-
-![Diagram showing the packet path for pod-to-pod networking; a BPF program is attached to the client pod's veth interface; it does a conntrack lookup in a BPF map, and forwards the packet to the second pod directly, bypassing iptables](/img/calico/bpf-pod-to-pod.svg 'Pod-to-pod packet path with eBPF enabled')
-
-The logic to implement load balancing and packet parsing is pre-compiled ahead of time and relies on a set of BPF
-maps to store the NAT frontend and backend information. One map stores the metadata of the service, allowing
-for `externalTrafficPolicy` and "sticky" services to be honoured. A second map stores the IPs of the backing pods.
-
-In eBPF mode, {{prodname}} converts your policy into optimised eBPF bytecode, using BPF maps to store the IP sets
-matched by policy selectors.
-
-![Detail of BPF program showing that packets are sent to a separate (generated) policy program](/img/calico/bpf-policy.svg 'Expanded view of tc program showing policy.')
-
-To improve performance for services, {{prodname}} also does connect-time load balancing by hooking into the
-socket BPF hooks. When a program tries to connect to a Kubernetes service, {{prodname}} intercepts the connection
-attempt and configures the socket to connect directly to the backend pod's IP instead. This removes _all_
-NAT overhead from service connections.
-
-![Diagram showing BPF program attached to socket connect call; it does NAT at connect time](/img/calico/bpf-connect-time.svg 'BPF program attached to socket connect call.')
-
-## Additional resources
-
-- For more information and performance metrics for the eBPF dataplane, see the [announcement blog post](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/).
-- If you'd like to try eBPF mode in your Kubernetes cluster, follow the [Enable the eBPF dataplane](../operations/ebpf/enabling-ebpf.mdx) guide.
diff --git a/calico_versioned_docs/version-3.25/about/about-k8s-networking.mdx b/calico_versioned_docs/version-3.25/about/about-k8s-networking.mdx
deleted file mode 100644
index f8257baedf..0000000000
--- a/calico_versioned_docs/version-3.25/about/about-k8s-networking.mdx
+++ /dev/null
@@ -1,132 +0,0 @@
----
-description: Learn about Kubernetes networking!
----
-
-# About Kubernetes Networking
-
-
-
-:::note
-
-This guide provides optional background education, not specific to {{prodname}}.
-
-:::
-
-Kubernetes defines a network model that helps provide simplicity and consistency across a range of networking
-environments and network implementations. The Kubernetes network model provides the foundation for understanding how
-containers, pods, and services within Kubernetes communicate with each other. This guide explains the key concepts and
-how they fit together.
-
-In this guide you will learn:
-
-- The fundamental network behaviors the Kubernetes network model defines.
-- How Kubernetes works with a variety of different network implementations.
-- What Kubernetes Services are.
-- How DNS works within Kubernetes.
-- What "NAT outgoing" is and when you would want to use it.
-- What "dual stack" is.
-
-## The Kubernetes network model
-
-The Kubernetes network model specifies:
-
-- Every pod gets its own IP address
-- Containers within a pod share the pod IP address and can communicate freely with each other
-- Pods can communicate with all other pods in the cluster using pod IP addresses (without
- [NAT](about-networking.mdx#nat))
-- Isolation (restricting what each pod can communicate with) is defined using network policies
-
-As a result, pods can be treated much like VMs or hosts (they all have unique IP addresses), and the containers within
-pods very much like processes running within a VM or host (they run in the same network namespace and share an IP
-address). This model makes it easier for applications to be migrated from VMs and hosts to pods managed by Kubernetes.
-In addition, because isolation is defined using network policies rather than the structure of the network, the network
-remains simple to understand. This style of network is sometimes referred to as a "flat network".
-
-Note that, although very rarely needed, Kubernetes does also support the ability to map host ports through to pods, or
-to run pods directly within the host network namespace sharing the host's IP address.
-
-## Kubernetes network implementations
-
-Kubernetes' built-in network support, kubenet, can provide some basic network connectivity. However, it is more common to
-use third-party network implementations which plug into Kubernetes using the CNI (Container Network Interface) API.
-
-There are lots of different kinds of CNI plugins, but the two main ones are:
-
-- network plugins, which are responsible for connecting pods to the network
-- IPAM (IP Address Management) plugins, which are responsible for allocating pod IP addresses.
-
-{{prodname}} provides both network and IPAM plugins, but can also integrate and work seamlessly with some other CNI
-plugins, including AWS, Azure, and Google network CNI plugins, and the host local IPAM plugin. This flexibility allows
-you to choose the best networking options for your specific needs and deployment environment. You can read more about
-this in the {{prodname}} [determine best networking option](../networking/determine-best-networking.mdx)
-guide.
-
-## Kubernetes Services
-
-Kubernetes [Services](https://kubernetes.io/docs/concepts/services-networking/service/) provide a way of abstracting access to a group of pods as a network service.
-The group of pods is usually defined using a [label selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels).
-Within the cluster the network service is usually represented as virtual IP address, and kube-proxy load balances connections to the virtual IP across the group of pods backing the service.
-The virtual IP is discoverable through Kubernetes DNS.
-The DNS name and virtual IP address remain constant for the life time of the service, even though the pods backing the service may be created or destroyed, and the number of pods backing the service may change over time.
-
-Kubernetes Services can also define how a service is accessed from outside of the cluster, for example using
-
-- a node port, where the service can be accessed via a specific port on every node
-- or a load balancer, where a network load balancer provides a virtual IP address through which the service can be
- accessed from outside the cluster.
-
-Note that when using {{prodname}} in on-prem deployments you can also [advertise service IP addresses](../networking/configuring/advertise-service-ips.mdx),
-allowing services to be conveniently accessed without going via a node port or load balancer.
-
-## Kubernetes DNS
-
-Each Kubernetes cluster provides a DNS service. Every pod and every service is discoverable through the Kubernetes DNS
-service.
-
-For example:
-
-- Service: `my-svc.my-namespace.svc.cluster-domain.example`
-- Pod: `pod-ip-address.my-namespace.pod.cluster-domain.example`
-- Pod created by a deployment and exposed as a service:
- `pod-ip-address.service-name.my-namespace.svc.cluster-domain.example`.
-
-The DNS service is implemented as a Kubernetes Service that maps to one or more DNS server pods (usually CoreDNS), which
-are scheduled just like any other pod. Pods in the cluster are configured to use the DNS service, with a DNS search list
-that includes the pod's own namespace and the cluster's default domain.
-
-This means that if there is a service named `foo` in Kubernetes namespace `bar`, then pods in the same namespace can
-access the service as `foo`, and pods in other namespaces can access the service as `foo.bar`.
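-
-For example, you could check this resolution from inside the cluster with a short-lived test pod (the `busybox` image and service name here are just for illustration):
-
-```bash
-# Resolve the service "foo" in namespace "bar" from a temporary pod
-kubectl run -it --rm dns-test --image=busybox:1.36 --restart=Never -- nslookup foo.bar
-```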
-
-Kubernetes supports a rich set of options for controlling DNS in different scenarios. You can read more about these in
-the Kubernetes guide [DNS for Services and Pods](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/).
-
-## NAT outgoing
-
-The Kubernetes network model specifies that pods must be able to communicate with each other directly using pod IP
-addresses. But it does not mandate that pod IP addresses are routable beyond the boundaries of the cluster. Many
-Kubernetes network implementations use [overlay networks](about-networking.mdx#overlay-networks).
-Typically for these deployments, when a pod initiates a connection to an IP address outside of the cluster, the node
-hosting the pod will SNAT (Source Network Address Translation) map the source address of the packet from the pod IP to
-the node IP. This enables the connection to be routed across the rest of the network to the destination (because the
-node IP is routable). Return packets on the connection are automatically mapped back by the node replacing the node IP
-with the pod IP before forwarding the packet to the pod.
-
-When using {{prodname}}, depending on your environment, you can generally choose whether you prefer to run an
-overlay network, or prefer to have fully routable pod IPs. You can read more about this in the {{prodname}}
-[determine best networking option](../networking/determine-best-networking.mdx) guide. {{prodname}} also
-allows you to [configure outgoing NAT](../networking/configuring/workloads-outside-cluster.mdx) for specific IP address
-ranges if more granularity is desired.
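-
-As a rough sketch, a {{prodname}} IP pool with outgoing NAT enabled might look like this (the pool name and CIDR are examples; use values that match your cluster):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: nat-example-pool
-spec:
-  cidr: 192.168.0.0/16 # example pod CIDR
-  natOutgoing: true # SNAT connections from this pool to destinations outside all IP pools
-```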
-
-## Dual stack
-
-If you want to use a mix of IPv4 and IPv6 then you can enable Kubernetes [dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/) mode. When enabled, all
-pods will be assigned both an IPv4 and IPv6 address, and Kubernetes Services can specify whether they should be exposed
-as IPv4 or IPv6 addresses.
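-
-For example, a Service can request both address families (names here are illustrative):
-
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: my-svc
-spec:
-  ipFamilyPolicy: PreferDualStack # ask for both IPv4 and IPv6 where the cluster supports it
-  ipFamilies:
-    - IPv4
-    - IPv6
-  selector:
-    app: my-app
-  ports:
-    - port: 80
-```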
-
-## Additional resources
-
-- [The Kubernetes Network Model](https://kubernetes.io/docs/concepts/cluster-administration/networking/#the-kubernetes-network-model)
-- [Video: Everything you need to know about Kubernetes networking on AWS](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-pod-networking-on-aws/)
-- [Video: Everything you need to know about Kubernetes networking on Azure](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-azure/)
-- [Video: Everything you need to know about Kubernetes networking on Google Cloud](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-google-cloud/)
diff --git a/calico_versioned_docs/version-3.25/about/about-kubernetes-egress.mdx b/calico_versioned_docs/version-3.25/about/about-kubernetes-egress.mdx
deleted file mode 100644
index 7ee249252a..0000000000
--- a/calico_versioned_docs/version-3.25/about/about-kubernetes-egress.mdx
+++ /dev/null
@@ -1,113 +0,0 @@
----
-description: Learn about Kubernetes egress!
----
-
-# About Kubernetes egress
-
-:::note
-
-This guide provides optional background education, including
-education that is not specific to {{prodname}}.
-
-:::
-
-In this guide you will learn:
-
-- What is Kubernetes egress?
-- Why should you restrict egress traffic and how can you do it?
-- What is "NAT outgoing" and when is it used?
-- What is an egress gateway, and why might you want to use one?
-
-## What is Kubernetes egress?
-
-In this guide we are using the term Kubernetes egress to describe connections being made from pods to anything outside of the cluster.
-
-In contrast to ingress traffic, where Kubernetes has the [Ingress](about-kubernetes-ingress.mdx)
-resource type to help manage the traffic, there is no Kubernetes Egress resource. Instead, how the egress traffic is
-handled at a networking level is determined by the Kubernetes network implementation / CNI plugin being used by the
-cluster. In addition, if a service mesh is being used, this can add egress behaviors on top of those the
-network implementation provides.
-
-There are three areas of behavior worth understanding for egress traffic, so you can choose a networking and/or service
-mesh setup that best suits your needs:
-
-- Restricting egress traffic
-- Outgoing NAT behavior
-- Egress gateways
-
-## Restricting egress traffic
-
-It's a common security requirement and best practice to restrict outgoing connections from the cluster. This is normally
-achieved using [Network Policy](about-network-policy.mdx) to define egress rules for each
-microservice, often in conjunction with a [default deny](about-network-policy.mdx#default-deny)
-policy that ensures outgoing connections are denied by default, until a policy is defined to explicitly allow specific
-traffic.
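-
-As a minimal sketch, a default-deny egress policy for a namespace looks like this (the namespace name is an example; in practice you would typically also add a policy allowing DNS egress to kube-dns):
-
-```yaml
-apiVersion: networking.k8s.io/v1
-kind: NetworkPolicy
-metadata:
-  name: default-deny-egress
-  namespace: my-namespace # policies are namespace scoped
-spec:
-  podSelector: {} # selects every pod in the namespace
-  policyTypes:
-    - Egress # no egress rules are listed, so all egress is denied
-```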
-
-One limitation when using Kubernetes Network Policy to restrict access to specific external resources is that the external
-resources need to be specified as IP addresses (or IP address ranges) within the policy rules. If the IP addresses
-associated with an external resource change, then every policy that referenced those IP addresses needs to be updated with
-the new IP addresses. This limitation can be circumvented using Calico [Network Sets](../network-policy/policy-rules/external-ips-policy.mdx),
-or Calico Enterprise's support for domain names in policy rules.
-
-In addition to using network policy, service meshes typically allow you to configure which external services each pod
-can access. In the case of Istio, {{prodname}} can be integrated to enforce network policy at the service mesh
-layer, including [L5-7 rules](../network-policy/istio/http-methods.mdx), as another alternative to using IP addresses in rules. To
-learn more about the benefits of this kind of approach, read our [Adopt a zero trust network model for security](../network-policy/adopt-zero-trust.mdx)
-guide.
-
-Note that, in addition to everything mentioned so far, perimeter firewalls can also be used to restrict outgoing connections,
-for example to allow connections only to particular external IP address ranges, or external services. However, since
-perimeter firewalls typically cannot distinguish individual pods, the rules apply equally to all pods in the cluster.
-This provides some defense in depth, but cannot replace the requirement for network policy.
-
-## NAT outgoing
-
-Network Address Translation ([NAT](https://en.wikipedia.org/wiki/Network_address_translation)) is the process of mapping an IP address in a packet
-to a different IP address as the packet passes through the device performing the NAT. Depending on the use case, NAT can
-apply to the source or destination IP address, or to both addresses.
-
-In the context of Kubernetes egress, NAT is used to allow pods to connect to services outside of the cluster if the pods
-have IP addresses that are not routable outside of the cluster (for example, if the pod network is an overlay).
-
-For example, if a pod in an overlay network attempts to connect to an IP address outside of the cluster, then the
-node hosting the pod uses SNAT (Source Network Address Translation) to map the non-routable source IP address of the
-packet to the node's IP address before forwarding on the packet. The node then maps response packets coming in the
-opposite direction back to the original pod IP address, so packets flow end-to-end in both directions, with neither the
-pod nor the external service being aware that the mapping is happening.
-
-In most clusters this NAT behavior is configured statically across the whole of the cluster. When using
-{{prodname}}, the NAT behavior can be configured at a more granular level for particular address ranges using
-[IP pools](../reference/resources/ippool.mdx). This effectively allows the scope of "non-routable" to be more
-tightly defined than just "inside the cluster vs outside the cluster", which can be useful in some enterprise deployment
-scenarios.
-
-## Egress gateways
-
-Another approach to Kubernetes egress is to route all outbound connections via one or more egress gateways. The gateways
-SNAT (Source Network Address Translation) the connections so the external service being connected to sees the connection
-as coming from the egress gateway. The main use case is to improve security, either with the egress gateway performing a
-direct security role in terms of what connections it allows, or in conjunction with perimeter firewalls (or other
-external entities). For example, so that perimeter firewalls see the connections coming from well known IP
-addresses (the egress gateways) rather than from dynamic pod IP addresses they don't understand.
-
-Egress gateways are not a native concept in Kubernetes itself, but are implemented by some Kubernetes network
-implementations and some service meshes. For example, Calico Enterprise provides egress gateway functionality, plus the
-ability to map namespaces (or even individual pods) to specific egress gateways. Perimeter firewalls (or other external
-security entities) can then effectively provide per namespace security controls, even though they do not have visibility
-to dynamic pod IP addresses.
-
-As an alternative approach to egress gateways, {{prodname}} allows you to control pod IP address ranges based on
-namespace, or node, or even at the individual pod level. Assuming no outgoing NAT is required, this provides a very
-simple way for perimeter firewalls (or other external security entities) to integrate with Kubernetes for both ingress
-and egress traffic. (Note that this approach relies on having enough address space available to sensibly assign IP
-address ranges, for example to each namespace, so it can lead to IP address range exhaustion challenges for large scale
-deployments. In these scenarios, using egress gateways is likely to be a better option.)
-
-## Additional resources
-
-- [Adopt a zero trust network model for security](../network-policy/adopt-zero-trust.mdx)
-- [Use external IPs or networks rules in policy](../network-policy/policy-rules/external-ips-policy.mdx)
-- [Enforce network policy using Istio](../network-policy/istio/app-layer-policy.mdx)
-- [Use HTTP methods and paths in policy rules](../network-policy/istio/http-methods.mdx)
-- [Restrict a pod to use an IP address in a specific range](../networking/ipam/legacy-firewalls.mdx)
-- [Assign IP addresses based on topology](../networking/ipam/assign-ip-addresses-topology.mdx)
\ No newline at end of file
diff --git a/calico_versioned_docs/version-3.25/about/about-kubernetes-ingress.mdx b/calico_versioned_docs/version-3.25/about/about-kubernetes-ingress.mdx
deleted file mode 100644
index 2ea2ea7e62..0000000000
--- a/calico_versioned_docs/version-3.25/about/about-kubernetes-ingress.mdx
+++ /dev/null
@@ -1,136 +0,0 @@
----
-description: Learn about Kubernetes Ingress!
----
-
-# About Kubernetes Ingress
-
-:::note
-
-This guide provides optional background education, including
-education that is not specific to {{prodname}}.
-
-:::
-
-In this guide you will learn:
-
-- What is Kubernetes Ingress?
-- Why use ingress?
-- What are the differences between different ingress implementations?
-- How do ingress and network policy interact?
-- How do ingress and services fit together under the covers?
-
-## What is Kubernetes Ingress?
-
-Kubernetes Ingress builds on top of Kubernetes [Services](about-kubernetes-services.mdx) to provide
-load balancing at the application layer, mapping HTTP and HTTPS requests with particular domains or URLs to Kubernetes
-services. Ingress can also be used to terminate SSL / TLS before load balancing to the service.
-
-The details of how Ingress is implemented depend on which [Ingress Controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) you are using. The Ingress
-Controller is responsible for monitoring Kubernetes [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) resources and provisioning / configuring one
-or more ingress load balancers to implement the desired load balancing behavior.
-
-Unlike Kubernetes services, which are handled at the network layer (L3-4), ingress load balancers operate at the
-application layer (L5-7). Incoming connections are terminated at the load balancer so it can inspect the individual HTTP /
-HTTPS requests. The requests are then forwarded via separate connections from the load balancer to the chosen service
-backing pods. As a result, network policy applied to the backing pods can restrict access to only allow connections from the load
-balancer, but cannot restrict access to specific original clients.
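-
-As an illustration, a minimal Ingress resource mapping one host to a backing service might look like this (the host and service names are examples):
-
-```yaml
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: my-ingress
-spec:
-  rules:
-    - host: shop.example.com
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: frontend # an existing Service in the same namespace
-                port:
-                  number: 80
-```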
-
-## Why use Kubernetes Ingress?
-
-Given that Kubernetes [Services](about-kubernetes-services.mdx) already provide a mechanism for load
-balancing access to services from outside of the cluster, why might you want to use Kubernetes Ingress?
-
-The mainline use case is if you have multiple HTTP / HTTPS services that you want to expose through a single external IP
-address, perhaps with each service having a different URL path, or perhaps as multiple different domains. This is a lot
-simpler from a client configuration point of view than exposing each service outside of the cluster using Kubernetes
-Services, which would give each service a separate external IP address.
-
-If, on the other hand, your application architecture is fronted by a single "front end" microservice, then Kubernetes
-Services likely already meet your needs. In this case you might prefer to not add Ingress to the picture, both from a
-simplicity point of view, and potentially also so you can more easily restrict access to specific clients using network
-policy. In effect, your "front end" microservice already plays the role of Kubernetes Ingress, in a way that is not that
-dissimilar to [in-cluster ingress](#in-cluster-ingress-solutions) solutions discussed below.
-
-## Types of Ingress solutions
-
-Broadly speaking there are two types of ingress solutions:
-
-- In-cluster ingress - where ingress load balancing is performed by pods within the cluster itself.
-- External ingress - where ingress load balancing is implemented outside of the cluster by
- appliances or cloud provider capabilities.
-
-### In-cluster ingress solutions
-
-In-cluster ingress solutions use software load balancers running in pods within the cluster itself. There are many
-different ingress controllers to consider that follow this pattern, including for example the NGINX ingress controller.
-
-The advantages of this approach are that you can:
-
-- horizontally scale your ingress solution up to the limits of Kubernetes
-- choose the ingress controller that best suits your specific needs, for example, with particular load balancing
- algorithms, or security options.
-
-To get your ingress traffic to the in-cluster ingress pods, the ingress pods are normally exposed externally as a
-Kubernetes service, so you can use any of the standard ways of accessing the service from outside of the cluster. A
-common approach is to use an external network load balancer or service IP advertisement, with `externalTrafficPolicy:local`.
-This minimizes the number of network hops, and retains the client source IP address, which allows network policy to be used
-to restrict access to the ingress pods to particular clients if desired.
-
-![In-cluster ingress](/img/calico/ingress-in-cluster.svg)
-
-### External ingress solutions
-
-External ingress solutions use application load balancers outside of the cluster. The exact details and
-features depend on which ingress controller you are using, but most cloud providers include an ingress controller that
-automates the provisioning and management of the cloud provider's application load balancers to provide ingress.
-
-The advantage of this type of ingress solution is that your cloud provider handles the operational complexity of the
-ingress for you. The downsides are a potentially more limited set of features compared to the rich range of in-cluster
-ingress solutions, and the maximum number of services exposed by ingress being constrained by cloud-provider-specific
-limits.
-
-![External ingress](/img/calico/ingres-external.svg)
-
-Note that most application load balancers support a basic mode of operation of forwarding traffic to the chosen service
-backing pods via the [node port](about-kubernetes-services.mdx#node-port-services) of the
-corresponding service.
-
-In addition to this basic approach of load balancing to service node ports, some cloud providers support a second mode
-of application layer load balancing, which load balances directly to the pods backing each service, without going via
-node-ports or other kube-proxy service handling. This has the advantage of eliminating the potential second network hop
-associated with node ports load balancing to a pod on a different node. The potential disadvantage is that if you are
-operating at very high scales, for example with hundreds of pods backing a service, you may exceed the application layer
-load balancer's maximum limit of IPs it can load balance to in this mode. In this case, switching to an in-cluster ingress
-solution is likely the better fit for you.
-
-## Show me everything!
-
-All the above diagrams focus on connection level (L5-7) representation of ingress and services. You can learn more about
-the network level (L3-4) interactions involved in handling the connections, including in which scenarios client source IP
-addresses are maintained, in the [About Kubernetes Services](about-kubernetes-services.mdx) guide.
-
-If you are already up to speed on how services work under the covers, here are some more complete diagrams that show details of how services are load balanced at the network layer (L3-4).
-
-:::note
-
-You can successfully use ingress without needing to understand this next level of detail! So feel free to skip
-over these diagrams if you don't want to dig deeper into how services and ingress interact under the covers.
-
-:::
-
-**In-cluster ingress solution exposed as service type `LoadBalancer` with `externalTrafficPolicy:local`**
-
-![In-cluster ingress with NLB local](/img/calico/ingress-in-cluster-nlb-local.svg)
-
-**External ingress solution via node ports**
-
-![External ingress via node port](/img/calico/ingress-external-node-ports.svg)
-
-**External ingress solution direct to pods**
-
-![External ingress direct to pods](/img/calico/ingress-external-direct-to-pods.svg)
-
-## Additional resources
-
-- [Video: Everything you need to know about Kubernetes Ingress networking ](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-ingress-networking/)
-- [Video: Everything you need to know about Kubernetes Services networking ](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-services-networking/)
diff --git a/calico_versioned_docs/version-3.25/about/about-kubernetes-services.mdx b/calico_versioned_docs/version-3.25/about/about-kubernetes-services.mdx
deleted file mode 100644
index 7b54f0fc9e..0000000000
--- a/calico_versioned_docs/version-3.25/about/about-kubernetes-services.mdx
+++ /dev/null
@@ -1,142 +0,0 @@
----
-description: Learn about Kubernetes services!
----
-
-# About Kubernetes Services
-
-:::note
-
-This guide provides optional background education, including
-education that is not specific to {{prodname}}.
-
-:::
-
-In this guide you will learn:
-
-- What are Kubernetes Services?
-- What are the differences between the three main service types and what do you use them for?
-- How do services and network policy interact?
-- Some options for optimizing how services are handled.
-
-## What are Kubernetes Services?
-
-Kubernetes [Services](https://kubernetes.io/docs/concepts/services-networking/service/) provide a way of abstracting access to a group
-of pods as a network service. The group of pods backing each service is usually defined using a [label selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels).
-
-When a client connects to a Kubernetes service, the connection is load balanced to one of the pods backing the service,
-as illustrated in this conceptual diagram:
-
-![Kubernetes Service conceptual diagram](/img/calico/k8s-service-concept.svg)
-
-There are three main types of Kubernetes services:
-
-- Cluster IP - which is the usual way of accessing a service from inside the cluster
-- Node port - which is the most basic way of accessing a service from outside the cluster
-- Load balancer - which uses an external load balancer as a more sophisticated way to access a service from outside the
- cluster.
-
-## Cluster IP services
-
-The default service type is `ClusterIP`. This allows a service to be accessed within the cluster via a virtual IP
-address, known as the service Cluster IP. The Cluster IP for a service is discoverable through Kubernetes DNS. For
-example, `my-svc.my-namespace.svc.cluster-domain.example`. The DNS name and Cluster IP address remain constant for the
-life time of the service, even though the pods backing the service may be created or destroyed, and the number of pods
-backing the service may change over time.
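-
-A minimal `ClusterIP` service looks something like this (names and ports are illustrative):
-
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: my-svc
-  namespace: my-namespace
-spec:
-  type: ClusterIP # the default, shown here for clarity
-  selector:
-    app: my-app # pods with this label back the service
-  ports:
-    - port: 80 # the port exposed on the Cluster IP
-      targetPort: 8080 # the container port on the backing pods
-```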
-
-In a typical Kubernetes deployment, kube-proxy runs on every node and is responsible for intercepting connections to
-Cluster IP addresses and load balancing across the group of pods backing each service. As part of this process
-[DNAT](about-networking.mdx#nat) is used to map the destination IP address from the Cluster IP to the
-chosen backing pod. Response packets on the connection then have the NAT reverse on their way back to the pod that
-initiated the connection.
-
-![kube-proxy cluster IP](/img/calico/kube-proxy-cluster-ip.svg)
-
-Importantly, network policy is enforced based on the pods, not the service Cluster IP. (i.e. Egress network policy is
-enforced for the client pod after the DNAT has changed the connection's destination IP to the chosen service backing
-pod. And because only the destination IP for the connection is changed, ingress network policy for the backing pod sees the
-original client pod as the source of the connection.)
-
-## Node port services
-
-The most basic way to access a service from outside the cluster is to use a service of type `NodePort`. A Node Port is a
-port reserved on each node in the cluster through which the service can be accessed. In a typical Kubernetes deployment,
-kube-proxy is responsible for intercepting connections to Node Ports and load balancing them across the pods backing
-each service.
-
-As part of this process [NAT](about-networking.mdx#nat) is used to map the destination IP address and
-port from the node IP and Node Port, to the chosen backing pod and service port. In addition the source IP address is
-mapped from the client IP to the node IP, so that response packets on the connection flow back via the original node,
-where the NAT can be reversed. (It's the node which performed the NAT that has the connection tracking state needed to
-reverse the NAT.)
-
-![kube-proxy node port](/img/calico/kube-proxy-node-port.svg)
-
-Note that because the connection source IP address is SNATed to the node IP address, ingress network policy for the
-service backing pod does not see the original client IP address. Typically this means that any such policy is limited to
-restricting the destination protocol and port, and cannot restrict based on the client / source IP. This limitation can
-be circumvented in some scenarios by using [externalTrafficPolicy](#externaltrafficpolicylocal) or by using
-{{prodname}}'s eBPF dataplane [native service handling](#calico-ebpf-native-service-handling) (rather than kube-proxy) which preserves source IP address.
-
-## Load balancer services
-
-Services of type `LoadBalancer` expose the service via an external network load balancer (NLB). The exact type of
-network load balancer depends on which public cloud provider or, if on-prem, which specific hardware load balancer
-integration your cluster uses.
-
-The service can be accessed from outside of the cluster via a specific IP address on the network load balancer, which by
-default will load balance evenly across the nodes using the service node port.
-
-![kube-proxy load balancer](/img/calico/kube-proxy-load-balancer.svg)
-
-Most network load balancers preserve the client source IP address, but because the service then goes via a node port,
-the backing pods themselves do not see the client IP, with the same implications for network policy. As with node
-ports, this limitation can be circumvented in some scenarios by using [externalTrafficPolicy](#externaltrafficpolicylocal)
-or by using {{prodname}}'s eBPF dataplane [native service handling](#calico-ebpf-native-service-handling) (rather
-than kube-proxy) which preserves source IP address.
-
-## Advertising service IPs
-
-One alternative to using node ports or network load balancers is to advertise service IP addresses over BGP. This
-requires the cluster to be running on an underlying network that supports BGP, which typically means an on-prem
-deployment with standard Top of Rack routers.
-
-{{prodname}} supports advertising service Cluster IPs, or External IPs for services configured with one. If you are
-not using Calico as your network plugin, then [MetalLB](https://github.com/metallb/metallb) provides similar capabilities that work with a variety of different network
-plugins.
-
-![kube-proxy service advertisement](/img/calico/kube-proxy-service-advertisement.svg)
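-
-As a sketch, advertising the service cluster IP range with {{prodname}} can be done through the default BGPConfiguration resource (the CIDR shown is an example and must match your cluster's service CIDR):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: BGPConfiguration
-metadata:
-  name: default
-spec:
-  serviceClusterIPs:
-    - cidr: 10.96.0.0/12 # example Kubernetes service CIDR to advertise over BGP
-```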
-
-## externalTrafficPolicy:local
-
-By default, whether using service type `NodePort` or `LoadBalancer` or advertising service IP addresses over BGP,
-accessing a service from outside the cluster load balances evenly across all the pods backing the service, independent
-of which node the pods are on. This behavior can be changed by configuring the service with
-`externalTrafficPolicy:local` which specifies that connections should only be load balanced to pods backing the service
-on the local node.
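-
-For example (service name, labels, and ports are illustrative):
-
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: frontend
-spec:
-  type: LoadBalancer
-  externalTrafficPolicy: Local # only load balance to backing pods on the node that received the traffic
-  selector:
-    app: frontend
-  ports:
-    - port: 80
-```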
-
-When combined with services of type `LoadBalancer` or with {{prodname}} service IP address advertising, traffic is
-only directed to nodes that host at least one pod backing the service. This reduces the potential extra network hop
-between nodes and, perhaps more importantly, maintains the source IP address all the way to the pod, so network policy
-can restrict specific external clients if desired.
-
-![kube-proxy with externalTrafficPolicy local](/img/calico/kube-proxy-service-local.svg)
-
-Note that in the case of services of type `LoadBalancer`, not all Load Balancers support this mode. And in the case of
-service IP advertisement, the evenness of the load balancing becomes topology dependent. In this case, pod anti-affinity
-rules can be used to ensure even distribution of backing pods across your topology, but this does add some complexity to
-deploying the service.
-
-## Calico eBPF native service handling
-
-As an alternative to using Kubernetes' standard kube-proxy, {{prodname}}'s [eBPF dataplane](../operations/ebpf/enabling-ebpf.mdx)
-supports native service handling. This preserves the source IP to
-simplify network policy, offers DSR (Direct Server Return) to reduce the number of network hops for return traffic, and
-provides even load balancing independent of topology, with reduced CPU and latency compared to kube-proxy.
-
-![Calico eBPF native service handling](/img/calico/calico-native-service-handling.svg)
-
-## Additional resources
-
-- [Video: Everything you need to know about Kubernetes Services networking ](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-services-networking/)
-- [Blog: Introducing the Calico eBPF dataplane](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/)
-- [Blog: Hands on with Calico eBPF native service handling](https://www.projectcalico.org/hands-on-with-calicos-ebpf-service-handling/)
diff --git a/calico_versioned_docs/version-3.25/about/about-network-policy.mdx b/calico_versioned_docs/version-3.25/about/about-network-policy.mdx
deleted file mode 100644
index 78696632db..0000000000
--- a/calico_versioned_docs/version-3.25/about/about-network-policy.mdx
+++ /dev/null
@@ -1,244 +0,0 @@
----
-description: Learn about network policy!
----
-
-# About Network Policy
-
-:::note
-
-This guide provides optional background education, including
-education that is not specific to {{prodname}}.
-
-:::
-
-Kubernetes and {{prodname}} provide network policy APIs to help you secure your workloads.
-
-In this guide you will learn:
-
-- What network policy is and why it is important.
-- The differences between Kubernetes and Calico network policies and when you might want to use each.
-- Some best practices for using network policy.
-
-## What is network policy?
-
-Network policy is the primary tool for securing a Kubernetes network. It allows you to easily restrict the network
-traffic in your cluster so only the traffic that you want to flow is allowed.
-
-To understand the significance of network policy, let's briefly explore how network security was typically achieved
-prior to network policy. Historically in enterprise networks, network security was provided by designing a physical
-topology of network devices (switches, routers, firewalls) and their associated configuration. The physical topology
-defined the security boundaries of the network. In the first phase of virtualization, the same network and network
-device constructs were virtualized in the cloud, and the same techniques for creating specific network topologies of
-(virtual) network devices were used to provide network security. Adding new applications or services often required
-additional network design to update the network topology and network device configuration to provide the desired
-security.
-
-In contrast, the [Kubernetes network model](about-k8s-networking.mdx) defines a "flat"
-network in which every pod can communicate with all other pods in the cluster using pod IP addresses. This approach
-massively simplifies network design and allows new workloads to be scheduled dynamically anywhere in the cluster with no
-dependencies on the network design.
-
-In this model, rather than network security being defined by network topology boundaries, it is defined using network
-policies that are independent of the network topology. Network policies are further abstracted from the network by using
-label selectors as their primary mechanism for defining which workloads can talk to which workloads, rather than IP
-addresses or IP address ranges.
-
-## Why is network policy important?
-
-In an age where attackers are becoming more and more sophisticated, network security as a line of defense is more important
-than ever.
-
-While you can (and should) use firewalls to restrict traffic at the perimeters of your network (commonly referred to as
-north-south traffic), their ability to police Kubernetes traffic is often limited to a granularity of the cluster as a
-whole, rather than to specific groups of pods, due to the dynamic nature of pod scheduling and pod IP addresses. In
-addition, the goal of most attackers once they gain a small foothold inside the perimeter is to move laterally (commonly
-referred to as east-west) to gain access to higher value targets, which perimeter based firewalls can't police against.
-
-Network policy on the other hand is designed for the dynamic nature of Kubernetes by following the standard Kubernetes
-paradigm of using label selectors to define groups of pods, rather than IP addresses. And because network policy is
-enforced within the cluster itself it can police both north-south and east-west traffic.
-
-Network policy represents an important evolution of network security, not just because it handles the dynamic nature of
-modern microservices, but because it empowers dev and devops engineers to easily define network security themselves,
-rather than needing to learn low-level networking details or raise tickets with a separate team responsible for managing
-firewalls. Network policy makes it easy to define intent, such as "only this microservice gets to connect to the
-database", write that intent as code (typically in YAML files), and integrate authoring of network policies into git
-workflows and CI/CD processes.
-
-:::note
-
-Calico and Calico Enterprise offer capabilities that can help perimeter firewalls integrate
-more tightly with Kubernetes. However, this does not remove the need for, or value of, network policies within the cluster itself.
-
-:::
-
-## Kubernetes network policy
-
-Kubernetes network policies are defined using the Kubernetes [NetworkPolicy](https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/) resource.
-
-The main features of Kubernetes network policies are:
-
-- Policies are namespace scoped (i.e. you create them within the context of a specific namespace just like, for example, pods)
-- Policies are applied to pods using label selectors
-- Policy rules can specify the traffic that is allowed to/from other pods, namespaces, or CIDRs
-- Policy rules can specify protocols (TCP, UDP, SCTP), named ports or port numbers
-
-Kubernetes itself does not enforce network policies, and instead delegates their enforcement to network plugins. Most
-network plugins implement the mainline elements of Kubernetes network policies, though not all implement every feature
-of the specification. (Calico does implement every feature, and was the original reference implementation of Kubernetes
-network policies.)
-
-To learn more about Kubernetes network policies, read the [Get started with Kubernetes network policy](../network-policy/get-started/kubernetes-policy/kubernetes-network-policy.mdx)
- guide.
-
-## Calico network policy
-
-In addition to enforcing Kubernetes network policy, {{prodname}} supports its own
-namespaced [NetworkPolicy](../reference/resources/networkpolicy.mdx) and non-namespaced
-[GlobalNetworkPolicy](../reference/resources/globalnetworkpolicy.mdx) resources, which provide additional
-features beyond those supported by Kubernetes network policy. This includes support for:
-
-- policy ordering/priority
-- deny and log actions in rules
-- more flexible match criteria for applying policies and in policy rules, including matching on Kubernetes
- ServiceAccounts, and (if using Istio & Envoy) cryptographic identity and layer 5-7 match criteria such as HTTP & gRPC URLs.
-- ability to reference non-Kubernetes workloads in policies, including matching on
- [NetworkSets](../reference/resources/networkset.mdx) in policy rules
-
-While Kubernetes network policy applies only to pods, Calico network policy can be applied to multiple types of
-endpoints including pods, VMs, and host interfaces.
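-
-For example, a minimal sketch of a {{prodname}} GlobalNetworkPolicy that combines explicit ordering with log and deny actions against a labelled NetworkSet (the name and labels are illustrative):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: block-known-bad-ips      # illustrative name
-spec:
-  order: 10                      # lower order values are evaluated first
-  selector: all()                # applies to all endpoints, in every namespace
-  types:
-    - Egress
-  egress:
-    - action: Log                # log traffic to the labelled NetworkSet...
-      destination:
-        selector: threat == "known-bad"
-    - action: Deny               # ...then deny it
-      destination:
-        selector: threat == "known-bad"
-    - action: Allow              # everything else is allowed by this policy
-```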
-
-To learn more about Calico network policies, read the [Get started with Calico network policy](../network-policy/get-started/calico-policy/calico-network-policy.mdx)
- guide.
-
-## Benefits of using {{prodname}} for network policy
-
-### Full Kubernetes network policy support
-
-Unlike some other network policy implementations, Calico implements the full set of Kubernetes network policy features.
-
-### Richer network policy
-
-Calico network policies allow even richer traffic control than Kubernetes network policies if you need it. In addition,
-Calico network policies allow you to create policy that applies across multiple namespaces using GlobalNetworkPolicy
-resources.
-
-### Mix Kubernetes and Calico network policy
-
-Kubernetes and Calico network policies can be mixed together seamlessly. One common use case for this is to split
-responsibilities between security / cluster ops teams and developer / service teams. For example, giving the security /
-cluster ops team RBAC permissions to define Calico policies, and giving developer / service teams RBAC permissions to
-define Kubernetes network policies in their specific namespaces. As Calico policy rules can be ordered to be enforced
-either before or after Kubernetes network policies, and can include actions such as deny and log, this allows the
-security / cluster ops team to define basic, higher-level, general-purpose rules, while empowering the developer /
-service teams to define their own fine-grained constraints on the apps and services they are responsible for.
-
-For more flexible control and delegation of responsibilities between two or more teams, Calico Enterprise extends this
-model to provide [hierarchical policy](#hierarchical-policy).
-
-![Example mix of network policy types](/img/calico/example-k8s-calico-policy-mix.svg)
-
-### Ability to protect hosts and VMs
-
-As {{prodname}} policies can be enforced on host interfaces, you can use them to protect your Kubernetes nodes (not
-just your pods), including for example, limiting access to node ports from outside of the cluster. To learn more, check
-out the {{prodname}} [policy for hosts](../network-policy/hosts/index.mdx) guides.
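-
-For example, a host interface is represented by a HostEndpoint resource, which policies then select by label just like a pod; a minimal sketch (the node name, interface, IP, and labels are illustrative):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: HostEndpoint
-metadata:
-  name: node1-eth0               # illustrative name
-  labels:
-    environment: production
-spec:
-  node: node1                    # must match the node resource name
-  interfaceName: eth0
-  expectedIPs:
-    - 10.0.0.12                  # the interface's IP address(es)
-```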
-
-### Integrates with Istio
-
-When used with the Istio service mesh, {{prodname}}'s policy engine enforces the same policy model at the host networking
-layer and at the service mesh layer, protecting your infrastructure from compromised workloads and protecting your
-workloads from compromised infrastructure. This also avoids the need for dual provisioning of security at the service
-mesh and infrastructure layers, or having to learn different policy models for each layer.
-
-### Extendable with Calico Enterprise
-
-Calico Enterprise adds even richer network policy capabilities, with the ability
-to specify hierarchical policies, with each team having its own boundaries of trust, and FQDN / domain names in policy
-rules for restricting access to specific external services.
-
-## Best practices for network policies
-
-### Ingress and egress
-
-At a minimum we recommend that every pod is protected by network policy ingress rules that restrict what is allowed
-to connect to the pod and on which ports. The best practice is also to define network policy egress rules that restrict
-the outgoing connections that are allowed by pods themselves. Ingress rules protect your pod from attacks outside of the
-pod. Egress rules help protect everything outside of the pod if the pod gets compromised, making it harder for an attacker
-to move laterally (east-west) or to exfiltrate data from your cluster (north-south).
-
-### Policy schemas
-
-Due to the flexibility of network policy and labelling, there are often multiple different ways of labelling and writing
-policies that can achieve the same particular goal. One of the most common approaches is to have a small number of
-global policies that apply to all pods, and then a single pod specific policy that defines all the ingress and egress
-rules that are particular to that pod.
-
-For example:
-
-```yaml
-kind: NetworkPolicy
-apiVersion: networking.k8s.io/v1
-metadata:
-  name: back-end
- namespace: staging
-spec:
- podSelector:
- matchLabels:
- app: back-end
- ingress:
- - from:
- - podSelector:
- matchLabels:
- app: front-end
- ports:
- - protocol: TCP
- port: 443
- egress:
- - to:
- - podSelector:
- matchLabels:
- app: database
- ports:
- - protocol: TCP
- port: 27017
-
-```
-
-### Default deny
-
-One approach to ensuring these best practices are being followed is to define [default deny](../network-policy/get-started/kubernetes-default-deny.mdx)
- network policies. These ensure that if no other policy is
-defined that explicitly allows traffic to/from a pod, then the traffic will be denied. As a result, anytime a team
-deploys a new pod, they are forced to also define network policy for the pod. It can be useful to use a {{prodname}}
-GlobalNetworkPolicy for this (rather than needing to define a policy every time a new namespace is created) and to
-include some exceptions to the default deny (for example to allow pods to access DNS).
-
-For example, you might use the following policy to default-deny all (non-system) pod traffic except for DNS queries to kube-dns/core-dns.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: default-app-policy
-spec:
- namespaceSelector: has(projectcalico.org/name) && projectcalico.org/name not in {"kube-system", "calico-system", "calico-apiserver"}
- types:
- - Ingress
- - Egress
- egress:
- - action: Allow
- protocol: UDP
- destination:
- selector: k8s-app == "kube-dns"
- ports:
- - 53
-```
-
-### Hierarchical policy
-
-[Calico Enterprise](/calico-enterprise/latest/network-policy/policy-tiers/tiered-policy) supports hierarchical network policy using policy tiers. RBAC
-for each tier can be defined to restrict who can interact with each tier. This can be used to delegate trust across
-multiple teams.
-
-![Example tiers](/img/calico/example-tiers.svg)
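-
-For example (Calico Enterprise only), each tier is itself a resource whose order controls when its policies are evaluated relative to other tiers; a minimal sketch with an illustrative name:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: Tier
-metadata:
-  name: security                 # illustrative name
-spec:
-  order: 100                     # policies in lower-order tiers are evaluated first
-```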
diff --git a/calico_versioned_docs/version-3.25/about/about-networking.mdx b/calico_versioned_docs/version-3.25/about/about-networking.mdx
deleted file mode 100644
index 0508e962f3..0000000000
--- a/calico_versioned_docs/version-3.25/about/about-networking.mdx
+++ /dev/null
@@ -1,166 +0,0 @@
----
-description: Learn about networking!
----
-
-# About Networking
-
-:::note
-
-This guide provides optional background education, not specific to {{prodname}}.
-
-:::
-
-You can get up and running with Calico by following any of the {{prodname}} [install guides](../getting-started/index.mdx)
- without needing to be a networking expert. Calico hides the complexities for
-you. However, if you would like to learn more about networking so you can better understand what is happening under the
-covers, this guide provides a short introduction to some of the key fundamental networking concepts for anyone who is
-not already familiar with them.
-
-In this guide you will learn:
-
-- The terms used to describe the different layers of the network.
-- The anatomy of a network packet.
-- What MTU is and why it makes a difference.
-- How IP addressing, subnets, and IP routing works.
-- What an overlay network is.
-- What DNS and NAT are.
-
-## Network layers
-
-The process of sending and receiving data over a network is commonly categorized into 7 layers (referred to as the [OSI model](https://en.wikipedia.org/wiki/OSI_model)). The layers are
-typically abbreviated as L1 - L7. You can think of data as passing through each of these layers in turn as it is sent or
-received from an application, with each layer being responsible for a particular part of the processing required to
-send or receive the data over the network.
-
-![OSI network layers diagram](/img/calico/osi-network-layers.svg)
-
-In a modern enterprise or public cloud network, the layers commonly map as follows:
-
-- L5-7: all the protocols most application developers are familiar with. e.g. HTTP, FTP, SSH, SSL, DNS.
-- L4: TCP or UDP, including source and destination ports.
-- L3: IP packets and IP routing.
-- L2: Ethernet packets and Ethernet switching.
-
-## Anatomy of a network packet
-
-When sending data over the network, each layer in the network stack adds its own header containing the control/metadata
-the layer needs to process the packet as it traverses the network, passing the resulting packet on to the next
-layer of the stack. In this way the complete packet is produced, which includes all the control/metadata required by
-every layer of the stack, without any layer understanding the data or needing to process the control/metadata of
-adjacent network layers.
-
-![Anatomy of a network packet](/img/calico/anatomy-of-a-packet.svg)
-
-## IP addressing, subnets and IP routing
-
-The L3 network layer introduces IP addresses and typically marks the boundary between the part of networking that
-application developers care about, and the part of networking that network engineers care about. In particular
-application developers typically regard IP addresses as the source and destination of the network traffic, but have much
-less of a need to understand L3 routing or anything lower in the network stack, which is more the domain of network
-engineers.
-
-There are two variants of IP addresses: IPv4 and IPv6.
-
-- IPv4 addresses are 32 bits long and the most commonly used. They are typically represented as 4 bytes in decimal (each
- 0-255) separated by dots. e.g. `192.168.27.64`. There are several ranges of IP addresses that are reserved as
-  "private", which can only be used within local private networks and are not routable across the internet. These can be
-  reused by enterprises as often as they want. In contrast, "public" IP addresses are globally unique across the whole
- of the internet. As the number of network devices and networks connected to the internet has grown, public IPv4
- addresses are now in short supply.
-- IPv6 addresses are 128 bits long and designed to overcome the shortage of IPv4 address space. They are typically
- represented by 8 groups of 4 digit hexadecimal numbers. e.g. `1203:8fe0:fe80:b897:8990:8a7c:99bf:323d`. Due to the 128
- bit length, there's no shortage of IPv6 addresses. However, many enterprises have been slow to adopt IPv6, so for now
- at least, IPv4 remains the default for many enterprise and data center networks.
-
-Groups of IP addresses are typically represented using [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) that consists of an IP address and number of
-significant bits on the IP address separated by a `/`. For example, `192.168.27.0/24` represents the group of 256 IP
-addresses from `192.168.27.0` to `192.168.27.255`.
-
-A group of IP addresses within a single L2 network is referred to as a subnet. Within a subnet, packets can be sent
-between any pair of devices as a single network hop, based solely on the L2 header (and footer).
-
-To send packets beyond a single subnet requires L3 routing, with each L3 network device (router) being responsible for
-making decisions on the path to send the packet based on L3 routing rules. Each network device acting as a router has
-routes that determine where a packet for a particular CIDR should be sent next. So for example, in a Linux system, a
-route of `10.48.0.128/26 via 10.0.0.12 dev eth0` indicates that packets with destination IP address in `10.48.0.128/26`
-should be routed to a next network hop of `10.0.0.12` over the `eth0` interface.
-
-Routes can be configured statically by an administrator, or programmed dynamically using routing protocols. When using
-routing protocols each network device typically needs to be configured to tell it which other network devices it should
-be exchanging routes with. The routing protocol then handles programming the right routes across the whole of the
-network as devices are added or removed, or network links come in or out of service.
-
-One common routing protocol used in large enterprise and data center networks is [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol). BGP is one of the main protocols that powers
-the internet, so scales incredibly well, and is very widely supported by modern routers.
-
-## Overlay networks
-
-An overlay network allows network devices to communicate across an underlying network (referred to as the underlay)
-without the underlay network having any knowledge of the devices connected to the overlay network. From the point of
-view of the devices connected to the overlay network, it looks just like a normal network. There are many different
-kinds of overlay networks that use different protocols to make this happen, but in general they share the same common
-characteristic of taking a network packet, referred to as the inner packet, and encapsulating it inside an outer network
-packet. In this way the underlay sees the outer packets without needing to understand how to handle the inner packets.
-
-How the overlay knows where to send packets varies by overlay type and the protocols they use. Similarly exactly how the
-packet is wrapped varies between different overlay types. In the case of VXLAN for example, the inner packet is wrapped
-and sent as UDP in the outer packet.
-
-![Anatomy of an overlay network packet](/img/calico/anatomy-of-an-overlay-packet.svg)
-
-Overlay networks have the advantage of having minimal dependencies on the underlying network infrastructure, but have
-the downsides of:
-
-- having a small performance impact compared to non-overlay networking, which you might want to avoid if running
- network intensive workloads
-- workloads on the overlay are not easily addressable from the rest of the network, so NAT gateways or load balancers
-  are required to bridge between the overlay and the underlay network for any ingress to, or egress from, the overlay.
-
-{{prodname}} networking options are exceptionally flexible, so in general you can choose whether you prefer
-{{prodname}} to provide an overlay network, or non-overlay network. You can read more about this in the {{prodname}}
-[determine best networking option](../networking/determine-best-networking.mdx) guide.
-
-## DNS
-
-While the underlying network packet flow across the network is determined using IP addresses, users and applications
-typically want to use well known names to identify network destinations that remain consistent over time, even if the
-underlying IP addresses change; for example, mapping `google.com` to `216.58.210.46`. This translation from name to IP
-address is handled by [DNS](https://en.wikipedia.org/wiki/Domain_Name_System). DNS runs on top of the base networking described so far. Each device connected to a network is typically configured
-with the IP addresses of one or more DNS servers. When an application wants to connect to a domain name, a DNS message is
-sent to the DNS server, which then responds with information about which IP address(es) the domain name maps to. The
-application can then initiate its connection to the chosen IP address.
-
-## NAT
-
-Network Address Translation ([NAT](https://en.wikipedia.org/wiki/Network_address_translation)) is the process of mapping an IP address in a packet
-to a different IP address as the packet passes through the device performing the NAT. Depending on the use case, NAT can
-apply to the source or destination IP address, or to both addresses.
-
-One common use case for NAT is to allow devices with private IP addresses to talk to devices with public IP addresses across
-the internet. For example, if a device with a private IP address attempts to connect to a public IP address, then the
-router at the border of the private network will typically use SNAT (Source Network Address Translation) to map the
-private source IP address of the packet to the router's own public IP address before forwarding it on to the internet.
-The router then maps response packets coming in the opposite direction back to the original private IP address, so
-packets flow end-to-end in both directions, with neither source nor destination being aware that the mapping is happening. The
-same technique is commonly used to allow devices connected to an overlay network to connect with devices outside of the
-overlay network.
-
-Another common use case for NAT is load balancing. In this case the load balancer performs DNAT (Destination Network
-Address Translation) to change the destination IP address of the incoming connection to the IP address of the chosen
-device it is load balancing to. The load balancer then reverses this NAT on response packets so neither source nor
-destination device is aware that the mapping is happening.
-
-## MTU
-
-The Maximum Transmission Unit ([MTU](https://en.wikipedia.org/wiki/Maximum_transmission_unit)) of a network link is the maximum size of packet that
-can be sent across that network link. It is common for all links in a network to be configured with the same MTU to
-reduce the need to fragment packets as they traverse the network, which can significantly lower the performance of the
-network. In addition, TCP tries to learn path MTUs, and adjusts packet sizes for each network path based on the smallest
-MTU of any of the links in the network path. When an application tries to send more data than can fit in a single
-packet, TCP splits the data into multiple TCP segments, so the MTU is not exceeded.
-
-Most networks have links with an MTU of 1,500 bytes, but some networks support MTUs of 9,000 bytes. In a Linux system,
-larger MTU sizes can result in lower CPU being used by the Linux networking stack when sending large amounts of data,
-because it has to process fewer packets for the same amount of data. Depending on the network interface hardware being
-used, some of this overhead may be offloaded to the network interface hardware, so the impact of small vs large MTU
-sizes varies from device to device.
diff --git a/calico_versioned_docs/version-3.25/about/index.mdx b/calico_versioned_docs/version-3.25/about/index.mdx
deleted file mode 100644
index fe62185374..0000000000
--- a/calico_versioned_docs/version-3.25/about/index.mdx
+++ /dev/null
@@ -1,331 +0,0 @@
----
-description: The value of using Calico for networking and network security for workloads and hosts.
----
-
-# About Calico
-
-
-
-
-
-
-
- What is {{ prodname }}?
-
-
- {{ prodname }} is an open source networking and network security solution for containers, virtual machines, and
- native host-based workloads. {{ prodname }} supports a broad range of platforms including Kubernetes, OpenShift,
- Mirantis Kubernetes Engine (MKE), OpenStack, and bare metal services.
-
-
- Whether you opt to use {{ prodname }}'s eBPF data plane or Linux’s standard networking pipeline, {{ prodname }}{' '}
- delivers blazing fast performance with true cloud-native scalability. {{ prodname }} provides developers and
- cluster operators with a consistent experience and set of capabilities whether running in public cloud or on-prem,
- on a single node, or across a multi-thousand node cluster.
-
-
-
-
-
-
-
-
- Why use {{ prodname }}?
-
-
-
-
-
-
-
- Choice of dataplanes
-
-
- {{ prodname }} gives you a choice of dataplanes, including a pure Linux eBPF dataplane, a standard Linux
- networking dataplane, and a Windows HNS dataplane. Whether you prefer cutting edge features of eBPF, or the
- familiarity of the standard primitives that existing system administrators already know, Calico has you covered.
-
-
- Whichever choice is right for you, you’ll get the same, easy to use, base networking, network policy and IP
- address management capabilities, that have made Calico the most trusted networking and network policy solution for
- mission-critical cloud-native applications.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Best practices for network security
-
-
- {{ prodname }}’s rich network policy model makes it easy to lock down communication so the only traffic that flows
- is the traffic you want to flow. Plus with built in support for Wireguard encryption, securing your pod-to-pod
- traffic across the network has never been easier.
-
-
- {{ prodname }}’s policy engine can enforce the same policy model at the host networking layer and (if using Istio
- & Envoy) at the service mesh layer, protecting your infrastructure from compromised workloads and protecting your
- workloads from compromised infrastructure.
-
-
-
-
-
-
-
-
-
- Performance
-
-
- Depending on your preference, {{ prodname }} uses either Linux eBPF or the Linux kernel's highly optimized
- standard networking pipeline to deliver high performance networking. {{ prodname }}'s networking options are
- flexible enough to run without using overlays in most environments, avoiding the overheads of packet encap/decap.{' '}
- {{ prodname }}’s control plane and policy engine have been fine-tuned over many years of production use to minimize
- overall CPU usage and occupancy.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Scalability
-
-
- {{ prodname }}’s core design principles leverage best practice cloud-native design patterns combined with proven
- standards based network protocols trusted worldwide by the largest internet carriers. The result is a solution
- with exceptional scalability that has been running at scale in production for years. {{ prodname }}’s development
- test cycle includes regularly testing multi-thousand node clusters. Whether you are running a 10 node cluster, 100
- node cluster, or more, you reap the benefits of the improved performance and scalability characteristics demanded
- by the largest Kubernetes clusters.
-
-
-
-
-
-
-
-
-
- Interoperability
-
-
- {{ prodname }} enables Kubernetes workloads and non-Kubernetes or legacy workloads to communicate seamlessly and
- securely. Kubernetes pods are first class citizens on your network and able to communicate with any other workload
- on your network. In addition {{ prodname }} can seamlessly extend to secure your existing host based workloads
- (whether in public cloud or on-prem on VMs or bare metal servers) alongside Kubernetes. All workloads are subject
- to the same network policy model so the only traffic that is allowed to flow is the traffic you expect to flow.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Real world production hardened
-
-
- {{ prodname }} is trusted and running in production at large enterprises including SaaS providers, financial
- services companies, and manufacturers. The largest public cloud providers have selected {{ prodname }} to provide
- network security for their hosted Kubernetes services (Amazon EKS, Azure AKS, Google GKE, and IBM IKS) running
- across tens of thousands of clusters.
-
-
-
-
-
-
-
-
-
- Full Kubernetes network policy support
-
-
- {{ prodname }}’s network policy engine formed the original reference implementation of Kubernetes network policy
- during the development of the API. {{ prodname }} is distinguished in that it implements the full set of features
- defined by the API giving users all the capabilities and flexibility envisaged when the API was defined. And for
- users that require even more power, {{ prodname }} supports an extended set of network policy capabilities that
- work seamlessly alongside the Kubernetes API giving users even more flexibility in how they define their network
- policies.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Contributor community
-
-
- The Calico open source project is what it is today thanks to 200+ contributors across a broad range of companies.
- In addition {{ prodname }} is backed by Tigera, founded by the original Calico engineering team, and committed to
- maintaining {{ prodname }} as the leading standard for Kubernetes network security.
-
-
-
-
-
-
-
-
-
- Calico Cloud compatible
-
-
- Calico Cloud builds on top of open source Calico to provide Kubernetes security and observability features and
- capabilities:
-
- Security policy preview, staging, and recommendation
-
- Compliance reporting and alerts
-
- Intrusion detection & prevention (IDS / IPS) for Kubernetes
-
- SIEM Integrations
-
- Application Layer (L7) observability
-
- Dynamic packet capture
-
- DNS dashboards
-
-
-
-
-
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/bare-metal/about.mdx b/calico_versioned_docs/version-3.25/getting-started/bare-metal/about.mdx
deleted file mode 100644
index 08b3c38ce7..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/bare-metal/about.mdx
+++ /dev/null
@@ -1,58 +0,0 @@
----
-description: Install Calico on hosts not in a cluster with network policy, or networking and network policy.
----
-
-# About non-cluster hosts
-
-## Big picture
-
-Secure non-cluster hosts by installing {{prodname}} for networking and/or network policy.
-
-## Value
-
-Not all hosts in your environment run pods/workloads. You may have physical machines or legacy applications that you cannot move into a Kubernetes cluster, but still need to securely communicate with pods in your cluster. {{prodname}} lets you enforce policy on these **non-cluster hosts** using the same robust {{prodname}} network policy that you use for pods.
-
-## Concepts
-
-### Non-cluster hosts and host endpoints
-
-A **non-cluster host** is a computer that is running an application that is _not part of a Kubernetes cluster_. Using {{prodname}} network policy, you can secure these host interfaces using **host endpoints**. Host endpoints can have labels, and work the same as labels on pods/workload endpoints.
-
-The advantage is that you can write network policy rules to apply to both workload endpoints and host endpoints using label selectors, where each selector can refer to either type (or a mix of the two). For example, you can write a cluster-wide policy for non-cluster hosts that is immediately applied to every host. To learn how to restrict traffic to/from hosts using {{prodname}} network policy, see [Protect hosts](../../network-policy/hosts/protect-hosts.mdx).
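-
-For example, a minimal sketch of a cluster-wide policy that allows SSH to every endpoint labelled `role: bare-metal` (the name, label, and CIDR are illustrative):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-ssh-from-admin-net   # illustrative name
-spec:
-  selector: role == "bare-metal"   # matches labelled host endpoints (and any workload endpoints carrying the label)
-  order: 50
-  types:
-    - Ingress
-  ingress:
-    - action: Allow
-      protocol: TCP
-      source:
-        nets:
-          - 10.0.0.0/24            # example admin network
-      destination:
-        ports:
-          - 22
-```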
-
-If you are using the etcd3 database, you can also install {{prodname}} with networking as described below.
-
-### Install options for non-cluster hosts
-
-| Install {{prodname}} with... | Requires | Use case | Supported install methods |
-| ----------------------------- | -------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------- |
-| Policy only | An etcd3 or Kubernetes datastore | Use {{prodname}} network policy to control firewalls on non-cluster hosts. | Binary install with/without a package manager |
-| Networking and network policy | An etcd3 datastore | **Networking** Use {{prodname}} networking (BGP, or overlay with VXLAN or IP-in-IP) to handle these communications: pod ↔ pod, and pod ↔ host. **Note**: {{prodname}} does not handle host ↔ host networking; your underlying network must already be set up to handle this. **Policy** Use {{prodname}} network policy to control firewalls on your non-cluster hosts. | Docker container |
-
-## Before you begin
-
-**Supported**
-
-- All platforms in this release, except Windows
-
-**Required**
-
-- Non-cluster host meets [system requirements](requirements.mdx) for {{prodname}}. If you want to use a package manager for installation, the non-cluster host must be a system derived from Ubuntu or RedHat.
-- Set up a datastore; if {{prodname}} is installed on a cluster, you already have a datastore
-- Install `kubectl` or [`calicoctl`](../../operations/calicoctl/index.mdx). (`kubectl` works only with the Kubernetes datastore.)
-
-## Next steps
-
-Select an install method.
-
-:::note
-
-{{prodname}} must be installed on each non-cluster host that you want to control with networking and/or policy.
-
-:::
-
-| Install method | Networking | Policy |
-| ------------------------------------------------------------------ | ---------- | ------ |
-| [Docker container](installation/container.mdx) | ✓ | ✓ |
-| [Binary install with package manager](installation/binary-mgr.mdx) | | ✓ |
-| [Binary install without package manager](installation/binary.mdx) | | ✓ |
diff --git a/calico_versioned_docs/version-3.25/getting-started/bare-metal/index.mdx b/calico_versioned_docs/version-3.25/getting-started/bare-metal/index.mdx
deleted file mode 100644
index c854afe88d..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/bare-metal/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Install Calico on hosts to secure host communications.
-hide_table_of_contents: true
----
-
-# Non-cluster hosts
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/binary-mgr.mdx b/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/binary-mgr.mdx
deleted file mode 100644
index 587b38de48..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/binary-mgr.mdx
+++ /dev/null
@@ -1,74 +0,0 @@
----
-description: Install Calico on non-cluster host using a package manager.
----
-
-# Binary install with package manager
-
-import FelixInitDatastore from '@site/calico_versioned_docs/version-3.25/_includes/content/_felix-init-datastore.mdx';
-
-## Big picture
-
-Install {{prodname}} on non-cluster hosts using a package manager.
-
-## Value
-
-Packaged binaries of {{prodname}} are easy to consume and upgrade. This method automatically configures the init system to keep Felix running.
-
-## Before you begin...
-
-1. Ensure the {{prodname}} datastore is up and accessible from the host
-1. Ensure the host meets the minimum [system requirements](../requirements.mdx)
-1. If your system is not an Ubuntu- or RedHat-derived system, you will need to choose a different install method.
-1. If you want to install {{prodname}} with networking (so that you can communicate with cluster workloads), you should choose the [container install method](container.mdx)
-1. Install `kubectl` (for Kubernetes datastore) or [Install and configure `calicoctl`](../../../operations/calicoctl/index.mdx) for etcd3 datastore.
-
-## How to
-
-This guide covers installing Felix, the {{prodname}} daemon that handles network policy.
-
-### Step 1: Install binaries
-
-```bash
-sudo add-apt-repository ppa:project-calico/{{ ppa_repo_name }}
-sudo apt-get update
-sudo apt-get upgrade
-sudo apt-get install calico-felix
-```
-
-_RPM requires_: RedHat 7-derived distribution
-
-```bash
-cat > /etc/yum.repos.d/calico.repo <<EOF
-[calico]
-name=Calico Repository
-baseurl=https://binaries.projectcalico.org/rpm/{{ ppa_repo_name }}/
-enabled=1
-gpgcheck=1
-gpgkey=https://binaries.projectcalico.org/rpm/{{ ppa_repo_name }}/key
-EOF
-
-yum install calico-felix
-```
-
-Modify the included init system unit to include the `EnvironmentFile`. For example, on systemd, add the following line to the `[Service]` section of the `calico-felix` unit.
-
-```bash
-EnvironmentFile=/etc/calico/calico.env
-```
-
-### Step 3: Initialize the datastore
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/binary.mdx b/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/binary.mdx
deleted file mode 100644
index 4f962884b5..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/binary.mdx
+++ /dev/null
@@ -1,117 +0,0 @@
----
-description: Install Calico binary on non-cluster hosts without a package manager.
----
-
-# Binary install without package manager
-
-import FelixInitDatastore from '@site/calico_versioned_docs/version-3.25/_includes/content/_felix-init-datastore.mdx';
-
-## Big picture
-
-Install {{prodname}} binary on non-cluster hosts without a package manager.
-
-## Value
-
-Install {{prodname}} directly when a package manager isn't available, or your provisioning system can easily handle copying binaries to hosts.
-
-## Before you begin...
-
-1. Ensure the {{prodname}} datastore is up and accessible from the host
-1. Ensure the host meets the minimum [system requirements](../requirements.mdx)
-1. If you want to install {{prodname}} with networking (so that you can communicate with cluster workloads), you should choose the [container install method](container.mdx)
-1. Install `kubectl` (for Kubernetes datastore) or [Install and configure `calicoctl`](../../../operations/calicoctl/index.mdx) for etcd3 datastore.
-
-## How to
-
-This guide covers installing Felix, the {{prodname}} daemon that handles network policy.
-
-### Step 1: Download and extract the binary
-
-This step requires Docker, but it can be run from any machine with Docker installed. It doesn't have to be the host you will run it on (for example, your laptop is fine).
-
-1. Use the following command to download the {{nodecontainer}} image.
-
- ```bash
- docker pull {{nodecontainer}}:{{releases.0.components.calico/node.version}}
- ```
-
-1. Confirm that the image has loaded by typing `docker images`.
-
- ```bash
- REPOSITORY TAG IMAGE ID CREATED SIZE
- {{nodecontainer}} {{releases.0.components.calico/node.version}} e07d59b0eb8a 2 minutes ago 42MB
- ```
-
-1. Create a temporary {{nodecontainer}} container.
-
- ```bash
- docker create --name container {{nodecontainer}}:{{releases.0.components.calico/node.version}}
- ```
-
-1. Copy the calico-node binary from the container to the local file system.
-
- ```bash
- docker cp container:/bin/calico-node calico-node
- ```
-
-1. Delete the temporary container.
-
- ```bash
- docker rm container
- ```
-
-1. Set the extracted binary file to be executable and owned by root.
-
-   ```bash
-   chmod +x calico-node
-   chown root:root calico-node
-   ```
-
-### Step 2: Copy the `calico-node` binary
-
-Copy the binary from Step 1 to the target machine, using any means (`scp`, `ftp`, USB stick, etc.).
-
-### Step 3: Create environment file
-
-
-
-### Step 4: Create a start-up script
-
-Felix should be started at boot by your init system and the init system
-**must** be configured to restart Felix if it stops. Felix relies on
-that behavior for certain configuration changes.
-
-If your distribution uses systemd, then you could use the following unit
-file:
-
-```bash
-[Unit]
-Description=Calico Felix agent
-After=syslog.target network.target
-
-[Service]
-User=root
-EnvironmentFile=/etc/calico/calico.env
-ExecStartPre=/usr/bin/mkdir -p /var/run/calico
-ExecStart=/usr/local/bin/calico-node -felix
-KillMode=process
-Restart=on-failure
-LimitNOFILE=32000
-
-[Install]
-WantedBy=multi-user.target
-```
-
-Once you've configured Felix, start it up via your init system.
-
-```bash
-service calico-felix start
-```
-
-### Step 5: Initialize the datastore
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/container.mdx b/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/container.mdx
deleted file mode 100644
index 8c29b1c3d5..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/container.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
----
-description: Install Calico on non-cluster hosts using a Docker container.
----
-
-# Docker container install
-
-import DockerContainerService from '@site/calico_versioned_docs/version-3.25/_includes/content/_docker-container-service.mdx';
-
-## Big picture
-
-Install {{prodname}} on non-cluster hosts using a Docker container for both networking and policy.
-
-## Value
-
-Installing {{prodname}} with a Docker container includes everything you need for both networking and policy. It also automatically adds the appropriate per-node configuration to the datastore.
-
-## Before you begin...
-
-1. Ensure Docker is installed
-1. Ensure the {{prodname}} datastore is up and accessible from the host
-1. Ensure the host meets the minimum [system requirements](../requirements.mdx)
-
-## How to
-
-The `{{nodecontainer}}` container should be started at boot time by your init system and the init system must be configured to restart it if stopped. {{prodname}} relies on that behavior for certain configuration changes.
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/index.mdx b/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/index.mdx
deleted file mode 100644
index ae7bd06532..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Install Calico on hosts to secure host communications.
-hide_table_of_contents: true
----
-
-# Install on non-cluster hosts
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/bare-metal/requirements.mdx b/calico_versioned_docs/version-3.25/getting-started/bare-metal/requirements.mdx
deleted file mode 100644
index 965123df80..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/bare-metal/requirements.mdx
+++ /dev/null
@@ -1,9 +0,0 @@
----
-description: Review node requirements for installing Calico.
----
-
-# System requirements
-
-
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/index.mdx b/calico_versioned_docs/version-3.25/getting-started/index.mdx
deleted file mode 100644
index 1100c10605..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Install Calico on nodes and hosts for popular orchestrators, and install the calicoctl command line interface (CLI) tool.
-hide_table_of_contents: true
----
-
-# Install Calico
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/index.mdx
deleted file mode 100644
index 16fa32322f..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Use Calico network policy on top of flannel networking.
-hide_table_of_contents: true
----
-
-# Flannel
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/install-for-flannel.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/install-for-flannel.mdx
deleted file mode 100644
index 25911d385f..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/install-for-flannel.mdx
+++ /dev/null
@@ -1,105 +0,0 @@
----
-description: If you use flannel for networking, you can install Calico network policy to secure cluster communications.
----
-
-# Install Calico for policy and flannel (aka Canal) for networking
-
-## Before you begin
-
-:::note
-
-Calico includes native VXLAN capabilities without the need for flannel. If you're planning on using flannel for VXLAN, we recommend instead installing Calico using IP-in-IP or VXLAN mode. See how to [determine the best networking option](../../../networking/determine-best-networking.mdx) for your cluster.
-If you're already using flannel for networking, you can [migrate your existing clusters to Calico networking](migration-from-flannel.mdx).
-
-:::
-
-Ensure that you have a Kubernetes cluster that meets the
-{{prodname}} [system requirements](../requirements.mdx). If you don't,
-follow the steps in [Installing kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/).
-
-## Installing {{prodname}} for policy and flannel (aka Canal) for networking
-
-### Selecting a datastore type
-
-The procedure differs according to your datastore type. Refer to the section that matches your type.
-
-- [Kubernetes API datastore](#installing-with-the-kubernetes-api-datastore-recommended) (recommended)
-
-- [etcd datastore](#installing-with-the-etcd-datastore)
-
-### Installing with the Kubernetes API datastore (recommended)
-
-1. Ensure that the Kubernetes controller manager has the following flags
- set:
-   `--cluster-cidr=<your-pod-cidr>` and `--allocate-node-cidrs=true`.
-
- :::tip
-
-   On kubeadm, you can pass `--pod-network-cidr=<your-pod-cidr>`
-   to kubeadm to set both Kubernetes controller flags.
-
- :::
-
-1. Download the flannel networking manifest for the Kubernetes API datastore.
-
- ```bash
- curl {{manifestsUrl}}/manifests/canal.yaml -O
- ```
-
-1. If your cluster is configured to use pod CIDR `10.244.0.0/16`, skip to the next step.
- If your cluster is configured to use a different pod CIDR, replace `10.244.0.0/16` in the downloaded manifest with the correct pod CIDR.
-
-1. Issue the following command to install {{prodname}}.
-
- ```bash
- kubectl apply -f canal.yaml
- ```
-
-1. If you wish to enforce application layer policies and secure workload-to-workload
- communications with mutual TLS authentication, continue to [Enable application layer policy](../../../network-policy/istio/app-layer-policy.mdx) (optional).
-
-The geeky details of what you get:
-
-
-
-### Installing with the etcd datastore
-
-We strongly recommend using the Kubernetes API datastore, but if you prefer to use
-etcd, complete the following steps.
-
-1. Download the {{prodname}} networking manifest.
-
- ```bash
- curl {{manifestsUrl}}/manifests/canal-etcd.yaml -O
- ```
-
-1. If your cluster is configured to use pod CIDR `10.244.0.0/16`, skip to the next step.
- If your cluster is configured to use a different pod CIDR, replace `10.244.0.0/16` in the downloaded manifest with the correct pod CIDR.
-
-1. In the `ConfigMap` named `calico-config`, set the value of
-   `etcd_endpoints` to the IP address and port of your etcd server (a sketch of the relevant fragment is shown after this list).
-
- :::tip
-
- You can specify more than one using commas as delimiters.
-
- :::
-
-1. Apply the manifest using the following command.
-
- ```bash
- kubectl apply -f canal-etcd.yaml
- ```
-
-1. If you wish to enforce application layer policies and secure workload-to-workload
- communications with mutual TLS authentication, continue to [Enable application layer policy](../../../network-policy/istio/app-layer-policy.mdx) (optional).
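-
-As referenced in the etcd steps above, the `calico-config` ConfigMap fragment being edited looks roughly like this (the endpoint addresses are illustrative):
-
-```yaml
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  name: calico-config
-  namespace: kube-system
-data:
-  # Comma-delimited list of etcd endpoints
-  etcd_endpoints: 'http://10.0.0.5:2379,http://10.0.0.6:2379'
-```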
-
-The geeky details of what you get:
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/migration-from-flannel.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/migration-from-flannel.mdx
deleted file mode 100644
index 77a35b4438..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/migration-from-flannel.mdx
+++ /dev/null
@@ -1,193 +0,0 @@
----
-description: Preserve your existing VXLAN networking in Calico, but take full advantage of Calico IP address management (IPAM) and advanced network policy features.
----
-
-# Migrate a Kubernetes cluster from flannel/Canal to Calico
-
-## Big picture
-
-Migrate an existing Kubernetes cluster with flannel/Canal to {{prodname}} networking.
-
-## Value
-
-If you are already using flannel for networking, it is easy to migrate to {{prodname}}'s native VXLAN networking. {{prodname}} VXLAN is fully equivalent to flannel VXLAN, but you get the benefits of the broader range of features offered by {{prodname}} with an active maintainer community.
-
-## Concepts
-
-### Limitations of host-local IPAM in flannel
-
-Flannel networking uses the host-local IPAM (IP address management) CNI plugin, which provides simple IP address management for your cluster. Although simple, it has limitations:
-
-- When you create a node, it is pre-allocated a CIDR. If the number of pods per node exceeds the number of IP addresses available per node, you must recreate the cluster. Conversely, if the number of pods is much smaller than the number of addresses available per node, IP address space is not used efficiently; as you scale out and IP addresses are depleted, these inefficiencies become a pain point.
-
-- Because each node has a pre-allocated CIDR, pods must always be assigned an IP address based on the node they are running on. Being able to allocate IP addresses based on other attributes (for example, the pod’s namespace) provides flexibility to meet use cases as they arise.
-
-Migrating to {{prodname}} IPAM solves these use cases and more. For advantages of Calico IPAM, see [Blog: Live Migration from Flannel to Calico](https://www.projectcalico.org/live-migration-from-flannel-to-calico/).
-
-### Methods for migrating to {{prodname}} networking
-
-There are two ways to switch your cluster to use {{prodname}} networking. Both methods give you a fully-functional {{prodname}} cluster using VXLAN networking between pods.
-
-- **Create a new cluster using {{prodname}} and migrate existing workloads**
-
- If you have the ability to migrate workloads from one cluster to the next without caring about downtime, this is the easiest method: [create a new cluster using {{prodname}}](../quickstart.mdx).
-
-- **Live migration on an existing cluster**
-
- If your workloads are already in production, or downtime is not an option, use the live migration tool that performs a rolling update of each node in the cluster.
-
-## Before you begin...
-
-**Required**
-
-- A cluster with flannel for networking using the VXLAN backend.
-- Flannel version v0.9.1 or higher (Canal version v3.7.0 or greater).
-- Flannel must have been installed using a **Kubernetes daemon set** and configured:
- - To use the Kubernetes API for storing its configuration (as opposed to etcd)
- - With `DirectRouting` disabled (default)
-- Cluster must allow for:
- - Adding/deleting/modifying node labels
- - Modifying and deleting of the flannel daemon set. For example, it must not be installed using the Kubernetes Addon-manager.
-
-## How to
-
-- [Migrate from flannel networking to Calico networking, live migration](#migrate-from-flannel-networking-to-calico-networking-live-migration)
-- [Modify flannel configuration](#modify-flannel-configuration)
-- [View migration status](#view-migration-status)
-- [View migration logs](#view-migration-logs)
-- [Revert migration](#revert-migration)
-
-### Migrate from flannel networking to Calico networking, live migration
-
-1. Install {{prodname}}.
-
- ```
- kubectl apply -f {{manifestsUrl}}/manifests/flannel-migration/calico.yaml
- ```
-
-1. Start the migration controller.
-
- ```
- kubectl apply -f {{manifestsUrl}}/manifests/flannel-migration/migration-job.yaml
- ```
-
- You will see nodes begin to update one at a time.
-
-1. Monitor the migration.
-
- ```
- kubectl get jobs -n kube-system flannel-migration
- ```
-
-   The migration controller may be rescheduled several times while its host node is upgraded. The installation is complete when the output of the above command shows 1/1 completions. For example:
-
- ```
- NAME COMPLETIONS DURATION AGE
- flannel-migration 1/1 2m59s 5m9s
- ```
-
-1. Delete the migration controller.
-
- ```
- kubectl delete -f {{manifestsUrl}}/manifests/flannel-migration/migration-job.yaml
- ```
-
-### Modify flannel configuration
-
-The migration controller autodetects your flannel configuration, and in most cases, does not require
-additional configuration. If you require special configuration, the migration tool provides the following options,
-which can be set as environment variables within the pod.
-
-| Configuration options | Description | Default |
-| ------------------------- | ---------------------------------------------------------------- | ------------------------ |
-| FLANNEL_NETWORK | IPv4 network CIDR used by flannel for the cluster. | Automatically detected |
-| FLANNEL_IPV6_NETWORK | IPv6 network CIDR used by flannel for the cluster. | Automatically detected |
-| FLANNEL_DAEMONSET_NAME | Name of the flannel daemon set in the kube-system namespace. | kube-flannel-ds |
-| FLANNEL_MTU | MTU for the flannel VXLAN device. | Automatically detected |
-| FLANNEL_IP_MASQ | Whether masquerading is enabled for outbound traffic. | Automatically detected |
-| FLANNEL_SUBNET_LEN | Per-node IPv4 subnet length used by flannel. | 24 |
-| FLANNEL_IPV6_SUBNET_LEN | Per-node IPv6 subnet length used by flannel. | 64 |
-| FLANNEL_ANNOTATION_PREFIX | Value provided via the kube-annotation-prefix option to flannel. | flannel.alpha.coreos.com |
-| FLANNEL_VNI | The VNI used for the flannel network. | 1 |
-| FLANNEL_PORT | UDP port used for VXLAN. | 8472 |
-| CALICO_DAEMONSET_NAME | Name of the calico daemon set in the kube-system namespace. | calico-node |
-| CNI_CONFIG_DIR | Full path on the host in which to search for CNI config files. | /etc/cni/net.d |
-
-### View migration status
-
-View the controller's current status.
-
-```
-kubectl get pods -n kube-system -l k8s-app=flannel-migration-controller
-```
-
-### View migration logs
-
-View migration logs to see if any actions are required.
-
-```
-kubectl logs -n kube-system -l k8s-app=flannel-migration-controller
-```
-
-### Revert migration
-
-If you need to revert a cluster from {{prodname}} back to flannel, follow these steps.
-
-1. Remove the migration controller and {{prodname}}.
-
- ```
- kubectl delete -f {{manifestsUrl}}/manifests/flannel-migration/migration-job.yaml
- kubectl delete -f {{manifestsUrl}}/manifests/flannel-migration/calico.yaml
- ```
-
-1. Determine the nodes that were migrated to {{prodname}}.
-
- ```
- kubectl get nodes -l projectcalico.org/node-network-during-migration=calico
- ```
-
-Then, for each node found above, run the following commands to delete Calico.
-
-1. Cordon and drain the node.
-
- ```
-   kubectl drain <node name> --ignore-daemonsets
- ```
-
-1. Log in to the node and remove the CNI configuration.
-
- ```
- rm /etc/cni/net.d/10-calico.conflist
- ```
-
-1. Reboot the node.
-
-1. Enable flannel on the node.
-
- ```
-   kubectl label node <node name> projectcalico.org/node-network-during-migration=flannel --overwrite
- ```
-
-1. Uncordon the node.
-
- ```
-   kubectl uncordon <node name>
- ```
-
-After the above steps have been completed on each node, perform the following steps.
-
-1. Remove the `nodeSelector` from the flannel daemonset.
-
- ```
- kubectl patch ds/kube-flannel-ds -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": null}}}}'
- ```
-
-1. Remove the migration label from all nodes.
-
- ```
- kubectl label node --all projectcalico.org/node-network-during-migration-
- ```
-
-## Next steps
-
-Learn about [{{prodname}} IP address management](../../../networking/ipam/index.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/configure-bgp-peering.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/configure-bgp-peering.mdx
deleted file mode 100644
index f923d1c55e..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/configure-bgp-peering.mdx
+++ /dev/null
@@ -1,160 +0,0 @@
----
-description: Quick review of BGP peering options.
----
-
-# Configure BGP peering
-
-We have configured {{prodname}} to distribute routing information over the
-Border Gateway Protocol (BGP). This scalable protocol powers routing on the global
-public Internet.
-
-In many on-premise data centers, each server connects to a top-of-rack (ToR) router
-operating at the IP layer (layer 3). In that situation, we would need to peer each node
-with its corresponding ToR router, so that the ToR learns routes to the containers. That
-configuration is beyond the scope of this guide.
-
-Since we are running in an AWS VPC within a single subnet, the hosts have Ethernet (layer 2)
-connectivity with one another, meaning there are no routers between them. Thus, they can peer
-directly with each other.
-
-On one of the nodes in your cluster where you have `calicoctl` installed, check the status.
-
-```bash
-sudo calicoctl node status
-```
-
-Result
-
-```
-Calico process is running.
-
-IPv4 BGP status
-+---------------+-------------------+-------+----------+-------------+
-| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
-+---------------+-------------------+-------+----------+-------------+
-| 172.31.40.217 | node-to-node mesh | up | 17:38:47 | Established |
-| 172.31.40.30 | node-to-node mesh | up | 17:40:09 | Established |
-| 172.31.45.29 | node-to-node mesh | up | 17:40:20 | Established |
-| 172.31.37.123 | node-to-node mesh | up | 17:40:29 | Established |
-+---------------+-------------------+-------+----------+-------------+
-
-IPv6 BGP status
-No IPv6 peers found.
-```
-
-Alternatively, you can create a [`CalicoNodeStatus` resource](../../../reference/resources/caliconodestatus.mdx) to get BGP session status for the node.
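-
-For example, a minimal sketch of such a resource (the node name matches one of the example nodes in this guide; adjust it for your cluster):
-
-```bash
-calicoctl apply -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: CalicoNodeStatus
-metadata:
-  name: node-status-ip-172-31-37-123
-spec:
-  classes:
-    - Agent
-    - BGP
-    - Routes
-  node: ip-172-31-37-123
-  updatePeriodSeconds: 10
-EOF
-calicoctl get caliconodestatus node-status-ip-172-31-37-123 -o yaml
-```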
-
-Notice there are four BGP sessions, one to each other node in the cluster. In a small cluster, this
-works well and is highly resilient. However, the total number of BGP sessions scales as the square
-of the number of nodes, and in a large cluster this creates a lot of overhead.
-
-In this lab we will configure a fixed number of _route reflectors_. Route reflectors announce their
-own routes and the routes they receive from other peers. This means nodes only need to peer with the
-route reflectors to get all the routes in the cluster. This peering arrangement means that the number
-of BGP sessions scales linearly with the number of nodes.
-
-## Choose and label nodes
-
-We will establish three route reflectors, which means we avoid a single point of failure even if we take down
-a route reflector node for maintenance. In a five-node cluster this saves only one BGP session, since the two
-non-reflector nodes no longer need to peer with one another, but in a large cluster the savings are substantial.
-
-Choose three nodes and perform the following for each of them.
-
-Save the node YAML.
-
-```bash
-calicoctl get node <node name> -o yaml --export > node.yaml
-```
-
-Edit the YAML to add
-
-```yaml
-metadata:
- labels:
- calico-route-reflector: ''
-spec:
- bgp:
- routeReflectorClusterID: 224.0.0.1
-```
-
-Reapply the YAML
-
-```bash
-calicoctl apply -f node.yaml
-```
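-
-Alternatively, a rough sketch of making the same change with `calicoctl patch` and `kubectl label` (the node name is an example):
-
-```bash
-calicoctl patch node ip-172-31-37-123 -p '{"spec": {"bgp": {"routeReflectorClusterID": "224.0.0.1"}}}'
-kubectl label node ip-172-31-37-123 calico-route-reflector=
-```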
-
-## Configure peering
-
-Configure all non-reflector nodes to peer with all route reflectors
-
-```bash
-calicoctl apply -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: peer-to-rrs
-spec:
-  nodeSelector: "!has(calico-route-reflector)"
-  peerSelector: has(calico-route-reflector)
-EOF
-```
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/install-cni-plugin.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/install-cni-plugin.mdx
deleted file mode 100644
index 1ce709ee69..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/install-cni-plugin.mdx
+++ /dev/null
@@ -1,197 +0,0 @@
----
-description: Steps to install the Calico Container Network Interface (CNI)
----
-
-# Install CNI plugin
-
-Kubernetes uses the Container Network Interface (CNI) to interact with networking providers like {{prodname}}.
-The {{prodname}} binary that presents this API to Kubernetes is called the **CNI plugin** and must be installed
-on every node in the Kubernetes cluster.
-
-To understand how the Container Network Interface (CNI) works with Kubernetes, and how it enhances Kubernetes networking, read our [Kubernetes CNI guide](https://www.tigera.io/learn/guides/kubernetes-networking/kubernetes-cni/).
-
-## Provision Kubernetes user account for the plugin
-
-The CNI plugin interacts with the Kubernetes API server while creating pods, both to obtain additional information
-and to update the datastore with information about the pod.
-
-On the Kubernetes control plane node, create a key for the CNI plugin to authenticate with and certificate signing request.
-
-```bash
-openssl req -newkey rsa:4096 \
- -keyout cni.key \
- -nodes \
- -out cni.csr \
- -subj "/CN=calico-cni"
-```
-
-We will sign this certificate using the main Kubernetes CA.
-
-```bash
-sudo openssl x509 -req -in cni.csr \
- -CA /etc/kubernetes/pki/ca.crt \
- -CAkey /etc/kubernetes/pki/ca.key \
- -CAcreateserial \
- -out cni.crt \
- -days 365
-sudo chown $(id -u):$(id -g) cni.crt
-```
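-
-Optionally, verify that the signed certificate has the expected subject and validity period:
-
-```bash
-openssl x509 -in cni.crt -noout -subject -dates
-```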
-
-Next, we create a kubeconfig file for the CNI plugin to use to access Kubernetes. Copy this `cni.kubeconfig` file **to every node** in the cluster.
-
-```bash
-APISERVER=$(kubectl config view -o jsonpath='{.clusters[0].cluster.server}')
-kubectl config set-cluster kubernetes \
- --certificate-authority=/etc/kubernetes/pki/ca.crt \
- --embed-certs=true \
- --server=$APISERVER \
- --kubeconfig=cni.kubeconfig
-
-kubectl config set-credentials calico-cni \
- --client-certificate=cni.crt \
- --client-key=cni.key \
- --embed-certs=true \
- --kubeconfig=cni.kubeconfig
-
-kubectl config set-context default \
- --cluster=kubernetes \
- --user=calico-cni \
- --kubeconfig=cni.kubeconfig
-
-kubectl config use-context default --kubeconfig=cni.kubeconfig
-```
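-
-For example, a rough sketch of copying the file to each node (the SSH user, host names, and destination path are placeholders for your environment):
-
-```bash
-for node in node1 node2 node3; do
-  scp cni.kubeconfig ubuntu@${node}:/tmp/cni.kubeconfig
-done
-```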
-
-## Provision RBAC
-
-Define a cluster role the CNI plugin will use to access Kubernetes.
-
-```bash
-kubectl apply -f - <<EOF
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: calico-cni
-rules:
-  # The CNI plugin needs to get pods, nodes, and namespaces.
-  - apiGroups: [""]
-    resources:
-      - pods
-      - nodes
-      - namespaces
-    verbs:
-      - get
-  # The CNI plugin patches pods/status.
-  - apiGroups: [""]
-    resources:
-      - pods/status
-    verbs:
-      - patch
-  # These permissions are required for Calico CNI to perform IPAM allocations.
-  - apiGroups: ["crd.projectcalico.org"]
-    resources:
-      - blockaffinities
-      - ipamblocks
-      - ipamhandles
-    verbs:
-      - get
-      - list
-      - create
-      - update
-      - delete
-  - apiGroups: ["crd.projectcalico.org"]
-    resources:
-      - ipamconfigs
-      - clusterinformations
-      - ippools
-    verbs:
-      - get
-      - list
-EOF
-```
-1. Copy admin credentials
-1. Test Access
-
- 1. Run
-
- `kubectl get nodes`
-
- Verify all nodes have joined. At this point nodes have joined but they are in `NotReady` state, because Kubernetes can't find a networking provider and configuration.
-
-## Next
-
-[The Calico datastore](the-calico-datastore.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/test-network-policy.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/test-network-policy.mdx
deleted file mode 100644
index 3d260dadd4..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/test-network-policy.mdx
+++ /dev/null
@@ -1,13 +0,0 @@
----
-description: Verify that network policy works correctly.
----
-
-# Test network policy
-
-In this lab we will test network policy.
-
-Follow the instructions in the [Simple policy tutorial](../../../network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx) to verify policy works correctly.
-
-## Next
-
-[End user RBAC](end-user-rbac.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/test-networking.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/test-networking.mdx
deleted file mode 100644
index 0739244287..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/test-networking.mdx
+++ /dev/null
@@ -1,172 +0,0 @@
----
-description: Test that networking works correctly.
----
-
-# Test networking
-
-In this lab we will test the {{prodname}} cluster to demonstrate networking is working correctly.
-
-## Pod to pod pings
-
-Create three busybox instances
-
-```bash
-kubectl create deployment pingtest --image=busybox --replicas=3 -- sleep infinity
-```
-
-Check their IP addresses
-
-```bash
-kubectl get pods --selector=app=pingtest --output=wide
-```
-
-Result
-
-```
-NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
-pingtest-b4b6f8cf-b5z78   1/1     Running   0          3m28s   192.168.38.128   ip-172-31-37-123   <none>           <none>
-pingtest-b4b6f8cf-jmzq6   1/1     Running   0          3m28s   192.168.45.193   ip-172-31-40-217   <none>           <none>
-pingtest-b4b6f8cf-rn9nm   1/1     Running   0          3m28s   192.168.60.64    ip-172-31-45-29    <none>           <none>
-```
-
-Note the IP addresses of the other two pods, then exec into the first one. For example
-
-```bash
-kubectl exec -ti pingtest-b4b6f8cf-b5z78 -- sh
-```
-
-From inside the pod, ping the other two pod IP addresses. For example
-
-```bash
-ping 192.168.45.193 -c 4
-```
-
-Result
-
-```
-PING 192.168.45.193 (192.168.45.193): 56 data bytes
-64 bytes from 192.168.45.193: seq=0 ttl=62 time=1.847 ms
-64 bytes from 192.168.45.193: seq=1 ttl=62 time=0.684 ms
-64 bytes from 192.168.45.193: seq=2 ttl=62 time=0.488 ms
-64 bytes from 192.168.45.193: seq=3 ttl=62 time=0.442 ms
-
---- 192.168.45.193 ping statistics ---
-4 packets transmitted, 4 packets received, 0% packet loss
-round-trip min/avg/max = 0.442/0.865/1.847 ms
-```
-
-## Check routes
-
-From one of the nodes, verify that routes exist to each of the `pingtest` pods' IP addresses. For example
-
-```bash
-ip route get 192.168.38.128
-```
-
-Result
-
-```
-192.168.38.128 via 172.31.37.123 dev eth0 src 172.31.42.47 uid 1000
- cache
-```
-
-The `via 172.31.37.123` in this example indicates the next-hop for this pod IP, which matches the IP address of the node the
-pod is scheduled on, as expected.
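-
-To see the full set of pod routes on the node (assuming the 192.168.0.0/16 pod network used in this guide), list them with:
-
-```bash
-ip route | grep 192.168
-```
-
-You should see routes for each remote node's pod address blocks via that node's IP address, plus local `cali*` interface routes for pods hosted on this node.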
-
-## IPAM allocations from different pools
-
-Recall that we created two IP pools, but left one disabled.
-
-```bash
-calicoctl get ippools -o wide
-```
-
-Result
-
-```
-NAME CIDR NAT IPIPMODE VXLANMODE DISABLED SELECTOR
-pool1 192.168.0.0/18 true Never Never false all()
-pool2 192.168.192.0/19 true Never Never true all()
-```
-
-Enable the second pool.
-
-```bash
-calicoctl apply -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: pool2
-spec:
-  cidr: 192.168.192.0/19
-  ipipMode: Never
-  natOutgoing: true
-  disabled: false
-  nodeSelector: all()
-EOF
-```
-
-From one of the original pingtest pods, ping the IP address of the pod allocated from the second pool.
-
-```bash
-ping 192.168.219.0 -c 4
-```
-
-Result
-
-```
-PING 192.168.219.0 (192.168.219.0): 56 data bytes
-64 bytes from 192.168.219.0: seq=0 ttl=62 time=0.524 ms
-64 bytes from 192.168.219.0: seq=1 ttl=62 time=0.459 ms
-64 bytes from 192.168.219.0: seq=2 ttl=62 time=0.505 ms
-64 bytes from 192.168.219.0: seq=3 ttl=62 time=0.492 ms
-
---- 192.168.219.0 ping statistics ---
-4 packets transmitted, 4 packets received, 0% packet loss
-round-trip min/avg/max = 0.459/0.495/0.524 ms
-```
-
-## Clean up
-
-```bash
-kubectl delete deployments.apps pingtest
-kubectl delete pod pingtest-pool2
-```
-
-## Next
-
-[Test network policy](test-network-policy.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/the-calico-datastore.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/the-calico-datastore.mdx
deleted file mode 100644
index 69e9ca32c2..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/the-calico-datastore.mdx
+++ /dev/null
@@ -1,109 +0,0 @@
----
-description: The central datastore for your clusters' operational and configuration state.
----
-
-# The Calico datastore
-
-{{prodname}} stores the data about the operational and configuration state of your cluster in a central datastore. If the datastore is unavailable
-your {{prodname}} network continues operating, but cannot be updated (no new pods can be networked, no policy changes can be applied, etc.).
-
-{{prodname}} has two datastore drivers you can choose from
-
-- **etcd** - for direct connection to an etcd cluster
-- **Kubernetes** - for connection to a Kubernetes API server
-
-## Using Kubernetes as the datastore
-
-This guide uses the Kubernetes API datastore driver. The advantages of this driver when using {{prodname}} on Kubernetes are
-
-- Doesn't require an extra datastore, so is simpler to manage
-- You can use Kubernetes RBAC to control access to {{prodname}} resources
-- You can use Kubernetes audit logging to generate audit logs of changes to {{prodname}} resources
-
-For completeness, the advantages of the etcd driver are
-
-- Allows you to run {{prodname}} on non-Kubernetes platforms (e.g. OpenStack)
-- Allows separation of concerns between Kubernetes and {{prodname}} resources, for example allowing you to scale the datastores independently
-- Allows you to run a {{prodname}} cluster that contains more than just a single Kubernetes cluster, for example, bare metal servers with {{prodname}}
- host protection interworking with a Kubernetes cluster; or multiple Kubernetes clusters.
-
-## Custom Resources
-
-When using the Kubernetes API datastore driver, most {{prodname}} resources are stored as [Kubernetes custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/).
-
-A few {{prodname}} resources are not stored as custom resources and instead are backed by corresponding native Kubernetes resources. For example, [workload endpoints](../../../reference/resources/workloadendpoint.mdx) are Kubernetes pods.
-
-To use Kubernetes as the {{prodname}} datastore, we need to define the custom resources {{prodname}} uses.
-
-Download and examine the list of {{prodname}} custom resource definitions, and open it in a file editor.
-
-```bash
-wget {{manifestsUrl}}/manifests/crds.yaml
-```
-
-Create the custom resource definitions in Kubernetes.
-
-```bash
-kubectl apply -f crds.yaml
-```
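-
-To confirm the definitions were created, you can list them; the exact set of CRDs depends on the {{prodname}} version:
-
-```bash
-kubectl get crds | grep projectcalico.org
-```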
-
-## calicoctl
-
-To interact directly with the {{prodname}} datastore, use the `calicoctl` client tool.
-
-### Install
-
-1. Download the `calicoctl` binary to a Linux host with access to Kubernetes.
-
- ```bash
- wget -O calicoctl https://github.com/projectcalico/calico/releases/latest/download/calicoctl-linux-amd64
- chmod +x calicoctl
- sudo mv calicoctl /usr/local/bin/
- ```
-
-1. Configure `calicoctl` to access Kubernetes.
-
- ```bash
- export KUBECONFIG=/path/to/your/kubeconfig
- export DATASTORE_TYPE=kubernetes
- ```
-
- On most systems, kubeconfig is located at `~/.kube/config`. You may wish to add the `export` lines to your `~/.bashrc` so they will persist when you log in next time.
-
-### Test
-
-Verify `calicoctl` can reach your datastore by running
-
-```bash
-calicoctl get nodes
-```
-
-You should see output similar to
-
-```bash
-NAME
-ip-172-31-37-123
-ip-172-31-40-217
-ip-172-31-40-30
-ip-172-31-42-47
-ip-172-31-45-29
-```
-
-Nodes are backed by the Kubernetes node object, so you should see names that match `kubectl get nodes`.
-
-Try to get an object backed by a custom resource
-
-```bash
-calicoctl get ippools
-```
-
-You should see an empty result
-
-```bash
-NAME CIDR SELECTOR
-
-```
-
-## Next
-
-[Configure IP pools](configure-ip-pools.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/helm.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/helm.mdx
deleted file mode 100644
index 4d36fb6adb..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/helm.mdx
+++ /dev/null
@@ -1,126 +0,0 @@
----
-description: Install Calico on a Kubernetes cluster using Helm 3.
----
-
-# Install using Helm
-
-## Big picture
-
-Install {{prodname}} on a Kubernetes cluster using Helm 3.
-
-## Value
-
-Helm charts are a way to package up an application for Kubernetes (similar to `apt` or `yum` for operating systems). Helm is also used by tools like ArgoCD to manage applications in a cluster, taking care of install, upgrade (and rollback if needed), etc.
-
-## Before you begin
-
-**Required**
-
-- Install Helm 3
-- Kubernetes cluster meets these requirements:
- - Kubernetes is installed _without_ a CNI plugin **OR** cluster is running a compatible CNI for {{prodname}} to run in policy-only mode
- - x86-64, arm64, ppc64le, or s390x processors
- - RedHat Enterprise Linux 7.x+, CentOS 7.x+, Ubuntu 18.04+, or Debian 9.x+
-- `kubeconfig` is configured to work with your cluster (check by running `kubectl get nodes`)
-- {{prodname}} can manage `cali` and `tunl` interfaces on the hosts.
- If NetworkManager is present on the hosts, refer to
- [Configure NetworkManager](../../operations/troubleshoot/troubleshooting.mdx#configure-networkmanager).
-
-## Concepts
-
-### Operator based installation
-
-In this guide, you install the Tigera {{prodname}} operator and custom resource definitions using the Helm 3 chart. The Tigera operator provides lifecycle management for {{prodname}} exposed via the Kubernetes API defined as a custom resource definition.
-
-## How to
-
-### Download the Helm chart
-
-1. Add the {{prodname}} helm repo:
-
-```bash
-helm repo add projectcalico https://docs.tigera.io/calico/charts
-```
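-
-Optionally, update the repo and inspect the chart's default values before customizing them:
-
-```bash
-helm repo update
-helm show values projectcalico/tigera-operator --version {{releaseTitle}}
-```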
-
-### Customize the Helm chart
-
-If you are installing on a cluster installed by EKS, GKE, AKS or Mirantis Kubernetes Engine (MKE), or you need to customize TLS certificates, you **must** customize this Helm chart by creating a `values.yaml` file. Otherwise, you can skip this step.
-
-1. If you are installing on a cluster installed by EKS, GKE, AKS or Mirantis Kubernetes Engine (MKE), set the `kubernetesProvider` as described in the [Installation reference](../../reference/installation/api.mdx#operator.tigera.io/v1.Provider). For example:
-
- ```
- echo '{ installation: {kubernetesProvider: EKS }}' > values.yaml
- ```
-
- For Azure AKS cluster with no Kubernetes CNI pre-installed, create `values.yaml` with the following command:
-
- ```
-   cat > values.yaml <<EOF
-   installation:
-     kubernetesProvider: AKS
-     cni:
-       type: Calico
-   EOF
-   ```
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/index.mdx
deleted file mode 100644
index 2b9ed15c57..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Get Calico up and running in your K3s cluster.
-hide_table_of_contents: true
----
-
-# K3s
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/multi-node-install.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/multi-node-install.mdx
deleted file mode 100644
index 1c85ce7b3d..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/multi-node-install.mdx
+++ /dev/null
@@ -1,222 +0,0 @@
----
-description: Install Calico on a multi node K3s cluster for testing or development.
----
-
-# K3s multi-node install
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-This tutorial gets you a multi-node K3s cluster with {{prodname}} in approximately 10 minutes.
-
-## Value
-
-K3s is a lightweight implementation of Kubernetes packaged as a single binary.
-
-The geeky details of what you get:
-
-
-
-## Before you begin
-
-- Make sure you have a Linux host that meets the following requirements
-  - x86-64 processor
-  - 1 CPU
-  - 1GB RAM
- - 10GB free disk space
- - Ubuntu 18.04 (amd64), Ubuntu 20.04 (amd64)
-
-:::note
-
-K3s supports ARM processors too, but this tutorial was tested in an x86-64 environment.
-For more details, please visit [this link](https://rancher.com/docs/k3s/latest/en/installation/installation-requirements/#operating-systems).
-
-:::
-
-## How to
-
-### Initializing control plane instance
-
-The K3s installation script can be configured with [environment variables](https://rancher.com/docs/k3s/latest/en/installation/install-options/#options-for-installation-with-script). Here you provide extra arguments to disable `flannel`, disable the default K3s network policy, and change the pod IP CIDR.
-
-:::note
-
-The full list of arguments can be viewed [at this link](https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/).
-
-:::
-
- curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--flannel-backend=none --disable-network-policy --cluster-cidr=192.168.0.0/16" sh -
-
-:::caution
-
-If 192.168.0.0/16 is already in use within your network you must select a different pod network
-CIDR by replacing 192.168.0.0/16 in the above command.
-
-:::
-
-### Enable remote access to your K3s instance
-
-To set up remote access to your cluster first ensure you have installed `kubectl` on your system.
-
-:::note
-
-If you are not sure how to install kubectl in your OS [visit this link](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
-
-:::
-
-K3s stores a kubeconfig file on your server at `/etc/rancher/k3s/k3s.yaml`. Copy the contents of `k3s.yaml` from your server into `~/.kube/config` on the system from which you want remote access to the cluster.
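-
-For example, a rough sketch (the SSH user and `serverip` are placeholders, and the file must be readable by that user; the generated kubeconfig points at `127.0.0.1`, so the server address must be rewritten):
-
-```bash
-scp ubuntu@serverip:/etc/rancher/k3s/k3s.yaml ~/.kube/config
-sed -i 's/127.0.0.1/serverip/' ~/.kube/config
-kubectl get nodes
-```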
-
-### Add extra nodes to K3s cluster
-
-To add additional nodes to your cluster you need two pieces of information.
-
-- `K3S_URL`, which is your main node's IP address.
-- `K3S_TOKEN`, which is stored in the `/var/lib/rancher/k3s/server/node-token` file on the main node [(Step 1)](#initializing-control-plane-instance).
-  Execute the following command on the new node to join it to the cluster.
-
-:::note
-
-Remember to change `serverip` and `mytoken`.
-
-:::
-
-```bash
-curl -sfL https://get.k3s.io | K3S_URL=https://serverip:6443 K3S_TOKEN=mytoken sh -
-```
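-
-To find the values to substitute for `serverip` and `mytoken`, a quick sketch run on the main node:
-
-```bash
-hostname -I                                         # the main node's IP address
-sudo cat /var/lib/rancher/k3s/server/node-token     # the K3S_TOKEN value
-```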
-
-### Install {{prodname}}
-
-
-
-
-Install the {{prodname}} operator and custom resource definitions.
-
-```bash
-kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
-```
-
-:::note
-
-Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`.
-
-:::
-
-Install {{prodname}} by creating the necessary custom resource. For more information on configuration options available in this manifest, see [the installation reference](../../../reference/installation/api.mdx).
-
-```bash
-kubectl create -f {{manifestsUrl}}/manifests/custom-resources.yaml
-```
-
-:::note
-
-Before creating this manifest, read its contents and make sure its settings are correct for your environment. For example,
-you may need to change the default IP pool CIDR to match your pod network CIDR.
-
-:::
-
-
-
-
-Install {{prodname}} by using the following command.
-
-```bash
-kubectl apply -f {{manifestsUrl}}/manifests/calico.yaml
-```
-
-:::note
-
-You can also
-[view the YAML in a new tab]({{manifestsUrl}}/manifests/calico.yaml).
-
-:::
-
-You should see the following output.
-
-```
- configmap/calico-config created
- customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
- clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
- clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
- clusterrole.rbac.authorization.k8s.io/calico-node created
- clusterrolebinding.rbac.authorization.k8s.io/calico-node created
- daemonset.apps/calico-node created
- serviceaccount/calico-node created
- deployment.apps/calico-kube-controllers created
- serviceaccount/calico-kube-controllers created
-```
-
-
-
-
-### Check the installation
-
-1. Confirm that all of the pods are running using the following command.
-
-
-
-
-```
-NAMESPACE NAME READY STATUS RESTARTS AGE
-tigera-operator tigera-operator-c9cf5b94d-gj9qp 1/1 Running 0 107s
-calico-system calico-typha-7dcd87597-npqsf 1/1 Running 0 88s
-calico-system calico-node-rdwwz 1/1 Running 0 88s
-kube-system local-path-provisioner-6d59f47c7-4q8l2 1/1 Running 0 2m14s
-kube-system metrics-server-7566d596c8-xf66d 1/1 Running 0 2m14s
-kube-system coredns-8655855d6-wfdbm 1/1 Running 0 2m14s
-calico-system calico-kube-controllers-89df8c6f8-7hxc5 1/1 Running 0 87s
-```
-
-
-
-
-```
-NAMESPACE NAME READY STATUS RESTARTS AGE
-kube-system {{noderunning}}-9hn9z 1/1 Running 0 23m
-kube-system local-path-provisioner-6d59f47c7-drznc 1/1 Running 0 38m
-kube-system calico-kube-controllers-789f6df884-928lt 1/1 Running 0 23m
-kube-system metrics-server-7566d596c8-qxlfz 1/1 Running 0 38m
-kube-system coredns-8655855d6-blzl5 1/1 Running 0 38m
-```
-
-
-
-
-1. Confirm that you now have two nodes in your cluster with the following command.
-
- ```bash
- kubectl get nodes -o wide
- ```
-
- It should return something like the following.
-
- ```
- NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
-   k3s-master   Ready    master   40m   v1.18.2+k3s1   172.16.2.128   <none>        Ubuntu 18.04.3 LTS   4.15.0-101-generic   containerd://1.3.3-k3s2
-   k3s-node1    Ready    <none>   30m   v1.18.2+k3s1   172.16.2.129   <none>        Ubuntu 18.04.3 LTS   4.15.0-101-generic   containerd://1.3.3-k3s2
- ```
-
-Congratulations! You now have a multi-node K3s cluster
-equipped with {{prodname}} and Traefik.
-
-## Next steps
-
-- Try running the [Kubernetes Network policy demo](../../../network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx) to see live graphical view of network policy in action
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/quickstart.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/quickstart.mdx
deleted file mode 100644
index cca3c4bf10..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/quickstart.mdx
+++ /dev/null
@@ -1,201 +0,0 @@
----
-description: Install Calico on a single-node K3s cluster for testing or development in under 5 minutes.
----
-
-# Quickstart for Calico on K3s
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-This quickstart gets you a single-node K3s cluster with {{prodname}}
-in approximately 5 minutes. You can use this cluster for testing and
-development.
-
-## Value
-
-Use this quickstart to quickly and easily try {{prodname}} features. To deploy a cluster suitable for production, refer to [Multi-node install](multi-node-install.mdx).
-
-The geeky details of what you get:
-
-
-
-## Before you begin
-
-- Make sure you have a Linux host that meets the following requirements
-  - x86-64 processor
-  - 1 CPU
-  - 1GB RAM
- - 10GB free disk space
- - Ubuntu 18.04 (amd64), Ubuntu 20.04 (amd64)
-
-:::note
-
-K3s supports ARM processors too, but this quickstart was tested in an x86-64 environment.
-For more details, please visit [this link](https://rancher.com/docs/k3s/latest/en/installation/installation-requirements/#operating-systems).
-
-:::
-
-## How to
-
-### Create a single-node K3s cluster
-
-- Initialize the control plane using the following command:
-
-```bash
-curl -sfL https://get.k3s.io | K3S_KUBECONFIG_MODE="644" INSTALL_K3S_EXEC="--flannel-backend=none --cluster-cidr=192.168.0.0/16 --disable-network-policy --disable=traefik" sh -
-```
-
-:::note
-
-- If 192.168.0.0/16 is already in use within your network you must select a different pod network
-CIDR by replacing 192.168.0.0/16 in the above command.
-
-- The K3s installer generates the `kubeconfig` file in the `/etc/rancher/k3s` directory with limited permissions. Using the `K3S_KUBECONFIG_MODE` environment
-variable, you assign the necessary permissions to the file and make it readable by other users.
-
-:::
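-
-For example, to use the generated kubeconfig for the commands that follow (a minimal sketch; the path is where K3s writes it by default):
-
-```bash
-export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
-kubectl get nodes
-```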
-
-### Install {{prodname}}
-
-
-
-
-1. Install the {{prodname}} operator and custom resource definitions.
-
-```bash
-kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
-```
-
-:::note
-
-Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Therefore, it is recommended to use `kubectl create` or `kubectl replace`.
-
-:::
-
-2. Install {{prodname}} by creating the necessary custom resource. For more information on configuration options available in this manifest, see [the installation reference](../../../reference/installation/api.mdx).
-
-```bash
-kubectl create -f {{manifestsUrl}}/manifests/custom-resources.yaml
-```
-
-:::note
-
-Before creating this manifest, read its contents and make sure its settings are correct for your environment. For example,
-you may need to change the default IP pool CIDR to match your pod network CIDR.
-
-:::
-
-
-
-
-Install {{prodname}} by using the following command.
-
-```bash
-kubectl apply -f {{manifestsUrl}}/manifests/calico.yaml
-```
-
-:::note
-
-You can also
-[view the YAML in a new tab]({{manifestsUrl}}/manifests/calico.yaml).
-
-:::
-
-You should see the following output.
-
-```
- configmap/calico-config created
- customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
- customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
- clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
- clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
- clusterrole.rbac.authorization.k8s.io/calico-node created
- clusterrolebinding.rbac.authorization.k8s.io/calico-node created
- daemonset.apps/calico-node created
- serviceaccount/calico-node created
- deployment.apps/calico-kube-controllers created
- serviceaccount/calico-kube-controllers created
-```
-
-
-
-
-### Final checks
-
-1. Confirm that all of the pods are running using the following command.
-
-```bash
-watch kubectl get pods --all-namespaces
-```
-
-2. Wait until each pod shows the `STATUS` of `Running`.
-
-
-
-
-```
-NAMESPACE NAME READY STATUS RESTARTS AGE
-tigera-operator tigera-operator-c9cf5b94d-gj9qp 1/1 Running 0 107s
-calico-system calico-typha-7dcd87597-npqsf 1/1 Running 0 88s
-calico-system calico-node-rdwwz 1/1 Running 0 88s
-kube-system local-path-provisioner-6d59f47c7-4q8l2 1/1 Running 0 2m14s
-kube-system metrics-server-7566d596c8-xf66d 1/1 Running 0 2m14s
-kube-system coredns-8655855d6-wfdbm 1/1 Running 0 2m14s
-calico-system calico-kube-controllers-89df8c6f8-7hxc5 1/1 Running 0 87s
-```
-
-
-
-
-```
-NAMESPACE NAME READY STATUS RESTARTS AGE
-kube-system {{noderunning}}-9hn9z 1/1 Running 0 23m
-kube-system local-path-provisioner-6d59f47c7-drznc 1/1 Running 0 38m
-kube-system calico-kube-controllers-789f6df884-928lt 1/1 Running 0 23m
-kube-system metrics-server-7566d596c8-qxlfz 1/1 Running 0 38m
-kube-system coredns-8655855d6-blzl5 1/1 Running 0 38m
-```
-
-
-
-
-3. Press CTRL+C to exit `watch`.
-
-4. Confirm that you now have a node in your cluster with the
- following command.
-
-```bash
-kubectl get nodes -o wide
-```
-
-It should return something like the following.
-
-```
-NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
-k3s-master   Ready    master   40m   v1.18.2+k3s1   172.16.2.128   <none>        Ubuntu 18.04.3 LTS   4.15.0-101-generic   containerd://1.3.3-k3s2
-```
-
-Congratulations! You now have a single-node K3s cluster
-equipped with {{prodname}}.
-
-## Next steps
-
-- Try running the [Kubernetes Network policy demo](../../../network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx) to see live graphical view of network policy in action
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/aks.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/aks.mdx
deleted file mode 100644
index f87505064e..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/aks.mdx
+++ /dev/null
@@ -1,109 +0,0 @@
----
-description: Enable Calico network policy in AKS.
----
-
-# Microsoft Azure Kubernetes Service (AKS)
-
-## Big picture
-
-Enable {{prodname}} in AKS managed Kubernetes service.
-
-## Value
-
-AKS has built-in support for {{prodname}}, providing a robust implementation of the full Kubernetes Network Policy API. AKS users wanting to go beyond Kubernetes network policy capabilities can make full use of the {{prodname}} Network Policy API.
-
-You can also use {{prodname}} for networking on AKS in place of the default Azure VPC networking. This allows you to take advantage of the full set of {{prodname}} networking features.
-
-## How to
-
-### Install AKS with {{prodname}} for network policy
-
-The geeky details of what you get:
-
-
-
-To enable {{prodname}} network policy enforcement, follow these step-by-step instructions: [Create an AKS cluster and enable network policy](https://docs.microsoft.com/en-us/azure/aks/use-network-policies).
-
-### Install AKS with {{prodname}} networking
-
-**Limitations**
-
-- [Windows dataplane](../windows-calico/index.mdx) is not supported.
-- [eBPF dataplane](../../../operations/ebpf/use-cases-ebpf.mdx) is not supported.
-- [VPP dataplane](https://github.com/projectcalico/vpp-dataplane) is not supported.
-
-The geeky details of what you get:
-
-
-
-1. Create an Azure AKS cluster with no Kubernetes CNI pre-installed. Please refer to [Bring your own CNI with AKS](https://docs.microsoft.com/en-us/azure/aks/use-byo-cni?tabs=azure-cli) for details.
-
- ```
- # Create a resource group
- az group create --name my-calico-rg --location westcentralus
-
- az aks create --resource-group my-calico-rg --name my-calico-cluster --location westcentralus --pod-cidr 192.168.0.0/16 --network-plugin none
- ```
-
-1. Get credentials to allow you to access the cluster with `kubectl`:
-
- ```
- az aks get-credentials --resource-group my-calico-rg --name my-calico-cluster
- ```
-
-1. Now that you have a cluster configured, you can install {{prodname}}.
-
-1. Install the operator.
-
- ```bash
- kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
- ```
-
-1. Configure the {{prodname}} installation.
-
- ```bash
-   kubectl create -f - <<EOF
-   kind: Installation
-   apiVersion: operator.tigera.io/v1
-   metadata:
-     name: default
-   spec:
-     kubernetesProvider: AKS
-     cni:
-       type: Calico
-     calicoNetwork:
-       bgp: Disabled
-   EOF
-   ```
-
-
-1. First, create an Amazon EKS cluster.
-
- ```bash
-   eksctl create cluster --name <cluster-name>
- ```
-
-1. Install the operator.
-
- ```bash
- kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
- ```
-
-1. Configure the {{prodname}} installation.
-
- ```bash
-   kubectl create -f - <<EOF
-   kind: Installation
-   apiVersion: operator.tigera.io/v1
-   metadata:
-     name: default
-   spec:
-     kubernetesProvider: EKS
-     cni:
-       type: AmazonVPC
-   EOF
- ```
-
-### Install EKS with {{prodname}} networking
-
-The geeky details of what you get:
-
-
-
-:::note
-
-{{prodname}} networking cannot currently be installed on the EKS control plane nodes. As a result the control plane nodes
-will not be able to initiate network connections to {{prodname}} pods. (This is a general limitation of EKS's custom networking support,
-not specific to {{prodname}}.) As a workaround, trusted pods that require control plane nodes to connect to them, such as those implementing
-admission controller webhooks, can include `hostNetwork:true` in their pod spec. See the Kubernetes API
-[pod spec](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec)
-definition for more information on this setting.
-
-:::
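-
-As a rough sketch, a hypothetical webhook pod that opts into the host network so the control plane can reach it (the pod name and image are placeholders):
-
-```bash
-kubectl apply -f - <<EOF
-apiVersion: v1
-kind: Pod
-metadata:
-  name: example-webhook
-spec:
-  hostNetwork: true
-  containers:
-    - name: webhook
-      image: example.com/my-webhook:latest
-EOF
-```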
-
-For these instructions, we will use `eksctl` to provision the cluster. However, you can use any of the methods in [Getting Started with Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html)
-
-Before you get started, make sure you have downloaded and configured the [necessary prerequisites](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html#eksctl-prereqs)
-
-1. First, create an Amazon EKS cluster without any nodes.
-
- ```bash
- eksctl create cluster --name my-calico-cluster --without-nodegroup
- ```
-
-1. Since this cluster will use {{prodname}} for networking, you must delete the `aws-node` daemon set to disable AWS VPC networking for pods.
-
- ```bash
- kubectl delete daemonset -n kube-system aws-node
- ```
-
-1. Now that you have a cluster configured, you can install {{prodname}}.
-
-
-
-
-1. Install the operator.
-
- ```bash
- kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
- ```
-
-1. Configure the {{prodname}} installation.
-
- ```bash
-   kubectl create -f - <<EOF
-   kind: Installation
-   apiVersion: operator.tigera.io/v1
-   metadata:
-     name: default
-   spec:
-     kubernetesProvider: EKS
-     cni:
-       type: Calico
-     calicoNetwork:
-       bgp: Disabled
-   EOF
-   ```
-
-
-1. Install the {{prodname}} manifest.
-
- ```bash
- kubectl apply -f {{manifestsUrl}}/manifests/calico-vxlan.yaml
- ```
-
-1. Configure {{prodname}} to disable AWS src/dst checks.
-
- ```bash
- kubectl -n kube-system set env daemonset/calico-node FELIX_AWSSRCDSTCHECK=Disable
- ```
-
-1. Finally, add nodes to the cluster.
-
- ```bash
- eksctl create nodegroup --cluster my-calico-cluster --node-type t3.medium --max-pods-per-node 100
- ```
-
-
-
-
-1. Add {{prodname}} into your Helm repository.
-
- ```batch
- helm repo add projectcalico https://docs.tigera.io/calico/charts
- ```
-
-1. If {{prodname}} is already added, update it to get the latest released version.
-
- ```batch
- helm repo update
- ```
-
-1. Install version {{releaseTitle}} of the {{prodname}} operator and custom resource definitions.
-
- ```batch
- helm install calico projectcalico/tigera-operator --version {{releaseTitle}}
- ```
-
-1. Patch the CNI type with value `Calico`.
-
- ```batch
- kubectl patch installation default --type='json' -p='[{"op": "replace", "path": "/spec/cni", "value": {"type":"Calico"} }]'
- ```
-
-1. Finally, add nodes to the cluster.
-
- ```batch
- eksctl create nodegroup --cluster my-calico-cluster --node-type t3.medium --max-pods-per-node 100
- ```
-
-
-
-
-:::tip
-
- Without the `--max-pods-per-node` option above, EKS will limit the [number of pods based on node-type](https://github.com/awslabs/amazon-eks-ami/blob/main/nodeadm/internal/kubelet/eni-max-pods.txt). See `eksctl create nodegroup --help` for the full set of node group options.
-
-:::
-
-## Next steps
-
-**Required**
-
-- [Install calicoctl command line tool](../../../operations/calicoctl/install.mdx)
-
-**Recommended**
-
-- [Video: Everything you need to know about Kubernetes pod networking on AWS](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-pod-networking-on-aws/)
-- [Get started with Kubernetes network policy](../../../network-policy/get-started/kubernetes-policy/kubernetes-network-policy.mdx)
-- [Get started with {{prodname}} network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx)
-- [Enable default deny for Kubernetes pods](../../../network-policy/get-started/kubernetes-default-deny.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/gke.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/gke.mdx
deleted file mode 100644
index 13c4c3b9fe..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/gke.mdx
+++ /dev/null
@@ -1,38 +0,0 @@
----
-description: Enable Calico network policy in GKE.
----
-
-# Google Kubernetes Engine (GKE)
-
-## Big picture
-
-Enable {{prodname}} in GKE managed Kubernetes service.
-
-## Value
-
-GKE has built-in support for {{prodname}}, providing a robust implementation of the full Kubernetes Network Policy API. GKE users wanting to go beyond Kubernetes network policy capabilities can make full use of the {{prodname}} Network Policy API.
-
-## How to
-
-To enable {{prodname}} network policy enforcement, follow these step-by-step instructions:
-[Enabling network policy enforcement](https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy).
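-
-For reference, a minimal sketch of creating such a cluster with `gcloud` (the cluster name and zone are placeholders):
-
-```bash
-gcloud container clusters create my-cluster --zone us-central1-a --enable-network-policy
-```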
-
-The geeky details of what you get:
-
-
-
-## Next steps
-
-**Required**
-
-- [Install calicoctl command line tool](../../../operations/calicoctl/install.mdx)
-
-**Recommended**
-
-- [Video: Everything you need to know about Kubernetes networking on Google cloud](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-google-cloud/)
-- [Get started with Kubernetes network policy](../../../network-policy/get-started/kubernetes-policy/kubernetes-network-policy.mdx)
-- [Get started with Calico network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx)
-- [Enable default deny for Kubernetes pods](../../../network-policy/get-started/kubernetes-default-deny.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/iks.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/iks.mdx
deleted file mode 100644
index 6312df7eee..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/iks.mdx
+++ /dev/null
@@ -1,36 +0,0 @@
----
-description: Use IKS with built-in support for Calico networking and network policy.
----
-
-# IBM Cloud Kubernetes Service (IKS)
-
-## Big picture
-
-Enable {{prodname}} in IKS managed Kubernetes service.
-
-## Value
-
-IKS has built-in support for {{prodname}}, providing a robust implementation of the full Kubernetes Network Policy API. IKS users wanting to go beyond Kubernetes network policy capabilities can make full use of the {{prodname}} Network Policy API. In addition to using {{prodname}} to secure Kubernetes pods, IKS also uses {{prodname}} host endpoint capabilities to provide additional security for the nodes in your cluster.
-
-## How to
-
-{{prodname}} networking and network policy are automatically installed and configured in your [IBM Cloud Kubernetes Service](https://www.ibm.com/products/kubernetes-service/). Default policies are created to protect your Kubernetes cluster, with the option to create your own policies to protect specific services.
-
-The geeky details of what you get:
-
-
-
-## Next steps
-
-**Required**
-
-- [Install calicoctl command line tool](../../../operations/calicoctl/install.mdx)
-
-**Recommended**
-
-- [Controlling traffic with network policies for IKS](https://cloud.ibm.com/docs/containers?topic=containers-network_policies)
-- [Get started with Kubernetes network policy](../../../network-policy/get-started/kubernetes-policy/kubernetes-network-policy.mdx)
-- [Get started with Calico network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/index.mdx
deleted file mode 100644
index 8b3bc22c42..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Enable Calico on EKS, GKE, AKS, or IKS.
-hide_table_of_contents: true
----
-
-# Managed public cloud
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/microk8s.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/microk8s.mdx
deleted file mode 100644
index a1e453423c..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/microk8s.mdx
+++ /dev/null
@@ -1,80 +0,0 @@
----
-description: Install Calico on a single-host MicroK8s cluster for testing or development in under 5 minutes.
----
-
-# Quickstart for Calico on MicroK8s
-
-## Big picture
-
-Install a single node MicroK8s cluster with {{prodname}} in approximately 5 minutes.
-
-## Value
-
-MicroK8s is a lightweight upstream Kubernetes distribution packaged to run as an immutable container.
-
-Use this quickstart to quickly and easily try {{prodname}} features with MicroK8s.
-
-## Before you begin
-
-- Make sure you have a Linux host that meets the following requirements:
- - 4GB RAM
- - 20GB free disk space
- - Ubuntu 20.04 LTS, 18.04 LTS or 16.04 LTS (or another operating system that supports `snapd`)
-
-## How to
-
-1. Initialize the node using the following command.
-
- ```
- snap install microk8s --classic
- ```
-
- :::note
-
-   You can check out other published versions of the MicroK8s Kubernetes distribution in snap by using the `snap info microk8s` command.
-
- :::
-
-1. Enable dns services.
-
- ```
- microk8s enable dns
- ```
-
-1. Check your cluster status
-
- ```
- microk8s kubectl get pods -A
- ```
-
- You should see a result similar to
-
- ```
- NAMESPACE NAME READY STATUS RESTARTS AGE
- kube-system calico-node-b82zp 1/1 Running 0 64s
- kube-system calico-kube-controllers-555fc8cc5c-b7cp6 1/1 Running 0 64s
- kube-system coredns-588fd544bf-mbc7n 1/1 Running 0 39s
- ```
-
-The geeky details of what you get:
-
-
-
-## Next steps
-
-**Required**
-
-- [Install and configure calicoctl](../../operations/calicoctl/install.mdx)
-
-**Optional**
-
-- [Add another node to form a multi-node cluster](https://microk8s.io/docs/clustering)
-
-**Recommended tutorials**
-
-- [Secure a simple application using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx)
-- [Control ingress and egress traffic using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx)
-- [Run a tutorial that shows blocked and allowed connections in real time](../../network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/minikube.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/minikube.mdx
deleted file mode 100644
index c8c49267b0..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/minikube.mdx
+++ /dev/null
@@ -1,158 +0,0 @@
----
-description: Enable Calico on a single/multi-node minikube cluster for testing or development in under 1 minute.
----
-
-# Quickstart for Calico on minikube
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-This quickstart gets you a single-node minikube cluster with {{prodname}}
-in approximately 1 minute. You can use this cluster for testing and
-development.
-
-## Value
-
-Use this quickstart to quickly and easily try {{prodname}} features.
-
-## Before you begin
-
-- Install, but do not start, minikube. [How to install minikube](https://minikube.sigs.k8s.io/docs/start/#what-youll-need)
-- Install kubectl. [How to install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
-- Install a minikube driver. For example, Docker. A full list of available drivers can be [found here](https://minikube.sigs.k8s.io/docs/drivers/).
-
-## How to
-
-### Create a single-node minikube cluster
-
-
-
-
-Minikube offers a built-in {{prodname}} implementation; this is a quick way to check out {{prodname}} features.
-
-:::note
-
-Enabling the preinstalled {{prodname}} might be the quickest way for testing. However, if you would like to try a more recent version or additional features of {{prodname}}, you should consider using the manifest or operator approach.
-
-:::
-
-```bash
-minikube start --network-plugin=cni --cni=calico
-```
-
-
-
-
-1. Start your minikube cluster with one control plane node using the following command.
-
-:::note
-
-If `192.168.0.0/16` is already in use within your network you must select a different pod network CIDR, by replacing `192.168.0.0/16` in the following command.
-
-:::
-
-```bash
-minikube start --cni=false --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=192.168.0.0/16 --subnet=172.16.0.0/24
-```
-
-2. Install the Tigera {{prodname}} operator and custom resource definitions.
-
-```bash
-kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
-```
-
-:::note
-
-Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`.
-
-:::
-
-3. Install {{prodname}} by creating the necessary custom resource. For more information on configuration options available in this manifest, see [the installation reference](../../reference/installation/api.mdx).
-
-:::note
-
-Before creating this manifest, read its contents and make sure its settings are correct for your environment. For example,
-if you have replaced `pod-network-cidr` you must change it in this file as well.
-
-:::
-
-```bash
-kubectl create -f {{manifestsUrl}}/manifests/custom-resources.yaml
-```
-
-
-
-
-Start your minikube cluster with one control plane node using the following command:
-
-```bash
-minikube start --network-plugin=cni
-```
-
-Install {{prodname}}.
-
-```bash
-kubectl apply -f {{manifestsUrl}}/manifests/calico.yaml
-```
-
-
-
-
-### Verify {{prodname}} installation
-
-Verify {{prodname}} installation in your cluster using the following command:
-
-```bash
-watch kubectl get pods -l k8s-app=calico-node -A
-```
-
-You should see a result similar to the below. Note that the namespace might be different, depending on the method you followed.
-
-```
-NAMESPACE NAME READY STATUS RESTARTS AGE
-kube-system calico-node-mlqvs 1/1 Running 0 5m18s
-```
-
-Use `ctrl+c` to break out of watch.
-
-Congratulations! You now have a minikube cluster equipped with {{prodname}}.
-
-### Add an additional worker node
-
-:::note
-
-This is an optional step; you can safely skip it if you do not require an additional worker node.
-
-:::
-
-```bash
-minikube node add
-```
-
-Verify nodes using the following command:
-
-```bash
-kubectl get nodes
-```
-
-### Clean up
-
-Delete the cluster using the following command:
-
-```bash
-minikube delete
-```
-
-## Next steps
-
-**Required**
-
-- [Install and configure calicoctl](../../operations/calicoctl/install.mdx)
-
-**Recommended tutorials**
-
-- [Secure a simple application using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx)
-- [Control ingress and egress traffic using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx)
-- [Run a tutorial that shows blocked and allowed connections in real time](../../network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/openshift/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/openshift/index.mdx
deleted file mode 100644
index b6bf7d2303..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/openshift/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Install Calico on OpenShift for networking and network policy.
-hide_table_of_contents: true
----
-
-# OpenShift
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/openshift/installation.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/openshift/installation.mdx
deleted file mode 100644
index 219bbc3a25..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/openshift/installation.mdx
+++ /dev/null
@@ -1,187 +0,0 @@
----
-description: Install Calico on an OpenShift 4 cluster.
----
-
-# Install an OpenShift 4 cluster with Calico
-
-## Big picture
-
-Install an OpenShift 4 cluster with {{prodname}}.
-
-## Value
-
-Augments the applicable steps in the [OpenShift documentation](https://cloud.redhat.com/openshift/install)
-to install {{prodname}}.
-
-## How to
-
-### Before you begin
-
-- Ensure that your environment meets the {{prodname}} [system requirements](requirements.mdx).
-
-- Ensure that you have a [RedHat account](https://cloud.redhat.com/). A RedHat account is required to get the pull secret necessary to provision an OpenShift cluster. Note that the OpenShift installer supports a subset of AWS regions.
-
-- If installing on AWS, ensure that you have:
-
- - Configured an AWS account appropriate for OpenShift 4
- - [Set up your AWS credentials](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/setup-credentials.html)
- - Generated a local SSH private key and added it to your ssh-agent
-
-### Create a configuration file for the OpenShift installer
-
-First, create a staging directory for the installation. This directory will contain the configuration file, along with cluster state files, that OpenShift installer will create:
-
-```
-mkdir openshift-tigera-install && cd openshift-tigera-install
-```
-
-Now run OpenShift installer to create a default configuration file:
-
-```
-openshift-install create install-config
-```
-
-:::note
-
-Refer to the [OpenShift installer documentation](https://cloud.redhat.com/openshift/install) for more information
-about the installer and any configuration changes required for your platform.
-
-:::
-
-Once the installer has finished, your staging directory will contain the configuration file `install-config.yaml`.
-
-### Update the configuration file to use {{prodname}}
-
-Override the OpenShift networking to use Calico and update the AWS instance types to meet the [system requirements](requirements.mdx):
-
-```bash
-sed -i 's/\(OpenShiftSDN\|OVNKubernetes\)/Calico/' install-config.yaml
-```
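-
-You can confirm the change with a quick check:
-
-```bash
-grep networkType install-config.yaml
-```
-
-The value should now read `Calico`.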
-
-### Generate the install manifests
-
-Now generate the Kubernetes manifests using your configuration file:
-
-```bash
-openshift-install create manifests
-```
-
-
-
-### Optionally provide additional configuration
-
-You may want to provide Calico with additional configuration at install-time. For example, BGP configuration or peers.
-You can use a Kubernetes ConfigMap with your desired Calico resources to set configuration as part of the installation.
-If you do not need to provide additional configuration, you can skip this section.
-
-To include [Calico resources](../../../reference/resources/index.mdx) during installation, edit `manifests/02-configmap-calico-resources.yaml` to add your own configuration.
-
-:::note
-
-If you have a directory with the Calico resources, you can create the file with the command:
-
-```
-oc create configmap -n tigera-operator calico-resources \
---from-file=<resource-directory> --dry-run -o yaml \
-> manifests/02-configmap-calico-resources.yaml
-```
-
-With recent versions of oc it is necessary to have a kubeconfig configured or add `--server='127.0.0.1:443'`
-even though it is not used.
-
-:::
-
-:::note
-
-If you have provided a `calico-resources` configmap and the tigera-operator pod fails to come up with `Init:CrashLoopBackOff`,
-check the output of the init-container with `oc logs -n tigera-operator -l k8s-app=tigera-operator -c create-initial-resources`.
-
-:::
-
-### Create the cluster
-
-Start the cluster creation with the following command and wait for it to complete.
-
-```bash
-openshift-install create cluster
-```
-
-Once the above command is complete, you can verify {{prodname}} is installed by verifying the components are available with the following command.
-
-```
-oc get tigerastatus
-```
-
-:::note
-
-To get more information, add `-o yaml` to the above command.
-
-:::
-
-### Optionally integrate with Operator Lifecycle Manager (OLM)
-
-In OpenShift Container Platform, the [Operator Lifecycle Manager](https://docs.openshift.com/container-platform/4.4/operators/understanding_olm/olm-understanding-olm.html#olm-overview_olm-understanding-olm) helps
-cluster administrators manage the lifecycle of operators in their cluster. Managing the {{prodname}}
-operator with OLM gives administrators a single place to manage operators.
-
-To register the running {{prodname}} operator with OLM, first you will need to create an OperatorGroup for the operator:
-
-```bash
-oc apply -f - <<EOF
-apiVersion: operators.coreos.com/v1
-kind: OperatorGroup
-metadata:
-  name: tigera-operator
-  namespace: tigera-operator
-spec:
-  targetNamespaces:
-    - tigera-operator
-EOF
-```
-
-## OpenShift requirements
-
-{{prodname}} supports the [OpenShift Container Platform](https://docs.openshift.com/).
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/quickstart.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/quickstart.mdx
deleted file mode 100644
index e3067b56a8..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/quickstart.mdx
+++ /dev/null
@@ -1,169 +0,0 @@
----
-description: Install Calico on a single-host Kubernetes cluster for testing or development in under 15 minutes.
----
-
-# Quickstart for Calico on Kubernetes
-
-## Big picture
-
-This quickstart gets you a single-host Kubernetes cluster with {{prodname}} in approximately 15 minutes.
-
-## Value
-
-Use this quickstart to quickly and easily try {{prodname}} features. To deploy a cluster suitable for production, refer to [{{prodname}} on Kubernetes](../kubernetes/index.mdx).
-
-## Before you begin
-
-**Required**
-
-- A Linux host that meets the following requirements:
-
- - x86-64, arm64, ppc64le, or s390x processor
-  - 2 CPUs
-  - 2GB RAM
-  - 10GB free disk space
-  - Red Hat Enterprise Linux 7.x+, CentOS 7.x+, Ubuntu 18.04+, or Debian 9.x+
-
-- {{prodname}} can manage `cali` and `tunl` interfaces on the host
-
- If NetworkManager is present on the host, see [Configure NetworkManager](../../operations/troubleshoot/troubleshooting.mdx#configure-networkmanager).
-
-## Concepts
-
-### Operator based installation
-
-This quickstart guide uses the Tigera operator to install {{prodname}}. The operator provides lifecycle management for Calico
-and is configured through the Kubernetes API using custom resource definitions.
-
-:::note
-
-It is also possible to install Calico without an operator using Kubernetes manifests directly.
-For platforms and guides that do not use the Tigera operator, you may notice some differences in the steps and Kubernetes
-resources compared to those presented in this guide.
-
-:::
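-
-After the installation steps below, this configuration is visible as ordinary Kubernetes objects; for example, the following command shows the `Installation` resource that the operator manages:
-
-```bash
-kubectl get installation default -o yaml
-```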
-
-## How to
-
-The geeky details of what you get:
-
-
-
-### Create a single-host Kubernetes cluster
-
-1. [Follow the Kubernetes instructions to install kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/)
-
- :::note
-
- After installing kubeadm, do not power down or restart
- the host. Instead, continue directly to the next step.
-
- :::
-
-1. As a regular user with sudo privileges, open a terminal on the host that you installed kubeadm on.
-
-1. Initialize the control plane using the following command.
-
- ```
- sudo kubeadm init --pod-network-cidr=192.168.0.0/16
- ```
-
- :::note
-
- If 192.168.0.0/16 is already in use within your network you must select a different pod network
- CIDR, replacing 192.168.0.0/16 in the above command.
-
- :::
-
-1. Execute the following commands to configure kubectl (also returned by `kubeadm init`).
-
- ```
- mkdir -p $HOME/.kube
- sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
- sudo chown $(id -u):$(id -g) $HOME/.kube/config
- ```
-
-### Install {{prodname}}
-
-1. Install the Tigera {{prodname}} operator and custom resource definitions.
-
- ```
- kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
- ```
-
- :::note
-
- Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`.
-
- :::
-
-1. Install {{prodname}} by creating the necessary custom resource. For more information on configuration options available in this manifest, see [the installation reference](../../reference/installation/api.mdx).
-
- ```
- kubectl create -f {{manifestsUrl}}/manifests/custom-resources.yaml
- ```
-
- :::note
-
- Before creating this manifest, read its contents and make sure its settings are correct for your environment. For example,
- you may need to change the default IP pool CIDR to match your pod network CIDR.
-
- :::
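-
-   For example, to review or edit the default IP pool before creating the resources, you can download the same manifest first (a sketch; the file is the one referenced above):
-
-   ```bash
-   curl {{manifestsUrl}}/manifests/custom-resources.yaml -O
-   grep -A 5 'ipPools' custom-resources.yaml
-   ```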
-
-1. Confirm that all of the pods are running with the following command.
-
- ```
- watch kubectl get pods -n calico-system
- ```
-
- Wait until each pod has the `STATUS` of `Running`.
-
- :::note
-
- The Tigera operator installs resources in the `calico-system` namespace. Other install methods may use
- the `kube-system` namespace instead.
-
- :::
-
-1. Remove the taints on the control plane so that you can schedule pods on it.
-
- ```bash
- kubectl taint nodes --all node-role.kubernetes.io/control-plane-
- kubectl taint nodes --all node-role.kubernetes.io/master-
- ```
-
- It should return the following.
-
- ```
-   node/<your-node-name> untainted
- ```
-
-1. Confirm that you now have a node in your cluster with the following command.
-
- ```
- kubectl get nodes -o wide
- ```
-
- It should return something like the following.
-
- ```
- NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
-   <your-node-name>   Ready    master   52m   v1.12.2   10.128.0.28   <none>        Ubuntu 18.04.1 LTS   4.15.0-1023-gcp   docker://18.6.1
- ```
-
-Congratulations! You now have a single-host Kubernetes cluster with {{prodname}}.
-
-## Next steps
-
-**Required**
-
-- [Install and configure calicoctl](../../operations/calicoctl/install.mdx)
-
-**Recommended tutorials**
-
-- [Secure a simple application using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx)
-- [Control ingress and egress traffic using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx)
-- [Run a tutorial that shows blocked and allowed connections in real time](../../network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/rancher.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/rancher.mdx
deleted file mode 100644
index 9a4bc35c62..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/rancher.mdx
+++ /dev/null
@@ -1,96 +0,0 @@
----
-description: Install Calico on a Rancher Kubernetes Engine cluster.
----
-
-# Install Calico on a Rancher Kubernetes Engine cluster
-
-## Big picture
-
-Install {{prodname}} as the required CNI for networking and/or network policy on Rancher-deployed clusters.
-
-## Concepts
-
-{{prodname}} supports the Calico CNI with Calico network policy:
-
-The geeky details of what you get:
-
-
-
-## Before you begin
-
-**Required**
-
-- A compatible [Rancher Kubernetes Engine cluster](https://rancher.com/docs/rke/latest/en/) with version 1.3
-
- - Configure your cluster with a [Cluster Config File](https://rancher.com/docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) and specify [no network plugin](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/) by setting `plugin: none` under `network` in your configuration file.
-
-- RKE cluster meets the [{{prodname}} requirements](requirements.mdx)
-
-- A `kubectl` environment with access to your cluster
-
- - Use [Rancher kubectl Shell](https://rancher.com/docs/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/) for access
- - Ensure you have the [Kubeconfig file that was generated when you created the cluster](https://rancher.com/docs/rke/latest/en/installation/#save-your-files).
-
-- If using a Kubeconfig file locally, [install and set up the Kubectl CLI tool](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
-
-## How to
-
-- [Install {{prodname}}](#install-calico)
-
-### Install {{prodname}}
-
-1. Install the Tigera {{prodname}} operator and custom resource definitions.
-
- ```
- kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
- ```
-
- :::note
-
- Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`.
-
- :::
-
-1. Install {{prodname}} by creating the necessary custom resource. For more information on configuration options available in this manifest, see [the installation reference](../../reference/installation/api.mdx).
-
- ```
- kubectl create -f {{manifestsUrl}}/manifests/custom-resources.yaml
- ```
-
- :::note
-
- Before creating this manifest, read its contents and make sure its settings are correct for your environment. For example,
- you may need to change the default IP pool CIDR to match your pod network CIDR. Rancher uses `10.42.0.0/16` by default.
-
- :::
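-
-   For example, assuming the manifest's default pool is `192.168.0.0/16`, you could download it and switch the pool to Rancher's default pod CIDR before creating it (a sketch; review the other settings as well):
-
-   ```bash
-   curl {{manifestsUrl}}/manifests/custom-resources.yaml -O
-   sed -i 's|192.168.0.0/16|10.42.0.0/16|' custom-resources.yaml
-   kubectl create -f custom-resources.yaml
-   ```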
-
- :::note
-
- If you are installing {{prodname}} on Windows nodes in this cluster, please see the [{{prodnameWindows}} for RKE](windows-calico/kubernetes/rancher.mdx) installation instructions.
-
- :::
-
-1. Confirm that all of the pods are running with the following command.
-
- ```
- watch kubectl get pods -n calico-system
- ```
-
- Wait until each pod has the `STATUS` of `Running`.
-
-Congratulations! You now have an RKE cluster running {{prodname}}.
-
-## Next steps
-
-**Required**
-
-- [Install and configure calicoctl](../../operations/calicoctl/install.mdx)
-
-**Recommended tutorials**
-
-- [Secure a simple application using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx)
-- [Control ingress and egress traffic using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx)
-- [Run a tutorial that shows blocked and allowed connections in real time](../../network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/requirements.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/requirements.mdx
deleted file mode 100644
index 690cda00ec..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/requirements.mdx
+++ /dev/null
@@ -1,72 +0,0 @@
----
-description: Review requirements before installing Calico to ensure success.
----
-
-# System requirements
-
-
-
-## Kubernetes requirements
-
-#### Supported versions
-
-We test {{prodname}} {{version}} against the following Kubernetes versions. Other versions may work, but we are not actively testing them.
-
-- v1.23
-- v1.24
-- v1.25
-- v1.26
-- v1.27
-- v1.28
-
-Due to changes in the Kubernetes API, {{prodname}} {{version}} will not work
-on Kubernetes v1.15 or below. v1.16-v1.18 may work, but they are no longer tested.
-Newer versions may also work, but we recommend upgrading to a version of {{prodname}}
-that is tested against the newer Kubernetes version.
-
-#### CNI plug-in enabled
-
-For Kubernetes 1.24 or later, {{prodname}} must be installed as a CNI plugin in the container runtime.
-
-This installation must use the Kubernetes default CNI configuration directory (`/etc/cni/net.d`) and binary directory (`/opt/cni/bin`).
-
-For Kubernetes 1.23 or earlier, the kubelet must be configured to use CNI networking by passing the `--network-plugin=cni` argument.
-(On kubeadm, this is the default.)
-
-#### Other network providers
-
-Generally, you cannot use {{prodname}} together with another network provider.
-
-Notable exceptions include the following:
-
-* [flannel](flannel/index.mdx)
-* Platform-specific CNIs, such as the [AWS VPC CNI](https://github.com/aws/amazon-vpc-cni-k8s/blob/master/README.md) and [Azure VNET CNI](https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md) plugins.
-
-If you're working with a cluster that already uses another CNI, you cannot migrate to {{prodname}}.
-
-#### Supported kube-proxy modes
-
-{{prodname}} supports the following kube-proxy modes:
-
-- `iptables` (default)
-- `ipvs` (requires Kubernetes ≥ v1.9.3). Refer to
- [Use IPVS kube-proxy](../../networking/configuring/use-ipvs.mdx) for more details.
-
-#### IP pool configuration
-
-The IP range selected for pod IP addresses cannot overlap with any other
-IP ranges in your network, including:
-
-- The Kubernetes service cluster IP range
-- The range from which host IPs are allocated
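-
-For example, a kubeadm-based cluster might keep the pod, service, and node ranges distinct (the values below are illustrative):
-
-```bash
-# Pod and service CIDRs must not overlap each other or the node/host network.
-sudo kubeadm init \
-  --pod-network-cidr=192.168.0.0/16 \
-  --service-cidr=10.96.0.0/12
-```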
-
-## Application layer policy requirements
-
-- [MutatingAdmissionWebhook](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook) enabled
-- Istio [v1.9](https://istio.io/v1.9/) or [v1.10](https://archive.istio.io/v1.10/)
-
-Note the following version dependencies:
-
-- Kubernetes version 1.16+ requires Istio version 1.2 or greater.
-- Istio version 1.9 requires Kubernetes version 1.17-1.20.
-- Istio version 1.10 is supported on Kubernetes version 1.18-1.21, but has also been tested on Kubernetes version 1.22.
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/config-options.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/config-options.mdx
deleted file mode 100644
index 7b4ff81146..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/config-options.mdx
+++ /dev/null
@@ -1,355 +0,0 @@
----
-description: Optionally customize Calico prior to installation.
----
-
-# Customize Calico configuration
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Perform common customizations of a {{prodname}} installation.
-
-## Concepts
-
-### {{prodname}} operator
-
-{{prodname}} is installed by an operator which manages the installation, upgrade, and general lifecycle of a {{prodname}} cluster. The operator is
-installed directly on the cluster as a Deployment, and is configured through one or more custom Kubernetes API resources.
-
-### {{prodname}} manifests
-
-{{prodname}} can also be installed using raw manifests as an alternative to the operator. The manifests contain the necessary resources for installing {{prodname}} on each node in your Kubernetes cluster. Using manifests is not recommended as they cannot automatically manage the lifecycle of the {{prodname}} as the operator does. However, manifests may be useful for clusters that require highly specific modifications to the underlying Kubernetes resources.
-
-## How to
-
-
-
-
-### About customizing an operator install
-
-Operator installations read their configuration from a specific set of Kubernetes APIs. These APIs are installed on the cluster
-as part of `tigera-operator.yaml` in the `operator.tigera.io/v1` API group.
-
-- [Installation](../../../reference/installation/api.mdx#operator.tigera.io/v1.Installation): a singleton resource with name "default" that
- configures common installation parameters for a {{prodname}} cluster.
-- [APIServer](../../../reference/installation/api.mdx#operator.tigera.io/v1.APIServer): a singleton resource with name "default" that
- configures installation of the {{prodname}} API server extension.
-
-### Configure the pod IP range
-
-For many environments, {{prodname}} will auto-detect the correct pod IP range to use, or select an unused range on the cluster.
-
-You can select a specific pod IP range by modifying the `spec.calicoNetwork.ipPools` array in the Installation API resource.
-
-```yaml
-kind: Installation
-apiVersion: operator.tigera.io/v1
-metadata:
- name: default
-spec:
- calicoNetwork:
- ipPools:
- - cidr: 198.51.100.0/24
-```
-
-:::note
-
-The `ipPools` array can take at most one IPv4 and one IPv6 CIDR, and only takes effect when installing {{prodname}} for the first
-time on a given cluster. To add additional pools, see [the IPPool API](../../../reference/resources/ippool.mdx).
-
-:::
-
-### Use VXLAN
-
-You can enable VXLAN in a cluster by setting `encapsulation: VXLAN` on your IPv4 pool, as shown below. You can also disable BGP via the `spec.calicoNetwork.bgp` field.
-
-```yaml
-kind: Installation
-apiVersion: operator.tigera.io/v1
-metadata:
- name: default
-spec:
- calicoNetwork:
- bgp: Disabled
- ipPools:
- - cidr: 198.51.100.0/24
- encapsulation: VXLAN
-```
-
-
-
-
-We provide a number of manifests to make deployment of {{prodname}} easy. You can optionally
-modify the manifests before applying them. Or you can modify the manifest and reapply it to change
-settings as needed.
-
-### About customizing {{prodname}} manifests
-
-Each manifest contains all the necessary resources for installing {{prodname}}
-on each node in your Kubernetes cluster.
-
-It installs the following Kubernetes resources:
-
-- The `{{nodecontainer}}` container, installed on each host using a DaemonSet.
-- The {{prodname}} CNI binaries and network config, installed on each host using a DaemonSet.
-- `calico/kube-controllers`, run as a Deployment.
-- The `calico-etcd-secrets` secret, which optionally allows for providing etcd TLS assets.
-- The `calico-config` ConfigMap, which contains parameters for configuring the install.
-
-The sections that follow discuss the configurable parameters in greater depth.
-
-### Configure the pod IP range
-
-{{prodname}} IPAM assigns IP addresses from [IP pools](../../../reference/resources/ippool.mdx).
-
-To change the default IP range used for pods, modify the `CALICO_IPV4POOL_CIDR`
-section of the `calico.yaml` manifest. For more information, see
-[Configuring {{nodecontainer}}](../../../reference/configure-calico-node.mdx).
-
-## Configuring IP-in-IP
-
-By default, the manifests enable IP-in-IP encapsulation across subnets. Many users may
-want to disable IP-in-IP encapsulation, such as under the following circumstances.
-
-- Their cluster is [running in a properly configured AWS VPC](../../../reference/public-cloud/aws.mdx).
-- All their Kubernetes nodes are connected to the same layer 2 network.
-- They intend to use BGP peering to make their underlying infrastructure aware of
- pod IP addresses.
-
-To disable IP-in-IP encapsulation, modify the `CALICO_IPV4POOL_IPIP` section of the
-manifest. For more information, see [Configuring {{nodecontainer}}](../../../reference/configure-calico-node.mdx).
-
-## Switching from IP-in-IP to VXLAN
-
-By default, the Calico manifests enable IP-in-IP encapsulation. If you are on a network that blocks IP-in-IP, such
-as Azure, you may wish to switch to [Calico's VXLAN encapsulation mode](../../../networking/configuring/vxlan-ipip.mdx).
-To do this at install time (so that Calico creates the default IP pool with VXLAN and no IP-in-IP configuration has to
-be undone):
-
-- Start with one of the [Calico for policy and networking](config-options.mdx) manifests.
-- Replace the environment variable name `CALICO_IPV4POOL_IPIP` with `CALICO_IPV4POOL_VXLAN`, leaving the value of the new variable as "Always". (A scripted sketch of these replacements follows the probe snippet below.)
-- Optionally, (to save some resources if you're running a VXLAN-only cluster) completely disable Calico's BGP-based
- networking:
- - Replace `calico_backend: "bird"` with `calico_backend: "vxlan"`. This disables BIRD.
-  - Comment out the lines `- -bird-ready` and `- -bird-live` in the calico/node readiness and liveness checks (otherwise disabling BIRD will cause
-    those checks to fail on every node):
-
-```yaml
-livenessProbe:
- exec:
- command:
- - /bin/calico-node
- - -felix-live
- - -bird-live
-readinessProbe:
- exec:
- command:
- - /bin/calico-node
- - -bird-ready
- - -felix-ready
-```
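-
-These replacements can also be scripted; a minimal sketch, assuming the manifest was downloaded as `calico.yaml` (the probe lines above still need to be commented out by hand):
-
-```bash
-sed -i -e 's/CALICO_IPV4POOL_IPIP/CALICO_IPV4POOL_VXLAN/' \
-  -e 's/calico_backend: "bird"/calico_backend: "vxlan"/' calico.yaml
-```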
-
-For more information on {{nodecontainer}}'s configuration variables, including additional VXLAN settings, see
-[Configuring {{nodecontainer}}](../../../reference/configure-calico-node.mdx).
-
-:::note
-
-The `CALICO_IPV4POOL_VXLAN` environment variable only takes effect when the first {{nodecontainer}} to start
-creates the default IP pool. It has no effect after the pool has already been created. To switch to VXLAN mode
-after installation time, use calicoctl to modify the [IPPool](../../../reference/resources/ippool.mdx) resource.
-
-:::
-
-## Configuring etcd
-
-By default, these manifests do not configure secure access to etcd and assume an
-etcd proxy is running on each host. The following configuration options let you
-specify custom etcd cluster endpoints as well as TLS.
-
-The following table outlines the supported `ConfigMap` options for etcd:
-
-| Option | Description | Default |
-| -------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- |
-| etcd_endpoints | Comma-delimited list of etcd endpoints to connect to. | http://127.0.0.1:2379 |
-| etcd_ca | The file containing the root certificate of the CA that issued the etcd server certificate. Configures `{{nodecontainer}}`, the CNI plugin, and the Kubernetes controllers to trust the signature on the certificates provided by the etcd server. | None |
-| etcd_key | The file containing the private key of the `{{nodecontainer}}`, the CNI plugin, and the Kubernetes controllers client certificate. Enables these components to participate in mutual TLS authentication and identify themselves to the etcd server. | None |
-| etcd_cert | The file containing the client certificate issued to `{{nodecontainer}}`, the CNI plugin, and the Kubernetes controllers. Enables these components to participate in mutual TLS authentication and identify themselves to the etcd server. | None |
-
-To use these manifests with a TLS-enabled etcd cluster you must do the following:
-
-1. Download the {{version}} manifest that corresponds to your installation method.
-
- **{{prodname}} for policy and networking**
-
- ```bash
- curl {{manifestsUrl}}/manifests/calico-etcd.yaml -O
- ```
-
- **{{prodname}} for policy and flannel for networking**
-
- ```bash
- curl {{manifestsUrl}}/manifests/canal.yaml -O
- ```
-
-1. Within the `ConfigMap` section, uncomment the `etcd_ca`, `etcd_key`, and `etcd_cert`
- lines so that they look as follows.
-
- ```yaml
- etcd_ca: '/calico-secrets/etcd-ca'
- etcd_cert: '/calico-secrets/etcd-cert'
- etcd_key: '/calico-secrets/etcd-key'
- ```
-
-1. Ensure that you have three files, one containing the `etcd_ca` value, another containing
- the `etcd_key` value, and a third containing the `etcd_cert` value.
-
-1. Use a command like the following to strip the newlines from each file and
-   base64-encode its contents.
-
-   ```bash
-   cat <file> | base64 -w 0
-   ```
-
-1. In the `Secret` named `calico-etcd-secrets`, uncomment `etcd_ca`, `etcd_key`, and `etcd_cert`
- and paste in the appropriate base64-encoded values.
-
- ```yaml
- apiVersion: v1
- kind: Secret
- type: Opaque
- metadata:
- name: calico-etcd-secrets
- namespace: kube-system
- data:
- # Populate the following files with etcd TLS configuration if desired, but leave blank if
- # not using TLS for etcd.
- # This self-hosted install expects three files with the following names. The values
- # should be base64 encoded strings of the entire contents of each file.
- etcd-key: LS0tLS1CRUdJTiB...VZBVEUgS0VZLS0tLS0=
- etcd-cert: LS0tLS1...ElGSUNBVEUtLS0tLQ==
- etcd-ca: LS0tLS1CRUdJTiBD...JRklDQVRFLS0tLS0=
- ```
-
-1. Apply the manifest.
-
- **{{prodname}} for policy and networking**
-
- ```bash
- kubectl apply -f calico.yaml
- ```
-
- **{{prodname}} for policy and flannel for networking**
-
- ```bash
- kubectl apply -f canal.yaml
- ```
-
-## Authorization options
-
-{{prodname}}'s manifests assign its components one of two service accounts.
-Depending on your cluster's authorization mode, you'll want to back these
-service accounts with the necessary permissions.
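-
-For example, on an RBAC-enabled cluster you can check which cluster roles are bound to the {{prodname}} service accounts (the binding names below are typical for these manifests but may differ):
-
-```bash
-kubectl get clusterrolebindings | grep calico
-kubectl describe clusterrolebinding calico-node
-```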
-
-## Other configuration options
-
-The following table outlines the remaining supported `ConfigMap` options.
-
-| Option | Description | Default |
-| ------------------ | --------------------------------------------------------------------------------------- | ------- |
-| calico_backend | The backend to use. | `bird` |
-| cni_network_config | The CNI network config to install on each node. Supports templating as described below. | |
-
-## CNI network configuration template
-
-The `cni_network_config` configuration option supports the following template fields, which will
-be filled in automatically by the `calico/cni` container:
-
-| Field | Substituted with |
-| ----------------------------- | -------------------------------------------------------------------------------------------------------------------- |
-| `__KUBERNETES_SERVICE_HOST__` | The Kubernetes service cluster IP, e.g., `10.0.0.1`. |
-| `__KUBERNETES_SERVICE_PORT__` | The Kubernetes service port, e.g., `443` |
-| `__SERVICEACCOUNT_TOKEN__` | The service account token for the namespace, if one exists. |
-| `__ETCD_ENDPOINTS__` | The etcd endpoints specified in `etcd_endpoints`. |
-| `__KUBECONFIG_FILEPATH__` | The path to the automatically generated kubeconfig file in the same directory as the CNI network configuration file. |
-| `__ETCD_KEY_FILE__` | The path to the etcd key file installed to the host. Empty if no key is present. |
-| `__ETCD_CERT_FILE__` | The path to the etcd certificate file installed to the host. Empty if no certificate is present. |
-| `__ETCD_CA_CERT_FILE__` | The path to the etcd certificate authority file installed to the host. Empty if no certificate authority is present. |
-
-## About customizing application layer policy manifests
-
-Instead of installing from our pre-modified Istio manifests, you may wish to
-customize your Istio install or use a different Istio version. This section
-walks you through the necessary changes to a generic Istio install manifest to
-allow application layer policy to operate.
-
-The standard Istio manifests for the sidecar injector include a ConfigMap that
-contains the template used when adding pods to the cluster. The template adds an
-init container and the Envoy sidecar. Application layer policy requires
-an additional lightweight sidecar called Dikastes which receives {{prodname}} policy
-from Felix and applies it to incoming connections and requests.
-
-If you haven't already done so, download an
-[Istio release](https://github.com/istio/istio/releases) and untar it to a
-working directory.
-
-Open the `install/kubernetes/istio-demo-auth.yaml` file in an
-editor, and locate the `istio-sidecar-injector` ConfigMap. In the existing `istio-proxy` container, add a new `volumeMount`.
-
-```yaml
-- mountPath: /var/run/dikastes
- name: dikastes-sock
-```
-
-Add a new container to the template.
-
-```yaml
- - name: dikastes
- image: {{registry}}{{imageNames.calico/dikastes}}:{{releases.0.components.calico/dikastes.version}}
- args: ["server", "-l", "/var/run/dikastes/dikastes.sock", "-d", "/var/run/felix/nodeagent/socket"]
- securityContext:
- allowPrivilegeEscalation: false
- livenessProbe:
- exec:
- command:
- - /healthz
- - liveness
- initialDelaySeconds: 3
- periodSeconds: 3
- readinessProbe:
- exec:
- command:
- - /healthz
- - readiness
- initialDelaySeconds: 3
- periodSeconds: 3
- volumeMounts:
- - mountPath: /var/run/dikastes
- name: dikastes-sock
- - mountPath: /var/run/felix
- name: felix-sync
-```
-
-Add two new volumes.
-
-```yaml
-- name: dikastes-sock
- emptyDir:
- medium: Memory
-- name: felix-sync
- csi:
- driver: 'csi.tigera.io'
-```
-
-The volumes you added are used to create Unix domain sockets that allow
-communication between Envoy and Dikastes and between Dikastes and
-Felix. Once created, a Unix domain socket is an in-memory communications
-channel. The volumes are not used for any kind of stateful storage on disk.
-
-Refer to the
-[Calico ConfigMap manifest]({{manifestsUrl}}/manifests/alp/istio-inject-configmap-1.4.2.yaml) for an
-example with the above changes.
-
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/index.mdx
deleted file mode 100644
index 74a3bde624..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Install Calico for on-premises deployments to provide networking and network policy, in either overlay or non-overlay networking modes.
-hide_table_of_contents: true
----
-
-# Self-managed on-premises
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/onpremises.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/onpremises.mdx
deleted file mode 100644
index b81b533088..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/onpremises.mdx
+++ /dev/null
@@ -1,230 +0,0 @@
----
-description: Install Calico networking and network policy for on-premises deployments.
----
-
-# Install Calico networking and network policy for on-premises deployments
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Install {{prodname}} to provide both networking and network policy for self-managed on-premises deployments.
-
-## Value
-
-**{{prodname}} networking** and **network policy** are a powerful choice for a CaaS implementation. If you have the networking infrastructure and resources to manage Kubernetes on-premises, installing the full {{prodname}} product provides the most customization and control.
-
-## Concepts
-
-### {{prodname}} operator
-
-{{prodname}} is installed by an operator which manages the installation, upgrade, and general lifecycle of a {{prodname}} cluster. The operator is
-installed directly on the cluster as a Deployment, and is configured through one or more custom Kubernetes API resources.
-
-### {{prodname}} manifests
-
-{{prodname}} can also be installed using raw manifests as an alternative to the operator. The manifests contain the necessary resources for installing {{prodname}} on each node in your Kubernetes cluster. Using manifests is not recommended as they cannot automatically manage the lifecycle of the {{prodname}} as the operator does. However, manifests may be useful for clusters that require highly specific modifications to the underlying Kubernetes resources.
-
-## Before you begin...
-
-- Ensure that your Kubernetes cluster meets [requirements](../requirements.mdx).
- If you do not have a cluster, see [Installing kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/).
-
-## How to
-
-- [Install Calico](#install-calico)
-
-### Install Calico
-
-
-
-
-1. Install the operator on your cluster.
-
- ```bash
- kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
- ```
-
-1. Download the custom resources necessary to configure {{prodname}}
-
- ```bash
- curl {{manifestsUrl}}/manifests/custom-resources.yaml -O
- ```
-
- If you wish to customize the {{prodname}} install, customize the downloaded custom-resources.yaml manifest locally.
-
-1. Create the manifest to install {{prodname}}.
-
- ```bash
- kubectl create -f custom-resources.yaml
- ```
-
-1. Verify {{prodname}} installation in your cluster.
-
- ```
- watch kubectl get pods -n calico-system
- ```
-
-   You should see a result similar to the following.
-
- ```
-   NAME                READY   STATUS    RESTARTS   AGE
-   calico-node-txngh   1/1     Running   0          54s
- ```
-
-
-
-
-
-
-Based on your datastore and number of nodes, select a link below to install {{prodname}}.
-
-:::note
-
-The **Kubernetes API datastore, more than 50 nodes** option provides scaling using the [Typha daemon](../../../reference/typha/index.mdx). Typha is not included for etcd because etcd already handles many clients, so adding Typha would be redundant and is not recommended.
-
-:::
-
-- [Install Calico with Kubernetes API datastore, 50 nodes or less](#install-calico-with-kubernetes-api-datastore-50-nodes-or-less)
-- [Install Calico with Kubernetes API datastore, more than 50 nodes](#install-calico-with-kubernetes-api-datastore-more-than-50-nodes)
-- [Install Calico with etcd datastore](#install-calico-with-etcd-datastore)
-
-#### Install Calico with Kubernetes API datastore, 50 nodes or less
-
-1. Download the {{prodname}} networking manifest for the Kubernetes API datastore.
-
- ```bash
- curl {{manifestsUrl}}/manifests/calico.yaml -O
- ```
-
-1. If you are using pod CIDR `192.168.0.0/16`, skip to the next step.
- If you are using a different pod CIDR with kubeadm, no changes are required — Calico will automatically detect the CIDR based on the running configuration.
- For other platforms, make sure you uncomment the CALICO_IPV4POOL_CIDR variable in the manifest and set it to the same value as your chosen pod CIDR.
-1. Customize the manifest as necessary.
-1. Apply the manifest using the following command.
-
- ```bash
- kubectl apply -f calico.yaml
- ```
-
-The geeky details of what you get:
-
-
-
-#### Install Calico with Kubernetes API datastore, more than 50 nodes
-
-1. Download the {{prodname}} networking manifest for the Kubernetes API datastore.
-
- ```bash
- curl {{manifestsUrl}}/manifests/calico-typha.yaml -o calico.yaml
- ```
-
-1. If you are using pod CIDR `192.168.0.0/16`, skip to the next step.
- If you are using a different pod CIDR with kubeadm, no changes are required — Calico will automatically detect the CIDR based on the running configuration.
- For other platforms, make sure you uncomment the CALICO_IPV4POOL_CIDR variable in the manifest and set it to the same value as your chosen pod CIDR.
-1. Modify the replica count to the desired number in the `Deployment` named, `calico-typha`.
-
- ```yaml noValidation
-   apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: calico-typha
- ...
- spec:
- ...
-     replicas: <number of replicas>
- ```
-
-   We recommend at least one replica for every 200 nodes, and no more than
-   20 replicas. In production, we recommend a minimum of three replicas to reduce
-   the impact of rolling upgrades and failures. The number of replicas should
-   always be less than the number of nodes, otherwise rolling upgrades will stall.
-   In addition, Typha only helps with scale if there are fewer Typha instances than
-   there are nodes. For example, a 500-node cluster needs at least 500 / 200 = 2.5,
-   rounded up to 3 replicas, which also satisfies the production minimum of three.
-
- :::note
-
- If you set `typha_service_name` and set the Typha deployment replica
- count to 0, Felix will not start.
-
- :::
-
-1. Customize the manifest if desired.
-1. Apply the manifest.
-
- ```bash
- kubectl apply -f calico.yaml
- ```
-
-The geeky details of what you get:
-
-
-
-#### Install Calico with etcd datastore
-
-:::note
-
-The **etcd** database is not recommended for new installs. However, it is an option if you are running {{prodname}} as the network plugin for both OpenStack and Kubernetes.
-
-:::
-
-1. Download the {{prodname}} networking manifest for etcd.
-
- ```bash
- curl {{manifestsUrl}}/manifests/calico-etcd.yaml -o calico.yaml
- ```
-
-1. If you are using pod CIDR `192.168.0.0/16`, skip to the next step.
- If you are using a different pod CIDR with kubeadm, no changes are required — Calico will automatically detect the CIDR based on the running configuration.
- For other platforms, make sure you uncomment the CALICO_IPV4POOL_CIDR variable in the manifest and set it to the same value as your chosen pod CIDR.
-1. In the `ConfigMap` named, `calico-config`, set the value of `etcd_endpoints` to the IP address and port of your etcd server.
- :::note
-
- You can specify more than one `etcd_endpoint` using commas as delimiters.
-
- :::
-
-1. Customize the manifest if desired.
-1. Apply the manifest using the following command.
-
- ```bash
- kubectl apply -f calico.yaml
- ```
-
-The geeky details of what you get:
-
-
-
-
-
-
-## Next steps
-
-**Required**
-
-- [Install and configure calicoctl](../../../operations/calicoctl/install.mdx)
-
-**Recommended - Networking**
-
-- If you are using the default BGP networking with full-mesh node-to-node peering with no encapsulation, go to [Configure BGP peering](../../../networking/configuring/bgp.mdx) to get traffic flowing between pods.
-- If you are unsure about networking options, or want to implement encapsulation (overlay networking), see [Determine best networking option](../../../networking/determine-best-networking.mdx).
-
-**Recommended - Security**
-
-- [Secure Calico component communications](../../../network-policy/comms/crypto-auth.mdx)
-- [Secure hosts by installing Calico on hosts](../../bare-metal/about.mdx)
-- [Secure pods with Calico network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx)
-- If you are using {{prodname}} with Istio service mesh, get started here: [Enable application layer policy](../../../network-policy/istio/app-layer-policy.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/aws.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/aws.mdx
deleted file mode 100644
index 53f91bf754..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/aws.mdx
+++ /dev/null
@@ -1,115 +0,0 @@
----
-description: Use Calico with a self-managed Kubernetes cluster in Amazon Web Services (AWS).
----
-
-# Self-managed Kubernetes in Amazon Web Services (AWS)
-
-## Big picture
-
-Use {{prodname}} with a self-managed Kubernetes cluster in Amazon Web Services (AWS).
-
-## Value
-
-Managing your own Kubernetes cluster (as opposed to using a managed-Kubernetes service like EKS), gives you the most flexibility in configuring {{prodname}} and Kubernetes. {{prodname}} combines flexible networking capabilities with "run-anywhere" security enforcement to provide a solution with native Linux kernel performance and true cloud-native scalability.
-
-## Concepts
-
-Kubernetes Operations (kops) is a cluster management tool that handles provisioning cluster VMs and installing Kubernetes. It has built-in support for using {{prodname}} as the Kubernetes networking provider.
-
-## Before you begin...
-
-- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
-- Install [AWS CLI tools](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html)
-
-:::note
-
-{{prodname}} makes use of the Kubernetes Container Storage Interface (CSI) to support various types of volumes. The drivers required for CSI
-to function correctly in AWS clusters using EBS volumes may no longer be present by default in clusters running Kubernetes 1.23 or later. Check the documentation for the installer
-you are using to ensure the necessary CSI drivers are installed.
-
-If you are using Kubernetes Operations (kops), as described further down on this page, follow the kops documentation linked in the steps below to ensure your cluster has the necessary configuration.
-
-:::
-
-## How to
-
-There are many ways to install and manage Kubernetes in AWS. Using Kubernetes Operations (kops) is a good default choice for most people, as it gives you access to all of {{prodname}}’s [flexible and powerful networking features](../../../networking/index.mdx). However, there are other options that may work better for your environment.
-
-- [Kubernetes Operations for Calico networking and network policy](#kubernetes-operations-for-calico-networking-and-network-policy)
-- [Other options and tools](#other-options-and-tools)
-
-### Kubernetes Operations for Calico networking and network policy
-
-To use kops to create a cluster with {{prodname}} networking and network policy:
-
-1. [Install kops](https://kops.sigs.k8s.io/install/) on your workstation.
-1. [Set up your environment for AWS](https://kops.sigs.k8s.io/getting_started/aws/) .
-1. Be sure to [set up an S3 state store](https://kops.sigs.k8s.io/getting_started/aws/#cluster-state-storage) and export its name:
-
- ```
- export KOPS_STATE_STORE=s3://name-of-your-state-store-bucket
- ```
-
-1. [Verify CSI driver installation configuration as per your particular cluster and volumes](https://kops.sigs.k8s.io/addons/#self-managed-aws-ebs-csi-driver)
-1. Configure kops to use {{prodname}} for networking.
- The easiest way to do this is to pass `--networking calico` to kops when creating the cluster. For example:
-
- ```
- kops create cluster \
- --zones us-west-2a \
- --networking calico \
- name-of-your-cluster
- ```
-
-   Or, you can add `calico` to your cluster config: run `kops edit cluster` and set the following networking configuration.
-
- ```yaml
- networking:
- calico: {}
- ```
-
-The geeky details of what you get:
-
-{' '}
-
-You can further customize the {{prodname}} install with [options listed in the kops documentation](https://kops.sigs.k8s.io/networking/#calico-example-for-cni-and-network-policy).
-
-### Other options and tools
-
-#### Amazon VPC CNI plugin
-
-As an alternative to {{prodname}} for both networking and network policy, you can use Amazon’s VPC CNI plugin for networking, and {{prodname}} for network policy. The advantage of this approach is that pods are assigned IP addresses associated with Elastic Network Interfaces on worker nodes. The IPs come from the VPC network pool and therefore do not require NAT to access resources outside the Kubernetes cluster.
-
-Set your kops cluster configuration to:
-
-```yaml
-networking:
- amazonvpc: {}
-```
-
-Then install {{prodname}} for network policy only after the cluster is up and ready.
-
-The geeky details of what you get:
-
-
-
-#### Kubespray
-
-[Kubespray](https://kubespray.io/) is a tool for provisioning and managing Kubernetes clusters with support for multiple clouds including Amazon Web Services. {{prodname}} is the default networking provider, or you can set the `kube_network_plugin` variable to `calico`. See the [Kubespray docs](https://kubespray.io/#/?id=network-plugins) for more details.
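-
-For example, a minimal sketch assuming Kubespray's sample inventory layout (`inventory/mycluster/...`), which may differ in your setup:
-
-```bash
-# Set the network plugin in the inventory group vars, then run the playbook.
-sed -i 's/^kube_network_plugin:.*/kube_network_plugin: calico/' \
-  inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
-ansible-playbook -i inventory/mycluster/hosts.yaml --become cluster.yml
-```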
-
-## Next steps
-
-**Required**
-
-- [Install and configure calicoctl](../../../operations/calicoctl/install.mdx)
-
-**Recommended**
-
-- [Video: Everything you need to know about Kubernetes pod networking on AWS](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-pod-networking-on-aws/)
-- [Try out {{prodname}} network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/azure.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/azure.mdx
deleted file mode 100644
index d726c89186..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/azure.mdx
+++ /dev/null
@@ -1,87 +0,0 @@
----
-description: Use Calico with a self-managed Kubernetes cluster in Microsoft Azure.
----
-
-# Self-managed Kubernetes in Microsoft Azure
-
-## Big picture
-
-Use {{prodname}} with a self-managed Kubernetes cluster in Microsoft Azure.
-
-## Value
-
-Managing your own Kubernetes cluster (as opposed to using a managed-Kubernetes service like AKS), gives you the most flexibility in configuring {{prodname}} and Kubernetes. {{prodname}} combines flexible networking capabilities with "run-anywhere" security enforcement to provide a solution with native Linux kernel performance and true cloud-native scalability.
-
-## Concepts
-
-**aks-engine** is an open-source tool for creating and managing Kubernetes clusters in Microsoft Azure. It is the core technology for Microsoft’s Azure Kubernetes Service (AKS), but allows you to manage the cluster yourself.
-
-## Before you begin...
-
-- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
-- Install [Azure CLI tools](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)
-
-## How to
-
-There are many ways to install and manage Kubernetes in Azure. This guide shows how to use **aks-engine** to deploy a cluster with **Azure’s CNI plugin for networking** and **{{prodname}} for network policy enforcement**. The advantage of this approach is that pods are assigned IP addresses associated with Azure Network Interfaces on worker nodes. The IPs come from the VNET network pool and therefore do not require NAT to access resources outside the Kubernetes cluster. However, there are other options that may work better for your environment.
-
-- [aks-engine for Azure networking and Calico network policy](#aks-engine-for-azure-networking-and-calico-network-policy)
-- [Other options and tools](#other-options-and-tools)
-
-### aks-engine for Azure networking and Calico network policy
-
-[Install aks-engine](https://github.com/Azure/aks-engine/blob/master/docs/tutorials/quickstart.md#install-aks-engine) on your workstation.
-
-Before deploying, customize your cluster definition to use {{prodname}} for network policy. Add or modify the `kubernetesConfig` section to include the following (see the [aks-engine documentation](https://github.com/Azure/aks-engine/blob/master/docs/topics/clusterdefinitions.md#kubernetesconfig) for other Kubernetes configuration settings).
-
-```
-"kubernetesConfig": {
- "networkPlugin": "azure",
- "networkPolicy": "calico"
- }
-```
-
-Or, start with this [example cluster definition](https://github.com/Azure/aks-engine/blob/master/examples/networkpolicy/kubernetes-calico-azure.json) with these values already set, and customize to meet your needs.
-
-Then, [follow the aks-engine documentation to deploy your cluster](https://github.com/Azure/aks-engine/blob/master/docs/tutorials/quickstart.md#deploy), passing your cluster definition to `aks-engine deploy` via the `-m` flag.
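-
-A minimal sketch of that deploy step, assuming the cluster definition was saved as `kubernetes-calico-azure.json` (aks-engine will prompt for, or accept flags for, your Azure subscription and credentials):
-
-```bash
-aks-engine deploy -m kubernetes-calico-azure.json --location <azure-region>
-```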
-
-The geeky details of what you get:
-
-{' '}
-
-### Other options and tools
-
-#### {{prodname}} networking
-
-You can also deploy {{prodname}} for both networking and policy enforcement. In this mode, {{prodname}} uses a VXLAN-based overlay network that masks the IP addresses of the pods from the underlying Azure VNET. This can be useful in large deployments or when running multiple clusters and IP address space is a big concern.
-
-Unfortunately, aks-engine does not support this mode, so you must use a different tool chain to install and manage the cluster. Some options:
-
-- Use [Terraform](#terraform) to provision the Azure networks and VMs, then [kubeadm](#kubeadm) to install the Kubernetes cluster.
-- Use [Kubespray](#kubespray)
-
-#### Terraform
-
-Terraform is a tool for automating infrastructure provisioning using declarative configurations. You can also go as far as automating the install of Docker, kubeadm, and Kubernetes using Terraform “provisioners.” See the [Terraform documentation](https://www.terraform.io/docs/index.html) for more details.
-
-#### kubeadm
-
-[kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/) is a command line tool for bootstrapping a Kubernetes cluster on top of already-provisioned compute resources, like VMs in a cloud or bare metal hosts. Unlike aks-engine which handles provisioning cloud resources, installing Kubernetes, and installing {{prodname}}, kubeadm only handles the second step of installing Kubernetes. You should proceed to install {{prodname}} after completing kubeadm install.
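-
-For example, once `kubeadm init` has completed and kubectl is configured, the operator-based install used elsewhere in these docs applies unchanged:
-
-```bash
-kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
-# Review custom-resources.yaml first if you need to change the default IP pool.
-kubectl create -f {{manifestsUrl}}/manifests/custom-resources.yaml
-```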
-
-#### Kubespray
-
-[Kubespray](https://kubespray.io/) is a tool for provisioning and managing Kubernetes clusters with support for multiple clouds including Azure. {{prodname}} is the default networking provider, or you can set the `kube_network_plugin` variable to `calico`. See the [Kubespray docs](https://kubespray.io/#/?id=network-plugins) for more details.
-
-## Next steps
-
-**Required**
-
-- [Install and configure calicoctl](../../../operations/calicoctl/install.mdx)
-
-**Recommended**
-
-- [Video: Everything you need to know about Kubernetes networking on Azure](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-azure/)
-- [Try out {{prodname}} network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/do.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/do.mdx
deleted file mode 100644
index e4ca6c6d78..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/do.mdx
+++ /dev/null
@@ -1,128 +0,0 @@
----
-description: Use Calico with a self-managed Kubernetes cluster in DigitalOcean (DO).
----
-
-# Self-managed Kubernetes in DigitalOcean (DO)
-
-## Big picture
-
-This tutorial creates a self-managed Kubernetes cluster (1 Master, 2 Worker nodes) using {{prodname}} networking in DigitalOcean.
-
-## Value
-
-Managing your own Kubernetes cluster (as opposed to using a managed-Kubernetes service like EKS), gives you the most flexibility in configuring {{prodname}} and Kubernetes. {{prodname}} combines flexible networking capabilities with "run-anywhere" security enforcement to provide a solution with native Linux kernel performance and true cloud-native scalability.
-
-## Concepts
-
-Kubernetes Operations (kops) is a cluster management tool that handles provisioning cluster VMs and installing Kubernetes. It has built-in support for using {{prodname}} as the Kubernetes networking provider.
-
-:::note
-
-Kops support for DigitalOcean is currently in the early stages of development and subject to change.
-For more information, see the [kops documentation for DigitalOcean](https://kops.sigs.k8s.io/getting_started/digitalocean/).
-
-:::
-
-## Before you begin...
-
-- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
-- Install [kops](https://kops.sigs.k8s.io/install/)
-
-## How to
-
-There are many ways to install and manage Kubernetes in DO. Using Kubernetes Operations (kops) is a good default choice for most people, as it gives you access to all of {{prodname}}’s [flexible and powerful networking features](../../../networking/index.mdx). However, there are other options that may work better for your environment.
-
-The geeky details of what you get:
-
-
-
-### Generate your DigitalOcean API token
-
-An API token is needed by kops for the CRUD (Create, Read, Update and Delete) operations necessary for resources in your DigitalOcean account.
-Use [this link](https://www.digitalocean.com/docs/apis-clis/api/create-personal-access-token/) to generate your API token and then export it as an environment variable.
-
-```bash
-export DIGITALOCEAN_ACCESS_TOKEN=<your-api-token>
-```
-
-### Create an object storage
-
-DigitalOcean provides an S3-compatible object storage API, which kops uses to store your cluster state.
-Create a Space using [this link](https://www.digitalocean.com/docs/spaces/how-to/create/), then export its name.
-
-```bash
-export KOPS_STATE_STORE=do://<your-space-name>
-export S3_ENDPOINT=<your-space-name>
-```
-
-:::note
-
-Using the FQDN for `S3_ENDPOINT` causes an error.
-If your Space FQDN is `my-test-space.nyc3.digitaloceanspaces.com`, export just `my-test-space` as `<your-space-name>`.
-
-:::
-
-### Generate an API key for object storage
-
-Access to object storage requires an API key.
-Follow this [tutorial](https://www.digitalocean.com/docs/spaces/how-to/manage-access/) to generate your keys, then export them as environment variables.
-
-```bash
-export S3_ACCESS_KEY_ID=<your-access-key>
-export S3_SECRET_ACCESS_KEY=<your-secret-key>
-```
-
-### Enable kops alpha feature
-
-Enable alpha feature support using the `KOPS_FEATURE_FLAGS` environment variable.
-
-```bash
- export KOPS_FEATURE_FLAGS="AlphaAllowDO"
-```
-
-### Create your cluster
-
-kops supports various options that enable you to customize your cluster the way you like.
-
-1. Add Calico to your cluster using `--networking=calico`.
-1. kops requires an external DNS server to create a cluster. By adding the `.k8s.local` suffix to the `--name` option,
-   you generate a [gossip](https://kops.sigs.k8s.io/gossip/) DNS and bypass this requirement.
-
-:::note
-
-You can view a complete list of options supported by kops
-in the [kops create cluster reference](https://kops.sigs.k8s.io/cli/kops_create_cluster/#options).
-
-:::
-
-```bash
- kops create cluster --cloud=digitalocean --name=calico-demo.k8s.local \
- --networking=calico --master-zones=nyc1 --zones=nyc1 \
- --master-count=1 --api-loadbalancer-type=public \
- --node-size=s-1vcpu-2gb --image=ubuntu-20-04-x64 --yes
-```
-
-You can further customize the {{prodname}} install with [options listed in the kops documentation](https://kops.sigs.k8s.io/networking/calico).
-
-## Cleanup
-
-If you wish to remove the resources created by this tutorial, run:
-
-```bash
-kops delete cluster calico-demo.k8s.local --yes
-```
-
-Use the DigitalOcean web UI to remove the API tokens and Space you created.
-
-## Next steps
-
-**Required**
-
-- [Install and configure calicoctl](../../../operations/calicoctl/install.mdx)
-
-**Recommended**
-
-- [Try out {{prodname}} network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/gce.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/gce.mdx
deleted file mode 100644
index adc34054a3..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/gce.mdx
+++ /dev/null
@@ -1,235 +0,0 @@
----
-description: Use Calico with a self-managed Kubernetes cluster in Google Compute Engine (GCE).
----
-
-# Self-managed Kubernetes in Google Compute Engine (GCE)
-
-## Big picture
-
-Use {{prodname}} with a self-managed Kubernetes cluster in Google Compute Engine (GCE).
-
-## Value
-
-Managing your own Kubernetes cluster (as opposed to using a managed-Kubernetes service like GKE) gives you the most flexibility in configuring {{prodname}} and Kubernetes. {{prodname}} combines flexible networking capabilities with "run-anywhere" security enforcement to provide a solution with native Linux kernel performance and true cloud-native scalability.
-
-## Concepts
-
-**kubeadm** is a cluster management tool that is used to install Kubernetes.
-
-## Before you begin...
-
-[Install and configure the Google Cloud CLI tools](https://cloud.google.com/sdk/docs/quickstarts)
-
-## How to
-
-There are many ways to install and manage Kubernetes in GCE. Using kubeadm is a good default choice for most people, as it gives you access to all of {{prodname}}’s [flexible and powerful networking features](../../../networking/index.mdx). However, there are other options that may work better for your environment.
-
-- [kubeadm for Calico networking and network policy](#kubeadm-for-calico-networking-and-network-policy)
-- [Other tools and options](#other-tools-and-options)
-
-### kubeadm for Calico networking and network policy
-
-#### Create cloud resources
-
-You will need at least one VM to serve as a control plane node and one or more worker nodes. (It is possible to have control plane nodes also act as workers. This is not recommended in most cases and not covered by this guide.) See [requirements](../requirements.mdx) for specific OS requirements for these VMs.
-
-The following worked example creates a single control node and three workers on a dedicated virtual private network (VPC). Adjust the example as needed for your requirements. Consider a dedicated infrastructure management tool like [Terraform](https://www.terraform.io/) for managing cloud resources. (This example is adapted from [Kubernetes the Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way/blob/master/docs/03-compute-resources.md).)
-
-1. Create the VPC.
-
- ```bash
- gcloud compute networks create example-k8s --subnet-mode custom
- ```
-
-2. Create the k8s-nodes subnet in the example-k8s VPC network:
-
- ```bash
- gcloud compute networks subnets create k8s-nodes \
- --network example-k8s \
- --range 10.240.0.0/24
- ```
-
-3. Create a firewall rule that allows internal communication across TCP, UDP, ICMP and IP in IP (used for the Calico overlay):
-
- ```bash
- gcloud compute firewall-rules create example-k8s-allow-internal \
- --allow tcp,udp,icmp,ipip \
- --network example-k8s \
- --source-ranges 10.240.0.0/24
- ```
-
-4. Create a firewall rule that allows external SSH, ICMP, and HTTPS:
-
- ```bash
- gcloud compute firewall-rules create example-k8s-allow-external \
- --allow tcp:22,tcp:6443,icmp \
- --network example-k8s \
- --source-ranges 0.0.0.0/0
- ```
-
-5. Create the controller VM.
-
- ```bash
- gcloud compute instances create controller \
- --async \
- --boot-disk-size 200GB \
- --can-ip-forward \
- --image-family ubuntu-2204-lts \
- --image-project ubuntu-os-cloud \
- --machine-type n1-standard-2 \
- --private-network-ip 10.240.0.11 \
- --scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \
- --subnet k8s-nodes \
- --zone us-central1-f \
- --tags example-k8s,controller
- ```
-
-6. Create three worker VMs.
-
- ```bash
- for i in 0 1 2; do
- gcloud compute instances create worker-${i} \
- --async \
- --boot-disk-size 200GB \
- --can-ip-forward \
- --image-family ubuntu-2204-lts \
- --image-project ubuntu-os-cloud \
- --machine-type n1-standard-2 \
- --private-network-ip 10.240.0.2${i} \
- --scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \
- --subnet k8s-nodes \
- --zone us-central1-f \
- --tags example-k8s,worker
- done
- ```
-7. Install Docker on the controller VM and each worker VM. On each VM run:
-
- ```bash
- sudo apt update
- sudo apt install -y docker.io
- sudo systemctl enable docker.service
- sudo apt install -y apt-transport-https curl
- ```
-
-#### Install Kubernetes and create the cluster
-
-1. Install `kubeadm`, `kubelet`, and `kubectl` on each worker node and the controller node (see [kubeadm docs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-kubeadm-kubelet-and-kubectl) for more details).
-
-   Connect to each node, add the Kubernetes apt repository, and install the packages:
-
-   ```bash
-   # Add the upstream Kubernetes apt repository (adjust for your distribution) and install.
-   curl -fsSL https://dl.k8s.io/apt/doc/apt-key.gpg | sudo apt-key add -
-   cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
-   deb https://apt.kubernetes.io/ kubernetes-xenial main
-   EOF
-   sudo apt update
-   sudo apt install -y kubeadm kubelet kubectl
-   ```
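-
-2. Initialize the control plane on the controller node. A minimal sketch, reusing the default {{prodname}} pod CIDR used elsewhere in this guide (adjust it if it overlaps with your VPC ranges):
-
-   ```bash
-   sudo kubeadm init --pod-network-cidr=192.168.0.0/16
-   ```
-
-3. Still on the controller node, configure kubectl for your user (these commands are also printed by `kubeadm init`):
-
-   ```bash
-   mkdir -p $HOME/.kube
-   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
-   sudo chown $(id -u):$(id -g) $HOME/.kube/config
-   ```
-
-4. Join each worker node to the cluster by running the `kubeadm join` command printed at the end of `kubeadm init`; the placeholders below stand in for the values from your own output:
-
-   ```bash
-   sudo kubeadm join <controller-ip>:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>
-   ```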
-
-5. Verify that all nodes have joined.
-
- Run this command on the controller node.
-
- ```bash
- kubectl get nodes
- ```
-
-    The output should be similar to the following. The nodes report `NotReady` because no CNI plugin has been installed yet; this is resolved in the next section when you install {{prodname}}.
-
-    ```
-    NAME         STATUS     ROLES    AGE     VERSION
-    controller   NotReady   master   5m49s   v1.17.2
-    worker-0     NotReady   <none>   3m38s   v1.17.2
-    worker-1     NotReady   <none>   3m7s    v1.17.2
-    worker-2     NotReady   <none>   5s      v1.17.2
-    ```
-
-#### Install {{prodname}}
-
-1. On the controller, install {{prodname}} using the operator:
-
- ```bash
- kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
- ```
-
-2. Download the custom resources necessary to configure {{prodname}}:
-
- ```bash
- curl {{manifestsUrl}}/manifests/custom-resources.yaml -O
- ```
-
-3. If you wish to customize the {{prodname}} install, edit the downloaded custom-resources.yaml manifest. Then create its resources to install {{prodname}}:
-
- ```bash
- kubectl create -f custom-resources.yaml
- ```
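-
-After the custom resources are created, the operator rolls out {{prodname}}. A quick way to watch progress (assuming the default operator-managed install, which runs {{prodname}} in the `calico-system` namespace):
-
-```bash
-# Wait until all calico-system pods are Running and the tigerastatus resources report Available
-watch kubectl get pods -n calico-system
-kubectl get tigerastatus
-```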
-
-The geeky details of what you get:
-
-
-### Other tools and options
-
-#### Terraform
-
-You may have noticed that the bulk of the above instructions are about provisioning the Google Cloud resources for the cluster and installing Kubernetes. Terraform is a tool for automating infrastructure provisioning using declarative configurations. You can also go as far as automating the install of Docker, kubeadm, and Kubernetes using Terraform “provisioners.” See the [Terraform documentation](https://www.terraform.io/docs/index.html) for more details.
-
-#### Kubespray
-
-[Kubespray](https://kubespray.io/) is a tool for provisioning and managing Kubernetes clusters with support for multiple clouds including Google Compute Engine. Calico is the default networking provider, or you can set the `kube_network_plugin` variable to `calico`. See the [Kubespray docs](https://kubespray.io/#/?id=network-plugins) for more details.
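-
-For example, a typical Kubespray run that explicitly selects {{prodname}} as the network plugin might look like the following sketch (adjust the inventory path and variables to your setup):
-
-```bash
-# Run the Kubespray cluster playbook with Calico selected as the CNI
-ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root \
-  -e kube_network_plugin=calico cluster.yml
-```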
-
-## Next steps
-
-**Required**
-
-- [Install and configure calicoctl](../../../operations/calicoctl/install.mdx)
-
-**Recommended**
-
-- [Video: Everything you need to know about Kubernetes networking on Google cloud](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-google-cloud/)
-- [Try out {{prodname}} network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/index.mdx
deleted file mode 100644
index 371a6dd742..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Manage your own Kubernetes clusters in AWS, GCE, or Azure public clouds.
-hide_table_of_contents: true
----
-
-# Self-managed public cloud
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/getting-started.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/getting-started.mdx
deleted file mode 100644
index 5be5af49c6..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/getting-started.mdx
+++ /dev/null
@@ -1,423 +0,0 @@
----
-description: Install Calico with the VPP dataplane on a Kubernetes cluster.
----
-
-# Get started with VPP networking
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Install {{prodname}} and enable the beta release of the VPP dataplane.
-
-:::caution
-
-The VPP dataplane is in beta and should not be used in production clusters. It has had lots of testing and is pretty stable. However, chances are that some bugs are still lurking around (please report these on the [Calico Users slack](https://calicousers.slack.com/archives/C017220EXU1) or [GitHub](https://github.com/projectcalico/vpp-dataplane/issues)). In addition, it still does not support all the features of {{prodname}}.
-
-:::
-
-## Value
-
-The VPP dataplane mode has several advantages over standard Linux networking pipeline mode:
-
-- Scales to higher throughput, especially with WireGuard encryption enabled
-- Further improves encryption performance with IPsec
-- Native support for Kubernetes services without needing kube-proxy, which:
- - Reduces first-packet latency for packets to services
- - Preserves external client source IP addresses all the way to the pod
-
-The VPP dataplane is entirely compatible with the other {{prodname}} dataplanes, meaning you can have a cluster with VPP-enabled nodes along with regular nodes. This makes it possible to migrate a cluster from Linux or eBPF networking to VPP networking.
-
-In addition, the VPP dataplane offers some specific features for network-intensive applications, such as providing `memif` userspace packet interfaces to the pods (instead of regular Linux network devices), or exposing the VPP Host Stack to run optimized L4+ applications in the pods.
-
-Trying out the beta will give you a taste of these benefits and an opportunity to give feedback to the VPP dataplane team.
-
-## Concepts
-
-### VPP
-
-The Vector Packet Processor (VPP) is a high-performance, open-source userspace network dataplane written in C, developed under the [fd.io](https://fd.io) umbrella. It supports many standard networking features (L2 switching, L3 routing, NAT, encapsulations), and is easily extensible using plugins. The VPP dataplane uses plugins to efficiently implement Kubernetes services load balancing and {{prodname}} policies.
-
-### Operator based installation
-
-This guide uses the Tigera operator to install {{prodname}}. The operator provides lifecycle management for {{prodname}},
-exposed via the Kubernetes API and defined as a custom resource definition. While it is also technically possible to install {{prodname}}
-and configure it for VPP using manifests directly, only operator-based installations are supported at this stage.
-
-## How to
-
-This guide details three ways to install {{prodname}} with the VPP dataplane:
-
-- On a managed EKS cluster. This is the option that requires the least configuration
-- On a managed EKS cluster with the DPDK interface driver. This option is more complex to set up but provides better performance
-- On any Kubernetes cluster
-
-In all cases, here are the details of what you will get:
-
-
-
-
-
-
-## Install Calico with the VPP dataplane on an EKS cluster
-
-### Requirements
-
-For these instructions, we will use `eksctl` to provision the cluster. However, you can use any of the methods in [Getting Started with Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html)
-
-Before you get started, make sure you have downloaded and configured the [necessary prerequisites](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html#eksctl-prereqs)
-
-### Provision the cluster
-
-1. First, create an Amazon EKS cluster without any nodes.
-
- ```bash
- eksctl create cluster --name my-calico-cluster --without-nodegroup
- ```
-
-1. Since this cluster will use {{prodname}} for networking, you must delete the `aws-node` DaemonSet to disable the default AWS VPC networking for the pods.
-
- ```bash
- kubectl delete daemonset -n kube-system aws-node
- ```
-
-### Install and configure Calico with the VPP dataplane
-
-1. Now that you have an empty cluster configured, you can install the Tigera operator.
-
- ```bash
- kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
- ```
-
- :::note
-
- Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`.
-
- :::
-
-1. Then, you need to configure the {{prodname}} installation for the VPP dataplane. The yaml in the link below contains a minimal viable configuration for EKS. For more information on configuration options available in this manifest, see [the installation reference](../../../reference/installation/api.mdx).
-
- :::note
-
- Before applying this manifest, read its contents and make sure its settings are correct for your environment. For example,
- you may need to specify the default IP pool CIDR to match your desired pod network CIDR.
-
- :::
-
- ```bash
- kubectl create -f https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/calico/installation-eks.yaml
- ```
-
-1. Now it is time to install the VPP dataplane components.
-
- ```bash
- kubectl create -f https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/generated/calico-vpp-eks.yaml
- ```
-
-1. Finally, add nodes to the cluster.
-
- ```bash
- eksctl create nodegroup --cluster my-calico-cluster --node-type t3.medium --node-ami auto --max-pods-per-node 50
- ```
-
- :::tip
-
-   The `--max-pods-per-node` option above ensures that EKS does not limit the [number of pods based on node-type](https://github.com/awslabs/amazon-eks-ami/blob/main/nodeadm/internal/kubelet/eni-max-pods.txt). For the full set of node group options, see `eksctl create nodegroup --help`.
-
- :::
-
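-Once the node group is created, the nodes join the cluster and the VPP dataplane pods start on them. A quick sanity check (assuming the default manifests above, which install the VPP components in the `calico-vpp-dataplane` namespace):
-
-```bash
-# The nodes should become Ready and a calico-vpp-node pod should run on each of them
-kubectl get nodes -o wide
-kubectl get pods -n calico-vpp-dataplane -o wide
-```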
-
-
-
-## Install Calico with the VPP dataplane on an EKS cluster with the DPDK driver
-
-### Requirements
-
-DPDK provides better performance compared to the standard install, but it requires some additional customizations (hugepages, for instance) in the EKS worker instances. We have a bash script, `init_eks.sh`, which takes care of applying the required customizations, and we make use of the `preBootstrapCommands` property of the `eksctl` [configuration file](https://eksctl.io/usage/schema) to execute the script during worker node creation. These instructions require the latest version of `eksctl`.
-
-### Provision the cluster
-
-1. First, create an Amazon EKS cluster without any nodes.
-
- ```bash
- eksctl create cluster --name my-calico-cluster --without-nodegroup
- ```
-
-2. Since this cluster will use {{prodname}} for networking, you must delete the `aws-node` DaemonSet to disable the default AWS VPC networking for the pods.
-
- ```bash
- kubectl delete daemonset -n kube-system aws-node
- ```
-
-### Install and configure Calico with the VPP dataplane
-
-1. Now that you have an empty cluster configured, you can install the Tigera operator.
-
- ```bash
- kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
- ```
-
- :::note
-
- Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`.
-
- :::
-
-2. Then, you need to configure the {{prodname}} installation for the VPP dataplane. The yaml in the link below contains a minimal viable configuration for EKS. For more information on configuration options available in this manifest, see [the installation reference](../../../reference/installation/api.mdx).
-
- :::note
-
- Before applying this manifest, read its contents and make sure its settings are correct for your environment. For example,
- you may need to specify the default IP pool CIDR to match your desired pod network CIDR.
-
- :::
-
- ```bash
- kubectl create -f https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/calico/installation-eks.yaml
- ```
-
-3. Now it is time to install the VPP dataplane components.
-
- ```bash
- kubectl create -f https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/generated/calico-vpp-eks-dpdk.yaml
- ```
-
-4. Finally, add nodes to the cluster. Since we need to customize the nodes for DPDK, we will use an `eksctl` config file with the `preBootstrapCommands` property to create the worker nodes. The following commands create a managed nodegroup with two t3.large worker nodes in the cluster:
-
-   ```bash
-   # Adjust the region, SSH key name, and init_eks.sh location for your environment.
-   cat <<EOF > eks-nodegroup.yaml
-   apiVersion: eksctl.io/v1alpha5
-   kind: ClusterConfig
-   metadata:
-     name: my-calico-cluster
-     region: <your cluster region>
-   managedNodeGroups:
-     - name: my-calico-cluster-ng
-       instanceType: t3.large
-       desiredCapacity: 2
-       preBootstrapCommands:
-         - sudo curl -o /tmp/init_eks.sh https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/scripts/init_eks.sh
-         - sudo chmod +x /tmp/init_eks.sh
-         - sudo /tmp/init_eks.sh
-       ssh:
-         allow: true
-         publicKeyName: <your EC2 key pair name>
-   EOF
-   eksctl create nodegroup -f eks-nodegroup.yaml
-   ```
-
- For details on ssh access refer to [Amazon EC2 key pairs and Linux instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html).
-
-
-
-
-## Install Calico with the VPP dataplane on any Kubernetes cluster
-
-### Requirements
-
-The VPP dataplane has the following requirements:
-
-**Required**
-
-- A blank Kubernetes cluster, where no CNI was ever configured.
-- These [base requirements](../requirements.mdx), except those related to the management of `cali*`, `tunl*` and `vxlan.calico` interfaces.
-
- :::note
-
- If you are using `kubeadm` to create the cluster please make sure to specify the pod network CIDR using the `--pod-network-cidr` command-line argument, i.e., `sudo kubeadm init --pod-network-cidr=192.168.0.0/16`. If 192.168.0.0/16 is already in use within your network you must select a different pod network CIDR.
-
- :::
-
-**Optional**
-For some hardware, the following hugepages configuration may enable VPP to use more efficient drivers:
-
-- At least 512 x 2MB-hugepages are available (`grep HugePages_Free /proc/meminfo`)
-- The `vfio-pci` (`vfio_pci` on CentOS) or `uio_pci_generic` kernel module is loaded. For example:
-
- ```bash
- echo "vfio-pci" > /etc/modules-load.d/95-vpp.conf
- modprobe vfio-pci
- echo "vm.nr_hugepages = 512" >> /etc/sysctl.conf
- sysctl -p
- # restart kubelet to take the changes into account
- # you may need to use a different command depending on how kubelet was installed
- systemctl restart kubelet
- ```
-
-### Install Calico and configure it for VPP
-
-1. Start by installing the Tigera operator on your cluster.
-
- ```bash
- kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
- ```
-
- :::note
-
- Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`.
-
- :::
-
-1. Then, you need to configure the {{prodname}} installation for the VPP dataplane. The yaml in the link below contains a minimal viable configuration for VPP. For more information on configuration options available in this manifest, see [the installation reference](../../../reference/installation/api.mdx).
-
- :::note
-
- Before applying this manifest, read its contents and make sure its settings are correct for your environment. For example,
- you may need to specify the default IP pool CIDR to match your desired pod network CIDR.
-
- :::
-
- ```bash
- kubectl create -f https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/calico/installation-default.yaml
- ```
-
-### Install the VPP dataplane components
-
-Start by getting the appropriate yaml manifest for the VPP dataplane resources:
-
-```bash
-# If you have configured hugepages on your machines
-curl -o calico-vpp.yaml https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/generated/calico-vpp.yaml
-```
-
-```bash
-# If not, or if you're unsure
-curl -o calico-vpp.yaml https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/generated/calico-vpp-nohuge.yaml
-```
-
-Then locate the `calico-vpp-config` ConfigMap in this yaml manifest and configure it as follows.
-
-**Required Configuration**
-
-- `CALICOVPP_INTERFACES` contains a dictionary with parameters specific to interfaces in Calico VPP. The field `uplinkInterfaces` contains a list of interfaces and their configuration; the first element is the primary interface, and the rest (if any) are secondary host interfaces.
-
-```yaml
-CALICOVPP_INTERFACES: |-
- {
- "uplinkInterfaces": [ { "interfaceName": "eth0" } ]
- }
-```
-
-The interface configured must be the name of a Linux interface that is up and configured with an address. The address configured on this interface **must** be the node address in Kubernetes (`kubectl get nodes -o wide`).
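-
-For example, with the `eth0` configuration above, you can cross-check that the interface is up and that its address matches the node address (a quick sanity check; adapt the interface name to your configuration):
-
-```bash
-# The address shown on the uplink interface...
-ip -4 addr show eth0
-# ...must match the INTERNAL-IP reported for the node
-kubectl get nodes -o wide
-```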
-
-**Configuration options**
-
-`CALICOVPP_INTERFACES`
-
-| Field | Description | Type |
-| ---------------- | -------------------------------------------------------- | --------------------------------------------------- |
-| maxPodIfSpec | spec containing max values for pod interfaces config | [InterfaceSpec](#InterfaceSpec) |
-| defaultPodIfSpec | spec containing default values for pod interfaces config | [InterfaceSpec](#InterfaceSpec) |
-| vppHostTapSpec | spec containing config for host tap interface in vpp | [InterfaceSpec](#InterfaceSpec) |
-| uplinkInterfaces | list of host interfaces in vpp | List of [UplinkInterfaceSpec](#UplinkInterfaceSpec) |
-
-#### InterfaceSpec
-
-| Field | Description | Type | Default |
-| ------ | ---------------------------------- | -------------------------------------------------- | --------------------------------- |
-| rx | Number of RX queues | int | 1 |
-| tx | Number of TX queues | int | 1 |
-| rxqsz | RX queue size | int | 1024 |
-| txqsz | TX queue size | int | 1024 |
-| isl3 | Defines the interface mode (L2/L3) | boolean | true for tuntap ; false for memif |
-| rxMode | RX mode | string among "interrupt", "adaptive", or "polling" | `adaptive` |
-
-#### UplinkInterfaceSpec
-
-| Field | Description | Type | Default |
-| ------------- | -------------------------------------------------------------- | -------------------------------------------------- | ----------------------------- |
-| rx | Number of RX queues | int | 1 |
-| tx | Number of TX queues | int | 1 |
-| rxqsz | RX queue size | int | 1024 |
-| txqsz | TX queue size | int | 1024 |
-| isl3 | Defines the interface mode (L2/L3) for drivers that support it | boolean | true |
-| rxMode | RX mode | string among "interrupt", "adaptive", or "polling" | `adaptive` |
-| interfaceName | interface name                                                  | string                                              | unset                         |
-| vppDriver     | driver to use in VPP                                            | string                                              | unset                         |
-| newDriver     | Linux driver to use before passing the interface to VPP         | string                                              | unset                         |
-| mtu           | the interface MTU                                               | int                                                 | use the existing MTU in Linux |
-
-- `service_prefix` is the Kubernetes service CIDR. You can retrieve it by running:
-
-```bash
-kubectl cluster-info dump | grep -m 1 service-cluster-ip-range
-```
-
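-The exact output depends on how the API server was configured, but it should contain something like:
-
-```
-"--service-cluster-ip-range=10.96.0.0/12",
-```
-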
-If this command doesn't return anything, you can leave the default value of `10.96.0.0/12`.
-
-**Optional**
-
-- To configure how VPP drives the physical interface, use the `vppDriver` field of the `uplinkInterfaces` elements in `CALICOVPP_INTERFACES`.
-
-The supported values will depend on the interface type. Available values are:
-
-- `""` : will automatically select and try drivers based on interface type and available resources, starting with the fastest
-- `af_xdp` : use an AF_XDP socket to drive the interface (requires kernel 5.4 or newer)
-- `af_packet` : use an AF_PACKET socket to drive the interface (not optimized but works everywhere)
-- `avf` : use the VPP native driver for Intel 700-Series and 800-Series interfaces (requires hugepages)
-- `vmxnet3` : use the VPP native driver for VMware virtual interfaces (requires hugepages)
-- `virtio` : use the VPP native driver for Virtio virtual interfaces (requires hugepages)
-- `rdma` : use the VPP native driver for Mellanox CX-4 and CX-5 interfaces (requires hugepages)
-- `dpdk` : use the DPDK interface drivers with VPP (requires hugepages, works with most interfaces)
-- `none` : do not configure connectivity automatically. This can be used when [configuring the interface manually](../../../reference/vpp/uplink-configuration.mdx)
-
-**Legacy options**
-
-We maintain legacy support for the `CALICOVPP_INTERFACE` and `CALICOVPP_NATIVE_DRIVER` environment variables:
-
-`CALICOVPP_INTERFACE` -> `uplinkInterfaces[0].interfaceName`
-
-`CALICOVPP_NATIVE_DRIVER` -> `uplinkInterfaces[0].vppDriver`
-
-If `CALICOVPP_INTERFACES` is unspecified, `CALICOVPP_INTERFACE` specifies the primary interface to be used.
-In that case, use `CALICOVPP_NATIVE_DRIVER` instead of `vppDriver`.
-
-So either patch `CALICOVPP_INTERFACES` with the suitable interface in `uplinkInterfaces`, or delete `CALICOVPP_INTERFACES` and use `CALICOVPP_INTERFACE` instead.
-
-**Example**
-
-```yaml noValidation
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: calico-vpp-config
- namespace: calico-vpp-dataplane
-data:
- service_prefix: 10.96.0.0/12
- vpp_dataplane_interface: eth1
- vpp_uplink_driver: ""
- ...
-```
-
-### Apply the configuration
-
-To apply the configuration, run:
-
-```bash
-kubectl create -f calico-vpp.yaml
-```
-
-This will install all the resources required by the VPP dataplane in your cluster.
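-
-Once the resources are created, you can check that the VPP dataplane pods come up alongside the rest of {{prodname}} (a quick sanity check, assuming the default namespaces used by these manifests):
-
-```bash
-# All pods in both namespaces should eventually reach the Running state
-kubectl get pods -n calico-vpp-dataplane
-kubectl get pods -n calico-system
-```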
-
-
-
-
-## Next steps
-
-After installing {{prodname}} with the VPP dataplane, you can benefit from the features of the VPP dataplane, such as fast [IPsec](ipsec.mdx) or [WireGuard](../../../network-policy/encrypt-cluster-pod-traffic.mdx) encryption.
-
-**Tools**
-
-- [Install and configure calicoctl](../../../operations/calicoctl/install.mdx) to configure and monitor your cluster.
-
-**Security**
-
-- [Secure pods with {{prodname}} network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/index.mdx
deleted file mode 100644
index f2d1d775d5..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Install the VPP userspace dataplane to unlock extra performance for your cluster!
-hide_table_of_contents: true
----
-
-# VPP dataplane tech preview
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/ipsec.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/ipsec.mdx
deleted file mode 100644
index faa1952743..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/ipsec.mdx
+++ /dev/null
@@ -1,62 +0,0 @@
----
-description: Enable IPsec for faster encryption between nodes when using the VPP dataplane.
----
-
-# IPsec configuration with VPP
-
-## Big picture
-
-Enable IPsec encryption for the traffic flowing between the nodes.
-
-## Value
-
-IPsec is the fastest option to encrypt the traffic between nodes. It enables blanket application traffic encryption with very little performance impact.
-
-## Before you begin...
-
-To enable IPsec encryption, you will need a Kubernetes cluster with:
-
-- the [VPP dataplane](getting-started.mdx) configured
-- [IP-in-IP encapsulation](../../../networking/configuring/vxlan-ipip.mdx) configured between the nodes
-
-## How to
-
-- [Create the IKEv2 PSK](#create-the-ikev2-psk)
-- [Configure the VPP dataplane](#configure-the-vpp-dataplane)
-
-### Create the IKEv2 PSK
-
-Create a Kubernetes secret that contains the PSK used for the IKEv2 exchange between the nodes. You can use the following command to create a random PSK. It will generate a unique random key. You may also replace the part after `psk=` with a key of your choice.
-
-```bash
-kubectl -n calico-vpp-dataplane create secret generic calicovpp-ipsec-secret \
- --from-literal=psk="$(dd if=/dev/urandom bs=1 count=36 2>/dev/null | base64)"
-```
-
-### Configure the VPP dataplane
-
-To enable IPsec, you need to configure two environment variables on the `calico-vpp-node` pod. You can do so with the following kubectl command:
-
-```bash
-kubectl -n calico-vpp-dataplane patch daemonset calico-vpp-node --patch "$(curl https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/components/ipsec/ipsec.yaml)"
-```
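-
-The patch updates the `calico-vpp-node` DaemonSet, which restarts its pods with the IPsec settings. You can wait for the restart to complete with something like:
-
-```bash
-kubectl -n calico-vpp-dataplane rollout status ds/calico-vpp-node
-```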
-
-Once IPsec is enabled, all the traffic that uses IP-in-IP encapsulation in the cluster will be automatically encrypted.
-
-## Next steps
-
-### Verify encryption
-
-To verify that the traffic is encrypted, open a VPP debug CLI session to check the configuration with [calivppctl](../../../operations/troubleshoot/vpp.mdx)
-
-```bash
-calivppctl vppctl myk8node1
-```
-
-Then at the `vpp#` prompt, you can run the following commands:
-
-- `show ikev2 profile` will list the configured IKEv2 profiles, there should be one per other node in your cluster
-- `show ipsec sa` will list the establish IPsec SA, two per IKEv2 profile
-- `show interface` will list all the interfaces configured in VPP. The ipip interfaces (which correspond to the IPsec tunnels) should be up
-
-You can also [capture the traffic](../../../operations/troubleshoot/vpp.mdx#tracing-packets) flowing between the nodes to verify that it is encrypted.
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/specifics.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/specifics.mdx
deleted file mode 100644
index de906b30b6..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/specifics.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
----
-description: Behavioral discrepancies when running with the Calico/VPP dataplane
----
-
-# Details of VPP implementation & known-issues
-
-Enabling VPP as the Calico dataplane should be transparent for most applications, but some specific behaviours might differ. This page gives a summary of the main differences, as well as the features that are still unsupported or have known issues.
-
-## Behavioural differences from other dataplanes
-
-The main difference between VPP and a regular iptables/IPVS dataplane is in the NodePort implementation. The different constraints allow VPP to optimise the service implementation, but as a consequence some behaviours might differ. This mostly impacts policies that expect packets to have been source-NATed (or not).
-
-- For `ClusterIPs`, `ExternalIPs` and `LoadBalancerIPs` load-balancing is done with the Maglev algorithm, and the packets are only NAT-ed on the node where the selected backend lives. This allows us to avoid source NAT-ing packets, and thus present the real client address to the destination pod. The same is true when a pod connects to a ClusterIP. This behavior allows the service load balancing to use direct service return (DSR) by default.
-
-- For `NodePorts`, packets are always NATed on the node targeted by the traffic. This is not the case for the eBPF dataplane, where all nodes will NAT traffic to a node port regardless of the destination IP. Traffic is also always source-NATed so that the return traffic comes back through the same node.
-
-## Known issues & unsupported features
-
-Although we aim to be feature complete, VPP is still in beta status, so some features are still unsupported or have known issues:
-
-- For host endpoints policies, setting `doNotTrack` or `preDNAT` is not supported.
-  - Setting them to `true` will result in the policy being ignored and an error message being printed by the calico-vpp-agent.
-- VPP does not support running with `BGP disabled`.
-- `Session affinity for services` is not supported
-- `WireGuard` is supported when activated cluster-wide at startup time. Enabling or disabling WireGuard on a running cluster with live pods is known to be unstable.
-- `EndpointSlices` are not supported
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/demo.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/demo.mdx
deleted file mode 100644
index 361719619b..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/demo.mdx
+++ /dev/null
@@ -1,630 +0,0 @@
----
-description: An interactive demo to show how to apply basic network policy to pods in a Calico for Windows cluster.
----
-
-# Basic policy demo
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-This guide provides a simple demo to illustrate basic pod-to-pod connectivity and the application of network policy in a {{prodnameWindows}} cluster. We will create client and server pods on Linux and Windows nodes, verify connectivity between the pods, and then we'll apply a basic network policy to isolate pod traffic.
-
-## Prerequisites
-
-To run this demo, you will need a [{{prodnameWindows}} cluster](quickstart.mdx) with
-Windows Server 1809 (build 17763.1432, the August 2020 update, or newer). More recent versions of Windows Server can be used with a change to the demo manifests.
-
-:::note
-
-Windows Server 1809 builds older than 17763.1432 do not support [direct server return](https://techcommunity.microsoft.com/t5/networking-blog/direct-server-return-dsr-in-a-nutshell/ba-p/693710). This means that policy support is limited to only pod IP addresses.
-
-:::
-
-
-
-
-## Create pods on Linux and Windows nodes
-
-First, create a client (busybox) and server (nginx) pod on the Linux nodes, and a client (pwsh) and server (porter) pod on the Windows nodes. You can use the `policy-demo-linux.yaml` and `policy-demo-windows.yaml` manifests shown later in this guide; they create all four pods in the `calico-demo` namespace.
-
-## Check connectivity between pods on Linux and Windows nodes
-
-Once all four pods are running, verify that the busybox pod can reach the porter pod on port 80. First look up the porter pod IP, then connect to it from the busybox pod:
-
-```bash
-kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}'
-kubectl exec -n calico-demo busybox -- nc -vz <porter pod IP> 80
-```
-
-To combine both of the above steps:
-
-```bash
-kubectl exec -n calico-demo busybox -- nc -vz $(kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}') 80
-```
-
-If the connection from the busybox pod to the porter pod succeeds, we will get output similar to the following:
-
-```
-192.168.40.166 (192.168.40.166:80) open
-```
-
-Now let's verify that the powershell pod can reach the nginx pod:
-
-```bash
-kubectl exec -n calico-demo pwsh -- powershell Invoke-WebRequest -Uri http://$(kubectl get po nginx -n calico-demo -o 'jsonpath={.status.podIP}') -UseBasicParsing -TimeoutSec 5
-```
-
-If the connection succeeds, we will get output similar to:
-
-```
-StatusCode : 200
-StatusDescription : OK
-Content :
-
-
- Welcome to nginx!
-
- <...
-...
-```
-
-Finally, let's verify that the powershell pod can reach the porter pod:
-
-```bash
-kubectl exec -n calico-demo pwsh -- powershell Invoke-WebRequest -Uri http://$(kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}') -UseBasicParsing -TimeoutSec 5
-```
-
-If that succeeds, we will see something like:
-
-```
-StatusCode : 200
-StatusDescription : OK
-Content : This is a Calico for Windows demo.
-RawContent : HTTP/1.1 200 OK
- Content-Length: 49
- Content-Type: text/plain; charset=utf-8
- Date: Fri, 21 Aug 2020 22:45:46 GMT
-
- This is a Calico for Windows demo.
-Forms :
-Headers : {[Content-Length, 49], [Content-Type, text/plain;
- charset=utf-8], [Date, Fri, 21 Aug 2020 22:45:46 GMT]}
-Images : {}
-InputFields : {}
-Links : {}
-ParsedHtml :
-RawContentLength : 49
-```
-
-## Apply policy to the Windows client pod
-
-Now let's apply a basic network policy that allows only the busybox pod to reach the porter pod.
-
-```bash
-calicoctl apply -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: allow-busybox
-  namespace: calico-demo
-spec:
-  selector: app == 'porter'
-  types:
-    - Ingress
-  ingress:
-    - action: Allow
-      protocol: TCP
-      source:
-        selector: app == 'busybox'
-      destination:
-        ports:
-          - 80
-EOF
-```
-
-With this policy in place, the busybox pod can still reach the porter pod, but the pwsh pod can no longer reach it (the request will time out).
-
-## Installing kubectl on Windows
-
-To run the commands in this demo you need the Windows version of kubectl installed and added to the system path.
-[Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/) and move the kubectl binary to **c:\k**.
-
-Add `c:\k` to the system path
-
-1. Open a PowerShell window as Administrator
-
-   ```powershell
-   $env:Path += ";C:\k"
-   # Persist the change so that new PowerShell windows also pick it up
-   [Environment]::SetEnvironmentVariable("Path", $env:Path + ";C:\k", [EnvironmentVariableTarget]::Machine)
-   ```
-
-1. Close all PowerShell windows.
-
-## Create pods on Linux nodes
-
-First, create a client (busybox) and server (nginx) pod on the Linux nodes.
-
-### Create a YAML file policy-demo-linux.yaml using your favorite editor on Windows
-
-```yaml
-apiVersion: v1
-kind: Namespace
-metadata:
- name: calico-demo
-
----
-apiVersion: v1
-kind: Pod
-metadata:
- labels:
- app: busybox
- name: busybox
- namespace: calico-demo
-spec:
- containers:
- - args:
- - /bin/sh
- - -c
- - sleep 360000
- image: busybox:1.28
- imagePullPolicy: Always
- name: busybox
- nodeSelector:
- kubernetes.io/os: linux
-
----
-apiVersion: v1
-kind: Pod
-metadata:
- labels:
- app: nginx
- name: nginx
- namespace: calico-demo
-spec:
- containers:
- - name: nginx
- image: nginx:1.8
- ports:
- - containerPort: 80
- nodeSelector:
- kubernetes.io/os: linux
-```
-
-### Apply the policy-demo-linux.yaml file to the Kubernetes cluster
-
-1. Open a PowerShell window.
-1. Use `kubectl` to apply the `policy-demo-linux.yaml` configuration.
-
-```powershell
-kubectl apply -f policy-demo-linux.yaml
-```
-
-## Create pods on Windows nodes
-
-Next, we’ll create a client (pwsh) and server (porter) pod on the Windows nodes.
-
-:::note
-
-The pwsh and porter pod manifests below use images based on mcr.microsoft.com/windows/servercore:1809. If you are using a more recent Windows Server version, update the manifests to use a servercore image that matches your Windows Server version.
-
-:::
-
-### Create the policy-demo-windows.yaml using your favorite editor on Windows
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
- name: pwsh
- namespace: calico-demo
- labels:
- app: pwsh
-spec:
- containers:
- - name: pwsh
- image: mcr.microsoft.com/windows/servercore:1809
- args:
- - powershell.exe
- - -Command
- - 'Start-Sleep 360000'
- imagePullPolicy: IfNotPresent
- nodeSelector:
- kubernetes.io/os: windows
----
-apiVersion: v1
-kind: Pod
-metadata:
- name: porter
- namespace: calico-demo
- labels:
- app: porter
-spec:
- containers:
- - name: porter
- image: calico/porter:1809
- ports:
- - containerPort: 80
- env:
- - name: SERVE_PORT_80
- value: This is a Calico for Windows demo.
- imagePullPolicy: IfNotPresent
- nodeSelector:
- kubernetes.io/os: windows
-```
-
-### Apply the policy-demo-windows.yaml file to the Kubernetes cluster
-
-1. Open a PowerShell window.
-1. Use `kubectl` to apply the `policy-demo-windows.yaml` configuration
-
-```powershell
-kubectl apply -f policy-demo-windows.yaml
-```
-
-### Verify four pods have been created and are running
-
-:::note
-
-Launching the Windows pods is going to take some time depending on your network download speed.
-
-:::
-
-1. Open a PowerShell window.
-1. Use `kubectl` to list the pods in the `calico-demo` namespace.
-
-```powershell
-kubectl get pods --namespace calico-demo
-```
-
-You should see something like the following:
-
-```output
-NAME READY STATUS RESTARTS AGE
-busybox 1/1 Running 0 4m14s
-nginx 1/1 Running 0 4m14s
-porter 0/1 ContainerCreating 0 74s
-pwsh 0/1 ContainerCreating 0 2m9s
-```
-
-Repeat the command every few minutes until the output shows all 4 pods in the Running state.
-
-```output
-NAME READY STATUS RESTARTS AGE
-busybox 1/1 Running 0 7m24s
-nginx 1/1 Running 0 7m24s
-porter 1/1 Running 0 4m24s
-pwsh 1/1 Running 0 5m19s
-```
-
-### Check connectivity between pods on Linux and Windows nodes
-
-Now that client and server pods are running on both Linux and Windows nodes, let’s verify that client pods on Linux nodes can reach server pods on Windows nodes.
-
-1. Open a PowerShell window.
-1. Use `kubectl` to determine the porter pod IP address:
-
- ```powershell
- kubectl get pod porter --namespace calico-demo -o 'jsonpath={.status.podIP}'
- ```
-
-1. Log into the busybox pod and try reaching the porter pod on port 80. Replace the `<porter_ip>` tag with the IP address returned from the previous command.
-
-   ```powershell
-   kubectl exec --namespace calico-demo busybox -- nc -vz <porter_ip> 80
-   ```
-
- :::note
-
- You can also combine both of the above steps:
-
- :::
-
- ```powershell
- kubectl exec --namespace calico-demo busybox -- nc -vz $(kubectl get pod porter --namespace calico-demo -o 'jsonpath={.status.podIP}') 80
- ```
-
- If the connection from the busybox pod to the porter pod succeeds, you will get output similar to the following:
-
- ```powershell
- 192.168.40.166 (192.168.40.166:80) open
- ```
-
- :::note
-
- The IP addresses returned will vary depending on your environment setup.
-
- :::
-
-1. Now you can verify that the pwsh pod can reach the nginx pod:
-
- ```powershell
- kubectl exec --namespace calico-demo pwsh -- powershell Invoke-WebRequest -Uri http://$(kubectl get po nginx -n calico-demo -o 'jsonpath={.status.podIP}') -UseBasicParsing -TimeoutSec 5
- ```
-
- If the connection succeeds, you will see output similar to:
-
- ```
- StatusCode : 200
- StatusDescription : OK
- Content :
-
-
- Welcome to nginx!
-
- <...
- ```
-
-1. Verify that the pwsh pod can reach the porter pod:
-
- ```powershell
- kubectl exec --namespace calico-demo pwsh -- powershell Invoke-WebRequest -Uri http://$(kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}') -UseBasicParsing -TimeoutSec 5
- ```
-
- If that succeeds, you will see something like:
-
- ```
- StatusCode : 200
- StatusDescription : OK
- Content : This is a Calico for Windows demo.
- RawContent : HTTP/1.1 200 OK
- Content-Length: 49
- Content-Type: text/plain; charset=utf-8
- Date: Fri, 21 Aug 2020 22:45:46 GMT
-
- This is a Calico for Windows demo.
- Forms :
- Headers : {[Content-Length, 49], [Content-Type, text/plain;
- charset=utf-8], [Date, Fri, 21 Aug 2020 22:45:46 GMT]}
- Images : {}
- InputFields : {}
- Links : {}
- ParsedHtml :
- RawContentLength : 49
-
- ```
-
-You have now verified that communication is possible between all pods in the application.
-
-## Apply policy to the Windows client pod
-
-In a real-world deployment, you would want to make sure that only pods that are supposed to communicate with each other are actually allowed to do so.
-
-To achieve this you will apply a basic network policy which allows only the busybox pod to reach the porter pod.
-
-### Create the network-policy.yaml file using your favorite editor on Windows
-
-```yaml
-apiVersion: networking.k8s.io/v1
-kind: NetworkPolicy
-metadata:
- name: allow-busybox
- namespace: calico-demo
-spec:
- podSelector:
- matchLabels:
- app: porter
- policyTypes:
- - Ingress
- ingress:
- - from:
- - podSelector:
- matchLabels:
- app: busybox
- ports:
- - protocol: TCP
- port: 80
-```
-
-### Apply the network-policy.yaml file
-
-1. Open a PowerShell window.
-1. Use `kubectl` to apply the network-policy.yaml file.
-
-```powershell
-kubectl apply -f network-policy.yaml
-```
-
-### Verify the policy is in effect
-
-With the policy in place, the busybox pod should still be able to reach the porter pod:
-
-:::note
-
-We will be using the combined command line from earlier in this chapter.
-
-:::
-
-```powershell
-kubectl exec --namespace calico-demo busybox -- nc -vz $(kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}') 80
-```
-
-However, the pwsh pod will not be able to reach the porter pod:
-
-```powershell
-kubectl exec --namespace calico-demo pwsh -- powershell Invoke-WebRequest -Uri http://$(kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}') -UseBasicParsing -TimeoutSec 5
-```
-
-The request times out with a message like the following:
-
-```powershell
-Invoke-WebRequest : The operation has timed out.
-At line:1 char:1
-+ Invoke-WebRequest -Uri http://192.168.40.166 -UseBasicParsing -Timeout ...
-+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- + CategoryInfo : InvalidOperation: (System.Net.HttpWebRequest:Htt
-pWebRequest) [Invoke-WebRequest], WebException
- + FullyQualifiedErrorId : WebCmdletWebResponseException,Microsoft.PowerShell.Commands.InvokeWebRequestCommand
-command terminated with exit code 1
-```
-
-## Wrap up
-
-In this demo we’ve configured pods on Linux and Windows nodes, verified basic pod connectivity, and tried a basic network policy to isolate pod-to-pod traffic.
-As a final step, you can clean up all of the demo resources:
-
-1. Open a PowerShell window.
-
-```powershell
-kubectl delete namespace calico-demo
-```
-
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/index.mdx
deleted file mode 100644
index 6e0421123d..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Install and configure Calico for Windows.
-hide_table_of_contents: true
----
-
-# Calico for Windows
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubeconfig.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubeconfig.mdx
deleted file mode 100644
index 9a5e846458..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubeconfig.mdx
+++ /dev/null
@@ -1,115 +0,0 @@
----
-description: Configure kubeconfig for Calico for Windows.
----
-
-# Create kubeconfig for Windows nodes
-
-## Big picture
-
-Create kubeconfig for Windows nodes for manual installations of {{prodnameWindows}}.
-
-## How to
-
-In a manual installation of {{prodnameWindows}}, {{prodname}} requires a kubeconfig file to access the API server. This section describes how to find an existing `calico-node` service account used by {{prodname}} on the Linux side, and then export the service account token as a kubeconfig file for {{prodname}} to use.
-
-:::note
-
-In general, the node kubeconfig as used by kubelet does not have enough permissions to access {{prodname}}-specific resources.
-
-:::
-
-### Export calico-node service account token as a kubeconfig file
-
-:::note
-
-If your Kubernetes version is v1.24.0 or higher, service account token secrets are no longer automatically created. Before continuing, [manually create](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#manually-create-a-service-account-api-token) the calico-node service account token:
-
-```bash
-kubectl apply -f - <<EOF
-apiVersion: v1
-kind: Secret
-metadata:
-  name: calico-node-token
-  namespace: calico-system
-  annotations:
-    kubernetes.io/service-account.name: calico-node
-type: kubernetes.io/service-account-token
-EOF
-```
-
-:::
-
-Then, find the secret containing the service account token for the `calico-node` service account:
-
-```bash
-kubectl get secret -n calico-system | grep calico-node
-```
-
-Inspect the output, find the name of the token, and store it in a variable:
-
-```bash
-name=calico-node-token-xxxxx
-```
-
-Extract the parts of the secret, storing them in variables:
-
-```bash
-ca=$(kubectl get secret/$name -o jsonpath='{.data.ca\.crt}' -n calico-system)
-
-token=$(kubectl get secret/$name -o jsonpath='{.data.token}' -n calico-system | base64 --decode)
-
-namespace=$(kubectl get secret/$name -o jsonpath='{.data.namespace}' -n calico-system | base64 --decode)
-```
-
-Then, output the file:
-
-```bash
-# The API server address for the cluster; here it is taken from your local kubeconfig (adjust if needed)
-server=$(kubectl config view -o jsonpath='{.clusters[0].cluster.server}')
-
-cat << EOF > calico-config
-apiVersion: v1
-kind: Config
-clusters:
-- name: kubernetes
- cluster:
- certificate-authority-data: ${ca}
- server: ${server}
-contexts:
-- name: calico-windows@kubernetes
- context:
- cluster: kubernetes
- namespace: calico-system
- user: calico-windows
-current-context: calico-windows@kubernetes
-users:
-- name: calico-windows
- user:
- token: ${token}
-EOF
-```
-
-Copy this config file to the Windows node at `{{rootDirWindows}}\calico-kube-config` and set the KUBECONFIG environment variable in `config.ps1` to point to it.
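-
-Before copying the file, you can do a quick sanity check from the Linux side that the exported kubeconfig works (this assumes the file was written as `calico-config` in the current directory):
-
-```bash
-# The calico-node service account should be able to read cluster nodes
-kubectl --kubeconfig calico-config get nodes
-```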
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/index.mdx
deleted file mode 100644
index c923ac2a0a..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Get Calico for Windows running in your Kubernetes cluster.
-hide_table_of_contents: true
----
-
-# Kubernetes
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/rancher.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/rancher.mdx
deleted file mode 100644
index ab0573bd1a..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/rancher.mdx
+++ /dev/null
@@ -1,128 +0,0 @@
----
-description: Install Calico for Windows on a Rancher RKE cluster.
----
-
-# Install Calico for Windows on a Rancher Kubernetes Engine cluster
-
-## Big picture
-
-Install {{prodnameWindows}} on a Rancher Kubernetes Engine (RKE) cluster.
-
-## Value
-
-Run Linux and Windows workloads on a RKE cluster with {{prodname}}.
-
-## Before you begin
-
-**Supported**
-
-- RKE Kubernetes 1.20, 1.19, or 1.18
-
-**Supported networking**
-
-- BGP with no encapsulation
-- VXLAN
-
-**Required**
-
-- An RKE cluster provisioned with [no network plugin](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins#disabling-deployment-of-a-network-plug-in)
- but which otherwise meets the {{prodnameWindows}} Kubernetes [cluster requirements](requirements.mdx). This guide was tested with RKE v1.18.9.
-- One or more Windows nodes that meet the [requirements](requirements.mdx).
-
-## How to
-
-The following steps will outline the installation of {{prodname}} networking on the RKE cluster, then the installation of {{prodnameWindows}} on the Windows nodes.
-
-1. Install the Tigera {{prodname}} operator and custom resource definitions.
-
-   ```bash
-   kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
-   ```
-
- :::note
-
- Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`.
-
- :::
-
-1. Download the necessary Installation custom resource.
-
- ```bash
- wget {{manifestsUrl}}/manifests/custom-resources.yaml
- ```
-
-1. Update the `calicoNetwork` options, ensuring that the correct pod CIDR is set. (Rancher uses `10.42.0.0/16` by default.)
- Below are sample installations for VXLAN and BGP networking using the default Rancher pod CIDR:
-
- **VXLAN**
-
- ```yaml
- apiVersion: operator.tigera.io/v1
- kind: Installation
- metadata:
- name: default
- spec:
- # Configures Calico networking.
- calicoNetwork:
- bgp: Disabled
- # Note: The ipPools section cannot be modified post-install.
- ipPools:
- - blockSize: 26
- cidr: 10.42.0.0/16
- encapsulation: VXLAN
- natOutgoing: Enabled
- nodeSelector: all()
- ```
-
- **BGP**
-
- ```yaml
- apiVersion: operator.tigera.io/v1
- kind: Installation
- metadata:
- name: default
- spec:
- # Configures Calico networking.
- calicoNetwork:
- # Note: The ipPools section cannot be modified post-install.
- ipPools:
- - blockSize: 26
- cidr: 10.42.0.0/16
- encapsulation: None
- natOutgoing: Enabled
- nodeSelector: all()
- ```
-
- :::note
-
- For more information on configuration options available in this manifest, see [the installation reference](../../../../reference/installation/api.mdx).
-
- :::
-
-1. Apply the updated custom resources:
-
- ```bash
- kubectl create -f custom-resources.yaml
- ```
-
-1. Configure strict affinity:
-
- ```bash
- kubectl patch ipamconfigurations default --type merge --patch='{"spec": {"strictAffinity": true}}'
- ```
-
-1. Finally, follow the {{prodnameWindows}} [quickstart guide for Kubernetes](../quickstart.mdx#install-calico-for-windows-manually).
-   For VXLAN clusters, follow the instructions under the "Kubernetes VXLAN" tab. For BGP clusters, follow the instructions under the "Kubernetes BGP" tab.
-
- :::note
-
- For Rancher default values for service CIDR and DNS cluster IP, see the [Rancher kube-api service options](https://rancher.com/docs/rke/latest/en/config-options/services/#kubernetes-api-server-options).
-
- :::
-
-1. Check the status of the nodes with `kubectl get nodes`. If you see that the Windows node has the status `Ready`, then you have a {{prodnameWindows}} on RKE cluster ready for Linux and Windows workloads!
-
-## Next steps
-
-- [Try the basic policy demo](../demo.mdx)
-- [Secure pods with {{prodname}} network policy](../../../../network-policy/get-started/calico-policy/calico-network-policy.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/requirements.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/requirements.mdx
deleted file mode 100644
index f55f3a1217..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/requirements.mdx
+++ /dev/null
@@ -1,110 +0,0 @@
----
-description: Review the requirements for the standard install for Calico for Windows.
----
-
-# Requirements
-
-## About {{prodnameWindows}}
-
-Because the Kubernetes and {{prodname}} control components do not run on Windows yet, a hybrid Linux/Windows cluster is required. The {{prodnameWindows}} standard installation is distributed as a **.zip archive**.
-
-## What's supported in this release
-
-✓ Install: Manifest install for Kubernetes clusters
-
-✓ Platforms: Kubernetes, OpenShift, RKE, EKS, AKS
-
-✓ Networking:
-
-- Kubernetes, on-premises: Calico CNI with BGP or VXLAN
-- OpenShift: Calico CNI with BGP or VXLAN
-- Rancher Kubernetes Engine: Calico CNI with BGP or VXLAN
-- EKS: VPC CNI
-- AKS: Azure CNI
-
-## Requirements
-
-### CNI and networking options
-
-The following table summarizes the networking options and considerations.
-
-| Networking | Components | **Value/Content** |
-| ------------------ | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| {{prodname}} BGP   | Windows CNI plugin: calico.exe<br/>Linux: {{prodname}} for policy and networking | {{prodname}}'s native networking approach, supports:<br/>- Auto-configured node-to-node BGP mesh over an L2 fabric<br/>- Peering with external routers for an L3 fabric<br/>- {{prodname}} IPAM and IP aggregation (with some limitations)<br/>- Route reflectors (including the new in-cluster route reflector introduced in {{prodname}} v3.3). **Note**: Windows nodes cannot act as route reflectors.<br/>- Kubernetes API datastore driver<br/>**AWS users**: If running on AWS, you must disable the source/dest check on your EC2 instances so that hosts can forward traffic on behalf of pods. |
-| {{prodname}} VXLAN | Windows CNI plugin: calico.exe<br/>Linux: {{prodname}} for policy and networking | {{prodname}}'s VXLAN overlay, supports:<br/>- VXLAN overlay, which can traverse most networks<br/>- Auto-configured node-to-node routing<br/>- {{prodname}} IPAM and IP aggregation (with some limitations)<br/>- Kubernetes API datastore driver<br/>**Note**: VXLAN runs on UDP port 4789 (this is the only port supported by Windows); remember to open that port between your {{prodname}} hosts in any firewalls / security groups. |
-| Cloud provider     | Windows CNI plugin: win-bridge.exe<br/>Linux: {{prodname}} policy-only | A useful fallback, particularly if you have a Kubernetes cloud provider that automatically installs inter-host routes. {{prodname}} has been tested with the standard **win-bridge.exe** CNI plugin so it should work with any networking provider that ultimately uses win-bridge.exe to network the pod (such as the Azure CNI plugin and cloud provider). |
-
-:::note
-
-If Calico CNI with VXLAN is used, BGP must be disabled. See the [installation reference](../../../../reference/installation/api.mdx#operator.tigera.io/v1.BGPOption).
-
-:::
-
-### Datastores
-
-Whether you use etcd or Kubernetes datastore (kdd), the datastore for the Windows node/Kubernetes cluster must be the same as the datastore for the Linux control node. (You cannot mix datastores in {{prodnameWindows}}.)
-
-### Kubernetes version
-
-See the [Kubernetes requirements](../../requirements.mdx#kubernetes-requirements).
-
-Earlier versions may work, but we do not actively test {{prodnameWindows}} against them, and they may have known issues and incompatibilities.
-
-### Linux platform requirements
-
-- At least one Linux Kubernetes worker node to run {{prodname}}'s cluster-wide components that meets [Linux system requirements](../../requirements.mdx), and is installed with {{prodname}} v3.12+.
-- VXLAN or BGP without encapsulation is supported if using {{prodname}} CNI. IPIP ({{prodname}}'s default encapsulation mode) is not supported. Use the following command to turn off IPIP.
-
-```bash
-calicoctl patch felixconfiguration default -p '{"spec":{"ipipEnabled":false}}'
-```
-
-- If using {{prodname}} IPAM, strict affinity of IPAM configuration must be set to `true`.
-
-```bash
-kubectl patch ipamconfigurations default --type merge --patch='{"spec": {"strictAffinity": true}}'
-```
-
-:::note
-
-For operator-managed Linux {{prodname}} clusters, three Linux worker nodes are required to meet high-availability requirements for Typha.
-
-:::
-
-### Windows platform requirements
-
-- Windows versions:
-
- - Windows Server 1809 (build 17763.1432 or later)
- - Windows Server 2022 (build 20348.169 or later)
-
- :::note
-
- Windows Server version support differs for each Kubernetes version. Review the [Windows OS Version Support](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#windows-os-version-support) table for the Windows Server versions supported by each Kubernetes version.
-
- :::
-
-- Be able to run commands as Administrator using PowerShell.
-- Container runtime: [Docker](https://docs.microsoft.com/en-us/virtualization/windowscontainers/quick-start/set-up-environment?tabs=Windows-Server) or [containerd](https://kubernetes.io/docs/setup/production-environment/container-runtimes/#containerd) is installed and running. If containerd is running, it will be used as the container runtime; otherwise, Docker is assumed.
-- Remote access to the Windows node via Remote Desktop Protocol (RDP) or Windows Remote Management (WinRM)
-- If you are using {{prodname}} BGP networking, the RemoteAccess service must be installed for the Windows BGP Router.
-- Windows nodes support only a single IP pool type (so, if using a VXLAN pool, you should only use VXLAN throughout the cluster).
-- TLS v1.2 enabled. For example:
-
-```powershell
-[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
-```
-
-### EKS requirements
-
-- The VPC controllers must be installed to run Windows pods.
-- An instance role on the Windows instance must have permissions to get `namespaces` and get `secrets` in the calico-system namespace (or the kube-system namespace if you are using a non-operator-managed {{prodname}} installation).
-
-### AKS requirements
-
-- {{prodnameWindows}} can be enabled only on newly created clusters.
-- Available with Kubernetes version 1.20 or later
-
-## Next steps
-
-[Install Calico for Windows](standard.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/standard.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/standard.mdx
deleted file mode 100644
index 9ea217ed1a..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/standard.mdx
+++ /dev/null
@@ -1,310 +0,0 @@
----
-description: Install Calico for Windows to enable a workload-to-workload Zero Trust model that protects modern business and legacy applications.
----
-
-# Install Calico for Windows
-
-## Big picture
-
-Install {{prodnameWindows}} on Kubernetes clusters. The standard installation for {{prodnameWindows}} requires more time and expertise to configure. If you need to get started quickly, we recommend the [Quickstart](../quickstart.mdx).
-
-## Value
-
-Extend your Kubernetes deployment to Windows environments.
-
-## Before you begin
-
-**Required**
-
-- Install and configure [calicoctl](../../../../operations/calicoctl/index.mdx)
-- Linux and Windows nodes [meet requirements](requirements.mdx)
-- If using {{prodname}} networking, copy the kubeconfig file (used by kubelet) to each Windows node, saving it as `c:\k\config`.
-- Download {{prodnameWindows}} and Kubernetes binaries to each Windows node to prepare for install:
-
-  On each of your Windows nodes, download and run the {{prodnameWindows}} installation script:
-
-  ```powershell
-  Invoke-WebRequest {{calicoReleasesURL}}/{{releaseTitle}}/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1
-  c:\install-calico-windows.ps1 -DownloadOnly yes -KubeVersion <your Kubernetes version>
-  ```
-
-  After it completes, `cd` into `{{rootDirWindows}}`; you will see the calico-node.exe binary, install scripts, and other files.
-
-## How to
-
-Because the Kubernetes and {{prodname}} control components do not run on Windows yet, a hybrid Linux/Windows cluster is required. First you create a Linux cluster for {{prodname}} components, then you join Windows nodes to the Linux cluster.
-
-The geeky details of what you get by default:
-
-
-
-**Kubernetes**
-
-1. [Create a Linux cluster](#create-a-linux-cluster)
-1. [Ensure pods run on the correct nodes](#ensure-pods-run-on-the-correct-nodes)
-1. [Prepare Windows nodes to join the Linux cluster](#prepare-windows-nodes-to-join-the-linux-cluster)
-
-**{{prodname}}**
-
-1. [Install Calico on Linux control and worker nodes](#install-calico-on-linux-control-and-worker-nodes)
-1. [Install Calico and Kubernetes on Windows nodes](#install-calico-and-kubernetes-on-windows-nodes)
-
-### Create a Linux cluster
-
-There are many ways to create a Linux Kubernetes cluster. We regularly test {{prodnameWindows}} with `kubeadm`.
-
-### Ensure pods run on the correct nodes
-
-A primary issue of running a hybrid Kubernetes cluster is that many Kubernetes manifests do not specify a **node selector** to restrict where their pods can run. For example, `kubeadm` installs `kube-proxy` (Kubernetes per-host NAT daemon) using a DaemonSet that does not include a node selector. This means that the kube-proxy pod, which only supports Linux, will be scheduled to both Linux and Windows nodes. Services/pods that should run only on Linux nodes (such as the `kube-proxy` DaemonSet) should be started with a node selector to avoid attempting to schedule them to Windows nodes.
-
-To get around this for `kube-proxy`:
-
-1. Use `kubectl` to retrieve the DaemonSet.
-
- ```
- kubectl get ds kube-proxy -n kube-system -o yaml > kube-proxy.yaml
- ```
-
-1. Modify the `kube-proxy.yaml` file to include a node selector that selects only Linux nodes:
-
- ```yaml noValidation
- spec:
- template:
- ...
- spec:
- nodeSelector:
- kubernetes.io/os: linux
- containers:
- ```
-
-1. Apply the updated manifest.
-
- ```
- kubectl apply -f kube-proxy.yaml
- ```
-
-A similar change may be needed for other Kubernetes services (such as `kube-dns` or `core-dns`).
-
-### Prepare Windows nodes to join the Linux cluster
-
-On each Windows node, follow the steps below to configure `kubelet` and `kube-proxy` service.
-
-**Step 1: Configure kubelet**
-
-`kubelet` must be configured to use CNI networking by setting the following command line arguments, depending on the installed container runtime.
-
-For Docker:
-
-- `--network-plugin=cni`
-- `--cni-bin-dir=`
-- `--cni-conf-dir=`
-
-For containerd:
-
-- `--container-runtime=remote`
-- `--container-runtime-endpoint=npipe:////.//pipe//containerd-containerd`
-
-The CNI bin and conf dir settings are required by the {{prodname}} installer to install the CNI binaries and configuration file.
-
-:::note
-
-Among other parameters, the containerd configuration file includes options to configure the CNI bin and conf dirs.
-
-:::
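-
-For orientation, a minimal sketch of a Docker-based `kubelet` invocation combining the flags above with the settings discussed next; the paths follow the defaults used elsewhere in this guide, and the packaged `kubelet-service.ps1` script described below is the recommended way to do this:
-
-```powershell
-# Illustrative only; the node IP below is a placeholder.
-$nodeIp = '10.0.0.10'   # replace with the host's main network adapter IP
-c:\k\kubelet.exe --kubeconfig=c:\k\config `
-  --network-plugin=cni `
-  --cni-bin-dir=c:\k\cni `
-  --cni-conf-dir=c:\k\cni\config `
-  --hostname-override=$(hostname) `
-  --node-ip=$nodeIp `
-  --pod-infra-container-image=kubeletwin/pause
-```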
-
-The following kubelet settings are also important:
-
-- `--hostname-override` can be set to $(hostname) to match {{prodname}}'s default. `kubelet` and {{prodname}} must agree on the host/nodename; if your network environment results in hostnames that vary over time you should set the hostname override to a static value per host and update {{prodname}}'s nodename accordingly.
-- `--node-ip` should be used to explicitly set the IP that kubelet reports to the API server for the node. We recommend setting this to the host's main network adapter's IP since we've seen kubelet incorrectly use an IP assigned to a HNS bridge device rather than the host's network adapter.
-- Because of a Windows networking limitation, if using {{prodname}} IPAM, `--max-pods` should be set to, at most, the IPAM block size of the IP pool in use minus 4 (see the example pool after the table):
-
- | **IP pool block size** | **Max pods** |
- | ---------------------- | -------------- |
-  | /n | 2^(32-n) - 4 |
- | /24 | 252 |
- | /25 | 124 |
- | /26 (default) | 60 |
- | /27 | 28 |
- | /28 | 12 |
- | /29 | 4 |
- | /30 or above | Cannot be used |
-
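-If you need more pods per Windows node, a larger block size can be configured on the IP pool before any blocks are allocated, as in the following sketch; the pool name and CIDR are examples only, and any encapsulation settings should match your cluster:
-
-```bash
-# Example only: /24 blocks leave 252 usable pod IPs per Windows node.
-calicoctl apply -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: windows-pods-ippool
-spec:
-  cidr: 10.244.0.0/16
-  blockSize: 24
-  natOutgoing: true
-EOF
-```
-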
-In addition, it's important that `kubelet` is started after the vSwitch has been created, which happens when {{prodname}} initializes the dataplane. Otherwise, `kubelet` can be disconnected from the API server when the vSwitch is created.
-
-**AWS users**: If using the AWS cloud provider, you should add the following argument to the `kubelet`:
-
-`--hostname-override=` (and set the {{prodname}} nodename variable to match). In addition, you should add `KubernetesCluster=` as a tag when creating your Windows instance.
-
-**As a quickstart**, the {{prodname}} package includes a sample script at `{{rootDirWindows}}\kubernetes\kubelet-service.ps1` that:
-
-- Waits for {{prodname}} to initialize the vSwitch
-- Starts `kubelet` with
- - If containerd service is running, the following flags are set:
- - --container-runtime set to `remote`
- - --container-runtime-endpoint set to `npipe:////.//pipe//containerd-containerd`
- - Otherwise, the following flags are set for Docker:
- - --network-plugin set to `cni`
- - --cni-bin-dir set to `c:\k\cni`
- - --cni-conf-dir set to `c:\k\cni\config`
- - --pod-infra-container-image set to `kubeletwin/pause`
- - --kubeconfig set to the path of node kubeconfig file
- - --hostname-override set to match {{prodname}}'s nodename
- - --node-ip set to the IP of the default vEthernet device
- - --cluster-dns set to the IPs of the dns name servers
-
-See the README in the same directory for more details. Feel free to modify the script to adjust other `kubelet` parameters.
-
-:::note
-
-The script will pause at the first stage until {{prodname}} is installed by following the instructions in the next section.
-
-:::
-
-**Step 2: Configure kube-proxy**
-
-`kube-proxy` must be configured as follows:
-
-- With the correct HNS network name used by the active CNI plugin. kube-proxy reads the HNS network name from the environment variable `KUBE_NETWORK`.
-  - With the default configuration, {{prodname}} uses the network name "{{prodname}}".
-- For VXLAN, with the source VIP for the pod subnet allocated to the node. This is the IP that kube-proxy uses when it does SNAT for a NodePort. For {{prodname}}, the source VIP should be the second IP address in the subnet chosen for the host. For example, if {{prodname}} chooses an IP block 10.0.0.0/26 then the source VIP should be 10.0.0.2. The script below will automatically wait for the block to be chosen and configure kube-proxy accordingly.
-- For {{prodname}} policy to function correctly with Kubernetes services, the WinDSR feature gate must be enabled. This requires Windows Server build 17763.1432 or greater and Kubernetes v1.14 or greater. {{prodname}} will automatically enable the WinDSR feature gate if kubernetes services are managed by {{prodnameWindows}}.
-
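-For illustration, a minimal sketch of starting kube-proxy with these settings; the network name pattern and source VIP are hypothetical values that the packaged script described below computes for you:
-
-```powershell
-# Hypothetical values for illustration only.
-$env:KUBE_NETWORK = 'Calico.*'   # HNS network name created by the Calico CNI plugin
-$sourceVip = '10.0.0.2'          # second IP of this node's IPAM block, e.g. 10.0.0.0/26 -> 10.0.0.2
-c:\k\kube-proxy.exe --proxy-mode=kernelspace `
-  --hostname-override=$(hostname) `
-  --source-vip=$sourceVip `
-  --kubeconfig=c:\k\config `
-  --feature-gates=WinDSR=true
-```
-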
-kube-proxy should be started via a script that waits for the Calico HNS network to be provisioned. The {{prodname}} package contains a suitable script for use with {{prodname}} networking at `{{rootDirWindows}}\kubernetes\kube-proxy-service.ps1`. The script:
-
-- Waits for {{prodname}} to initialize the vSwitch.
-- Calculates the correct source VIP for the local subnet.
-- Starts kube-proxy with the correct feature gates and hostname to work with {{prodname}}.
-
-See the README in the same directory for more details. Feel free to modify the script to
-adjust other kube-proxy parameters.
-
-The script will pause at the first stage until {{prodname}} is installed by following the instructions in the next section.
-
-### Install Calico on Linux control and worker nodes
-
-**If using {{prodname}} BGP networking**
-
-1. Disable the default {{prodname}} IP-in-IP networking (which is not compatible with Windows), by modifying the {{prodname}} manifest, and setting the `CALICO_IPV4POOL_IPIP` environment variable to "Never" before applying the manifest.
-
- If you do apply the manifest with the incorrect value, changing the manifest and re-applying will have no effect. To adjust the already-created IP pool:
-
- ```bash
- calicoctl get ippool -o yaml > ippool.yaml
- ```
-
- Then, modify ippool.yaml by setting the `ipipMode` to `Never` and then apply the updated manifest:
-
- ```bash
- calicoctl apply -f ippool.yaml
- ```
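-
-   For reference, the relevant fields of the edited `ippool.yaml` might look like the following; the pool name and CIDR shown are only examples, so keep the values from your cluster:
-
-   ```yaml
-   apiVersion: projectcalico.org/v3
-   kind: IPPool
-   metadata:
-     name: default-ipv4-ippool
-   spec:
-     cidr: 192.168.0.0/16
-     ipipMode: Never
-     natOutgoing: true
-   ```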
-
-**If using {{prodname}} VXLAN networking**
-
-1. Modify VXLAN as described in [Customize the manifests](../../self-managed-onprem/config-options.mdx) guide. Note the following:
-
- - Windows can support only a single type of IP pool so it is important that you use only a single VXLAN IP pool in this mode.
- - Windows supports only VXLAN on port 4789 and VSID ≥ 4096. {{prodname}}'s default (on Linux and Windows) is to use port 4789 and VSID 4096.
-
-1. Apply the manifest using `calicoctl`, and verify that you have a single pool with `VXLANMODE Always`.
-
- ```bash
- calicoctl get ippool -o wide
- ```
-
-1. For Linux control nodes using {{prodname}} networking, strict affinity must be set to `true`.
- This is required to prevent Linux nodes from borrowing IP addresses from Windows nodes:
- ```bash
- kubectl patch ipamconfigurations default --type merge --patch='{"spec": {"strictAffinity": true}}'
- ```
-
-### Install Calico and Kubernetes on Windows nodes
-
-Follow the steps below on each Windows node to install Kubernetes and {{prodname}}:
-
-**If using {{prodname}} BGP**
-
-Install the RemoteAccess service using the following PowerShell commands:
-
-```powershell
-Install-WindowsFeature RemoteAccess
-Install-WindowsFeature RSAT-RemoteAccess-PowerShell
-Install-WindowsFeature Routing
-```
-
-Then restart the computer:
-
-```powershell
-Restart-Computer -Force
-```
-
-before running:
-
-```powershell
-Install-RemoteAccess -VpnType RoutingOnly
-```
-
-Sometimes the remote access service fails to start automatically after install. To make sure it is running, execute the following command:
-
-```powershell
-Start-Service RemoteAccess
-```
-
-1. If using a non-{{prodname}} network plugin for networking, install and verify it now.
-2. Edit the install configuration file, `config.ps1` as follows:
-
- | **Set this variable...** | To... |
- | ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
- | $env:KUBE_NETWORK | CNI plugin you plan to use. For {{prodname}}, set the variable to `{{prodname}}.*` |
- | $env:CALICO_NETWORKING_BACKEND | `windows-bgp` `vxlan` or `none` (if using a non-{{prodname}} CNI plugin). |
- | $env:CNI\_ variables | Location of your Kubernetes installation. |
- | $env:K8S_SERVICE_CIDR | Your Kubernetes service cluster IP CIDR. |
- | $env:CALICO_DATASTORE_TYPE | {{prodname}} datastore you want to use. |
- | $env:KUBECONFIG | Location of the kubeconfig file {{prodname}} should use to access the Kubernetes API server. To set up a secure kubeconfig with the correct permissions for {{prodnameWindows}}, see [Create a kubeconfig](../kubeconfig.mdx) for {{prodnameWindows}}. |
- | $env:ETCD\_ parameters | etcd3 datastore parameters. **Note**: Because of a limitation of the Windows dataplane, a Kubernetes service ClusterIP cannot be used for the etcd endpoint (the host compartment cannot reach Kubernetes services). |
- | $env:NODENAME | Hostname used by kubelet. The default uses the node's hostname. **Note**: If you are using the sample kubelet start-up script from the {{prodname}} package, kubelet is started with a hostname override that forces it to use this value. |
- | | For AWS to work properly, kubelet should use the node's internal domain name for the AWS integration. |
-
-3. Run the installer.
-
- - Change directory to the location that you unpacked the archive. For example:
-
-```powershell
-cd {{rootDirWindows}}
-```
-
-- Run the install script:
-
-```
-.\install-calico.ps1
-```
-
-:::note
-
-The installer initializes the Windows vSwitch, which can cause a short connectivity outage as the networking stack is reconfigured. After running that command, you may need to:
-
-- Reconnect to your remote desktop session.
-- Restart `kubelet` and `kube-proxy` if they were already running.
-- If you haven't started `kubelet` and `kube-proxy` already, you should do so now. The quickstart scripts provided in the {{prodname}} package provide an easy way to do this. {{prodname}} requires `kubelet` to be running to complete its per-node configuration (since Kubelet creates the Kubernetes Node resource).
-
-:::
-
-:::note
-
- After you run the installer, do not move the directory because the service registration refers to the path of the directory.
-
-:::
-
-4. Verify that the {{prodname}} services are running.
-
- ```powershell
- Get-Service -Name CalicoNode
- Get-Service -Name CalicoFelix
- ```
-
-## Next steps
-
-- [Create a kubeconfig](../kubeconfig.mdx)
-- [Review network policy limitations in Windows](../limitations.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/limitations.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/limitations.mdx
deleted file mode 100644
index ce10cabfb1..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/limitations.mdx
+++ /dev/null
@@ -1,174 +0,0 @@
----
-description: Review limitations before starting installation.
----
-
-# Limitations and known issues
-
-## Calico for Windows feature limitations
-
-| Feature | Support |
-| ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| Distributions | **Supported:** - EKS - AKS - AWS - GCE - Azure - Kubernetes on-premises - Kubernetes on DigitalOcean - OpenShift - Rancher RKE <br/> **Not supported**: - Operator install - Non-cluster hosts - Typha component for scaling (Linux-based feature) |
-| Networking | **Supported**: - Calico VXLAN, no cross-subnet or VXLAN MTU settings with [limitations](#{{prodname}}-vxlan-networking-limitations) - Calico non-overlay mode with BGP peering with [limitations](#{{prodname}}-bgp-networking-limitations) - IPv4 <br/> **Not supported**: - Overlay mode with BGP peering - IP in IP overlay with BGP routing - Cross-subnet support and MTU setting for VXLAN - IPv6 and dual stack - Service advertisement |
-| Security | **Not supported**: - Application Layer Policy (ALP) for Istio - Policy for hosts (host endpoints, including automatic host endpoints) - Encryption with WireGuard |
-| Operations | **Not supported**: - Calico node status |
-| Metrics | **Not supported**: Prometheus monitoring |
-| eBPF | **Not supported**: (Linux-based feature) |
-
-## {{prodname}} BGP networking limitations
-
-If you are using {{prodname}} with BGP, note these current limitations with Windows.
-
-| Feature | Limitation |
-| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| IP mobility/borrowing | {{prodname}} IPAM allocates IPs to hosts in blocks for aggregation purposes. If the IP pool is full, nodes can also "borrow" IPs from another node's block. In BGP terms, the borrower then advertises a more specific "/32" route for the borrowed IP and traffic for that IP only is routed to the borrowing host. <br/> Windows nodes do not support this borrowing mechanism; they will not borrow IPs even if the IP pool is full and they mark their blocks so that Linux nodes will not borrow from them. |
-| IPs reserved for Windows | {{prodname}} IPAM allocates IPs in CIDR blocks. Due to networking requirements on Windows, four IPs per Windows node-owned block must be reserved for internal purposes. <br/> For example, with the default block size of /26, each block contains 64 IP addresses, 4 are reserved for Windows, leaving 60 for pod networking. <br/> To reduce the impact of these reservations, a larger block size can be configured at the IP pool scope (before any pods are created). |
-| Single IP block per host | {{prodname}} IPAM is designed to allocate blocks of IPs (default size /26) to hosts on demand. While the {{prodname}} CNI plugin was written to do the same, kube-proxy currently only supports a single IP block per host. <br/> To work around the default limit of one /26 per host, there are some options: <br/> - With {{prodname}} BGP networking and the etcd datastore, before creating any blocks, change the block size used by the IP pool so that it is sufficient for the largest number of pods that are to be used on a single Windows host. - Use {{prodname}} BGP networking with the Kubernetes datastore. In that mode, {{prodname}} IPAM is not used and the CNI host-local IPAM plugin is used with the node's Pod CIDR. <br/> To allow multiple IPAM blocks per host (at the expense of kube-proxy compatibility), set the `windows_use_single_network` flag to `false` in the `cni.conf.template` before installing {{prodname}}. Changing that setting after pods are networked is not recommended because it may leak HNS endpoints. |
-| IP-in-IP overlay | {{prodname}}'s IPIP overlay mode cannot be used in clusters that contain Windows nodes because Windows does not support IP-in-IP. |
-| NATOutgoing | {{prodname}} IP pools support a "NAT outgoing" setting with the following behaviour: <br/> - Traffic between {{prodname}} workloads (in any IP pools) is not NATted. - Traffic leaving the configured IP pools is NATted if the workload has an IP within an IP pool that has NAT outgoing enabled. {{prodnameWindows}} honors the above setting but it is only applied at pod creation time. If the IP pool configuration is updated after a pod is created, the pod's traffic will continue to be NATted (or not) as before. NAT policy for newly-networked pods will honor the new configuration. {{prodnameWindows}} automatically adds the host itself and its subnet to the NAT exclusion list. This behaviour can be disabled by setting flag `windows_disable_host_subnet_nat_exclusion` to `true` in `cni.conf.template` before running the install script. |
-| Service IP advertisement | This {{prodname}} feature is not supported on Windows. |
-
-### Check your network configuration
-
-If you are using a networking type that requires layer 2 reachability (such as {{prodname}} with a BGP mesh and no peering to your fabric), you can check that your network has layer 2 reachability as follows:
-
-On each of your nodes, check the IP network of the network adapter that you plan to use for pod networking. For example, on Linux, assuming your network adapter is eth0, you can run:
-
-```
-$ ip addr show eth0
-2: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
-    link/ether 00:0c:29:cb:c8:19 brd ff:ff:ff:ff:ff:ff
-    inet 192.168.171.136/24 brd 192.168.171.255 scope global eth0
-       valid_lft forever preferred_lft forever
-    inet6 fe80::20c:29ff:fecb:c819/64 scope link
-       valid_lft forever preferred_lft forever
-```
-
-In this case, the IPv4 address is 192.168.171.136/24, which, after applying the /24 mask, gives 192.168.171.0/24 for the IP network.
-
-Similarly, on Windows, you can run:
-
-```
-PS C:\> ipconfig
-
-Windows IP Configuration
-
-Ethernet adapter vEthernet (Ethernet 2):
-
-   Connection-specific DNS Suffix  . : us-west-2.compute.internal
-   Link-local IPv6 Address . . . . . : fe80::6d10:ccdd:bfbe:bce2%15
-   IPv4 Address. . . . . . . . . . . : 172.20.41.103
-   Subnet Mask . . . . . . . . . . . : 255.255.224.0
-   Default Gateway . . . . . . . . . : 172.20.32.1
-
-```
-
-In this case, the IPv4 address is 172.20.41.103 and the mask is represented as bytes 255.255.224.0 rather than CIDR notation. Applying the mask, we get a network address 172.20.32.0/19.
-
-Because the Linux node is on the network 192.168.171.0/24 and the Windows node is on a different network, 172.20.32.0/19, they are unlikely to be on the same layer 2 network.
-
-## {{prodname}} VXLAN networking limitations
-
-Because of differences between the Linux and Windows dataplane feature sets, the following {{prodname}} features are not supported on Windows.
-
-| Feature | Limitation |
-| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| IPs reserved for Windows | {{prodname}} IPAM allocates IPs in CIDR blocks. Due to networking requirements on Windows, four IPs per Windows node-owned block must be reserved for internal purposes. <br/> For example, with the default block size of /26, each block contains 64 IP addresses, 4 are reserved for Windows, leaving 60 for pod networking. <br/> To reduce the impact of these reservations, a larger block size can be configured at the IP pool scope (before any pods are created). |
-| Single IP block per host | {{prodname}} IPAM is designed to allocate blocks of IPs (default size /26) to hosts on demand. While the {{prodname}} CNI plugin was written to do the same, kube-proxy currently only supports a single IP block per host. To allow multiple IPAM blocks per host (at the expense of kube-proxy compatibility), set the `windows_use_single_network` flag to `false` in the `cni.conf.template` before installing {{prodname}}. Changing that setting after pods are networked is not recommended because it may leak HNS endpoints. |
-
-## Routes are lost in cloud providers
-
-If you create a Windows host with a cloud provider (AWS for example), the creation of the vSwitch at {{prodname}} install time can remove the cloud provider's metadata route. If your application relies on the metadata service, you may need to examine the routing table before and after installing {{prodname}} to reinstate any lost routes.
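-
-One way to do this is to capture the route table before the install and compare it afterwards, for example:
-
-```powershell
-# Capture the IPv4 route table before installing, then diff it after the install.
-Get-NetRoute -AddressFamily IPv4 | Sort-Object DestinationPrefix | Out-File c:\routes-before.txt
-# ... install Calico for Windows ...
-Get-NetRoute -AddressFamily IPv4 | Sort-Object DestinationPrefix | Out-File c:\routes-after.txt
-Compare-Object (Get-Content c:\routes-before.txt) (Get-Content c:\routes-after.txt)
-```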
-
-## VXLAN limitations
-
-**VXLAN support**
-
-- Windows 1903 build 18317 and above
-- Windows 1809 build 17763 and above
-
-**Configuration updates**
-
-Certain configuration changes are not honored after the first pod is networked, because Windows does not currently support updating the VXLAN subnet parameters after the network is created. Changing those parameters requires the node to be drained and the {{prodname}} HNS network to be deleted and recreated.
-
-One example is the VXLAN VNI setting. To change such parameters:
-
-- Drain the node of all pods
-- Delete the {{prodname}} HNS network:
-
- ```powershell
- Import-Module -DisableNameChecking {{rootDirWindows}}\libs\hns\hns.psm1
- Get-HNSNetwork | ? Name -EQ "{{prodname}}" | Remove-HNSNetwork
- ```
-
-- Update the configuration in `config.ps1`, run `uninstall-calico.ps1` and then `install-calico.ps1` to regenerate the CNI configuration.
-
-## Pod-to-pod connections are dropped with TCP reset packets
-
-Restarting Felix or changes to policy (including changes to endpoints referred to in policy) can cause pod-to-pod connections to be dropped with TCP reset packets. When one of the following occurs:
-
-- The policy that applies to a pod is updated
-- Some ingress or egress policy that applies to a pod contains selectors and the set of endpoints that those selectors match changes
-
-Felix must reprogram the HNS ACL policy attached to the pod. This reprogramming can cause TCP resets. Microsoft has confirmed this is a HNS issue, and they are investigating.
-
-## Service ClusterIPs incompatible with selectors/pod IPs in network policy
-
-**Windows 1809 prior to build 17763.1432**
-
-On Windows nodes, kube-proxy unconditionally applies source NAT to traffic from local pods to service ClusterIPs. This means that, at the destination pod, where policy is applied, the traffic appears to come from the source host rather than the source pod. In turn, this means that a network policy with a source selector matching the source pod will not match the expected traffic.
-
-## Network policy and using selectors
-
-Under certain conditions, relatively simple {{prodname}} policies can require significant Windows dataplane resources, which can cause high CPU and memory usage and large policy programming latency.
-
-We recommend avoiding policies that contain rules with both a source and destination selector. The following is an example of a policy that would be inefficient. The policy applies to all workloads, and it only allows traffic from workloads labeled as clients to workloads labeled as servers:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: calico-dest-selector
-spec:
- selector: all()
- order: 500
- ingress:
- - action: Allow
- destination:
- selector: role == "webserver"
- source:
- selector: role == "client"
-```
-
-Because the policy applies to all workloads, it will be rendered once per workload (even if the workload is not labeled as a server), and then the selectors will be expanded into many individual dataplane rules to capture the allowed connectivity.
-
-Here is a much more efficient policy that still allows the same traffic:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: calico-dest-selector
-spec:
- selector: role == "webserver"
- order: 500
- ingress:
- - action: Allow
- source:
- selector: role == "client"
-```
-
-The destination selector is moved into the policy selector, so this policy is only rendered for workloads that have the `role: webserver` label. In addition, the rule is simplified so that it only matches on the source of the traffic. Depending on the number of webserver pods, this change can reduce the dataplane resource usage by several orders of magnitude.
-
-## Next steps
-
-- [Quickstart](quickstart.mdx)
-- [Standard install](kubernetes/standard.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/maintain.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/maintain.mdx
deleted file mode 100644
index b607d8441f..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/maintain.mdx
+++ /dev/null
@@ -1,65 +0,0 @@
----
-description: Tasks to manage Calico services and uninstall Calico for Windows.
----
-
-# Start and stop Calico for Windows services
-
-## Big picture
-
-Start, stop, and update {{prodnameWindows}} services on your Windows nodes, and uninstall {{prodnameWindows}}.
-
-## How to
-
-### Start and stop {{prodnameWindows}} services
-
-- Install and boot {{prodnameWindows}}: `install-calico.ps1`
-- Start {{prodnameWindows}} services: `start-calico.ps1`
-- Stop {{prodnameWindows}} services: `stop-calico.ps1`
-
-### Update {{prodname}} services
-
-To change the parameters defined in `config.ps1`:
-
-- Run `uninstall-calico.ps1` to remove {{prodnameWindows}} service configuration
-- Modify the configuration
-- Run `install-calico.ps1` to reinstall {{prodnameWindows}}.
-
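-For example, a minimal sequence using the scripts named above (edit whichever `config.ps1` variables you need):
-
-```powershell
-cd {{rootDirWindows}}
-.\uninstall-calico.ps1
-notepad .\config.ps1      # e.g. change $env:CALICO_NETWORKING_BACKEND
-.\install-calico.ps1
-```
-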
-Because `config.ps1` is imported by the various component startup scripts, additional environment variables can be added, as documented in the [{{prodname}} reference guide](../../../reference/index.mdx).
-
-### Update service wrapper configuration
-
-The `nssm` command supports changing a number of configuration options for the {{prodname}} services. For example, to adjust the maximum size of the Felix log file before it is rotated:
-
-```powershell
-nssm set CalicoFelix AppRotateBytes 1048576
-```
-
-### Uninstall {{prodnameWindows}} from Windows nodes
-
-The following steps remove {{prodnameWindows}} (for example, to change its configuration) but keep the cluster running.
-
-1. Remove all pods from the Windows nodes.
-1. On each Windows node, run the uninstall script:
-
- ```powershell
- {{rootDirWindows}}\uninstall-calico.ps1
- ```
-
- :::note
-
- If you are uninstalling to change configuration, make sure that you run the uninstall script with the old configuration file.
-
- :::
-
-### Uninstall kubelet and kube-proxy services from Windows nodes
-
-The following steps uninstall kubelet/kube-proxy services if they were installed by running `{{rootDirWindows}}\kubernetes\install-kube-services.ps1`.
-
-1. Remove all pods from the Windows nodes.
-1. On each Windows node, run the uninstall script:
-
- ```
- {{rootDirWindows}}\kubernetes\uninstall-kube-services.ps1
- ```
-
-1. If desired, delete the `{{rootDirWindows}}` directory.
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/openshift-installation.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/openshift-installation.mdx
deleted file mode 100644
index 216188667d..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/openshift-installation.mdx
+++ /dev/null
@@ -1,339 +0,0 @@
----
-description: Install Calico on an OpenShift 4 cluster on Windows nodes
----
-
-# Install an OpenShift 4 cluster on Windows nodes
-
-:::note
-
-Currently, {{prodnameWindows}} supports OpenShift versions only up to v4.5 because it requires the Windows Machine Config Bootstrapper binary (wmcb.exe) for adding Windows nodes to clusters. OpenShift v4.6+ does not support the Windows Machine Config Bootstrapper binary and uses the Red Hat Windows Machine Config Operator (WMCO), which does not correctly recognize {{prodname}} networking in the cluster.
-
-:::
-
-## Big picture
-
-Install an OpenShift 4 cluster on AWS with {{prodname}} on Windows nodes.
-
-## Value
-
-Run Windows workloads on OpenShift 4 with {{prodname}}.
-
-## How to
-
-### Before you begin
-
-- Ensure that your environment meets the {{prodname}} [system requirements](../openshift/requirements.mdx).
-
-- Ensure that you have [configured an AWS account](https://docs.openshift.com/container-platform/4.4/installing/installing_aws/installing-aws-account.html) appropriate for OpenShift 4,
- and have [set up your AWS credentials](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/setup-credentials.html).
- Note that the OpenShift installer supports a subset of [AWS regions](https://docs.openshift.com/container-platform/4.4/installing/installing_aws/installing-aws-account.html#installation-aws-regions_installing-aws-account).
-
-- Ensure that you have a [RedHat account](https://cloud.redhat.com/). A RedHat account is required to obtain the pull secret necessary to provision an OpenShift cluster.
-
-- Ensure that you have installed the OpenShift installer **v4.4 or later** and OpenShift command line interface from [cloud.redhat.com](https://cloud.redhat.com/openshift/install/aws/installer-provisioned).
-
-- Ensure that you have [generated a local SSH private key](https://docs.openshift.com/container-platform/4.4/installing/installing_aws/installing-aws-default.html#ssh-agent-using_installing-aws-default) and have added it to your ssh-agent
-
-**Limitations**
-
-Due to an [upstream issue](https://bugzilla.redhat.com/show_bug.cgi?id=1768858), Windows pods can only be run in specific namespaces if you disable SCC.
-To do this, label the namespace with `openshift.io/run-level: "1"`.
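-
-For example, where `<namespace>` is a placeholder for the namespace that will run Windows pods:
-
-```bash
-oc label namespace <namespace> openshift.io/run-level=1
-```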
-
-### Create a configuration file for the OpenShift installer
-
-First, create a staging directory for the installation. This directory will contain the configuration file, along with the cluster state files that the OpenShift installer creates:
-
-```bash
-mkdir openshift-tigera-install && cd openshift-tigera-install
-```
-
-Now run OpenShift installer to create a default configuration file:
-
-```bash
-openshift-install create install-config
-```
-
-:::note
-
-Refer to the [OpenShift installer documentation](https://cloud.redhat.com/openshift/install) for more information
-about the installer and any configuration changes required for your platform.
-
-:::
-
-Once the installer has finished, your staging directory will contain the configuration file `install-config.yaml`.
-
-### Update the configuration file to use {{prodname}}
-
-Override the OpenShift networking to use Calico and update the AWS instance types to meet the [system requirements](../openshift/requirements.mdx):
-
-```bash
-sed -i 's/\(OpenShiftSDN\|OVNKubernetes\)/Calico/' install-config.yaml
-```
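-
-For reference, the relevant parts of the edited `install-config.yaml` might then look like the following; the instance type and replica count are illustrative only:
-
-```yaml
-networking:
-  networkType: Calico
-compute:
-  - name: worker
-    platform:
-      aws:
-        type: m5.xlarge # choose a size that meets the requirements linked above
-    replicas: 3
-```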
-
-### Generate the install manifests
-
-Now generate the Kubernetes manifests using your configuration file:
-
-```bash
-openshift-install create manifests
-```
-
-
-
-### Configure VXLAN
-
-Edit the Installation custom resource manifest `manifests/01-cr-installation.yaml` so that it configures an OpenShift {{prodname}} cluster with VXLAN enabled and BGP disabled:
-
-```yaml
-apiVersion: operator.tigera.io/v1
-kind: Installation
-metadata:
- name: default
-spec:
- variant: Calico
- calicoNetwork:
- bgp: Disabled
- ipPools:
- - blockSize: 26
- cidr: 10.128.0.0/14
- encapsulation: VXLAN
- natOutgoing: Enabled
- nodeSelector: all()
-```
-
-### Create the cluster
-
-Start the cluster creation with the following command and wait for it to complete.
-
-```bash
-openshift-install create cluster
-```
-
-Once the above command is complete, you can verify that {{prodname}} is installed by checking that its components are available with the following command.
-
-```bash
-oc get tigerastatus
-```
-
-:::note
-
-To get more information, add `-o yaml` to the above command.
-
-:::
-
-Next, [install calicoctl](../../../operations/calicoctl/install.mdx) and ensure strict affinity is true:
-
-```bash
-kubectl patch ipamconfigurations default --type merge --patch='{"spec": {"strictAffinity": true}}'
-```
-
-### Add Windows nodes to the cluster
-
-Download the latest [Windows Node Installer (WNI)](https://github.com/openshift/windows-machine-config-bootstrapper/releases) binary `wni` that matches your OpenShift minor version.
-
-:::note
-
-For OpenShift 4.6, use the latest wni for OpenShift 4.5. A wni binary for OpenShift 4.6 is not published yet.
-
-:::
-
-Next, determine the AMI ID corresponding to Windows Server 1903 (build 18317) or greater. `wni` defaults to using Windows Server 2019 (build 10.0.17763), which does not include WinDSR support.
-One way to do this is by searching for AMIs matching the string `Windows_Server-1903-English-Core-ContainersLatest` in the Amazon EC2 console.
-
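-Alternatively, you can query for matching AMIs with the AWS CLI; the owner and name filters below are examples and may need adjusting for your account and region:
-
-```bash
-aws ec2 describe-images \
-  --owners amazon \
-  --filters "Name=name,Values=Windows_Server-1903-English-Core-ContainersLatest-*" \
-  --query 'Images[].[ImageId,Name,CreationDate]' \
-  --output table
-```
-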
-Next, run `wni` to add a Windows node to your cluster. Replace AMI_ID, AWS_CREDENTIALS_PATH, AWS_KEY_NAME and AWS_PRIVATE_KEY_PATH with your values:
-
-```bash
-chmod u+x wni
-./wni aws create \
- --image-id AMI_ID \
- --kubeconfig openshift-tigera-install/auth/kubeconfig \
- --credentials AWS_CREDENTIALS_PATH \
- --credential-account default \
- --instance-type m5a.large \
- --ssh-key AWS_KEY_NAME \
- --private-key AWS_PRIVATE_KEY_PATH
-```
-
-An example of running the above steps:
-
-```
-$ chmod u+x wni
-$ ./wni aws create \
-> --kubeconfig openshift-tigera-install/auth/kubeconfig \
-> --credentials ~/.aws/credentials \
-> --credential-account default \
-> --instance-type m5a.large \
-> --ssh-key test-key \
-> --private-key /home/user/.ssh/test-key.pem
-2020/10/05 12:52:51 kubeconfig source: /home/user/openshift-tigera-install/auth/kubeconfig
-2020/10/05 12:52:59 Added rule with port 5986 to the security groups of your local IP
-2020/10/05 12:52:59 Added rule with port 22 to the security groups of your local IP
-2020/10/05 12:52:59 Added rule with port 3389 to the security groups of your local IP
-2020/10/05 12:52:59 Using existing Security Group: sg-06d1de22807d5dc48
-2020/10/05 12:57:30 External IP: 52.35.12.231
-2020/10/05 12:57:30 Internal IP: 10.0.90.193
-```
-
-### Get the administrator password
-
-The `wni` binary writes the instance details to the file `windows-node-installer.json`. An example of the file:
-
-```
-{"InstanceIDs":["i-02e13d4cc76c13c83"],"SecurityGroupIDs":["sg-0a777565d64e1d2ef"]}
-```
-
-Use the instance ID from the file and the path of the private key used to create the instance to get the Administrator user's password:
-
-```bash
-aws ec2 get-password-data --instance-id --priv-launch-key
-```
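-
-For example, using the values from the earlier output (your instance ID and key path will differ):
-
-```bash
-aws ec2 get-password-data \
-  --instance-id i-02e13d4cc76c13c83 \
-  --priv-launch-key /home/user/.ssh/test-key.pem
-```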
-
-### Install {{prodnameWindows}}
-
-1. Remote into the Windows node, open a PowerShell window, and prepare the directory for Kubernetes files.
-
- ```powershell
- mkdir c:\k
- ```
-
-1. Copy the Kubernetes kubeconfig file (default location: openshift-tigera-install/auth/kubeconfig), to the file **c:\k\config**.
-
-1. Download the PowerShell script, **install-calico-windows.ps1**.
-
- ```powershell
- Invoke-WebRequest {{calicoReleasesURL}}/{{releaseTitle}}/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1
- ```
-
-1. Run the installation script, replacing the Kubernetes version with the version corresponding to your version of OpenShift.
-
- ```powershell
- c:\install-calico-windows.ps1 -KubeVersion -ServiceCidr 172.30.0.0/16 -DNSServerIPs 172.30.0.10
- ```
-
- :::note
-
- Get the Kubernetes version with `oc version` and use only the major, minor, and patch version numbers. For example from a cluster that returns:
-
- ```
- $ oc version
- Client Version: 4.5.3
- Server Version: 4.5.14
- Kubernetes Version: v1.18.3+5302882
- ```
-
- You will use `1.18.3`:
-
- :::
-
-1. Install and start the kube-proxy service by executing the following PowerShell commands.
-
- ```powershell
- C:\CalicoWindows\kubernetes\install-kube-services.ps1 -service kube-proxy
- Start-Service -Name kube-proxy
- ```
-
-1. Verify kube-proxy service is running.
-
- ```powershell
- Get-Service -Name kube-proxy
- ```
-
-### Configure kubelet
-
-From the Windows node, download the Windows Machine Config Bootstrapper `wmcb.exe` that matches your OpenShift minor version from [Windows Machine Config Bootstrapper releases](https://github.com/openshift/windows-machine-config-bootstrapper/releases). For example, for OpenShift 4.5.x:
-
-```powershell
-curl https://github.com/openshift/windows-machine-config-bootstrapper/releases/download/v4.5.2-alpha/wmcb.exe -o c:\wmcb.exe
-```
-
-:::note
-
-For OpenShift 4.6, use the latest wmcb.exe for OpenShift 4.5. A wmcb.exe binary for OpenShift 4.6 is not published yet.
-
-:::
-
-Next, we will download the `worker.ign` file from the API server:
-
-```powershell
-$apiServer = c:\k\kubectl --kubeconfig c:\k\config get po -n openshift-kube-apiserver -l apiserver=true --no-headers -o custom-columns=":metadata.name" | select -first 1
-c:\k\kubectl --kubeconfig c:\k\config -n openshift-kube-apiserver exec $apiserver -- curl -ks https://localhost:22623/config/worker > c:\worker.ign
-((Get-Content c:\worker.ign) -join "`n") + "`n" | Set-Content -NoNewline c:\worker.ign
-```
-
-Next, we run wmcb to configure the kubelet:
-
-```powershell
-c:\wmcb.exe initialize-kubelet --ignition-file worker.ign --kubelet-path c:\k\kubelet.exe
-```
-
-:::note
-
-The kubelet configuration installed by Windows Machine Config
-Bootstrapper includes `--register-with-taints="os=Windows:NoSchedule"` which
-will require Windows pods to tolerate that taint.
-
-:::
-
-Next, we make a copy of the kubeconfig because `wmcb.exe` expects the kubeconfig to be the file `c:\k\kubeconfig`.
-Then we configure kubelet to use Calico CNI:
-
-```powershell
-cp c:\k\config c:\k\kubeconfig
-c:\wmcb.exe configure-cni --cni-dir c:\k\cni --cni-config c:\k\cni\config\10-calico.conf
-```
-
-Finally, clean up the additional files created on the Windows node:
-
-```powershell
-rm c:\k\kubeconfig,c:\wmcb.exe,c:\worker.ign
-```
-
-Exit the remote session to the Windows node and return to a shell to a Linux
-node.
-
-We need to approve the CSRs generated by the kubelet's bootstrapping process. First, view the pending CSRs:
-
-```bash
-oc get csr
-```
-
-For example:
-
-```
-$ oc get csr
-NAME AGE SIGNERNAME REQUESTOR CONDITION
-csr-55brx 4m32s kubernetes.io/kube-apiserver-client-kubelet system:admin Approved,Issued
-csr-bmnfd 4m30s kubernetes.io/kubelet-serving system:node:ip-10-0-45-102.us-west-2.compute.internal Pending
-csr-hwl89 5m1s kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending
-```
-
-To approve the pending CSRs:
-
-```bash
-oc get csr -o name | xargs oc adm certificate approve
-```
-
-For example:
-
-```
-$ oc get csr -o name | xargs oc adm certificate approve
-certificatesigningrequest.certificates.k8s.io/csr-55brx approved
-certificatesigningrequest.certificates.k8s.io/csr-bmnfd approved
-certificatesigningrequest.certificates.k8s.io/csr-hwl89 approved
-```
-
-Finally, wait a minute or so and get all nodes:
-
-```
-$ oc get node -owide
-```
-
-If the Windows node registered itself successfully, it should appear in the list with a Ready status, ready to run Windows pods!
-
-## Next steps
-
-**Recommended - Security**
-
-- [Secure Calico component communications](../../../network-policy/comms/crypto-auth.mdx)
-- [Secure pods with Calico network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx)
-- If you are using {{prodname}} with Istio service mesh, get started here: [Enable application layer policy](../../../network-policy/istio/app-layer-policy.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/quickstart.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/quickstart.mdx
deleted file mode 100644
index 49d7c3e5aa..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/quickstart.mdx
+++ /dev/null
@@ -1,525 +0,0 @@
----
-description: Install Calico for Windows on a Kubernetes cluster for testing or development.
----
-
-# Quickstart
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Install {{prodnameWindows}} on your Kubernetes cluster in approximately 5 minutes.
-
-## Concepts
-
-{{prodnameWindows}} is a hybrid implementation that requires a Linux cluster for {{prodname}} components and Linux workloads, and Windows nodes for Windows workloads.
-
-## Before you begin
-
-Review the [Linux requirements](../requirements.mdx) and the [{{prodnameWindows}} requirements](kubernetes/requirements.mdx).
-
-Before beginning the quickstart, set up a {{prodname}} cluster on Linux nodes and provision Windows machines.
-
-## How to
-
-- [Configure strict affinity for clusters using {{prodname}} networking](#configure-strict-affinity-for-clusters-using-calico-networking)
-- [Install {{prodnameWindows}} manually](#install-calico-for-windows-manually)
-- [Install {{prodnameWindows}} using HostProcess containers](#install-calico-for-windows-using-hostprocess-containers)
-- [Configure installation parameters](#configure-installation-parameters)
-
-### Configure strict affinity for clusters using {{prodname}} networking
-
-For Linux control nodes using {{prodname}} networking, strict affinity must be set to `true`.
-This is required to prevent Linux nodes from borrowing IP addresses from Windows nodes:
-
-```bash
-kubectl patch ipamconfigurations default --type merge --patch='{"spec": {"strictAffinity": true}}'
-```
-
-:::note
-
-If the above command fails to find the ipamconfigurations resource, you need to install the Calico API server. Refer to [installing the Calico API server](../../../operations/install-apiserver.mdx).
-
-:::
-
-### Install {{prodnameWindows}} manually
-
-The following steps install a Kubernetes cluster on a single Windows node with a Linux control node.
-
-- **Kubernetes VXLAN**
-
- The geeky details of what you get by default:
-
-
-
-- **Kubernetes BGP**
-
- The geeky details of what you get by default:
-
-
-
-- **EKS**
-
- The geeky details of what you get by default:
-
-
-
-- **AKS**
-
- The geeky details of what you get by default:
-
-
-
-
-
-
-1. Ensure that BGP is disabled since you're using VXLAN.
-   If you installed Calico using the operator, you can do this by running:
-
- ```bash
- kubectl patch installation default --type=merge -p '{"spec": {"calicoNetwork": {"bgp": "Disabled"}}}'
- ```
-
- If you installed Calico using the manifest then BGP is already disabled.
-
-1. Prepare the directory for Kubernetes files on Windows node.
-
- ```powershell
- mkdir c:\k
- ```
-
-1. Copy the Kubernetes kubeconfig file from the control plane node (default location: $HOME/.kube/config) to **c:\k\config**.
-
-1. Download the PowerShell script, **install-calico-windows.ps1**.
-
- ```powershell
- Invoke-WebRequest {{calicoReleasesURL}}/{{releaseTitle}}/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1
- ```
-
-1. Install {{prodnameWindows}} for your datastore using the default parameters or [customized installation parameters](#configure-installation-parameters).
-   The PowerShell script downloads the {{prodnameWindows}} release binary, Kubernetes binaries, and Windows utilities files, configures {{prodnameWindows}}, and starts the Calico service.
-
- **Kubernetes datastore (default)**
-
- ```powershell
- c:\install-calico-windows.ps1 -KubeVersion `
- -ServiceCidr `
- -DNSServerIPs
- ```
-
- **etcd datastore**
-
- ```powershell
- c:\install-calico-windows.ps1 -KubeVersion `
- -Datastore etcdv3 `
- -EtcdEndpoints `
- -EtcdTlsSecretName (default no etcd TLS secret is used) `
- -EtcdKey (default not using TLS) `
- -EtcdCert (default not using TLS) `
- -EtcdCaCert (default not using TLS) `
- -ServiceCidr `
- -DNSServerIPs
- ```
-
- :::note
-
- - You do not need to pass a parameter if the default value of the parameter is correct for your cluster.
- - If your Windows nodes have multiple network adapters, you can configure the one used for VXLAN by editing `VXLAN_ADAPTER` in `{{rootDirWindows}}\config.ps1`, then restarting {{prodnameWindows}}.
-
- :::
-
-1. Verify that the {{prodname}} services are running.
-
- ```powershell
- Get-Service -Name CalicoNode
- Get-Service -Name CalicoFelix
- ```
-
-1. Install and start kubelet/kube-proxy service. Execute following PowerShell script/commands.
-
- ```powershell
- {{rootDirWindows}}\kubernetes\install-kube-services.ps1
- Start-Service -Name kubelet
- Start-Service -Name kube-proxy
- ```
-
-1. Verify kubelet/kube-proxy services are running.
-
- ```powershell
- Get-Service -Name kubelet
- Get-Service -Name kube-proxy
- ```
-
-
-
-
-1. Enable the BGP service on the Windows node (instead of VXLAN).
-   Install the RemoteAccess service using the following PowerShell commands:
-
- ```powershell
- Install-WindowsFeature RemoteAccess
- Install-WindowsFeature RSAT-RemoteAccess-PowerShell
- Install-WindowsFeature Routing
- ```
-
- Then restart the computer:
-
- ```powershell
- Restart-Computer -Force
- ```
-
- before running:
-
- ```powershell
- Install-RemoteAccess -VpnType RoutingOnly
- ```
-
- Sometimes the remote access service fails to start automatically after install. To make sure it is running, execute the following command:
-
- ```powershell
- Start-Service RemoteAccess
- ```
-
-1. Prepare the directory for Kubernetes files on Windows node.
-
- ```powershell
- mkdir c:\k
- ```
-
-1. Copy the Kubernetes kubeconfig file from the control plane node (default location: $HOME/.kube/config) to **c:\k\config**.
-
-1. Download the PowerShell script, **install-calico-windows.ps1**.
-
- ```powershell
- Invoke-WebRequest {{calicoReleasesURL}}/{{releaseTitle}}/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1
- ```
-
-1. Install {{prodnameWindows}} for your datastore using the default parameters or [customized installation parameters](#configure-installation-parameters).
-   The PowerShell script downloads the {{prodnameWindows}} release binary, Kubernetes binaries, and Windows utilities files, configures {{prodnameWindows}}, and starts the Calico service.
-
- You do not need to pass a parameter if the default value of the parameter is correct for your cluster.
-
- **Kubernetes datastore (default)**
-
- ```powershell
- c:\install-calico-windows.ps1 -KubeVersion `
- -ServiceCidr `
- -DNSServerIPs
- ```
-
- **etcd datastore**
-
- ```powershell
- c:\install-calico-windows.ps1 -KubeVersion `
- -Datastore etcdv3 `
- -EtcdEndpoints `
- -EtcdTlsSecretName (default no etcd TLS secret is used) `
- -EtcdKey (default not using TLS) `
- -EtcdCert (default not using TLS) `
- -EtcdCaCert (default not using TLS) `
- -ServiceCidr `
- -DNSServerIPs
- ```
-
- :::note
-
- You do not need to pass a parameter if the default value of the parameter is correct for your cluster.
-
- :::
-
-1. Verify that the {{prodname}} services are running.
-
- ```powershell
- Get-Service -Name CalicoNode
- Get-Service -Name CalicoFelix
- ```
-
-1. Install and start kubelet/kube-proxy service. Execute following PowerShell script/commands.
-
- ```powershell
- {{rootDirWindows}}\kubernetes\install-kube-services.ps1
- Start-Service -Name kubelet
- Start-Service -Name kube-proxy
- ```
-
-1. Verify kubelet/kube-proxy services are running.
-
- ```powershell
- Get-Service -Name kubelet
- Get-Service -Name kube-proxy
- ```
-
-
-
-
-1. Ensure that a Windows instance role has permissions to get `namespaces` and to get `secrets` in the calico-system namespace (or kube-system namespace if you are using a non operator-managed {{prodname}} installation.)
- One way to do this is by running the following commands to install the required permissions temporarily. Before running the commands, replace `` with the Kubernetes node name of the EKS Windows node, for example `ip-192-168-42-34.us-west-2.compute.internal`.
- :::note
-
- If you are using a non operator-managed {{prodname}} installation, replace the namespace `calico-system` with `kube-system` in the commands below.
-
- :::
-
- ```bash
- kubectl create clusterrole calico-install-ns --verb=get --resource=namespace
- kubectl create clusterrolebinding calico-install-ns --clusterrole=calico-install-ns --user=system:node:
- kubectl create role calico-install-token --verb=get,list --resource=secrets --namespace calico-system
- kubectl create rolebinding calico-install-token --role=calico-install-token --user=system:node: --namespace calico-system
- ```
-
-1. Prepare the directory for Kubernetes files on the Windows node.
-
- ```powershell
- mkdir c:\k
- ```
-
-1. [Install kubectl](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html#windows) and move the kubectl binary to **c:\k**.
-
-1. Download the PowerShell script, **install-calico-windows.ps1**.
-
- ```powershell
- Invoke-WebRequest {{calicoReleasesURL}}/{{releaseTitle}}/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1
- ```
-
-1. Install {{prodnameWindows}} for your datastore using the default parameters or [customized installation parameters](#configure-installation-parameters).
-   The PowerShell script downloads the {{prodnameWindows}} release binary, Kubernetes binaries, and Windows utilities files, configures {{prodnameWindows}}, and starts the Calico service.
-
- You do not need to pass a parameter if the default value of the parameter is correct for your cluster.
-
- **Kubernetes datastore (default)**
-
- ```powershell
- c:\install-calico-windows.ps1 -ServiceCidr `
- -DNSServerIPs
- ```
-
- **etcd datastore**
-
- ```powershell
- c:\install-calico-windows.ps1 -Datastore etcdv3 `
- -EtcdEndpoints `
- -ServiceCidr `
- -DNSServerIPs
- ```
-
- :::note
-
- You do not need to pass a parameter if the default value of the parameter is correct for your cluster.
-
- :::
-
-1. Verify that the {{prodname}} services are running.
-
- ```powershell
- Get-Service -Name CalicoNode
- Get-Service -Name CalicoFelix
- ```
-
-1. Verify kubelet and kube-proxy services are running.
-
- ```powershell
- Get-Service -Name kubelet
- Get-Service -Name kube-proxy
- ```
-
-1. If you installed temporary RBAC in the first step, remove the permissions by running the following commands.
- :::note
-
- If you are using a non operator-managed {{prodname}} installation, replace the namespace `calico-system` with `kube-system` in the commands below.
-
- :::
-
- ```bash
- kubectl delete clusterrolebinding calico-install-ns
- kubectl delete clusterrole calico-install-ns
- kubectl delete rolebinding calico-install-token --namespace calico-system
- kubectl delete role calico-install-token --namespace calico-system
- ```
-
-
-
-
-1. Register the `EnableAKSWindowsCalico` feature flag with the following Azure CLI command.
-
- ```bash
- az feature register --namespace "Microsoft.ContainerService" --name "EnableAKSWindowsCalico"
- ```
-
-1. Wait until the `EnableAKSWindowsCalico` feature flag is registered successfully. Execute the following CLI command to get the current status of the feature.
-
- ```bash
- az feature list -o table --query "[?contains(name, 'Microsoft.ContainerService/EnableAKSWindowsCalico')].{Name:name,State:properties.state}"
- ```
-
-   Move to the next step if the output from the above command matches the following output.
-
- ```bash
- Name State
- ------------------------------------------------- ----------
- Microsoft.ContainerService/EnableAKSWindowsCalico Registered
- ```
-
-1. Refresh the registration of the `Microsoft.ContainerService` resource provider. Run the following command.
-
- ```bash
- az provider register --namespace Microsoft.ContainerService
- ```
-
-1. Create the AKS cluster with `network-plugin` set to `azure` and `network-policy` set to `calico`. For example:
-
- ```bash
- az group create -n $your-resource-group -l $your-region
- az aks create \
- --resource-group $your-resource-group \
- --name $your-cluster-name \
- --node-count 1 \
- --enable-addons monitoring \
- --windows-admin-username azureuser \
- --windows-admin-password $your-windows-password \
- --kubernetes-version 1.20.2 \
- --vm-set-type VirtualMachineScaleSets \
- --service-principal $your-service-principal \
- --client-secret $your-client-secret \
- --load-balancer-sku standard \
- --node-vm-size Standard_D2s_v3 \
- --network-plugin azure \
- --network-policy calico
- ```
-
-1. Add a Windows node pool. For example,
-
- ```bash
- az aks nodepool add \
- --resource-group $your-resource-group \
- --cluster-name $your-cluster-name \
- --os-type Windows \
- --name $your-windows-node-pool-name \
- --node-count 1 \
- --kubernetes-version 1.20.2 \
- --node-vm-size Standard_D2s_v3
- ```
-
-
-
-
-Congratulations! You now have a Kubernetes cluster with {{prodnameWindows}} and a Linux control node.
-
-### Install {{prodnameWindows}} using HostProcess containers
-
-:::note
-
-This installation method is a tech preview and should not be used for production clusters. Upgrades from a tech preview version of this
-installation method to the GA version might not be seamless.
-
-:::
-
-With Kubernetes v1.22, a new Windows container type called "HostProcess containers" can run directly on the host with access to the host network namespace,
-storage, and devices. With this feature, {{prodnameWindows}} can now be installed and managed using Kubernetes resources such as Daemonsets and ConfigMaps,
-instead of needing to configure and install {{prodnameWindows}} manually on each node. Using this installation method, the {{prodnameWindows}}
-services are no longer registered on the host. Instead, the services are run directly within HostProcess containers.
-
-#### Requirements
-
-In addition to the [{{prodnameWindows}} requirements](kubernetes/requirements.mdx),
-this installation method has [additional requirements](https://kubernetes.io/docs/tasks/configure-pod-container/create-hostprocess-pod/):
-
-- Kubernetes v1.22+
-- HostProcess containers support enabled: for v1.22, HostProcess containers support has to be [enabled](https://kubernetes.io/docs/tasks/configure-pod-container/create-hostprocess-pod/#before-you-begin-version-check). For Kubernetes v1.23+, HostProcess containers are enabled by default.
-- ContainerD 1.6.0+
-- The Windows nodes have joined the cluster
-
-To install ContainerD on the Windows node and configure the ContainerD service:
-
-```powershell
-Invoke-WebRequest {{tmpScriptsURL}}/scripts/Install-Containerd.ps1 -OutFile c:\Install-Containerd.ps1
-c:\Install-Containerd.ps1 -ContainerDVersion 1.6.2 -CNIConfigPath "c:/etc/cni/net.d" -CNIBinPath "c:/opt/cni/bin"
-```
-
-If you have an existing {{prodnameWindows}} installation using the manual method, your Windows nodes may have already joined the cluster.
-
-To join a Windows node to a cluster provisioned with kubeadm:
-
-- Install kubeadm and kubelet binaries and install the kubelet service
-
-```powershell
-Invoke-WebRequest {{tmpScriptsURL}}/scripts/PrepareNode.ps1 -OutFile c:\PrepareNode.ps1
-c:\PrepareNode.ps1 -KubernetesVersion v1.23.4 -ContainerRuntime ContainerD
-```
-
-- Run kubeadm on a control plane host and copy the join command
-
-```bash
-kubeadm token create --print-join-command
-```
-
-- Edit the join command by appending `--cri-socket "npipe:////./pipe/containerd-containerd"` and update the kubeadm.exe path to `c:\k\kubeadm.exe`.
- An example join command:
-
-```
-c:\k\kubeadm.exe join 172.16.101.139:6443 --token v8w2jt.jmc45acn85dbll1e --discovery-token-ca-cert-hash sha256:d0b7040a704d8deb805ba1f29f56bbc7cea8af6aafa78137a9338a62831739b4 --cri-socket "npipe:////./pipe/containerd-containerd"
-```
-
-- Run the join command on the Windows node. Shortly after it completes successfully, the Windows node will appear in `kubectl get nodes`.
- The new node's status will be NotReady since the Calico CNI has not yet been installed.
-
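-For example, to list only the Windows nodes while you wait for the join to complete:
-
-```bash
-kubectl get nodes -o wide -l kubernetes.io/os=windows
-```
-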
-#### Migrating from {{prodnameWindows}} installed manually
-
-If your Windows nodes already have {{prodnameWindows}} installed using the manual installation method, you can continue this quickstart guide
-to migrate to a manifest-based installation. This installation process will uninstall any existing {{prodnameWindows}} services and overwrite the {{prodnameWindows}} installation files with those included in the `calico/windows` image. If `kubelet` and `kube-proxy` were installed using `{{rootDirWindows}}\kubernetes\install-kube-services.ps1`, those services
-will be updated in place and remain installed; if they were running, they are restarted as part of the update.
-
-:::note
-
-Before proceeding, take note of the configuration parameters in `{{rootDirWindows}}\config.ps1`. These configuration parameters will be needed during the install.
-
-:::
-
-#### Install
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-Congratulations! You now have a Kubernetes cluster with {{prodnameWindows}} and a Linux control node.
-
-### Configure installation parameters
-
-| **Parameter Name** | **Description** | **Default** |
-| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------ |
-| KubeVersion | Version of Kubernetes binaries to use. If the value is an empty string (default), the {{prodnameWindows}} installation script does not download Kubernetes binaries or run the Kubernetes services. Use the default for managed public cloud. | "" |
-| DownloadOnly | Download without installing {{prodnameWindows}}. Set to `yes` to manually install and configure {{prodnameWindows}}. For example, {{prodnameWindows}} the hard way. | no |
-| Datastore | {{prodnameWindows}} datastore type [`kubernetes` or `etcdv3`] for reading endpoints and policy information. | kubernetes |
-| EtcdEndpoints | Comma-delimited list of etcd connection endpoints. Example: `http://127.0.0.1:2379,http://127.0.0.2:2379`. Valid only if `Datastore` is set to `etcdv3`. | "" |
-| EtcdTlsSecretName | Name of a secret in `calico-system` namespace which contains `etcd-key`, `etcd-cert`, `etcd-ca` for automatically configuring TLS. Either use this or parameters `EtcdKey`, `EtcdCert`, `EtcdCaCert` below. Note: If you are not using operator-based installation, use namespace `kube-system`. | "" |
-| EtcdKey | Path to key file for etcd TLS connection. | "" |
-| EtcdCert | Path to certificate file for etcd TLS connection. | "" |
-| EtcdCaCert | Path to CA certificate file for etcd TLS connection. | "" |
-| ServiceCidr | Service IP range of the Kubernetes cluster. Not required for most managed Kubernetes clusters. Note: EKS has a non-default value. | 10.96.0.0/12 |
-| DNSServerIPs | Comma-delimited list of DNS service IPs used by Windows pod. Not required for most managed Kubernetes clusters. Note: EKS has a non-default value. | 10.96.0.10 |
-| CalicoBackend | Calico backend network type (`vxlan` or `bgp`). If the value is an empty string (default), backend network type is auto detected. | "" |
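-
-If you are unsure which values to use for `ServiceCidr` and `DNSServerIPs`, you can often read them from the cluster itself. A sketch (the DNS Service is usually still named `kube-dns` even on CoreDNS clusters, and the service CIDR appears in the API server flags on kubeadm-style clusters):
-
-```bash
-kubectl get svc kube-dns -n kube-system -o jsonpath='{.spec.clusterIP}{"\n"}'
-kubectl cluster-info dump | grep -m 1 -- --service-cluster-ip-range
-```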
-
-## Next steps
-
-You can now use the {{prodname}} Linux-based docs site for your documentation. Before you continue, review the [Limitations and known issues](limitations.mdx) to understand the features (and sections of documentation) that do not apply to Windows.
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/troubleshoot.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/troubleshoot.mdx
deleted file mode 100644
index 8d9d6e3bf5..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/troubleshoot.mdx
+++ /dev/null
@@ -1,202 +0,0 @@
----
-description: Help for troubleshooting Calico for Windows issues in this release of Calico.
----
-
-# Troubleshoot Calico for Windows
-
-## Useful troubleshooting commands
-
-**Examine the HNS network(s)**
-
-When using the {{prodname}} CNI plugin, each {{prodname}} IPAM block (or the single podCIDR in host-local IPAM mode) is represented as an HNS l2bridge network. Use the following command to inspect the networks.
-
-```powershell
-ipmo -DisableNameChecking {{rootDirWindows}}\libs\hns\hns.psm1
-Get-HNSNetwork
-```
-
-**Examine pod endpoints**
-
-Use the following command to view the HNS endpoints on the system. There should be one HNS endpoint per pod networked with {{prodname}}:
-
-```powershell
-ipmo -DisableNameChecking {{rootDirWindows}}\libs\hns\hns.psm1
-Get-HNSEndpoint
-```
-
-## Troubleshoot
-
-### kubectl exec fails with timeout for Windows pods
-
-Ensure that the Windows firewall (and any network firewall or cloud security group) allows traffic to the host on port 10250.
-
-### kubelet fails to register, complains of node not found in logs
-
-This can be caused by a mismatch between a cloud provider (such as the AWS cloud provider) and the configuration of the node. For example, the AWS cloud provider requires that the node has a nodename matching its private domain name.
-
-### After initializing {{prodnameWindows}}, AWS metadata server is no longer reachable
-
-This is a known Windows issue that Microsoft is working on. The route to the metadata server is lost when the vSwitch is created. As a workaround, manually add the route back by running:
-
-```powershell
-New-NetRoute -DestinationPrefix 169.254.169.254/32 -InterfaceIndex <interface-index>
-```
-
-Where `<interface-index>` is the index of the "vEthernet (Ethernet 2)" device as shown by
-
-```powershell
-Get-NetAdapter
-```
-
-### Installation stalls at "Waiting for {{prodname}} initialization to finish"
-
-This can be caused by the Windows execution protection feature. Exit the install using Ctrl-C, unblock the scripts, run `uninstall-calico.ps1`, and then run `install-calico.ps1`.
-
-### Windows Server 2019 insider preview: after rebooting a node, {{prodnameWindows}} fails to start, the tigera-node.err.log file contains errors
-
-After rebooting the Windows node, pods fail to schedule, and the kubelet log has CNI errors like "timed out waiting for interface matching the management IP (169.254.57.5) of network" (where the IP address may vary but will always be a 169.254.x.x address). To work around the issue:
-
-- Stop and then start {{prodnameWindows}} using the `stop-calico.ps1` and `start-calico.ps1` scripts
-- Sometimes the HNS network picks up a temporary self-assigned address at start-of-day and it does not get refreshed when the correct IP becomes known. Rebooting the node a second time often resolves the problem.
-
-### Invoke-Webrequest fails with TLS errors
-
-The error, "The request was aborted: Could not create SSL/TLS secure channel", often means that Windows does not support TLS v1.2 (which is required by many websites) by default. To enable TLS v1.2, run the following command:
-
-```powershell
-[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
-```
-
-### Kubelet persistently fails to contact the API server
-
-If kubelet is already running when {{prodnameWindows}} is installed, the creation of the container vSwitch can cause kubelet to lose its connection and then persistently fail to reconnect to the API server.
-To resolve this, restart kubelet after installing {{prodnameWindows}}.
-
-### No connectivity between pods on Linux and Windows nodes
-
-If using AWS, check that the source/dest check is disabled on the interfaces assigned to your nodes. This allows nodes to forward traffic on behalf of local pods.
-In AWS, the "Change Source/Dest. Check" option can be found on the Actions menu for a selected network interface.
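-
-If you prefer the AWS CLI, the same change can be made per instance; a sketch, where the instance ID is a placeholder for one of your node instances:
-
-```bash
-aws ec2 modify-instance-attribute --instance-id <your-node-instance-id> --no-source-dest-check
-```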
-
-If using {{prodname}} networking, check that the {{prodname}} IP pool you are using has IPIP mode disabled (set to "Never"). IPIP is not supported on Windows. To check the IP pool, you can use `calicoctl`:
-
-```bash
-calicoctl get ippool -o yaml
-```
-
-Example output of an IP pool with IPIP disabled:
-
-```yaml
-apiVersion: projectcalico.org/v3
-items:
- - apiVersion: projectcalico.org/v3
- kind: IPPool
- metadata:
- creationTimestamp: 2018-11-26T15:37:39Z
- name: default-ipv4-ippool
- resourceVersion: '172'
- uid: 34db7316-f191-11e8-ad7d-02850eebe6c4
- spec:
- blockSize: 26
- cidr: 192.168.0.0/16
- disabled: true
- ipipMode: Never
- natOutgoing: true
-```
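-
-If the pool shows `ipipMode: Always`, one way to turn IPIP off is with `calicoctl patch`; a sketch, assuming the default pool name shown above and that unencapsulated (BGP) routing suits your network:
-
-```bash
-calicoctl patch ippool default-ipv4-ippool --patch '{"spec": {"ipipMode": "Never"}}'
-```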
-
-### Felix log error: "Failed to create datastore client"
-
-If the error includes `loading config file ""`, follow the instructions in
-[Set environment variables](kubernetes/standard.mdx#install-calico-and-kubernetes-on-windows-nodes) to update the `KUBECONFIG` environment variable to the path of your kubeconfig file.
-
-### Felix starts, but does not output logs
-
-By default, Felix waits to connect to the datastore before logging (in case the datastore configuration intentionally disables logging). To start logging at startup, update the [FELIX_LOGSEVERITYSCREEN environment variable](../../../reference/felix/configuration.mdx#general-configuration) to "info" or "debug" level.
-
-### {{prodname}} BGP mode: connectivity issues, Linux calico/node pods report unready
-
-Check the detailed health output that shows which health check failed:
-
-```
-kubectl describe pod <calico-node-pod-name> -n calico-system
-```
-
-:::note
-
-Use namespace `kube-system` instead of `calico-system` if your Calico installation is not operator-managed.
-
-:::
-
-If the health check reports a BGP peer failure, check that the IP address of the peer is either an
-expected IP of a node or the IP of an external BGP peer. If the IP of the failed peering is a Windows node:
-
-- Check that the node is up and reachable over IP
-- Check that the RemoteAccess service is installed and running:
-
- ```powershell
- Get-Service | ? Name -EQ RemoteAccess
- ```
-
-- Check the logs for the confd service in the configured log directory for errors
- (default {{rootDirWindows}}\logs).
-
-**Examine BGP state on a Windows host**
-
-The Windows BGP router exposes its configuration and state as PowerShell commandlets.
-
-**To show BGP peers**:
-
-```powershell
-Get-BgpPeer
-```
-
-Example output:
-
-```
-PeerName LocalIPAddress PeerIPAddress PeerASN OperationMode ConnectivityStatus
--------- -------------- ------------- ------- ------------- ------------------
-Mesh_172_20_48_43 172.20.55.101 172.20.48.43 64512 Mixed Connected
-Mesh_172_20_51_170 172.20.55.101 172.20.51.170 64512 Mixed Connected
-Mesh_172_20_54_3 172.20.55.101 172.20.54.3 64512 Mixed Connected
-Mesh_172_20_58_252 172.20.55.101 172.20.58.252 64512 Mixed Connected
-```
-
-For an established peering, the ConnectivityStatus column should be "Connected".
-
-**To examine routes learned from other hosts**:
-
-```powershell
-Get-BgpRouteInformation -Type all
-```
-
-Example output:
-
-```
-DestinationNetwork NextHop LearnedFromPeer State LocalPref MED
------------------- ------- --------------- ----- --------- ---
-10.243.128.192/26 172.20.58.252 Mesh_172_20_58_252 Best 100
-10.244.115.128/26 172.20.48.43 Mesh_172_20_48_43 Best 100
-10.244.128.192/26 172.20.58.252 Mesh_172_20_58_252 Best 100
-```
-
-For active routes, the State should show as "Best". Routes with State equal to "Unresolved"
-indicate that the BGP router could not resolve a route to the peer and the route will not be
-used. This can occur if the networking state changes after the BGP router is started;
-restarting the BGP router may solve the problem:
-
-```powershell
-Restart-Service RemoteAccess
-```
-
-To see the routes being exported by this host:
-
-```powershell
-(Get-BgpCustomRoute).Network
-```
-
-Example output:
-
-```
-10.243.214.152/29
-10.243.214.160/29
-10.243.214.168/29
-10.244.42.0/26
-```
diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/index.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/index.mdx
deleted file mode 100644
index 5057d063c4..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/openstack/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Install Calico networking and network policy for OpenStack.
-hide_table_of_contents: true
----
-
-# OpenStack
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/devstack.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/installation/devstack.mdx
deleted file mode 100644
index bc8d09ba65..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/devstack.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-description: Quickstart to show connectivity between DevStack and Calico.
----
-
-# DevStack
-
-The networking-calico project provides a DevStack plugin. The following
-instructions explain how to set up a single or multiple node DevStack/{{prodname}}
-system, and then how to see {{prodname}} connectivity in action.
-
-:::note
-
-networking-calico includes a
-[shell script](https://github.com/projectcalico/calico/blob/master/networking-calico/devstack/bootstrap.sh)
-that implements the following setup instructions. You are welcome to use it,
-but we recommend that you read the following description first anyway, and
-briefly review the script's code, so that you will understand what the
-script does.
-
-:::
-
-1. Download DevStack as usual.
-
-2. Add to your DevStack local.conf file:
-
- ```bash
- enable_plugin networking-calico https://github.com/projectcalico/networking-calico
- ```
-
-3. Run `stack.sh`.
-
-4. Create a shared, routed network with an IPv4 subnet:
-
- ```bash
- . openrc admin admin
- neutron net-create --shared --provider:network_type local calico
- neutron subnet-create --gateway 10.65.0.1 --enable-dhcp --ip-version 4 --name calico-v4 calico 10.65.0/24
- ```
-
-5. Ensure that IPv4 and IPv6 forwarding are enabled:
-
- ```bash
- sysctl -w net.ipv4.ip_forward=1
- sysctl -w net.ipv6.conf.all.forwarding=1
- ```
-
-## Multi-node setup
-
-This plugin also supports additional compute-only nodes. So, in the system as
-a whole, there can then be:
-
-- one node with both controller and compute function
-
-- any number of additional nodes with just compute function.
-
-The first node should be prepared as described above. Then, for each
-additional compute node:
-
-- set and export the SERVICE_HOST environment variable, to the name of the
- controller node; for example:
-
- ```bash
- export SERVICE_HOST=calico-vm18
- ```
-
-- follow the steps above, except for the network and subnet creations, to
- install and set up DevStack with {{prodname}} on that node.
-
-## Demonstrating {{prodname}} connectivity
-
-Then, to see {{prodname}} connectivity in action:
-
-1. Launch instances attached to the 'calico' network.
-
-2. Use `ip route` to observe per-instance routes created by the {{prodname}} agent (see the example after these steps).
-
-3. Log into each instance (e.g. through Horizon console) and verify that it can
- ping the others.
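-
-For example, on a compute node the per-instance routes for the subnet created above appear as host routes; a sketch of what to look for:
-
-```bash
-ip route | grep '10.65.0.'
-```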
diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/index.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/installation/index.mdx
deleted file mode 100644
index 3e4dfe81db..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Install Calico on OpenStack
-hide_table_of_contents: true
----
-
-# Install Calico on OpenStack
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/overview.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/installation/overview.mdx
deleted file mode 100644
index 87b00ebd33..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/overview.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
----
-description: Choose a method for installing Calico for OpenStack.
----
-
-# Calico on OpenStack
-
-There are many ways to try out {{prodname}} with OpenStack. We provide instructions for the
-following methods:
-
-- [Package-based install for Ubuntu](ubuntu.mdx)
-
-- [RPM-based install for Red Hat Enterprise Linux (RHEL)](redhat.mdx)
-
-- [DevStack](devstack.mdx) (for development purposes only—not recommended for production!)
-
-In all cases except DevStack, you will need at least two servers to
-get going: one OpenStack controller and one or more OpenStack compute nodes.
diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/redhat.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/installation/redhat.mdx
deleted file mode 100644
index da2e8ba686..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/redhat.mdx
+++ /dev/null
@@ -1,287 +0,0 @@
----
-description: Install Calico on OpenStack, Red Hat Enterprise Linux nodes.
----
-
-# Red Hat Enterprise Linux
-
-import OpenStackEtcdAuth from '@site/calico_versioned_docs/version-3.25/_includes/content/_openstack-etcd-auth.mdx';
-
-These instructions will take you through a first-time install of
-{{prodname}}. If you are upgrading an existing system, please see
-[Upgrading {{prodname}} on OpenStack](../../../operations/upgrading/openstack-upgrade.mdx)
-instead.
-
-There are two sections to the install: adding {{prodname}} to OpenStack
-control nodes, and adding {{prodname}} to OpenStack compute nodes. Follow
-the [Common steps](#common-steps) on each node before moving on to the specific
-instructions in the control and compute sections. If you want to create a
-combined control and compute node, work through all three sections.
-
-## Before you begin
-
-- Ensure that you meet the [requirements](../requirements.mdx).
-- Confirm that you have SSH access to and root privileges on one or more Red Hat
- Enterprise Linux (RHEL) hosts.
-- Make sure you have working DNS between the RHEL hosts (use `/etc/hosts` if you
- don't have DNS on your network).
-- [Install OpenStack with Neutron and ML2 networking](http://docs.openstack.org)
- on the RHEL hosts.
-
-## Common steps
-
-Some steps need to be taken on all machines being installed with {{prodname}}.
-These steps are detailed in this section.
-
-1. [Add the EPEL repository](https://fedoraproject.org/wiki/EPEL). You may
- have already added this to install OpenStack.
-
-1. Configure the {{prodname}} repository by writing its repository definition to
-   `/etc/yum.repos.d/calico.repo`.
-
-1. Edit `/etc/neutron/neutron.conf`. Add a `[calico]` section with
-   the following content, where `<ip>` is the IP address of the etcd
-   server.
-
-   ```
-   [calico]
-   etcd_host = <ip>
-   ```
-
-## Control node install
-
-On each control node, perform the following steps:
-
-1. Delete all configured OpenStack state, in particular any instances,
- routers, subnets and networks (in that order) created by the install
- process referenced above. You can do this using the web dashboard or
- at the command line.
-
- :::tip
-
- The Admin and Project sections of the web dashboard both
- have subsections for networks and routers. Some networks may
- need to be deleted from the Admin section.
-
- :::
-
- :::caution
-
- The {{prodname}} install will fail if incompatible state is
- left around.
-
- :::
-
-1. Edit `/etc/neutron/neutron.conf`. In the `[DEFAULT]` section, find
-   the line beginning with `core_plugin` and change it to read `core_plugin = calico`.
-   Also remove any existing setting for `service_plugins` (see the scripted example
-   after these steps).
-
-1. Install the `calico-control` package:
-
- ```
- yum install -y calico-control
- ```
-
-1. Restart the neutron server process:
-
- ```
- service neutron-server restart
- ```
-
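-If you prefer to script the `neutron.conf` changes from the edit step above, the `crudini` utility (widely packaged for RHEL and commonly present on OpenStack hosts) can apply them non-interactively; a sketch, assuming `crudini` is installed:
-
-```bash
-crudini --set /etc/neutron/neutron.conf DEFAULT core_plugin calico
-crudini --del /etc/neutron/neutron.conf DEFAULT service_plugins
-```
-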
-## Compute node install
-
-On each compute node, perform the following steps:
-
-1. Open `/etc/nova/nova.conf` and remove the line from the `[DEFAULT]`
- section that reads:
-
- ```conf
- linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
- ```
-
- Remove the lines from the `[neutron]` section setting
- `service_neutron_metadata_proxy` or `service_metadata_proxy` to
- `True`, if there are any. Additionally, if there is a line setting
- `metadata_proxy_shared_secret`, comment that line out as well.
-
- Restart nova compute.
-
- ```bash
- service openstack-nova-compute restart
- ```
-
- If this node is also a controller, additionally restart nova-api.
-
- ```bash
- service openstack-nova-api restart
- ```
-
-1. If they're running, stop the Open vSwitch services.
-
- ```bash
- service neutron-openvswitch-agent stop
- service openvswitch stop
- ```
-
- Then, prevent the services running if you reboot.
-
- ```bash
- chkconfig openvswitch off
- chkconfig neutron-openvswitch-agent off
- ```
-
- Then, on your control node, run the following command to find the
- agents that you just stopped.
-
- ```
- neutron agent-list
- ```
-
-   For each agent, delete it with the following command on your
-   control node, replacing `<agent-id>` with the ID of the agent.
-
-   ```
-   neutron agent-delete <agent-id>
-   ```
-
-1. Install Neutron infrastructure code on the compute host.
-
- ```
- yum install -y openstack-neutron
- ```
-
-1. Edit `/etc/neutron/neutron.conf`. In the `[oslo_concurrency]` section,
- ensure that the `lock_path` variable is uncommented and set as follows.
-
- ```
- # Directory to use for lock files. For security, the specified directory should
- # only be writable by the user running the processes that need locking.
- # Defaults to environment variable OSLO_LOCK_PATH. If external locks are used,
- # a lock path must be set.
- lock_path = $state_path/lock
- ```
-
-1. Stop and disable the Neutron DHCP agent, and install the
- {{prodname}} DHCP agent (which uses etcd, allowing it to scale to higher
- numbers of hosts).
-
- ```
- service neutron-dhcp-agent stop
- chkconfig neutron-dhcp-agent off
- yum install -y calico-dhcp-agent
- ```
-
-1. Stop and disable any other routing/bridging agents such as the L3
- routing agent or the Linux bridging agent. These conflict
- with {{prodname}}.
-
- ```bash
- service neutron-l3-agent stop
- chkconfig neutron-l3-agent off
- ```
-
- Repeat for bridging agent and any others.
-
-1. If this node is not a controller, install and start the Nova
- Metadata API. This step is not required on combined compute and
- controller nodes.
-
- ```bash
- yum install -y openstack-nova-api
- service openstack-nova-metadata-api restart
- chkconfig openstack-nova-metadata-api on
- ```
-
-1. Install the BIRD BGP client.
-
- ```bash
- yum install -y bird bird6
- ```
-
-1. Install the `calico-compute` package.
-
- ```bash
- yum install -y calico-compute
- ```
-
-1. Configure BIRD. By default {{prodname}} assumes that you will deploy a
- route reflector to avoid the need for a full BGP mesh. To this end, it
- includes configuration scripts to prepare a BIRD config file with a single
- peering to the route reflector. If that's correct for your network, you can
- run either or both of the following commands.
-
- For IPv4 connectivity between compute hosts:
-
- ```bash
- calico-gen-bird-conf.sh
- ```
-
- And/or for IPv6 connectivity between compute hosts:
-
- ```bash
- calico-gen-bird6-conf.sh
- ```
-
-   You will also need to [configure your route reflector to allow connections from the compute node as a route reflector client](../../../networking/configuring/bgp.mdx).
-
- If you _are_ configuring a full BGP mesh you need to handle the BGP
- configuration appropriately on each compute host. The scripts above can be
- used to generate a sample configuration for BIRD, by replacing the
-   `<route reflector IP>` with the IP of one other compute host—this will
- generate the configuration for a single peer connection, which you can
- duplicate and update for each compute host in your mesh.
-
- To maintain connectivity between VMs if BIRD crashes or is upgraded,
- configure BIRD graceful restart. Edit the systemd unit file
- /usr/lib/systemd/system/bird.service (and bird6.service for IPv6):
-
- - Add `-R` to the end of the `ExecStart` line.
- - Add `KillSignal=SIGKILL` as a new line in the `[Service]` section.
- - Run `systemctl daemon-reload` to tell systemd to reread that file.
-
- Ensure that BIRD (and/or BIRD 6 for IPv6) is running and starts on
- reboot.
-
- ```bash
- service bird restart
- service bird6 restart
- chkconfig bird on
- chkconfig bird6 on
- ```
-
-1. Create `/etc/calico/felix.cfg` with the following content, where `<ip>` is the IP
-   address of the etcd server.
-
-   ```conf
-   [global]
-   DatastoreType = etcdv3
-   EtcdAddr = <ip>:2379
-   ```
-
-1. Restart the Felix service.
-
- ```
- service calico-felix restart
- ```
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/ubuntu.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/installation/ubuntu.mdx
deleted file mode 100644
index bc040a0a06..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/ubuntu.mdx
+++ /dev/null
@@ -1,270 +0,0 @@
----
-description: Install Calico on OpenStack, Ubuntu nodes.
----
-
-# Ubuntu
-
-import OpenStackEtcdAuth from '@site/calico_versioned_docs/version-3.25/_includes/content/_openstack-etcd-auth.mdx';
-
-These instructions will take you through a first-time install of
-{{prodname}}. If you are upgrading an existing system, please see
-[Upgrading {{prodname}} on OpenStack](../../../operations/upgrading/openstack-upgrade.mdx)
-instead.
-
-There are two sections to the install: adding {{prodname}} to OpenStack
-control nodes, and adding {{prodname}} to OpenStack compute nodes. Follow
-the [Common steps](#common-steps) on each node before moving on to the specific
-instructions in the control and compute sections. If you want to create a
-combined control and compute node, work through all three sections.
-
-## Before you begin
-
-- Ensure that you meet the [requirements](../requirements.mdx).
-- Confirm that you have SSH access to and root privileges on one or more Ubuntu hosts
- (your OpenStack compute or control nodes).
-- [Install OpenStack with Neutron and ML2 networking](http://docs.openstack.org)
- on the Ubuntu hosts.
-
-## Common steps
-
-Some steps need to be taken on all machines being installed with {{prodname}}.
-These steps are detailed in this section.
-
-1. Configure APT to use the {{prodname}} PPA:
-
- ```bash
- add-apt-repository ppa:project-calico/{{ ppa_repo_name }}
- ```
-
-1. Add the official BIRD PPA. This PPA contains
- fixes to BIRD that are not yet available in Ubuntu. To add the PPA, run:
-
- ```bash
- add-apt-repository ppa:cz.nic-labs/bird
- ```
-
- :::tip
-
- If the above command fails with error
- `'ascii' codec can't decode byte`, try running the command with a
- UTF-8 enabled locale:
- `LC_ALL=en_US.UTF-8 add-apt-repository ppa:cz.nic-labs/bird`.
-
- :::
-
-1. Update your package manager on each machine:
-
- ```bash
- apt-get update
- ```
-
-1. Install the `etcd3-gateway` Python package. A current copy of that code is
- needed by {{prodname}}'s OpenStack driver and DHCP agent, so you
- should install it with `pip3`.
-
- ```bash
- apt-get install -y python3-pip
- pip3 install git+https://github.com/dims/etcd3-gateway.git@5a3157a122368c2314c7a961f61722e47355f981
- ```
-
-1. Edit `/etc/neutron/neutron.conf`. Add a `[calico]` section with
-   the following content, where `<ip>` is the IP address of the etcd
-   server.
-
-   ```
-   [calico]
-   etcd_host = <ip>
-   ```
-
-## Control node install
-
-On each control node, perform the following steps.
-
-1. Delete all configured OpenStack state, in particular any instances,
- routers, subnets and networks (in that order) created by the install
- process referenced above. You can do this using the web dashboard or
- at the command line.
-
- :::tip
-
- The Admin and Project sections of the web dashboard both
- have subsections for networks and routers. Some networks may
- need to be deleted from the Admin section.
-
- :::
-
- :::caution
-
- The {{prodname}} install will fail if incompatible state is
- left around.
-
- :::
-
-1. Run `apt-get upgrade` and `apt-get dist-upgrade`. These commands
- bring in {{prodname}}-specific updates to the OpenStack packages and
- to `dnsmasq`.
-
-1. Edit `/etc/neutron/neutron.conf`. In the `[DEFAULT]` section, find
- the line beginning with `core_plugin`, and change it to read `core_plugin = calico`. Also remove any existing setting for `service_plugins`.
-
-1. Install the `calico-control` package:
-
- ```bash
- apt-get install -y calico-control
- ```
-
-1. Restart the Neutron server process:
-
- ```bash
- service neutron-server restart
- ```
-
-## Compute node install
-
-On each compute node, perform the following steps:
-
-1. Open `/etc/nova/nova.conf` and remove the line from the `[DEFAULT]`
- section that reads:
-
- ```bash
- linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
- ```
-
- Remove the lines from the `[neutron]` section setting
- `service_neutron_metadata_proxy` or `service_metadata_proxy` to
- `True`, if there are any.
-
- Restart nova compute.
-
- ```bash
- service nova-compute restart
- ```
-
-1. If they're running, stop the Open vSwitch services:
-
- ```bash
- service openvswitch-switch stop
- service neutron-plugin-openvswitch-agent stop
- ```
-
- Then, prevent the services running if you reboot:
-
- ```bash
- sh -c "echo 'manual' > /etc/init/openvswitch-switch.override"
- sh -c "echo 'manual' > /etc/init/openvswitch-force-reload-kmod.override"
- sh -c "echo 'manual' > /etc/init/neutron-plugin-openvswitch-agent.override"
- ```
-
- Then, on your control node, run the following command to find the
- agents that you just stopped:
-
- ```bash
- neutron agent-list
- ```
-
-   For each agent, delete it with the following command on your
-   control node, replacing `<agent-id>` with the ID of the agent:
-
-   ```bash
-   neutron agent-delete <agent-id>
-   ```
-
-1. Install some extra packages:
-
- ```bash
- apt-get install -y neutron-common neutron-dhcp-agent nova-api-metadata
- ```
-
-1. Run `apt-get upgrade` and `apt-get dist-upgrade`. These commands
- bring in {{prodname}}-specific updates to the OpenStack packages and
- to `dnsmasq`.
-
-1. Edit `/etc/neutron/neutron.conf`. In the `[oslo_concurrency]` section,
- ensure that the `lock_path` variable is uncommented and set as follows.
-
- ```
- # Directory to use for lock files. For security, the specified directory should
- # only be writable by the user running the processes that need locking.
- # Defaults to environment variable OSLO_LOCK_PATH. If external locks are used,
- # a lock path must be set.
- lock_path = $state_path/lock
- ```
-
-1. Install the {{prodname}} DHCP agent (which uses etcd, allowing
- it to scale to higher numbers of hosts) and disable the Neutron-provided
- one:
-
- ```
- service neutron-dhcp-agent stop
- echo manual | tee /etc/init/neutron-dhcp-agent.override
- apt-get install -y calico-dhcp-agent
- ```
-
-1. Install the `calico-compute` package:
-
- ```bash
- apt-get install -y calico-compute
- ```
-
- This step may prompt you to save your iptables rules to make them
- persistent on restart -- hit yes.
-
-1. Configure BIRD. By default {{prodname}} assumes that you will deploy a
- route reflector to avoid the need for a full BGP mesh. To this end, it
- includes configuration scripts to prepare a BIRD config file with a single
- peering to the route reflector. If that's correct for your network, you can
- run either or both of the following commands.
-
- For IPv4 connectivity between compute hosts:
-
- ```bash
- calico-gen-bird-conf.sh
- ```
-
- And/or for IPv6 connectivity between compute hosts:
-
- ```bash
- calico-gen-bird6-conf.sh
- ```
-
-   You will also need to [configure your route reflector to allow connections from the compute node as a route reflector client](../../../networking/configuring/bgp.mdx).
-
- If you _are_ configuring a full BGP mesh you need to handle the BGP
- configuration appropriately on each compute host. The scripts above can be
- used to generate a sample configuration for BIRD, by replacing the
-   `<route reflector IP>` with the IP of one other compute host -- this will
- generate the configuration for a single peer connection, which you can
- duplicate and update for each compute host in your mesh.
-
- To maintain connectivity between VMs if BIRD crashes or is upgraded,
- configure BIRD graceful restart:
-
- - Add `-R` to `BIRD_ARGS` in /etc/bird/envvars (you may need to
- uncomment this option).
- - Edit the upstart jobs /etc/init/bird.conf and bird6.conf (if
- you're using IPv6), and add the following script to it.
-
- ```bash
- pre-stop script
- PID=`status bird | egrep -oi '([0-9]+)$' | head -n1`
- kill -9 $PID
- end script
- ```
-
-1. Create `/etc/calico/felix.cfg` with the following content, where `<ip>` is the IP
-   address of the etcd server.
-
-   ```conf
-   [global]
-   DatastoreType = etcdv3
-   EtcdAddr = <ip>:2379
-   ```
-
-1. Restart the Felix service.
-
- ```bash
- service calico-felix restart
- ```
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/verification.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/installation/verification.mdx
deleted file mode 100644
index 6d88e6718a..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/verification.mdx
+++ /dev/null
@@ -1,175 +0,0 @@
----
-description: Quick steps to test that your Calico-based OpenStack deployment is running correctly.
----
-
-# Verify your deployment
-
-This document takes you through the steps you can perform to verify that
-a {{prodname}}-based OpenStack deployment is running correctly.
-
-## Prerequisites
-
-This document requires you have the following things:
-
-- SSH access to the nodes in your {{prodname}}-based OpenStack deployment.
-- Access to an administrator account on your {{prodname}}-based
- OpenStack deployment.
-
-## Procedure
-
-Begin by creating several instances on your OpenStack deployment using
-your administrator account. Confirm that these instances all launch and
-correctly obtain IP addresses.
-
-You'll want to make sure that your new instances are evenly striped
-across your hypervisors. On your control node, run:
-
-```bash
-nova list --fields host
-```
-
-Confirm that there is an even spread across your compute nodes. If there
-isn't, it's likely that an error has happened in either nova or {{prodname}}
-on the affected compute nodes. Check the logs on those nodes for more
-detail, and report the problem on the mailing list.
-
-Now, SSH into one of your compute nodes. We're going to verify that the
-FIB on the compute node has been correctly populated by {{prodname}}. To do
-that, run the `route` command. You'll get output something like this:
-
-```
-Kernel IP routing table
-Destination Gateway Genmask Flags Metric Ref Use Iface
-default net-vl401-hsrp- 0.0.0.0 UG 0 0 0 eth0
-10.65.0.0 * 255.255.255.0 U 0 0 0 ns-b1163e65-42
-10.65.0.103 npt06.datcon.co 255.255.255.255 UGH 0 0 0 eth0
-10.65.0.104 npt09.datcon.co 255.255.255.255 UGH 0 0 0 eth0
-10.65.0.105 * 255.255.255.255 UH 0 0 0 tap242f8163-08
-10.65.0.106 npt09.datcon.co 255.255.255.255 UGH 0 0 0 eth0
-10.65.0.107 npt07.datcon.co 255.255.255.255 UGH 0 0 0 eth0
-10.65.0.108 npt08.datcon.co 255.255.255.255 UGH 0 0 0 eth0
-10.65.0.109 npt07.datcon.co 255.255.255.255 UGH 0 0 0 eth0
-10.65.0.110 npt06.datcon.co 255.255.255.255 UGH 0 0 0 eth0
-10.65.0.111 npt08.datcon.co 255.255.255.255 UGH 0 0 0 eth0
-10.65.0.112 * 255.255.255.255 UH 0 0 0 tap3b561211-dd
-link-local * 255.255.0.0 U 1000 0 0 eth0
-172.18.192.0 * 255.255.255.0 U 0 0 0 eth0
-```
-
-You should expect to see one route for each of the VM IP addresses in this
-table. For VMs on other compute nodes, you should see that compute
-node's IP address (or domain name) as the `gateway`. For VMs on this
-compute node, you should see `*` as the `gateway`, and the tap interface
-for that VM in the `Iface` field. As long as routes are present to all
-VMs, the FIB has been configured correctly. If any VMs are missing from
-the routing table, you'll want to verify the state of the BGP
-connection(s) from the compute node hosting those VMs.
-
-Having confirmed the FIB is present and correct, open the console for
-one of the VM instances you just created. Confirm that the machine has
-external connectivity by pinging `google.com` (or any other host you are
-confident is routable and that will respond to pings). Additionally,
-confirm it has internal connectivity by pinging the other instances
-you've created (by IP).
-
-If all of these tests behave correctly, your {{prodname}}-based OpenStack
-deployment is in good shape.
-
-## Troubleshooting
-
-If you find that none of the advice below solves your problems, please
-use our diagnostics gathering script to generate diagnostics, and then
-raise a GitHub issue against our repository. To generate the diags, run:
-
-```bash
-/usr/bin/calico-diags
-```
-
-### VMs cannot DHCP
-
-This can happen if your iptables is configured to have a default DROP
-behaviour on the INPUT or FORWARD chains. You can test this by running
-`iptables -L -t filter` and checking the output. You should see
-something that looks a bit like this:
-
-```
-Chain INPUT (policy ACCEPT)
-target prot opt source destination
-ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED
-ACCEPT icmp -- anywhere anywhere
-ACCEPT all -- anywhere anywhere
-ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh
-REJECT all -- anywhere anywhere reject-with icmp-host-prohibited
-
-Chain FORWARD (policy ACCEPT)
-target prot opt source destination
-REJECT all -- anywhere anywhere reject-with icmp-host-prohibited
-
-Chain OUTPUT (policy ACCEPT)
-target prot opt source destination
-```
-
-The important sections are `Chain INPUT` and `Chain FORWARD`. Each of
-those needs to have a policy of `ACCEPT`. In some systems, this policy
-may be set to `DROP`. To change it, run `iptables -P <chain> ACCEPT`,
-replacing `<chain>` with either `INPUT` or `FORWARD`.
-
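-For example, to set both chains back to an `ACCEPT` policy:
-
-```bash
-iptables -P INPUT ACCEPT
-iptables -P FORWARD ACCEPT
-```
-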
-Note that doing this may be considered a security risk in some networks.
-A future {{prodname}} enhancement will remove the requirement to perform this
-step.
-
-### Routes are missing in the FIB
-
-If routes to some VMs aren't present when you run `route`, this suggests
-that your BGP sessions are not functioning correctly. Your BGP daemon
-should have either an interactive console or a log. Open the relevant
-one and check that all of your BGP sessions have come up appropriately
-and are replicating routes. If you're using a full mesh configuration,
-confirm that you have configured BGP sessions with _all_ other {{prodname}}
-nodes.
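-
-If your deployment uses BIRD (as in the install guides), its command-line client is a convenient way to check session state; a sketch:
-
-```bash
-birdc show protocols all    # use birdc6 for the IPv6 BIRD daemon
-```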
-
-### VMs Cannot Ping Non-VM IPs
-
-Assuming all the routes are present in the FIB (see above), this most
-commonly happens because the gateway is not configured with routes to
-the VM IP addresses. To get full {{prodname}} functionality the gateway should
-also be a BGP peer of the compute nodes (or the route reflector).
-
-Confirm that your gateway has routes to the VMs. Assuming it does, make
-sure that your gateway is also advertising those routes to its external
-peers. It may do this using eBGP, but it may also be using some other
-routing protocol.
-
-### VMs Cannot Ping Other VMs
-
-Before continuing, confirm that the two VMs are in security groups that
-allow inbound traffic from each other (or are both in the same security
-group which allows inbound traffic from itself). Traffic will not be
-routed between VMs that do not allow inbound traffic from each other.
-
-Assuming that the security group configuration is correct, confirm that
-the machines hosting each of the VMs (potentially the same machine) have
-routes to both VMs. If they do not, check out the troubleshooting
-section [above](#routes-are-missing-in-the-fib).
-
-### Web UI Shows Error Boxes Saying "Error: Unable to get quota info" and/or "Error: Unable to get volume limit"
-
-This is likely a problem encountered with mapping devices in `cinder`,
-OpenStack's logical volume management component. Many of these can be
-resolved by restarting `cinder`.
-
-```bash
-service cinder-volume restart
-service cinder-scheduler restart
-service cinder-api restart
-```
-
-### Cannot create instances, error log says "could not open /dev/net/tun: Operation not permitted"
-
-This is caused by having not restarted libvirt after you add lines to
-the end of `/etc/libvirt/qemu.conf`. This can be fixed by either
-rebooting your entire system or running:
-
-```bash
-service libvirt-bin restart
-```
diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/overview.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/overview.mdx
deleted file mode 100644
index c483931df3..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/openstack/overview.mdx
+++ /dev/null
@@ -1,42 +0,0 @@
----
-description: Review the Calico components used in an OpenStack deployment.
----
-
-# Calico for OpenStack
-
-{{prodname}}'s integration with OpenStack consists of the following pieces.
-
-- etcd, providing a distributed key/value database that is accessible from all
- compute hosts and Neutron servers.
-
-- Felix (the {{prodname}} agent) running on each compute host. Felix reads
- information from etcd that specifies workloads and their properties (IP
- addresses, security etc.), and implements that connectivity and security for
- them. Felix also reports its own agent status, and the programming status
- for each workload, through etcd.
-
-- BIRD, also running on each compute host, to propagate local workload routes
- to other compute hosts and infrastructure routers.
-
-- The {{prodname}} driver for Neutron, that runs as part of the Neutron server on
- each machine where the Neutron server runs. (There can be just one Neutron
- server, but typically there are more, to provide higher availability.) This
- driver handles OpenStack network, subnet, instance and security operations
- and translates them into equivalent etcd data for Felix to implement. It
- also reads the agent and per-port status information that Felix writes into
- etcd, and reports this into the Neutron DB.
-
-- The {{prodname}} DHCP agent, running on each compute host, that configures and
- launches Dnsmasq instances to provide DHCP for the locally hosted workloads.
- Architecturally this fills the same role as the reference Neutron DHCP agent;
- the key difference is that it gets its information from Etcd instead of by
- RPC from the Neutron server, as we have found this to be more scalable.
-
-The etcd, Felix, and BIRD pieces are the same as in other {{prodname}} integrations,
-and so independent of OpenStack. The {{prodname}} Neutron driver and DHCP agent are
-specific to OpenStack, and are provided by the [networking-calico](https://github.com/projectcalico/networking-calico/) project.
-
-From an OpenStack point of view, networking-calico is just one of many possible
-Neutron drivers that provide connectivity between instances (VMs) as specified
-by the Neutron API. Refer to [{{prodname}}'s interpretation of Neutron API calls](../../networking/openstack/neutron-api.mdx) for more detail about the
-parts of the Neutron API that networking-calico provides.
diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/requirements.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/requirements.mdx
deleted file mode 100644
index b87debe924..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/openstack/requirements.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
----
-description: Requirements for installing Calico on OpenStack nodes.
----
-
-# System requirements
-
-
-
-## OpenStack requirements
-
-The Calico Neutron driver is written in Python 3 and so requires an OpenStack release that
-runs with Python 3. Subject to that, we aim to develop and maintain the Neutron driver
-for {{prodname}} (networking-calico) so that its master code works with OpenStack
-master or any previous Python 3 release, on any operating system, independently of the
-deployment mechanism that is used to install it.
-
-However, we recommend using OpenStack Ussuri or later, and our active support and testing
-of {{prodname}} {{version}} with OpenStack is with Ussuri.
-
-
diff --git a/calico_versioned_docs/version-3.25/network-policy/adopt-zero-trust.mdx b/calico_versioned_docs/version-3.25/network-policy/adopt-zero-trust.mdx
deleted file mode 100644
index 0cad41fbe2..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/adopt-zero-trust.mdx
+++ /dev/null
@@ -1,296 +0,0 @@
----
-description: Best practices to adopt a zero trust network model to secure workloads and hosts. Learn 5 key requirements to control network access for cloud-native strategy.
----
-
-# Adopt a zero trust network model for security
-
-## Big picture
-
-Adopting a zero trust network model is best practice for securing workloads and hosts in your cloud-native strategy.
-
-## Value
-
-Zero Trust Networks are resilient even when attackers manage to breach applications or infrastructure. They make it hard for attackers to move laterally, and reconnaissance activities easier to spot.
-
-Organizations that embrace the change control model in this How-To will be able to tightly secure their network without imposing a drag on innovation in their applications. Security teams can be enablers of business value, not roadblocks.
-
-## Concepts
-
-### The network is always hostile
-
-**Zero Trust Networking** is an approach to network security that is unified by the principle that the network is always assumed to be hostile. This is in direct contrast to perimeter and “segmentation” approaches that focus on separating the world into trusted and untrusted network segments.
-
-Why assume the network is hostile? In many attack scenarios, it is.
-
-- Attackers may compromise “trusted” parts of your network infrastructure: routers, switches, links, etc.
-- Deliberate or accidental misconfiguration can route sensitive traffic over untrusted networks, like the public Internet.
-- Other endpoints on a “trusted” network may be compromised: your application may share a network with thousands of other servers, tens of thousands of other containers, thousands of personal laptops, phones, etc.
-
-Major breaches typically start as a minor compromise of as little as a single component, but attackers then use the network to move laterally toward high value targets: your company’s or customers’ data. In a zone or perimeter model, attackers can move freely inside the perimeter or zone after they have compromised a single endpoint. A Zero Trust Network is resilient to this threat because it enforces strong, cryptographic authentication and access control on each and every network connection.
-
-### Requirements of a Zero Trust Network
-
-Zero Trust Networks rely on network access controls with specific requirements:
-
-**Requirement 1:** All network connections are subject to enforcement (not just those that cross zone boundaries).
-
-**Requirement 2**: Establishing the identity of a remote endpoint is always based on multiple criteria including strong cryptographic proofs of identity. In particular, network-level identifiers like IP address and port are not sufficient on their own as they can be spoofed by a hostile network.
-
-**Requirement 3**: All expected and allowed network flows are explicitly allowed. Any connection not explicitly allowed is denied.
-
-**Requirement 4**: Compromised workloads must not be able to circumvent policy enforcement.
-
-**Requirement 5**: Many Zero Trust Networks also rely on encryption of network traffic to prevent disclosure of sensitive data to hostile entities snooping network traffic. This is not an absolute requirement if private data are not exchanged over the network, but to fit the criteria of a Zero Trust Network, encryption must be used on every network connection if it is required at all. A Zero Trust Network does not distinguish between trusted and untrusted network links or paths. Also note that even when not using encryption for data privacy, cryptographic proofs of authenticity are still used to establish identity.
-
-### How {{prodname}} and Istio implement Zero Trust Network requirements
-
-{{prodname}} works in concert with the Istio service mesh to implement all you need to build a Zero Trust Network in your Kubernetes cluster.
-
-#### Multiple enforcement points
-
-When operating with Istio, incoming requests to your workloads traverse two distinct enforcement points:
-
-1. The host Linux kernel. {{prodname}} policy is enforced in the Linux kernel using iptables at L3-L4.
-1. The Envoy proxy. {{prodname}} policy is enforced in the Envoy proxy at L3-7, with requests being cryptographically authenticated. A lightweight policy decision sidecar called Dikastes assists Envoy in this enforcement.
-
-These multiple enforcement points establish the identity of the remote endpoint based on multiple criteria (Requirement 2). The host Linux kernel enforcement protects your workloads even if the workload pod is compromised and the Envoy proxy bypassed (Requirement 4).
-
-#### {{prodname}} policy store
-
-The policies in the {{prodname}} data store encode the allow-list of allowed flows (Requirement 3).
-
-{{prodname}} network policy is designed to be flexible to fit many different security paradigms, so it can express, for example, both Zero Trust Network-style allow-lists as well as legacy paradigms like zones. You can even layer both of these approaches on top of one another without creating a maintenance mess by composing multiple policy documents.
-
-The How To section of this document explains how to write policy specifically in the style of Zero Trust Networks. Conceptually, you will begin by denying all network flows by default, then add rules that allow the specific expected flows that make up your application. When you finish, only legitimate application flows are allowed and all others are denied.
-
-#### {{prodname}} control plane
-
-The {{prodname}} control plane handles distributing all the policy information from the {{prodname}} data store to each enforcement point, ensuring that all network connections are subject to enforcement (Requirement 4). It translates the high-level declarative policy into the detailed enforcement attributes that change as applications scale up and down to meet demand, and evolve as developers modify them.
-
-#### Istio Citadel Identity System
-
-In {{prodname}} and Istio, workload identities are based on Kubernetes Service Accounts. An Istio component called Citadel handles minting cryptographic keys for each Service Account to prove its identity on the network (Requirement 2) and encrypt traffic (Requirement 5). This allows the Zero Trust Network to be resilient even if attackers compromise network infrastructure like routers or links.
-
-## How to
-
-This section explains how to establish a Zero Trust Network using {{prodname}} and Istio. It is written from the perspective of platform and security engineers, but should also be useful for individual developers looking to understand the process.
-
-Building and maintaining a Zero Trust Network is the job of an entire application delivery organization, that is, everyone involved in delivering a networked application to its end users. This includes:
-
-- Developers, DevOps, and Operators
-- Platform Engineers
-- Network Engineers
-- Security Engineers and Security Operatives
-
-In particular, the view that developers build applications which they hand off to others to figure out how to secure is incompatible with a Zero Trust Network strategy. To function correctly, a Zero Trust Network needs to be configured with detailed information about expected flows---information that developers are in a unique position to know.
-
-At a high level, you will undertake the following steps to establish a Zero Trust Network:
-
-1. Install {{prodname}}.
-1. Install Istio and enable {{prodname}} integration.
-1. Establish workload identity by using Service Accounts.
-1. Write initial allow-list policies for each service.
-
-After your Zero Trust Network is established, you will need to maintain it.
-
-### Install {{prodname}}
-
-Follow the [install instructions](../getting-started/kubernetes/index.mdx) to get {{prodname}} software running in your cluster.
-
-### Install Istio and enable {{prodname}} integration
-
-Follow the instructions to [Enable application layer policy](istio/app-layer-policy.mdx).
-
-The instructions include a “demo” install of Istio for quickly testing out functionality. For a production installation to support a Zero Trust Network, you should instead follow the official Istio install instructions. Be sure to enable mutually authenticated TLS (mTLS) in your install options by setting **global.mtls.enabled** to **true**.
-
-### Establish workload identity by using Service Accounts
-
-Our eventual goal is to write access control policy that authorizes individual expected network flows. We want these flows to be scoped as tightly as practical. In a {{prodname}} Zero Trust Network, the cryptographic identities are Kubernetes Service Accounts. Istio handles crypto-key management for you so that each workload can assert its Service Account identity in a secure manner.
-
-You have some flexibility in how you assign identities for the purpose of your Zero Trust Network policy. The right balance for most people is to use a unique identity for each Kubernetes Service in your application (or Deployment if you have workloads that don’t accept any incoming connections). Assigning identity to entire applications or namespaces is probably too coarse, since applications usually consist of multiple services (or dozens of microservices) with different actual access needs.
-
-You should assign unique identities to microservices even if you happen to know that they access the same things. Your policy will be more readable if the identities correspond to logical components of the application. You can grant them the same permissions easily, and if in the future they need different permissions it will be easier to handle.
-
-After you decide on the set of identities you require, create the Kubernetes Service Accounts, then modify your application configuration so that each Deployment, ReplicaSet, StatefulSet, etc. uses the correct Service Account.
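-
-For example, using the microblog application that appears later in this section, a minimal sketch of creating an identity for the post service and pointing its Deployment at it (the names are illustrative):
-
-```bash
-kubectl create serviceaccount post -n microblog
-kubectl patch deployment post -n microblog \
-  -p '{"spec":{"template":{"spec":{"serviceAccountName":"post"}}}}'
-```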
-
-### Write initial allow-list policies for each service
-
-The final step to establishing your Zero Trust Network is to write the policies for each service in your network. The [Application Layer Policy Tutorial](istio/enforce-policy-istio.mdx) gives an overview of setting up policies that allow traffic based on Service Account identity.
-
-For each service you will:
-
-1. Determine the full set of other identities that should access it.
-1. Add rules to allow each of those flows.
-
-After a pod is selected by at least one policy, any traffic not explicitly allowed is denied. This implements the Zero Trust Network paradigm of an explicit allow-list of expected flows.
-
-### Determine the full set of identities that should access each service
-
-There are several approaches to determining the set of identities that should access a service. Work with the developers of the application to generate this list and ensure it is correct. One approach is to create a flow diagram of your entire application. A flow diagram is a kind of graph where each identity is a node, and each expected flow is an edge.
-
-Let’s look at an example application.
-
-![zero-trust-app](/img/calico/zero-trust-app.png)
-
-In this example, requests from end-users all flow through a service called api, where they can trigger calls to other services in the backend. These in turn can call other services. Each arrow in this diagram represents an expected flow, and if two services do not have a connecting arrow, they are not expected to have any network communication. For example, the only services that call the post service are api and search.
-
-For simple applications, especially if they are maintained by a single team, the developers will probably be able to just write down this flow graph from memory or with a quick look at the application code.
-
-If this is difficult to do from memory, you have several options.
-
-1. Run the application in a test environment with policy enabled.
- a. Look at service logs to see what connectivity has broken.
- b. Add rules that allow those flows and iterate until the application functions normally.
- c. Move on to the next service and repeat.
-1. Collect flow logs from a running instance of your application. You can use Calico Enterprise for this purpose, or the Kiali dashboard that comes with Istio.
- a. Process the flow logs to determine the set of flows.
- b. Review the logged flows and add rules for each expected flow.
-1. Use Calico Enterprise for policy, and put it into logging-only mode.
- a. In this mode “denied” connections are logged instead of dropped.
- b. Review the “denied” logs and add rules for each expected flow.
-
-When determining flows from a running application instance, be sure to review each rule you add with application developers to determine if it is legitimate and expected. The last thing you want is for a breach-in-progress to be enshrined as expected flows in policy!
-
-### Write policies with allow rules for each flow
-
-After you have the set of expected flows for each service, you are ready to write {{prodname}} network policy to allow-list those flows and deny all others.
-
-Returning to the example flow graph in the previous section, let’s write the policy for the post service. For the purpose of this example, assume all the services in the application run in a Kubernetes Namespace called microblog. We see from the flow graph that the post service is accessed by the api and search services.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
- name: post-allow-list
- namespace: microblog
-spec:
- selector: svc == 'post'
- types:
- - Ingress
- ingress:
- - action: Allow
- source:
- serviceAccounts:
- names: ['api', 'search']
- namespaceSelector: app == 'microblog'
- protocol: TCP
- destination:
- ports:
- - 8080
-```
-
-Things to notice in this example:
-
-- **Namespace**
-
- Create a {{prodname}} NetworkPolicy in the same **namespace** as the service for the allow-list (microblog).
-
- ```yaml
- metadata:
- name: post-allow-list
- namespace: microblog
- ```
-
-- **Selectors**
-
- The selector controls which pods the policy applies to. It should be the same selector used to define the Kubernetes Service.
-
- ```yaml
- spec:
- selector: svc == 'post'
- ```
-
-- **Service account by name**
-
- In the **source:** rule, allow **api** and **search** by name. An alternative to identifying service accounts by name alone is to also qualify them with a **namespaceSelector** (next example).
-
- ```yaml
- source:
- serviceAccounts:
- names: ['api', 'search']
- ```
-
-- **Service account by namespaceSelector**
-
- Service Accounts are uniquely identified by name and namespace. Use a **namespaceSelector** to fully qualify the Service Accounts you are allowing, so that if the same names are used in other namespaces they will not be granted access to the service.
-
- ```yaml
- source:
- serviceAccounts:
- names: ['api', 'search']
- namespaceSelector: app == 'microblog'
- ```
-
-- **Rules**
-
- Scope your rules as tightly as possible. In this case we are allowing connection only on TCP port 8080.
-
- ```yaml
- destination:
- ports:
- - 8080
- ```
-
-The above example lists the identities that need access to the post service by name. This style of allow-list works best when the developers responsible for a service have explicit knowledge of who needs access to their service.
-
-However, some development teams don’t explicitly know who needs access to their service, and don’t need to know. The service might be very generic and used by lots of different applications across the organization---for example: a logging service. Instead of listing the Service Accounts that get access to the service explicitly one-by-one, you can use a label selector that selects on Service Accounts.
-
-In the following example, we have changed the **serviceAccounts** clause. Instead of names, we use a label selector: Service Accounts carrying the label **svc-post: access** are granted access to the post service.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
- name: post-allow-list
- namespace: microblog
-spec:
- selector: svc == 'post'
- types:
- - Ingress
- ingress:
- - action: Allow
- source:
- serviceAccounts:
- selector: svc-post == 'access'
- namespaceSelector: app == 'microblog'
- protocol: TCP
- destination:
- ports:
- - 8080
-```
-
-Define labels that indicate permission to access services in the cluster. Then, modify the Service Accounts for each identity that needs access. In this example, we would add the label **svc-post: access** to the **api** and **search** Service Accounts, as shown below.
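-
-For example, a sketch of adding the label with `kubectl`; it assumes the **api** and **search** Service Accounts already exist in the `microblog` namespace:
-
-```bash
-# Grant the api and search identities access to the post service
-kubectl label serviceaccount -n microblog api search svc-post=access
-```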
-
-Whether you choose to explicitly name the Service Accounts or use a label selector is up to you, and you can make a different choice for different services. Using explicit names works best for services that have a small number of clients, or when you want the service owner to be involved in the decision to allow something new to access the service. If some other team wants to get access to the service, they call up the owner of the service and ask them to grant access. In contrast, using labels is good when you want more decentralized control. The service owner defines the labels that grant access to the service and trusts the other development teams to label their Service Accounts when they need access.
-
-### Maintain your zero trust network
-
-The allow-list policies are tightly scoped to the exact expected flows in the applications running in the Zero Trust Network. If these applications are under active development the expected flows will change, and policy, therefore, also needs to change. Maintaining a Zero Trust Network means instituting a change control policy that ensures:
-
-- Policies are up to date with application changes
-- Policies are tightly scoped to expected flows
-- Changes keep up with the pace of application development
-
-It is difficult to overstate how important the last point is. If your change control process cannot handle the volume of changes, or introduces too much latency in deploying new features, your transition to a Zero Trust Network is very likely to fail. Either your senior leadership will choose business expediency and overrule your security concerns, or competitors that can roll out new versions faster will stifle your market share. On the other hand, if your change control process does keep pace with application development, it will bring security value without sacrificing the pace of innovation.
-
-The size of the security team is often relatively small compared with application development and operations teams in most organizations. Fortunately, most application changes will not require changes in security policy, but even a small proportion of changes can lead to a large absolute number when dealing with large application teams. For this reason, it is often not feasible for a member of the security team to make every policy change. A classic complaint in large enterprises is that it takes weeks to change a firewall rule---this is often not because the actual workflow is time consuming but because the security team is swamped with a large backlog.
-
-Therefore, we recommend that the authors of the policy changes be developers/devops (i.e. authorship should “shift left”). This allows your change control process to scale naturally as your applications do. When application authors make changes that require policy changes (say, adding a new microservice), they also make the required policy changes to authorize the network activity associated with it.
-
-Here is a simplified application delivery pipeline flow.
-
-![zero-trust-app](/img/calico/zero-trust-deploy.png)
-
-Developers, DevOps, and/or Operators make changes to applications primarily by making changes to the artifacts at the top of the diagram: the source code and associated deployment configuration. These artifacts are put in source control (e.g. git) and control over changes to the running applications is managed as commits to this source repository. In a Kubernetes environment, the deployment configuration is typically the objects that appear on the Kubernetes API, such as Services and Deployment manifests.
-
-Include the NetworkPolicy manifests as part of those deployment configuration artifacts. In some organizations, these artifacts are in the same repo as the source code, and in others they reside in a separate repo, but the principle is the same: you manage policy change control as commits to the deployment configuration. This config then works its way through the delivery pipeline and is finally applied to the running Kubernetes cluster.
-
-Your developers will likely require training and support from the security team to get policy correct at first. Many developers are not used to thinking about network security. The logical controls expressed in network policy are simple compared with the flexibility they have in source code, so the primary support they will need from you is around the proper security mindset and principles of Zero Trust Networks. You can apply a default deny policy in your cluster to ensure that developers can’t simply forget to apply their own allow-listed policy.
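-
-A minimal sketch of such a default-deny policy; the excluded namespaces are illustrative, so adjust them to whatever system namespaces your cluster relies on:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: default-deny
-spec:
-  # Select pods in all namespaces except the listed system namespaces.
-  namespaceSelector: has(projectcalico.org/name) && projectcalico.org/name not in {"kube-system", "calico-system"}
-  types:
-    - Ingress
-    - Egress
-```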
-
-You may wish to review every security policy change request (aka pull request in git workflows) at first. If you do, then be sure you have time allotted, and consider rolling out Zero Trust Network policies incrementally, one application or service at a time. As development teams gain confidence you can pull back and have them do their own reviews. Security professionals can do spot checks on change requests or entire policies to ensure quality remains high in the long term.
-
-## Additional resources
-
-- [Protect hosts](hosts/protect-hosts.mdx)
-- [Global network policy](../reference/resources/globalnetworkpolicy.mdx)
-- [Network policy](../reference/resources/networkpolicy.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/comms/crypto-auth.mdx b/calico_versioned_docs/version-3.25/network-policy/comms/crypto-auth.mdx
deleted file mode 100644
index 480ed2fabf..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/comms/crypto-auth.mdx
+++ /dev/null
@@ -1,144 +0,0 @@
----
-description: Enable TLS authentication and encryption for various Calico components.
----
-
-# Configure encryption and authentication to secure Calico components
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Connections from {{prodname}} components to etcd
-
-
-
-
-Operator based installations do not require communication with etcd, so this section does not apply.
-
-
-
-
-If you are using the etcd datastore, we recommend enabling mutual TLS authentication on
-its connections as follows.
-
-- [Configure etcd](https://coreos.com/etcd/docs/latest/op-guide/security.html) to encrypt its
- communications with TLS and require clients to present certificates signed by the etcd certificate
- authority.
-
-- Configure each {{prodname}} component to verify the etcd server's identity and to present
- a certificate to the etcd server that is signed by the etcd certificate authority.
- - [{{nodecontainer}}](../../reference/configure-calico-node.mdx)
- - [`calicoctl`](../../operations/calicoctl/configure/etcd.mdx)
- - [CNI plugin](../../reference/configure-cni-plugins.mdx#etcd-location) (Kubernetes and OpenShift only)
- - [Kubernetes controllers](../../reference/kube-controllers/configuration.mdx#configuring-datastore-access) (Kubernetes and OpenShift only)
- - [Felix](../../reference/felix/configuration.mdx#etcd-datastore-configuration)
- - [Typha](../../reference/typha/configuration.mdx#etcd-datastore-configuration) (often deployed in
- larger Kubernetes deployments)
- - [Neutron plugin or ML2 driver](../../networking/openstack/configuration.mdx#neutron-server-etcneutronneutronconf) (OpenStack only)
- - [DHCP agent](../../networking/openstack/configuration.mdx#neutron-server-etcneutronneutronconf) (OpenStack only)
-
-
-
-
-### Connections from {{prodname}} components to kube-apiserver (Kubernetes and OpenShift)
-
-We recommend enabling TLS on kube-apiserver, as well as the client certificate and JSON web token (JWT)
-authentication modules. This ensures that all of its communications with {{prodname}} components occur
-over TLS. The {{prodname}} components present either an X.509 certificate or a JWT to kube-apiserver
-so that kube-apiserver can verify their identities.
-
-### Connections from Felix to Typha (Kubernetes)
-
-
-
-
-Operator based installations automatically configure mutual TLS authentication on connections from
-Felix to Typha.
-
-
-
-
-We recommend enabling mutual TLS authentication on connections from Felix to Typha.
-To do so, you must provision Typha with a server certificate and Felix with a client
-certificate. Each service will need the private key associated with its certificate.
-In addition, you must configure one of the following.
-
-- **SPIFFE identifiers** (recommended): Generate a [SPIFFE](https://github.com/spiffe/spiffe) identifier for Felix,
- set `ClientURISAN` on Typha to Felix's SPIFFE ID, and include Felix's SPIFFE ID in the `URI SAN` field
- of its certificate. Similarly, generate a [SPIFFE](https://github.com/spiffe/spiffe) identifier for Typha,
- set `TyphaURISAN` on Felix to Typha's SPIFFE ID, and include Typha's SPIFFE ID in the `URI SAN` field
- of its certificate.
-
-- **Common Name identifiers**: Configure `ClientCN` on Typha to the value in the `Common Name` of Felix's
- certificate. Configure `ClientCN` on Felix to the value in the `Common Name` of Typha's
- certificate.
-
-:::tip
-
-If you are migrating from Common Name to SPIFFE identifiers, you can set both values.
-If either matches, the communication succeeds.
-
-:::
-
-Here is an example of how you can secure the Felix-Typha communications in your
-cluster:
-
-1. Choose a certificate authority, or set up your own.
-
-1. Obtain or generate the following leaf certificates, signed by that
- authority, and corresponding keys:
-
- - A certificate for each Felix with Common Name `typha-client` and
- extended key usage `ClientAuth`.
-
- - A certificate for each Typha with Common Name `typha-server` and
- extended key usage `ServerAuth`.
-
-1. Configure each Typha with:
-
- - `CAFile` pointing to the certificate authority certificate
-
- - `ServerCertFile` pointing to that Typha's certificate
-
- - `ServerKeyFile` pointing to that Typha's key
-
- - `ClientCN` set to `typha-client`
-
- - `ClientURISAN` unset.
-
-1. Configure each Felix with:
-
- - `TyphaCAFile` pointing to the Certificate Authority certificate
-
- - `TyphaCertFile` pointing to that Felix's certificate
-
- - `TyphaKeyFile` pointing to that Felix's key
-
- - `TyphaCN` set to `typha-server`
-
- - `TyphaURISAN` unset.
-
-For a [SPIFFE](https://github.com/spiffe/spiffe)-compliant deployment you can
-follow the same procedure as above, except:
-
-1. Choose [SPIFFE Identities](https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#2-spiffe-identity) to represent Felix and Typha.
-
-1. When generating leaf certificates for Felix and Typha, put the relevant
- SPIFFE Identity in the certificate as a URI SAN.
-
-1. Leave `ClientCN` and `TyphaCN` unset.
-
-1. Set Typha's `ClientURISAN` parameter to the SPIFFE Identity for Felix that
- you use in each Felix certificate.
-
-1. Set Felix's `TyphaURISAN` parameter to the SPIFFE Identity for Typha.
-
-For detailed reference information on these parameters, refer to:
-
-- **Typha**: [Felix-Typha TLS configuration](../../reference/typha/configuration.mdx#felix-typha-tls-configuration)
-
-- **Felix**: [Felix-Typha TLS configuration](../../reference/felix/configuration.mdx#felix-typha-tls-configuration)
-
-
-
diff --git a/calico_versioned_docs/version-3.25/network-policy/comms/index.mdx b/calico_versioned_docs/version-3.25/network-policy/comms/index.mdx
deleted file mode 100644
index 24e13343f5..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/comms/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Secure communications for Calico components.
-hide_table_of_contents: true
----
-
-# Secure Calico component communications
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/network-policy/comms/reduce-nodes.mdx b/calico_versioned_docs/version-3.25/network-policy/comms/reduce-nodes.mdx
deleted file mode 100644
index 9028ca8d27..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/comms/reduce-nodes.mdx
+++ /dev/null
@@ -1,83 +0,0 @@
----
-description: Configure the Calico Typha TCP port.
----
-
-# Schedule Typha for scaling to well-known nodes
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Schedule Typha to well-known nodes.
-
-## Value
-
-By scheduling Typha to well-known nodes, you can reduce the number of nodes which expose
-Typha's listen port.
-
-## Concepts
-
-### Typha
-
-Typha is a {{prodname}} component which improves scalability and reduces the impact that
-large clusters may have on the Kubernetes API. Typha must accept connections from the {{prodname}} per-node agents (Felix) on a fixed port.
-
-Because Typha is part of the {{prodname}} bootstrap infrastructure, it must be available before
-pod networking begins, so it uses host networking instead. It opens a listen port on the node it is
-scheduled on. By default, it can be scheduled to any node and listens on TCP port 5473.
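-
-To see which nodes currently run Typha (and therefore expose port 5473), you can list the Typha pods; the namespace depends on how {{prodname}} was installed (for example `calico-system` for operator installs, `kube-system` for manifest installs):
-
-```bash
-kubectl get pods --all-namespaces -l k8s-app=calico-typha -o wide
-```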
-
-## How to
-
-### Tell if you have installed Typha
-
-
-
-
-Operator based installations always include Typha.
-
-
-
-
-Check if the `calico-typha` deployment exists in the `kube-system` namespace.
-
-```
-kubectl get deployment -n kube-system calico-typha
-```
-
-
-
-
-### Schedule Typha to well-known nodes
-
-
-
-
-You can use the Installation API to configure a node affinity for Typha pods. The operator supports both
-`preferredDuringSchedulingIgnoredDuringExecution` and `requiredDuringSchedulingIgnoredDuringExecution` options.
-
-For example, to require the scheduler to place Typha on nodes with the label "typha=allowed":
-
-```yaml
-kind: Installation
-apiVersion: operator.tigera.io/v1
-metadata:
- name: default
-spec:
- typhaAffinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - matchExpressions:
- - key: typha
- operator: In
- values:
- - allowed
-```
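-
-With a `requiredDuringSchedulingIgnoredDuringExecution` affinity like the one above, you also need to label the nodes you want Typha to run on; the node name below is a placeholder:
-
-```bash
-kubectl label node <node-name> typha=allowed
-```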
-
-
-
-
-See [scheduling Typha to well-known nodes](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/).
-
-
-
diff --git a/calico_versioned_docs/version-3.25/network-policy/comms/secure-bgp.mdx b/calico_versioned_docs/version-3.25/network-policy/comms/secure-bgp.mdx
deleted file mode 100644
index 368b00fdf4..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/comms/secure-bgp.mdx
+++ /dev/null
@@ -1,185 +0,0 @@
----
-description: Configure BGP passwords to prevent attackers from injecting false routing information.
----
-
-# Secure BGP sessions
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Use BGP passwords to prevent attackers from injecting false routing information.
-
-## Value
-
-Setting a password on a BGP peering between BGP speakers means that a peering will only
-work when both ends of the peering have the same password. This provides a layer of defense
-against an attacker impersonating an external BGP peer or a workload in the cluster, for
-example to inject malicious routing information into the cluster.
-
-## Concepts
-
-### Password protection on BGP sessions
-
-Password protection is a [standardized](https://tools.ietf.org/html/rfc5925) optional
-feature of BGP sessions. The effect is that the two peers at either end of a BGP session
-can only communicate, and exchange routing information, if they are both configured with
-the same password.
-
-Please note that password use does not cause the data exchange to be _encrypted_. It
-remains relatively easy to _eavesdrop_ on the data exchange, but not to _inject_ false
-information.
-
-### Using Kubernetes secrets to store passwords
-
-In Kubernetes, the Secret resource is designed for holding sensitive information,
-including passwords. Therefore, for this {{prodname}} feature, we use Secrets to
-store BGP passwords.
-
-## How to
-
-To use a password on a BGP peering:
-
-1. Create (or update) a Kubernetes secret in the namespace where {{noderunning}} is
- running, so that it has a key whose value is the desired password. Note the secret
- name and the key name.
-
- :::note
-
- BGP passwords must be 80 characters or fewer. If a
- password longer than that is configured, the BGP sessions with
- that password will fail to be established.
-
- :::
-
-1. Ensure that {{noderunning}} has RBAC permissions to access that secret.
-
-1. Specify the secret and key name on the relevant BGPPeer resource.
-
-### Create or update Kubernetes secret
-
-For example:
-
-```bash
-kubectl create -f - <<EOF
-apiVersion: v1
-kind: Secret
-metadata:
-  name: bgp-secrets
-  namespace: kube-system # use the namespace where {{noderunning}} is running in your cluster
-type: Opaque
-stringData:
-  rr-password: big-long-password # replace with your BGP password (80 characters or fewer)
-EOF
-```
-
-
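-To cover the RBAC step above (ensuring {{noderunning}} can read the secret), a minimal sketch of a Role and RoleBinding might look like the following; the `kube-system` namespace and `calico-node` service account name are assumptions that depend on how {{prodname}} was installed:
-
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: bgp-password-secret-access
-  namespace: kube-system
-rules:
-  - apiGroups: ['']
-    resources: ['secrets']
-    resourceNames: ['bgp-secrets']
-    verbs: ['watch', 'list', 'get']
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: bgp-password-secret-access
-  namespace: kube-system
-subjects:
-  - kind: ServiceAccount
-    name: calico-node
-    namespace: kube-system
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: bgp-password-secret-access
-```
-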
-When [configuring a BGP peer](../../networking/configuring/bgp.mdx),
-include the secret and key name in the specification of the BGPPeer resource, like this:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
- name: bgppeer-global-3040
-spec:
- peerIP: 192.20.30.40
- asNumber: 64567
- password:
- secretKeyRef:
- name: bgp-secrets
- key: rr-password
-```
-
-
-
-
-Include the secret in the default [BGP configuration](../../reference/resources/bgpconfig.mdx)
-similar to the following:
-
-```yaml
-kind: BGPConfiguration
-apiVersion: projectcalico.org/v3
-metadata:
- name: default
-spec:
- logSeverityScreen: Info
- nodeToNodeMeshEnabled: true
- nodeMeshPassword:
- secretKeyRef:
- name: bgp-secrets
- key: rr-password
-```
-
-:::note
-
-The node-to-node mesh must be enabled to set a node-to-node mesh BGP password.
-
-:::
-
-
-
-
-
-## Additional resources
-
-For more detail about the BGPPeer resource, see
-[BGPPeer](../../reference/resources/bgppeer.mdx).
-
-For more on configuring BGP peers, see [configuring BGP peers](../../networking/configuring/bgp.mdx).
diff --git a/calico_versioned_docs/version-3.25/network-policy/comms/secure-metrics.mdx b/calico_versioned_docs/version-3.25/network-policy/comms/secure-metrics.mdx
deleted file mode 100644
index 931a3a0a2f..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/comms/secure-metrics.mdx
+++ /dev/null
@@ -1,512 +0,0 @@
----
-description: Limit access to Calico metric endpoints using network policy.
----
-
-# Secure Calico Prometheus endpoints
-
-## About securing access to {{prodname}}'s metrics endpoints
-
-When using {{prodname}} with Prometheus metrics enabled, we recommend using network policy
-to limit access to {{prodname}}'s metrics endpoints.
-
-## Prerequisites
-
-- {{prodname}} is installed with Prometheus metrics reporting enabled.
-- `calicoctl` is [installed in your PATH and configured to access the data store](../../operations/calicoctl/install.mdx).
-
-## Choosing an approach
-
-This guide provides two example workflows for creating network policies to limit access
-to {{prodname}}'s Prometheus metrics. Choosing an approach depends on your requirements.
-
-- [Using a deny-list approach](#using-a-deny-list-approach)
-
- This approach allows all traffic to your hosts by default and uses {{prodname}} policy to restrict access to
- specific ports, while leaving other host traffic unaffected.
-
-- [Using an allow-list approach](#using-an-allow-list-approach)
-
- This approach denies traffic to and from your hosts by default, and requires that all
- desired communication be explicitly allowed by a network policy. This approach is more secure because
- only explicitly-allowed traffic will get through, but it requires you to know all the ports that should be open on the host.
-
-## Using a deny-list approach
-
-### Overview
-
-The basic process is as follows:
-
-1. Create a default network policy that allows traffic to and from your hosts.
-1. Create host endpoints for each node that you'd like to secure.
-1. Create a network policy that denies unwanted traffic to the {{prodname}} metrics endpoints.
-1. Apply labels to allow access to the Prometheus metrics.
-
-### Example for {{nodecontainer}}
-
-This example shows how to limit access to the {{nodecontainer}} Prometheus metrics endpoints.
-
-1. Create a default network policy to allow host traffic
-
- First, create a default-allow policy. Do this first to avoid a drop in connectivity when adding the host endpoints
- later, since host endpoints with no policy default to deny.
-
- To do this, create a file named `default-host-policy.yaml` with the following contents.
-
- ```yaml
- apiVersion: projectcalico.org/v3
- kind: GlobalNetworkPolicy
- metadata:
- name: default-host
- spec:
- # Select all {{prodname}} nodes.
- selector: running-calico == "true"
- order: 5000
- ingress:
- - action: Allow
- egress:
- - action: Allow
- ```
-
- Then, use `calicoctl` to apply this policy.
-
- ```bash
- calicoctl apply -f default-host-policy.yaml
- ```
-
-1. List the nodes on which {{prodname}} is running with the following command.
-
- ```bash
- calicoctl get nodes
- ```
-
- In this case, we have two nodes in the cluster.
-
- ```
- NAME
- kubeadm-master
- kubeadm-node-0
- ```
-
-1. Create host endpoints for each {{prodname}} node.
-
- Create a file named `host-endpoints.yaml` containing a host endpoint for each node listed
- above. In this example, the contents would look like this.
-
- ```yaml
- apiVersion: projectcalico.org/v3
- kind: HostEndpoint
- metadata:
- name: kubeadm-master.eth0
- labels:
- running-calico: 'true'
- spec:
- node: kubeadm-master
- interfaceName: eth0
- expectedIPs:
- - 10.100.0.15
- ---
- apiVersion: projectcalico.org/v3
- kind: HostEndpoint
- metadata:
- name: kubeadm-node-0.eth0
- labels:
- running-calico: 'true'
- spec:
- node: kubeadm-node-0
- interfaceName: eth0
- expectedIPs:
- - 10.100.0.16
- ```
-
- In this file, replace `eth0` with the desired interface name on each node, and populate the
- `expectedIPs` section with the IP addresses on that interface.
-
- Note the use of a label to indicate that this host endpoint is running {{prodname}}. The
- label matches the selector of the network policy created in step 1.
-
- Then, use `calicoctl` to apply the host endpoints with the following command.
-
- ```bash
- calicoctl apply -f host-endpoints.yaml
- ```
-
-1. Create a network policy that restricts access to the {{nodecontainer}} Prometheus metrics port.
-
- Now let's create a network policy that limits access to the Prometheus metrics port such that
- only endpoints with the label `calico-prometheus-access: true` can access the metrics.
-
- To do this, create a file named `calico-prometheus-policy.yaml` with the following contents.
-
- ```yaml
- # Allow traffic to Prometheus only from sources that are
- # labeled as such, but don't impact any other traffic.
- apiVersion: projectcalico.org/v3
- kind: GlobalNetworkPolicy
- metadata:
- name: restrict-calico-node-prometheus
- spec:
- # Select all {{prodname}} nodes.
- selector: running-calico == "true"
- order: 500
- types:
- - Ingress
- ingress:
- # Deny anything that tries to access the Prometheus port
- # but that doesn't match the necessary selector.
- - action: Deny
- protocol: TCP
- source:
- notSelector: calico-prometheus-access == "true"
- destination:
- ports:
- - 9091
- ```
-
- This policy selects all endpoints that have the label `running-calico: true`, and enforces a single ingress deny rule.
- The ingress rule denies traffic to port 9091 unless the source of traffic has the label `calico-prometheus-access: true`. This blocks access
- from all {{prodname}} workload endpoints, host endpoints, and global network sets that do not have the label, as well as from any
- other network endpoint unknown to {{prodname}}.
-
- Then, use `calicoctl` to apply this policy.
-
- ```bash
- calicoctl apply -f calico-prometheus-policy.yaml
- ```
-
-1. Apply labels to any endpoints that should have access to the metrics.
-
- At this point, only endpoints that have the label `calico-prometheus-access: true` can reach
- {{prodname}}'s Prometheus metrics endpoints on each node. To grant access, simply add this label to the
- desired endpoints.
-
- For example, to allow access to a Kubernetes pod you can run the following command.
-
- ```bash
- kubectl label pod my-prometheus-pod calico-prometheus-access=true
- ```
-
- If you would like to grant access to a specific IP network, you
- can create a [global network set](../../reference/resources/globalnetworkset.mdx) using `calicoctl`.
-
- For example, you might want to grant access to your management subnets.
-
- ```yaml
- apiVersion: projectcalico.org/v3
- kind: GlobalNetworkSet
- metadata:
- name: calico-prometheus-set
- labels:
- calico-prometheus-access: 'true'
- spec:
- nets:
- - 172.15.0.0/24
- - 172.101.0.0/24
- ```
-
-### Additional steps for Typha deployments
-
-If your {{prodname}} installation uses the Kubernetes API datastore and has more than 50 nodes, it is likely
-that you have installed Typha. This section shows how to use an additional network policy to secure the Typha
-Prometheus endpoints.
-
-After following the steps above, create a file named `typha-prometheus-policy.yaml` with the following contents.
-
-```yaml
-# Allow traffic to Prometheus only from sources that are
-# labeled as such, but don't impact any other traffic.
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: restrict-calico-node-prometheus
-spec:
- # Select all {{prodname}} nodes.
- selector: running-calico == "true"
- order: 500
- types:
- - Ingress
- ingress:
- # Deny anything that tries to access the Prometheus port
- # but that doesn't match the necessary selector.
- - action: Deny
- protocol: TCP
- source:
- notSelector: calico-prometheus-access == "true"
- destination:
- ports:
- - 9093
-```
-
-This policy selects all endpoints that have the label `running-calico: true`, and enforces a single ingress deny rule.
-The ingress rule denies traffic to port 9093 unless the source of traffic has the label `calico-prometheus-access: true`. This blocks access
-from all {{prodname}} workload endpoints, host endpoints, and global network sets that do not have the label, as well as from any
-other network endpoint unknown to {{prodname}}.
-
-Then, use `calicoctl` to apply this policy.
-
-```bash
-calicoctl apply -f typha-prometheus-policy.yaml
-```
-
-### Example for kube-controllers
-
-If your {{prodname}} installation exposes metrics from kube-controllers, you can limit access to those metrics
-with the following network policy.
-
-Create a file named `kube-controllers-prometheus-policy.yaml` with the following contents.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
- name: restrict-kube-controllers-prometheus
- namespace: calico-system
-spec:
- # Select kube-controllers.
- selector: k8s-app == "calico-kube-controllers"
- order: 500
- types:
- - Ingress
- ingress:
- # Deny anything that tries to access the Prometheus port
- # but that doesn't match the necessary selector.
- - action: Deny
- protocol: TCP
- source:
- notSelector: calico-prometheus-access == "true"
- destination:
- ports:
- - 9094
-```
-
-:::note
-
-The above policy is installed in the calico-system namespace. If your cluster has {{prodname}} installed
-in the kube-system namespace, you will need to create the policy in that namespace instead.
-
-:::
-
-Then, use `calicoctl` to apply this policy.
-
-```bash
-calicoctl apply -f kube-controllers-prometheus-policy.yaml
-```
-
-## Using an allow-list approach
-
-### Overview
-
-The basic process is as follows:
-
-1. Create host endpoints for each node that you'd like to secure.
-1. Create a network policy that allows desired traffic to the {{prodname}} metrics endpoints.
-1. Apply labels to allow access to the Prometheus metrics.
-
-### Example for {{nodecontainer}}
-
-1. List the nodes on which {{prodname}} is running with the following command.
-
- ```bash
- calicoctl get nodes
- ```
-
- In this case, we have two nodes in the cluster.
-
- ```
- NAME
- kubeadm-master
- kubeadm-node-0
- ```
-
-1. Create host endpoints for each {{prodname}} node.
-
- Create a file named `host-endpoints.yaml` containing a host endpoint for each node listed
- above. In this example, the contents would look like this.
-
- ```yaml
- apiVersion: projectcalico.org/v3
- kind: HostEndpoint
- metadata:
- name: kubeadm-master.eth0
- labels:
- running-calico: 'true'
- spec:
- node: kubeadm-master
- interfaceName: eth0
- expectedIPs:
- - 10.100.0.15
- ---
- apiVersion: projectcalico.org/v3
- kind: HostEndpoint
- metadata:
- name: kubeadm-node-0.eth0
- labels:
- running-calico: 'true'
- spec:
- node: kubeadm-node-0
- interfaceName: eth0
- expectedIPs:
- - 10.100.0.16
- ```
-
- In this file, replace `eth0` with the desired interface name on each node, and populate the
- `expectedIPs` section with the IP addresses on that interface.
-
- Note the use of a label to indicate that this host endpoint is running {{prodname}}. The
- label matches the selector of the network policy created in the next step.
-
- Then, use `calicoctl` to apply the host endpoints with the following command. This will prevent all
- traffic to and from the host endpoints.
-
- ```bash
- calicoctl apply -f host-endpoints.yaml
- ```
-
- :::note
-
- {{prodname}} allows some traffic as a failsafe even after applying this policy. This can
- be adjusted using the `failsafeInboundHostPorts` and `failsafeOutboundHostPorts` options
- on the [FelixConfiguration resource](../../reference/resources/felixconfig.mdx).
-
- :::
-
-1. Create a network policy that allows access to the {{nodecontainer}} Prometheus metrics port.
-
- Now let's create a network policy that allows access to the Prometheus metrics port such that
- only endpoints with the label `calico-prometheus-access: true` can access the metrics.
-
- To do this, create a file named `calico-prometheus-policy.yaml` with the following contents.
-
- ```yaml
- apiVersion: projectcalico.org/v3
- kind: GlobalNetworkPolicy
- metadata:
- name: restrict-calico-node-prometheus
- spec:
- # Select all {{prodname}} nodes.
- selector: running-calico == "true"
- order: 500
- types:
- - Ingress
- ingress:
- # Allow traffic from selected sources to the Prometheus port.
- - action: Allow
- protocol: TCP
- source:
- selector: calico-prometheus-access == "true"
- destination:
- ports:
- - 9091
- ```
-
- This policy selects all endpoints that have the label `running-calico: true`, and enforces a single ingress allow rule.
- The ingress rule allows traffic to port 9091 from any source with the label `calico-prometheus-access: true`, meaning
- all {{prodname}} workload endpoints, host endpoints, and global network sets that have the label will be allowed access.
-
- Then, use `calicoctl` to apply this policy.
-
- ```bash
- calicoctl apply -f calico-prometheus-policy.yaml
- ```
-
-1. Apply labels to any endpoints that should have access to the metrics.
-
- At this point, only endpoints that have the label `calico-prometheus-access: true` can reach
- {{prodname}}'s Prometheus metrics endpoints on each node. To grant access, simply add this label to the
- desired endpoints.
-
- For example, to allow access to a Kubernetes pod you can run the following command.
-
- ```bash
- kubectl label pod my-prometheus-pod calico-prometheus-access=true
- ```
-
- If you would like to grant access to a specific IP address in your network, you
- can create a [global network set](../../reference/resources/globalnetworkset.mdx) using `calicoctl`.
-
- For example, creating the following network set would grant access to a host with IP 172.15.0.101.
-
- ```yaml
- apiVersion: projectcalico.org/v3
- kind: GlobalNetworkSet
- metadata:
- name: calico-prometheus-set
- labels:
- calico-prometheus-access: 'true'
- spec:
- nets:
- - 172.15.0.101/32
- ```
-
-### Additional steps for Typha deployments
-
-If your {{prodname}} installation uses the Kubernetes API datastore and has more than 50 nodes, it is likely
-that you have installed Typha. This section shows how to use an additional network policy to secure the Typha
-Prometheus endpoints.
-
-After following the steps above, create a file named `typha-prometheus-policy.yaml` with the following contents.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: restrict-typha-prometheus
-spec:
- # Select all {{prodname}} nodes.
- selector: running-calico == "true"
- order: 500
- types:
- - Ingress
- ingress:
- - action: Allow
- protocol: TCP
- source:
- selector: calico-prometheus-access == "true"
- destination:
- ports:
- - 9093
-```
-
-This policy selects all endpoints that have the label `running-calico: true`, and enforces a single ingress allow rule.
-The ingress rule allows traffic to port 9093 from any source with the label `calico-prometheus-access: true`, meaning
-all {{prodname}} workload endpoints, host endpoints, and global network sets that have the label will be allowed access.
-
-Then, use `calicoctl` to apply this policy.
-
-```bash
-calicoctl apply -f typha-prometheus-policy.yaml
-```
-
-### Example for kube-controllers
-
-If your {{prodname}} installation exposes metrics from kube-controllers, you can limit access to those metrics
-with the following network policy.
-
-Create a file named `kube-controllers-prometheus-policy.yaml` with the following contents.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
- name: restrict-kube-controllers-prometheus
- namespace: calico-system
-spec:
- selector: k8s-app == "calico-kube-controllers"
- order: 500
- types:
- - Ingress
- ingress:
- - action: Allow
- protocol: TCP
- source:
- selector: calico-prometheus-access == "true"
- destination:
- ports:
- - 9094
-```
-
-Then, use `calicoctl` to apply this policy.
-
-```bash
-calicoctl apply -f kube-controllers-prometheus-policy.yaml
-```
diff --git a/calico_versioned_docs/version-3.25/network-policy/encrypt-cluster-pod-traffic.mdx b/calico_versioned_docs/version-3.25/network-policy/encrypt-cluster-pod-traffic.mdx
deleted file mode 100644
index 5b43b0b9e8..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/encrypt-cluster-pod-traffic.mdx
+++ /dev/null
@@ -1,263 +0,0 @@
----
-description: Enable WireGuard for state-of-the-art cryptographic security between pods for Calico clusters.
----
-
-# Encrypt in-cluster pod traffic
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Enable WireGuard to secure on-the-wire, in-cluster pod traffic in a {{prodname}} cluster.
-
-## Value
-
-When this feature is enabled, {{prodname}} automatically creates and manages WireGuard tunnels between nodes, providing transport-level security for on-the-wire, in-cluster pod traffic. WireGuard provides [formally verified](https://www.wireguard.com/formal-verification/) secure and [performant tunnels](https://www.wireguard.com/performance/) without any specialized hardware. For a deep dive into the WireGuard implementation, see this [white paper](https://www.wireguard.com/papers/wireguard.pdf).
-
-{{prodname}} supports WireGuard encryption for both IPv4 and IPv6 traffic. These can be independently enabled in the FelixConfiguration resource: `wireguardEnabled`
-enables encrypting IPv4 traffic over an IPv4 underlay network and `wireguardEnabledV6` enables encrypting IPv6 traffic over an IPv6 underlay network.
-
-## Before you begin...
-
-**Terminology**
-
- - Inter-node pod traffic: Traffic leaving a pod from one node destined to a pod on another node
- - Inter-node, host-network traffic: traffic generated by the node itself or a host-networked-pod destined to another node or host-networked-pod
- - Same-node pod traffic: Traffic between pods on the same node
-
-WireGuard encryption is supported on the following platforms:
-
-- Kubernetes, on-premises
-- EKS using Calico CNI
-- EKS using AWS CNI
-- AKS using Azure CNI
-
-**Supported encryption**
-
-- Encryption for inter-node pod traffic
-- Encryption for inter-node, host-network traffic - supported only on managed clusters deployed on EKS and AKS
-
-**Required**
-
-- On all nodes in the cluster that you want to participate in {{prodname}} encryption, verify that the operating system(s) on the nodes are [installed with WireGuard](https://www.wireguard.com/install/).
-
- :::note
-
- Some node operating systems do not support WireGuard, or do not have it installed by default. Enabling {{prodname}} WireGuard encryption does not require all nodes to have WireGuard installed. However, traffic to or from a node that does not have WireGuard installed will not be encrypted.
-
- :::
-
-- IP addresses for every node in the cluster. This is required to establish secure tunnels between the nodes. {{prodname}} can automatically do this using [IP autodetection methods](../networking/ipam/ip-autodetection.mdx).
-
-## How to
-
-- [Install WireGuard](#install-wireguard)
-- [Enable WireGuard for a cluster](#enable-wireguard-for-a-cluster)
-- [Disable WireGuard for an individual node](#disable-wireguard-for-an-individual-node)
-- [Verify configuration](#verify-configuration)
-- [Disable WireGuard for a cluster](#disable-wireguard-for-a-cluster)
-
-### Install WireGuard
-
-WireGuard is included in Linux 5.6+ kernels, and has been backported to earlier Linux kernels in some Linux distributions.
-
-Install WireGuard on cluster nodes using [instructions for your operating system](https://www.wireguard.com/install/). Note that you may need to reboot your nodes after installing WireGuard to make the kernel modules available on your system.
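-
-As a quick sanity check (not part of the official install instructions), you can confirm on a node that the WireGuard kernel module is available:
-
-```bash
-sudo modprobe wireguard && lsmod | grep wireguard
-```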
-
-Use the following instructions for these platforms that are not listed on the WireGuard installation page, before proceeding to [enabling WireGuard](#enable-wireguard-for-a-cluster).
-
-
-
-
-To install WireGuard on the default Amazon Machine Image (AMI):
-
-```bash
- sudo yum install kernel-devel-`uname -r` -y
- sudo yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm -y
- sudo curl -o /etc/yum.repos.d/jdoss-wireguard-epel-7.repo https://copr.fedorainfracloud.org/coprs/jdoss/wireguard/repo/epel-7/jdoss-wireguard-epel-7.repo
- sudo yum install wireguard-dkms wireguard-tools -y
-```
-
-
-
-
-AKS cluster nodes run Ubuntu with a kernel that has WireGuard installed already, so there is no manual installation required.
-
-
-
-
-To install WireGuard for OpenShift v4.8:
-
-1. Install requirements:
-
- - [CoreOS Butane](https://coreos.github.io/butane/getting-started/)
- - [Openshift CLI](https://docs.openshift.com/container-platform/4.2/cli_reference/openshift_cli/getting-started-cli.html)
-
-1. Download and configure the tools needed for kmods.
-
-```bash
-FAKEROOT=$(mktemp -d)
-git clone https://github.com/tigera/kmods-via-containers
-cd kmods-via-containers
-make install FAKEROOT=${FAKEROOT}
-cd ..
-git clone https://github.com/tigera/kvc-wireguard-kmod
-cd kvc-wireguard-kmod
-make install FAKEROOT=${FAKEROOT}
-cd ..
-```
-
-1. Configure/edit `${FAKEROOT}/root/etc/kvc/wireguard-kmod.conf`.
-
- a. You must then set the URLs for the `KERNEL_CORE_RPM`, `KERNEL_DEVEL_RPM` and `KERNEL_MODULES_RPM` packages in the conf file `$FAKEROOT/etc/kvc/wireguard-kmod.conf`. Obtain copies of the `kernel-core`, `kernel-devel`, and `kernel-modules` RPMs from [RedHat Access](https://access.redhat.com/downloads/content/package-browser) and host them on an HTTP file server that is reachable by your OCP workers.
-
- b. For help configuring `kvc-wireguard-kmod/wireguard-kmod.conf` and Wireguard version to kernel version compatibility, see the [kvc-wireguard-kmod README file](https://github.com/tigera/kvc-wireguard-kmod#quick-config-variables-guide).
-
-1. Get RHEL Entitlement data from your own RHEL8 system from a host in your cluster.
-
- ```bash
- tar -czf subs.tar.gz /etc/pki/entitlement/ /etc/rhsm/ /etc/yum.repos.d/redhat.repo
- ```
-
-1. Copy the `subs.tar.gz` file to your workspace and then extract the contents using the following command.
-
- ```bash
- tar -x -C ${FAKEROOT}/root -f subs.tar.gz
- ```
-
-1. Transpile your machine config using [CoreOS Butane](https://coreos.github.io/butane/getting-started/).
-
- ```bash
- cd kvc-wireguard-kmod
- make ignition FAKEROOT=${FAKEROOT} > mc-wg.yaml
- ```
-
-1. With the KUBECONFIG set for your cluster, run the following command to apply the MachineConfig which will install WireGuard across your cluster.
- ```bash
- oc create -f mc-wg.yaml
- ```
-
-
-
-
-### Enable WireGuard for a cluster
-
-
-
-
-Enable IPv4 WireGuard encryption across all the nodes using the following command.
-
-```bash
-kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":true}}'
-```
-
-Enable IPv6 WireGuard encryption across all the nodes using the following command.
-
-```bash
-kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabledV6":true}}'
-```
-
-To enable both IPv4 and IPv6 WireGuard encryption across all the nodes, use the following command.
-
-```bash
-kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":true,"wireguardEnabledV6":true}}'
-```
-
-
-
-
-Enable IPv4 WireGuard encryption across all the nodes using the following command.
-
-```bash
-calicoctl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":true}}'
-```
-
-Enable IPv6 WireGuard encryption across all the nodes using the following command.
-
-```bash
-calicoctl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabledV6":true}}'
-```
-
-To enable both IPv4 and IPv6 WireGuard encryption across all the nodes, use the following command.
-
-```bash
-calicoctl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":true,"wireguardEnabledV6":true}}'
-```
-
-**Perform the next step for EKS and AKS clusters only, and only if your cluster is using the cloud provider CNI plugin and not Calico CNI.** Enable WireGuard encryption for direct node-to-node communications using the following command.
-
-```bash
-calicoctl patch felixconfiguration default --type='merge' -p '{"spec": {"wireguardHostEncryptionEnabled": true}}'
-```
-
-
-
-
-For OpenShift, add the Felix configuration with WireGuard enabled [under custom resources](../getting-started/kubernetes/openshift/installation.mdx#optionally-provide-additional-configuration).
-
-:::note
-
-The above command can be used to change other WireGuard attributes. For a list of other WireGuard parameters and how configuration is evaluated, see the [Felix configuration](../reference/resources/felixconfig.mdx#felix-configuration-definition).
-
-:::
-
-:::note
-
-`natOutgoing: true` is set for the default IPv4 IP pool, but not for IPv6. WireGuard requires `natOutgoing` to be enabled for both IPv4 and IPv6 IP pools, so [enable NAT outgoing for the IPv6 IP pools](../networking/configuring/workloads-outside-cluster.mdx) when using IPv6 WireGuard.
-
-:::
-
-We recommend that you review and modify the MTU used by {{prodname}} networking when WireGuard is enabled to increase network performance. Follow the instructions in the [Configure MTU to maximize network performance](../networking/configuring/mtu.mdx) guide to set the MTU to a value appropriate for your network.
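-
-For example, on an operator-managed cluster with a standard 1500-byte network, you might set the MTU to 1440 to leave room for WireGuard overhead (the value is illustrative; follow the MTU guide above to size it for your network):
-
-```bash
-kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"mtu":1440}}}'
-```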
-
-### Disable WireGuard for an individual node
-
-To disable WireGuard on a specific node with WireGuard installed, modify the node-specific Felix configuration. For example, to turn off encryption for pod traffic on node `my-node`, use the following command. This command disables WireGuard for both IPv4 and IPv6; modify it accordingly if you want to disable only one IP version:
-
-```bash
-cat <<EOF | calicoctl apply -f -
-apiVersion: projectcalico.org/v3
-kind: FelixConfiguration
-metadata:
-  name: node.my-node
-spec:
-  wireguardEnabled: false
-  wireguardEnabledV6: false
-EOF
-```
-
-### Verify configuration
-
-To verify that WireGuard is enabled on a node, check the status reported by Felix on the Node resource. For example:
-
-```
-calicoctl get node <NODE-NAME> -o yaml
-  ...
-  status:
-    ...
-    wireguardPublicKey: jlkVyQYooZYzI2wFfNhSZez5eWh44yfq1wKVjLvSXgY=
-    wireguardPublicKeyV6: hTnWXGM4qk/Z8fQgyGFdpPd4qM9QGR2ey30s31yC6g4=
-    ...
-```
-
-### Disable WireGuard for a cluster
-
-To disable WireGuard on all nodes, modify the default Felix configuration. For example:
-
-```bash
- calicoctl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":false,"wireguardEnabledV6":false}}'
-```
-
-## Additional resources
-
-- [Secure Calico component communications](comms/index.mdx)
-- [Configure MTU to maximize network performance](../networking/configuring/mtu.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/defend-dos-attack.mdx b/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/defend-dos-attack.mdx
deleted file mode 100644
index 8b3171d435..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/defend-dos-attack.mdx
+++ /dev/null
@@ -1,107 +0,0 @@
----
-description: Define DoS mitigation rules in Calico policy to quickly drop connections when under attack. Learn how rules use eBPF and XDP, including hardware offload when available.
----
-
-# Defend against DoS attacks
-
-## Big picture
-
-Calico automatically enforces specific types of deny-list policies at the earliest possible point in the packet processing pipeline, including offloading to NIC hardware whenever possible.
-
-## Value
-
-During a DoS attack, a cluster can receive massive numbers of connection requests from attackers. The faster these connection requests are dropped, the less flooding and overloading to your hosts. When you define DoS mitigation rules in Calico network policy, Calico enforces the rules as efficiently as possible to minimize the impact.
-
-## Concepts
-
-### Earliest packet processing
-
-The earliest point in the packet processing pipeline at which packets can be dropped depends on the Linux kernel version and the capabilities of the NIC driver and NIC hardware. Calico automatically uses the fastest available option.
-
-| Processed by... | Used by Calico if... | Performance |
-| --------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- |
-| NIC hardware | The NIC supports **XDP offload** mode. | Fastest |
-| NIC driver | The NIC driver supports **XDP native** mode. | Faster |
-| Kernel | The kernel supports **XDP generic mode** and Calico is configured to explicitly use it. This mode is rarely used and has no performance benefits over iptables raw mode below. To enable, see [Felix Configuration](../../reference/resources/felixconfig.mdx). | Fast |
-| Kernel | If none of the modes above are available, **iptables raw** mode is used. | Fast |
-
-:::note
-
-XDP modes require Linux kernel v4.16 or later.
-
-:::
-
-## How to
-
-The high-level steps to defend against a DoS attack are:
-
-- [Step 1: Create host endpoints](#step-1-create-host-endpoints)
-- [Step 2: Add CIDRs to deny-list in a global network set](#step-2-add-cidrs-to-deny-list-in-a-global-network-set)
-- [Step 3: Create deny incoming traffic global network policy](#step-3-create-deny-incoming-traffic-global-network-policy)
-
-### Best practice
-
-The following sections walk through the required steps above, assuming no prior configuration is in place. A best practice is to do these steps proactively, before an attack (create the host endpoints, network policy, and global network set). In the event of a DoS attack, you can then respond quickly by adding the CIDRs that you want to deny-list to the global network set.
-
-### Step 1: Create host endpoints
-
-First, you create the HostEndpoints corresponding to the network interfaces where you want to enforce DoS mitigation rules. In the following example, the HostEndpoint secures the interface named **eth0** with IP **10.0.0.1** on node **jasper**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: HostEndpoint
-metadata:
- name: production-host
- labels:
- apply-dos-mitigation: 'true'
-spec:
- interfaceName: eth0
- node: jasper
- expectedIPs: ['10.0.0.1']
-```
-
-### Step 2: Add CIDRs to deny-list in a global network set
-
-Next, you create a Calico **GlobalNetworkSet**, adding the CIDRs that you want to deny-list. In the following example, the global network set deny-lists the CIDR ranges **1.2.3.4/32** and **5.6.0.0/16**:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkSet
-metadata:
- name: dos-mitigation
- labels:
- dos-deny-list: 'true'
-spec:
- nets:
- - '1.2.3.4/32'
- - '5.6.0.0/16'
-```
-
-### Step 3: Create deny incoming traffic global network policy
-
-Finally, create a Calico GlobalNetworkPolicy that uses the GlobalNetworkSet label (**dos-deny-list** in the previous step) as a selector to deny ingress traffic. To enforce the denial of forwarded traffic to the host as early as possible at the packet level, use the **doNotTrack** and **applyOnForward** options.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: dos-mitigation
-spec:
- selector: apply-dos-mitigation == 'true'
- doNotTrack: true
- applyOnForward: true
- types:
- - Ingress
- ingress:
- - action: Deny
- source:
- selector: dos-deny-list == 'true'
-```
-
-## Additional resources
-
-- [Global network sets](../../reference/resources/globalnetworkset.mdx)
-- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
-- [Create a host endpoint](../../reference/resources/hostendpoint.mdx)
-- [Introduction to XDP](https://www.iovisor.org/technology/xdp)
-- [Advanced XDP documentation](https://prototype-kernel.readthedocs.io/en/latest/networking/XDP/index.html)
diff --git a/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/high-connection-workloads.mdx b/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/high-connection-workloads.mdx
deleted file mode 100644
index d005bd9100..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/high-connection-workloads.mdx
+++ /dev/null
@@ -1,89 +0,0 @@
----
-description: Create a Calico network policy rule to bypass Linux conntrack for traffic to workloads that experience an extremely large number of connections.
----
-
-# Enable extreme high-connection workloads
-
-## Big picture
-
-Use a {{prodname}} network policy rule to bypass Linux conntrack for traffic to workloads that experience an extremely large number of connections.
-
-## Value
-
-When the number of connections on a node exceeds the number of connections that Linux conntrack can track, connections can be rejected or dropped. {{prodname}} network policy can be used to selectively bypass Linux conntrack for traffic to/from these types of workloads.
-
-## Concepts
-
-### Linux conntrack
-
-Connection tracking (“conntrack”) is a core feature of the Linux kernel’s networking stack. It allows the kernel to keep track of all logical network connections or flows, and thereby identify all of the packets that make up each flow so they can be handled consistently together. Conntrack is an essential part of the mainline Linux network processing pipeline, normally improving performance, and enabling NAT and stateful access control.
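-
-To get a feel for these limits on a given node (a quick check, not required for the steps below), you can inspect the kernel's conntrack table size and current usage:
-
-```bash
-sysctl net.netfilter.nf_conntrack_max
-cat /proc/sys/net/netfilter/nf_conntrack_count
-```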
-
-### Extreme high-connection workloads
-
-Some niche workloads that handle an extremely high number of simultaneous connections, or a very high rate of short-lived connections, can exceed the maximum number of connections Linux conntrack is able to track. One real-world example of such a workload is an extreme-scale memcached server handling 50k+ connections per second.
-
-### {{prodname}} doNotTrack network policy
-
-The {{prodname}} global network policy option, **doNotTrack**, indicates to apply the rules in the policy before connection tracking, and that packets allowed by these rules should not be tracked. The policy is applied early in the Linux packet processing pipeline, before any regular network policy rules, and independent of the policy order field.
-
-Unlike normal network policy rules, doNotTrack network policy rules are stateless, meaning you must explicitly specify rules to allow return traffic that would normally be automatically allowed by conntrack. For example, for a server on port 999, the policy must include an ingress rule allowing inbound traffic to port 999, and an egress rule to allow outbound traffic from port 999.
-
-In a doNotTrack policy:
-
-- Ingress rules apply to all incoming traffic through a host endpoint, regardless of where the traffic is going
-- Egress rules apply only to traffic that is sent from the host endpoint (not a local workload)
-
-Finally, you must set **applyOnForward: true** for a **doNotTrack** policy to work.
-
-## Before you begin...
-
-Before creating a **doNotTrack** network policy, read this [blog](https://www.tigera.io/blog/when-linux-conntrack-is-no-longer-your-friend/) to understand use cases, benefits, and trade offs.
-
-## How to
-
-### Bypass connection tracking for a high-connection server
-
-In the following example, a memcached server pod with **hostNetwork: true** was scheduled on the node memcached-node-1. We create a HostEndpoint for the node. Next, we create a GlobalNetworkPolicy with symmetric ingress and egress rules, with doNotTrack and applyOnForward set to true.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: HostEndpoint
-metadata:
- name: memcached-node-1-eth0
- labels:
- memcached: server
-spec:
- interfaceName: eth0
- node: memcached-node-1
- expectedIPs:
- - 10.128.0.162
----
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: memcached-server
-spec:
- selector: memcached == 'server'
- applyOnForward: true
- doNotTrack: true
- ingress:
- - action: Allow
- protocol: TCP
- source:
- selector: memcached == 'client'
- destination:
- ports:
- - 12211
- egress:
- - action: Allow
- protocol: TCP
- source:
- ports:
- - 12211
- destination:
- selector: memcached == 'client'
-```
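-
-One way to create these resources (assuming they are saved to a file such as `memcached-donottrack.yaml`; the file name is illustrative) is with calicoctl:
-
-```bash
-# Apply the HostEndpoint and GlobalNetworkPolicy defined above
-calicoctl apply -f memcached-donottrack.yaml
-
-# Verify that both resources were created
-calicoctl get hostendpoint memcached-node-1-eth0
-calicoctl get globalnetworkpolicy memcached-server
-```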
-
-## Additional resources
-
-[Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/index.mdx b/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/index.mdx
deleted file mode 100644
index 65e6316852..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Use Calico network policy early in the Linux packet processing pipeline to handle extreme traffic scenarios.
-hide_table_of_contents: true
----
-
-# Policy for extreme traffic
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-labels.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-labels.mdx
deleted file mode 100644
index 0dd6d5b625..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-labels.mdx
+++ /dev/null
@@ -1,110 +0,0 @@
----
-description: Calico automatic labels for use with resources.
----
-
-# Calico automatic labels
-
-As a convenience, {{prodname}} provides immutable labels that are used for specific resources when evaluating selectors in policies. The labels make it easier to match resources in common ways (such as matching a namespace by name).
-
-## Labels for matching namespaces
-
-The label `projectcalico.org/name` is set to the name of the namespace. This allows for matching namespaces by name when using a `namespaceSelector` field.
-
-For example, the following GlobalNetworkPolicy applies to workloads with the label `color: red` in namespaces named `"foo"` and `"bar"`. The policy allows ingress traffic to port 8080 from all workloads in a third namespace named `"baz"`:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: foo-and-bar
-spec:
- namespaceSelector: projectcalico.org/name in {"foo", "bar"}
- selector: color == "red"
- types:
- - Ingress
- ingress:
- - action: Allow
- source:
- namespaceSelector: projectcalico.org/name == "baz"
- destination:
- ports:
- - 8080
-```
-
-Be aware that the default values for `namespaceSelector` for NetworkPolicy and GlobalNetworkPolicy are different. For example:
-
-**In a network policy**,
-
- ```yaml
- namespaceSelector:
- selector: foo == "bar"
- ```
-means "resources in the same namespace as the network policy that matches foo == 'bar'".
-
-**In a global network policy**,
-
- ```yaml
- namespaceSelector:
- selector: foo == "bar"
- ```
-means "resources in any namespace and non-namespaced resources that match foo == 'bar'".
-
-Further,
-
- ```yaml
- namespaceSelector: projectcalico.org/name == "some-namespace"
- selector: foo == "bar"
- ```
-is equivalent to:
-
- ```yaml
- namespaceSelector:
- selector: (foo == "bar") && (projectcalico.org/namespace == "some-namespace")
- ```
-
-### Labels for matching service accounts
-
-Similarly, the `projectcalico.org/name` label is applied to ServiceAccounts and allows for matching by name in a `serviceAccountSelector`.
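-
-For example, a minimal sketch of a rule that allows ingress only from pods running as a particular service account might look like the following (the policy name, namespace, labels, and service account name are illustrative, not from the original examples):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: allow-from-api-sa
-  namespace: production
-spec:
-  selector: color == "red"
-  types:
-    - Ingress
-  ingress:
-    - action: Allow
-      source:
-        serviceAccounts:
-          selector: projectcalico.org/name == "api-sa"
-```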
-
-### Kubernetes labels for matching namespaces
-
-Kubernetes also has [automatic labeling](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/#automatic-labelling), for example `kubernetes.io/metadata.name`. The Kubernetes namespace label serves the same purpose and can be used in the same way as the {{prodname}} label. The `projectcalico.org/name` label predates the automatic Kubernetes label.
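-
-For instance, the ingress rule from the earlier example could be written against the Kubernetes label instead of the {{prodname}} label (illustrative fragment):
-
-```yaml
-ingress:
-  - action: Allow
-    source:
-      namespaceSelector: kubernetes.io/metadata.name == "baz"
-    destination:
-      ports:
-        - 8080
-```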
-
-## Labels for matching workload endpoints
-
-WorkloadEndpoints (which represent Pods in Kubernetes, or VM instances in OpenStack), receive several automatic labels:
-
-* `projectcalico.org/orchestrator` is applied to all WorkloadEndpoints and allows Kubernetes Pods to be distinguished from OpenStack VM instances, and from HostEndpoints (which do not have the label):
-
-  * `has(projectcalico.org/orchestrator)` matches only WorkloadEndpoints
-  * `projectcalico.org/orchestrator == "k8s"` matches only Kubernetes Pods
-
-* For WorkloadEndpoints that represent Kubernetes Pods, `projectcalico.org/namespace` contains the name of the pod's namespace. `projectcalico.org/namespace` predates the addition of `namespaceSelector` fields to GlobalNetworkPolicies; it serves the same purpose as the `projectcalico.org/name` label in a `namespaceSelector` field. The following GlobalNetworkPolicy is exactly equivalent to the example shown in the Namespaces section:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: foo-and-bar
-spec:
- selector: projectcalico.org/namespace in {"foo", "bar"} && color == "red"
- types:
- - Ingress
- ingress:
- - action: Allow
- source:
- selector: projectcalico.org/namespace == "baz"
- destination:
- ports:
- - 8080
-```
-
-## Use the correct selector with labels in policies
-
-{{prodname}} labels must be used with the correct selector or the policy will not work as designed (and there are no error messages in Manager UI or when applying the YAML).
-
-| Calico label | Usage requirements | Use in these resources... |
-| --------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
-| `projectcalico.org/name` | Use with a **namespaceSelector** or **serviceAccountSelector**. | - Network policy<br/>- Staged network policy<br/>Namespaced resources that apply only to workload endpoint resources in the namespace. |
-| `projectcalico.org/namespace` | Use only with selectors.<br/>Use the label as the label name, and a namespace name as the value to compare against (for example `projectcalico.org/namespace == "default"`). | - Global network policy<br/>- Staged global network policy<br/>Cluster-wide (non-namespaced) resources that apply to workload endpoint resources in all namespaces, and to host endpoint resources. |
-
diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-network-policy.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-network-policy.mdx
deleted file mode 100644
index a99d1a64a4..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-network-policy.mdx
+++ /dev/null
@@ -1,258 +0,0 @@
----
-description: Create your first Calico network policies. Shows the rich features using sample policies that extend native Kubernetes network policy.
----
-
-# Get started with Calico network policy
-
-## Big picture
-
-Enforce which network traffic is allowed or denied using rules in Calico network policy.
-
-## Value
-
-### Extends Kubernetes network policy
-
-Calico network policy provides a richer set of policy capabilities than Kubernetes, including policy ordering/priority, deny rules, and more flexible match rules. While Kubernetes network policy applies only to pods, Calico network policy can be applied to multiple types of endpoints including pods, VMs, and host interfaces. Finally, when used with the Istio service mesh, Calico network policy supports securing applications using layer 5-7 match criteria and cryptographic identity.
-
-### Write once, works everywhere
-
-No matter which cloud provider you use now, adopting Calico network policy means you write the policy once and it is portable. If you move to a different cloud provider, you don’t need to rewrite your Calico network policy. Calico network policy is a key feature to avoid cloud provider lock-in.
-
-### Works seamlessly with Kubernetes network policies
-
-You can use Calico network policy in addition to Kubernetes network policy, or exclusively. For example, you could allow developers to define Kubernetes network policy for their microservices. For broader and higher-level access controls that developers cannot override, you could allow only security or Ops teams to define Calico network policies.
-
-## Concepts
-
-### Endpoints
-
-Calico network policies apply to **endpoints**. In Kubernetes, each pod is a Calico endpoint. However, Calico can support other kinds of endpoints. There are two types of Calico endpoints: **workload endpoints** (such as a Kubernetes pod or OpenStack VM) and **host endpoints** (an interface or group of interfaces on a host).
-
-### Namespaced and global network policies
-
-**Calico network policy** is a namespaced resource that applies to pods/containers/VMs in that namespace.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
- name: allow-tcp-6379
- namespace: production
-```
-
-**Calico global network policy** is a non-namespaced resource and can be applied to any kind of endpoint (pods, VMs, host interfaces) independent of namespace.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: allow-tcp-port-6379
-```
-
-Because global network policies use **kind: GlobalNetworkPolicy**, they are grouped separately from **kind: NetworkPolicy**. For example, global network policies are not returned by `calicoctl get networkpolicy`; they are returned by `calicoctl get globalnetworkpolicy`.
-
-### kubectl vs calicoctl
-
-Calico network policies and Calico global network policies are applied using calicoctl. The syntax is similar to Kubernetes, but there are a few differences. For help, see [calicoctl user reference](../../../reference/calicoctl/overview.mdx).
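-
-For example, a typical workflow might look like the following (the manifest file name is an assumption):
-
-```bash
-# Apply a Calico policy manifest
-calicoctl apply -f allow-tcp-6379.yaml
-
-# List Calico network policies in the production namespace
-calicoctl get networkpolicy --namespace=production
-
-# List Calico global network policies (cluster-wide)
-calicoctl get globalnetworkpolicy
-```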
-
-### Ingress and egress
-
-Each network policy rule applies to either **ingress** or **egress** traffic. From the point of view of an endpoint (pod, VM, host interface), **ingress** is incoming traffic to the endpoint, and **egress** is outgoing traffic from the endpoint. In a Calico network policy, you create ingress and egress rules independently (egress, ingress, or both).
-
-You can specify whether policy applies to ingress, egress, or both using the **types** field. If you do not use the types field, Calico defaults to the following values.
-
-| Ingress rule present? | Egress rule present? | Value |
-| :-------------------: | :------------------: | :-------------: |
-| No | No | Ingress |
-| Yes | No | Ingress |
-| No | Yes | Egress |
-| Yes | Yes | Ingress, Egress |
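-
-For example, here is a minimal sketch of a policy that restricts only ingress traffic and states that explicitly (the policy name and labels are assumptions):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: ingress-only-example
-  namespace: production
-spec:
-  selector: color == 'red'
-  types:
-    # Only ingress is restricted; egress is left unaffected by this policy
-    - Ingress
-  ingress:
-    - action: Allow
-      protocol: TCP
-      destination:
-        ports:
-          - 6379
-```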
-
-### Network traffic behaviors: deny and allow
-
-The Kubernetes network policy specification defines the following behavior:
-
-- If no network policies apply to a pod, then all traffic to/from that pod is allowed.
-- If one or more network policies with ingress rules apply to a pod, then only the ingress traffic specifically allowed by those policies is allowed.
-- If one or more network policies with egress rules apply to a pod, then only the egress traffic specifically allowed by those policies is allowed.
-
-For compatibility with Kubernetes, **Calico network policy** follows the same behavior for Kubernetes pods. For other endpoint types (VMs, host interfaces), Calico network policy is default deny. That is, only traffic specifically allowed by network policy is allowed, even if no network policies apply to the endpoint.
-
-## Before you begin
-
-`calicoctl` must be **installed** and **configured** before use. `calicoctl` will use etcd as the datastore by default, but many {{prodname}} installation manifests configure Kubernetes as the datastore. You can find more information on how to configure `calicoctl` in the following link:
-
-- [Configure `calicoctl`](../../../operations/calicoctl/configure/overview.mdx)
-
-## How to
-
-- [Control traffic to/from endpoints in a namespace](#control-traffic-tofrom-endpoints-in-a-namespace)
-- [Control traffic to/from endpoints independent of namespace](#control-traffic-tofrom-endpoints-independent-of-namespace)
-- [Control traffic to/from endpoints using IP addresses or CIDR ranges](#control-traffic-tofrom-endpoints-using-ip-addresses-or-cidr-ranges)
-- [Apply network policies in specific order](#apply-network-policies-in-specific-order)
-- [Generate logs for specific traffic](#generate-logs-for-specific-traffic)
-
-### Control traffic to/from endpoints in a namespace
-
-In the following example, ingress traffic to endpoints in the namespace **production** with the label **color: red** is allowed only if it comes from a pod in the same namespace with the label **color: blue**, on port **6379**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
- name: allow-tcp-6379
- namespace: production
-spec:
- selector: color == 'red'
- ingress:
- - action: Allow
- protocol: TCP
- source:
- selector: color == 'blue'
- destination:
- ports:
- - 6379
-```
-
-To allow ingress traffic from endpoints in other namespaces, use a **namespaceSelector** in the policy rule. A namespaceSelector matches namespaces based on the labels that are applied in the namespace. In the following example, ingress traffic is allowed from endpoints in namespaces that match **shape == circle**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
- name: allow-tcp-6379
- namespace: production
-spec:
- selector: color == 'red'
- ingress:
- - action: Allow
- protocol: TCP
- source:
- selector: color == 'blue'
- namespaceSelector: shape == 'circle'
- destination:
- ports:
- - 6379
-```
-
-### Control traffic to/from endpoints independent of namespace
-
-The following Calico network policy is similar to the previous example, but uses **kind: GlobalNetworkPolicy** so it applies to all endpoints, regardless of namespace.
-
-In the following example, incoming TCP traffic to any pods with label **color: red** is denied if it comes from a pod with **color: blue**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: deny-blue
-spec:
- selector: color == 'red'
- ingress:
- - action: Deny
- protocol: TCP
- source:
- selector: color == 'blue'
-```
-
-As with **kind: NetworkPolicy**, you can allow or deny ingress traffic from endpoints in specific namespaces using a namespaceSelector in the policy rule:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: deny-circle-blue
-spec:
- selector: color == 'red'
- ingress:
- - action: Deny
- protocol: TCP
- source:
- selector: color == 'blue'
- namespaceSelector: shape == 'circle'
-```
-
-### Control traffic to/from endpoints using IP addresses or CIDR ranges
-
-Instead of using a selector to define which traffic is allowed to/from the endpoints in a network policy, you can also specify an IP block in CIDR notation.
-
-In the following example, outgoing traffic is allowed from pods with the label **color: red** if it goes to an IP address in the **1.2.3.0/24** CIDR block.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
- name: allow-egress-external
- namespace: production
-spec:
- selector: color == 'red'
- types:
- - Egress
- egress:
- - action: Allow
- destination:
- nets:
- - 1.2.3.0/24
-```
-
-### Apply network policies in specific order
-
-To control the order/sequence of applying network policies, you can use the **order** field (with precedence from the lowest value to highest). Defining policy **order** is important when you include both **action: allow** and **action: deny** rules that may apply to the same endpoint.
-
-In the following example, the policy **allow-cluster-internal-ingress** (order: 10) is applied before the policy **drop-other-ingress** (order: 20).
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: drop-other-ingress
-spec:
- order: 20
- #...deny policy rules here...
-```
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: allow-cluster-internal-ingress
-spec:
- order: 10
- #...allow policy rules here...
-```
-
-### Generate logs for specific traffic
-
-In the following example, incoming TCP traffic to an application is denied, and each connection attempt is logged to syslog.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
- name: allow-tcp-6379
- namespace: production
-spec:
- selector: role == 'database'
- types:
- - Ingress
- - Egress
- ingress:
- - action: Log
- protocol: TCP
- source:
- selector: role == 'frontend'
- - action: Deny
- protocol: TCP
- source:
- selector: role == 'frontend'
-```
-
-### Create policy for established connections
-
-Policies are immediately applied to any new connections. However, for existing connections that are already open, the policy changes will only take effect after the connection has been reestablished. This means that any ongoing sessions may not immediately reflect policy changes until they are initiated again.
-
-## Additional resources
-
-- For additional Calico network policy features, see [Calico network policy](../../../reference/resources/networkpolicy.mdx) and [Calico global network policy](../../../reference/resources/globalnetworkpolicy.mdx)
-- For an alternative to using IP addresses or CIDRs in policy, see [Network sets](../../../reference/resources/networkset.mdx)
-- For details on the calicoctl command line tool, see [calicoctl user reference](../../../reference/calicoctl/overview.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-policy-tutorial.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-policy-tutorial.mdx
deleted file mode 100644
index 0fc30bf6c0..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-policy-tutorial.mdx
+++ /dev/null
@@ -1,219 +0,0 @@
----
-description: Learn how to create more advanced Calico network policies (namespace, allow and deny all ingress and egress).
----
-
-# Calico policy tutorial
-
-Calico network policies **extend** the functionalities of Kubernetes network policies. To demonstrate this, this tutorial follows a similar approach to the [Kubernetes Advanced Network Policy Tutorial](../kubernetes-policy/kubernetes-policy-advanced.mdx), but instead uses Calico network policies and highlights differences between the two policy types, making use of features that are not available in Kubernetes network policies.
-
-## Requirements
-
-- A working Kubernetes cluster and access to it using kubectl and calicoctl
-- Your Kubernetes nodes have connectivity to the public internet
-- You are familiar with [Calico NetworkPolicy](calico-network-policy.mdx)
-
-## Tutorial flow
-
-1. Create the namespace and NGINX service
-2. Configure default deny
-3. Allow egress traffic from busybox
-4. Allow ingress traffic to NGINX
-5. Clean up
-
-## 1. Create the namespace and nginx service
-
-We'll use a new namespace for this guide. Run the following commands to create the namespace and a plain NGINX service listening on port 80.
-
-```bash
-kubectl create ns advanced-policy-demo
-kubectl create deployment --namespace=advanced-policy-demo nginx --image=nginx
-kubectl expose --namespace=advanced-policy-demo deployment nginx --port=80
-```
-
-### Verify access - allowed all ingress and egress
-
-Open up a second shell session which has `kubectl` connectivity to the Kubernetes cluster and create a busybox pod to test policy access. This pod will be used throughout this tutorial to test policy access.
-
-```bash
-kubectl run --namespace=advanced-policy-demo access --rm -ti --image busybox /bin/sh
-```
-
-This will open up a shell session inside the `busybox` pod, as shown below.
-
-```
-Waiting for pod advanced-policy-demo/access-472357175-y0m47 to be running, status is Pending, pod ready: false
-
-If you don't see a command prompt, try pressing enter.
-/ #
-```
-
-Now from within the busybox "access" pod execute the following command to test access to the nginx service.
-
-```bash
-wget -q --timeout=5 nginx -O -
-```
-
-It returns the HTML of the nginx welcome page.
-
-Still within the busybox "access" pod, issue the following command to test access to google.com.
-
-```bash
-wget -q --timeout=5 google.com -O -
-```
-
-It returns the HTML of the google.com home page.
-
-## 2. Lock down all traffic
-
-We will begin with a default deny [Global Calico Network Policy](../../../reference/resources/globalnetworkpolicy.mdx) (which is only possible with Calico). This helps us adopt best practices for a [zero trust network model](../../adopt-zero-trust.mdx) to secure our workloads. Note that global Calico network policies are not namespaced and affect all pods that match the policy selector. In contrast, Kubernetes network policies are namespaced, so you would need to create a default deny policy per namespace to achieve the same effect. To simplify this tutorial, we exclude pods in the `kube-system`, `calico-system`, and `calico-apiserver` namespaces, so we don't have to consider the policies required to keep Kubernetes itself running smoothly when we apply our default deny.
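-
-A minimal sketch of such a default deny GlobalNetworkPolicy is shown below (the name `default-deny` matches the clean-up step later in this tutorial; the exact selector form is an assumption, modeled on the global default deny pattern used elsewhere in these docs):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: default-deny
-spec:
-  namespaceSelector: has(projectcalico.org/name) && projectcalico.org/name not in {"kube-system", "calico-system", "calico-apiserver"}
-  types:
-    - Ingress
-    - Egress
-```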
-
-```bash
-calicoctl create -f - <
-
-
-Welcome to nginx!...
-```
-
-Next, try to retrieve the home page of google.com.
-
-```bash
-wget -q --timeout=5 google.com -O -
-```
-
-It will return the HTML of the google home page.
-
-We have allowed our access pod access to the outside internet and the nginx service using Calico Network Policies!
-
-## 5. Clean up
-
-To clean up this tutorial session, run the following commands to delete the network policies and remove the demo namespace.
-
-```bash
-calicoctl delete policy allow-busybox-egress -n advanced-policy-demo
-calicoctl delete policy allow-nginx-ingress -n advanced-policy-demo
-calicoctl delete gnp default-deny
-kubectl delete ns advanced-policy-demo
-```
diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/index.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/index.mdx
deleted file mode 100644
index fce333b50a..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Calico network policy lets you secure both workloads and hosts.
-hide_table_of_contents: true
----
-
-# Calico policy
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/network-policy-openstack.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/network-policy-openstack.mdx
deleted file mode 100644
index c30d615136..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/network-policy-openstack.mdx
+++ /dev/null
@@ -1,99 +0,0 @@
----
-description: Extend OpenStack security groups by applying Calico network policy and using labels to identify VMs within network policy rules.
----
-
-# Get started with Calico network policy for OpenStack
-
-## Big picture
-
-Use {{prodname}} network policy to extend security beyond OpenStack security groups.
-
-## Value
-
-For **deployment users**, OpenStack security groups provide enough features and flexibility. But for **deployment administrators**, limited labeling in VM security groups makes it difficult to address all security use cases that arise. {{prodname}} network policy provides special VM labels so you can identify VMs and impose additional restrictions that cannot be bypassed by users’ security group configuration.
-
-## Concepts
-
-### Multi-region deployments
-
-Using the OpenStack API, it is difficult to apply policy to cross-region network traffic because security groups are local to a single region. In {{prodname}}, each region in your OpenStack deployment becomes a separate {{prodname}} namespace in a single etcd datastore. With regions mapped to namespaces, you can easily define {{prodname}} network policy for communications between VMs in different regions.
-
-### Labels: more flexibility, greater security
-
-{{prodname}} provides predefined [VM endpoint labels](../../../networking/openstack/labels.mdx) (projects, security groups, and namespaces) for OpenStack deployments. You can use these labels in selector fields in {{prodname}} network policy to identify the VMs for allow/deny policy.
-
-### Policy ordering and enforcement
-
-{{prodname}} network policy is always enforced before OpenStack security groups, and cannot be overridden by user-level security group configuration.
-
-## Before you begin...
-
-- [Set up {{prodname}} for OpenStack](../../../networking/openstack/dev-machine-setup.mdx)
-- If you are using a multi-region VM deployment, [follow these extra steps](../../../networking/openstack/multiple-regions.mdx)
-
-## How to
-
-- [Restrict all ingress traffic between specific security groups](#restrict-all-ingress-traffic-between-specific-security-groups)
-- [Allow specific traffic between VMs in different regions](#allow-specific-traffic-between-vms-in-different-regions)
-
-### Restrict all ingress traffic between specific security groups
-
-In the following example, we create a **GlobalNetworkPolicy** that is applied before any OpenStack security group policy. It prevents all ingress communication between the OpenStack **superman** and **lexluthor** projects. We use the predefined {{prodname}} VM endpoint label, **openstack-project-name**, to identify projects.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: deny-lexluthor-to-superman
-spec:
- order: 10
- selector: "projectcalico.org/openstack-project-name == 'superman'"
- types:
- - Ingress
- ingress:
- - action: Deny
- source:
- selector: "projectcalico.org/openstack-project-name == 'lexluthor'"
----
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: deny-superman-to-lexluthor
-spec:
- order: 10
- selector: "projectcalico.org/openstack-project-name == 'lexluthor'"
- types:
- - Ingress
- ingress:
- - action: Deny
- source:
- selector: "projectcalico.org/openstack-project-name == 'superman'"
-```
-
-### Allow specific traffic between VMs in different regions
-
-In the following example, we use the predefined VM endpoint label, **openstack-security_group_ID**. Traffic is allowed to VMs with the label **openstack-a773…** on port 80, from VMs in any region with the label **openstack-85cc…**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: allow-tcp-80
-spec:
- selector: 'has(sg.projectcalico.org/openstack-a7734e61-b545-452d-a3cd-0189cbd9747a)'
- types:
- - Ingress
- ingress:
- - action: Allow
- protocol: TCP
- source:
- selector: 'has(sg.projectcalico.org/openstack-85cc3048-abc3-43cc-89b3-377341426ac5)'
- destination:
- ports:
- - 80
-```
-
-## Additional resources
-
-- For additional {{prodname}} network policy features, see [{{prodname}} network policy](../../../reference/resources/networkpolicy.mdx) and [Calico global network policy](../../../reference/resources/globalnetworkpolicy.mdx)
-- For details on the OpenStack integration with {{prodname}}, see [{{prodname}} for OpenStack](../../../networking/openstack/dev-machine-setup.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/index.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/index.mdx
deleted file mode 100644
index 7626a83757..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: If you are new to Kubernetes, start with "Kubernetes policy" and learn the basics of enforcing policy for pod traffic. Otherwise, dive in and create more powerful policies with Calico policy. The good news is, Kubernetes and Calico policies are very similar and work alongside each other -- so managing both types is easy.
-hide_table_of_contents: true
----
-
-# Get started with policy
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-default-deny.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-default-deny.mdx
deleted file mode 100644
index 0b9d2384e8..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-default-deny.mdx
+++ /dev/null
@@ -1,151 +0,0 @@
----
-description: Create a default deny network policy so pods that are missing policy are not allowed traffic until appropriate network policy is defined.
----
-
-# Enable a default deny policy for Kubernetes pods
-
-## Big picture
-
-Enable a default deny policy for Kubernetes pods using Kubernetes or {{prodname}} network policy.
-
-## Value
-
-A **default deny** network policy provides an enhanced security posture so pods without policy (or incorrect policy) are not allowed traffic until appropriate network policy is defined.
-
-## Features
-
-This how-to guide uses the following {{prodname}} features:
-
-- **NetworkPolicy**
-- **GlobalNetworkPolicy**
-
-## Concepts
-
-### Default deny/allow behavior
-
-**Default allow** means all traffic is allowed by default, unless otherwise specified. **Default deny** means all traffic is denied by default, unless explicitly allowed. **Kubernetes pods are default allow**, unless network policy is defined to specify otherwise.
-
-For compatibility with Kubernetes, **{{prodname}} network policy** enforcement follows the standard convention for Kubernetes pods:
-
-- If no network policies apply to a pod, then all traffic to/from that pod is allowed.
-- If one or more network policies apply to a pod with type ingress, then only the ingress traffic specifically allowed by those policies is allowed.
-- If one or more network policies apply to a pod with type egress, then only the egress traffic specifically allowed by those policies is allowed.
-
-For other endpoint types (VMs, host interfaces), the default behavior is to deny traffic. Only traffic specifically allowed by network policy is allowed, even if no network policies apply to the endpoint.
-
-## Before you begin
-
-To apply the sample {{prodname}} network policies in the following section, [install calicoctl](../../operations/calicoctl/install.mdx).
-
-## How to
-
-- [Create a default deny network policy](#create-a-default-deny-network-policy)
-- [Create a global default deny policy](#create-a-global-default-deny-policy)
-
-### Create a default deny network policy
-
-Immediately after installation, a best practice is to create a namespaced default deny network policy to secure pods without policy or incorrect policy until you can put policies in place and test them.
-
-In the following example, we create a {{prodname}} default deny **NetworkPolicy** for all workloads in the namespace, **engineering**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
- name: default-deny
- namespace: engineering
-spec:
- selector: all()
- types:
- - Ingress
- - Egress
-```
-
-Here's an equivalent default deny **Kubernetes network policy** for all pods in the namespace, **engineering**:
-
-```yaml
-apiVersion: networking.k8s.io/v1
-kind: NetworkPolicy
-metadata:
- name: default-deny
- namespace: engineering
-spec:
- podSelector: {}
- policyTypes:
- - Ingress
- - Egress
-```
-
-### Create a global default deny policy
-
-A default deny policy ensures that unwanted traffic (ingress and egress) is denied by default without you having to remember default deny/allow behavior of Kubernetes and {{prodname}} policies. This policy can also help mitigate risks of lateral malicious attacks.
-
-#### Best practice #1: Allow, stage, then deny
-
-We recommend that you create a global default deny policy after you complete writing policy for the traffic that you want to allow. The following steps summarize the best practice to test and lock down the cluster to block unwanted traffic:
-
-1. Create a global default deny policy and test it in a staging environment. (The policy will show all the traffic that would be blocked if it were converted into a deny.)
-1. Create network policies to individually allow the traffic shown as blocked in step 1 until no connections are denied.
-1. Enforce the global default deny policy.
-
-#### Best practice #2: Keep the scope to non-system pods
-
-A global default deny policy applies to the entire cluster, including all workloads in all namespaces, hosts (computers that run the hypervisor for VMs or the container runtime for containers), and the Kubernetes and {{prodname}} control plane nodes and pods.
-
-For this reason, the best practice is to create a global default deny policy for **non-system pods**, as shown in the following example.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: deny-app-policy
-spec:
- namespaceSelector: has(projectcalico.org/name) && projectcalico.org/name not in {"kube-system", "calico-system", "tigera-system"}
- types:
- - Ingress
- - Egress
- egress:
- # allow all namespaces to communicate to DNS pods
- - action: Allow
- protocol: UDP
- destination:
- selector: 'k8s-app == "kube-dns"'
- ports:
- - 53
- - action: Allow
- protocol: TCP
- destination:
- selector: 'k8s-app == "kube-dns"'
- ports:
- - 53
-```
-
-Note the following:
-
-- Even though we call this policy "global default deny", the above policy is not explicitly denying traffic. By selecting the traffic with the `namespaceSelector` but not specifying an allow, the traffic is denied after all other policy is evaluated. This design also makes it unnecessary to ensure any specific order (priority) for the default-deny policy.
-- Allowing access to `kube-dns` simplifies per-pod policies because you don't need to duplicate the DNS rules in every policy
-- The policy deliberately excludes the `kube-system`, `calico-system`, and `tigera-system` namespaces by using a negative `namespaceSelector` to avoid impacting any control plane components
-
-In a staging environment, verify that the policy does not block any necessary traffic before enforcing it.
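-
-One way to apply the policy above with calicoctl (assuming the manifest is saved as `deny-app-policy.yaml`; the file name is illustrative):
-
-```bash
-calicoctl apply -f deny-app-policy.yaml
-
-# Confirm that the policy was created
-calicoctl get globalnetworkpolicy deny-app-policy
-```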
-
-### Don't try this!
-
-The following policy works and looks fine on the surface. But as described in Best practices #2, the policy is too broad in scope and could break your cluster. Therefore, we do not recommend adding this type of policy, even if you have verified allowed traffic in your staging environment.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: default.default-deny
-spec:
- tier: default
- selector: all()
- types:
- - Ingress
- - Egress
-```
-
-## Additional resources
-
-- [Network policy](../../reference/resources/networkpolicy.mdx)
-- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
\ No newline at end of file
diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/index.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/index.mdx
deleted file mode 100644
index f2cfe5b06b..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Manage your Kubernetes network policies right alongside the more powerful Calico network policies.
-hide_table_of_contents: true
----
-
-# Kubernetes policy
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx
deleted file mode 100644
index f3d32f518c..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx
+++ /dev/null
@@ -1,101 +0,0 @@
----
-description: An interactive demo that visually shows how applying Kubernetes policy allows and denies connections.
----
-
-# Kubernetes policy, demo
-
-The included demo sets up a frontend and backend service, as well as a client service, all
-running on Kubernetes. It then configures network policy on each service.
-
-## Prerequisites
-
-To create a Kubernetes cluster which supports the Kubernetes network policy API, follow
-one of our [getting started guides](../../../getting-started/index.mdx).
-
-## Running the stars example
-
-### 1) Create the frontend, backend, client, and management-ui apps.
-
-```shell
-kubectl create -f {{tutorialFilesURL}}/00-namespace.yaml
-kubectl create -f {{tutorialFilesURL}}/01-management-ui.yaml
-kubectl create -f {{tutorialFilesURL}}/02-backend.yaml
-kubectl create -f {{tutorialFilesURL}}/03-frontend.yaml
-kubectl create -f {{tutorialFilesURL}}/04-client.yaml
-```
-
-Wait for all the pods to enter `Running` state.
-
-```bash
-kubectl get pods --all-namespaces --watch
-```
-
-> Note that it may take several minutes to download the necessary Docker images for this demo.
-
-The management UI runs as a `NodePort` Service on Kubernetes, and shows the connectivity
-of the Services in this example.
-
-You can view the UI by visiting `http://<k8s-node-ip>:30002` in a browser.
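-
-If you are unsure which address and port to use, you can check them with kubectl (the `management-ui` namespace comes from the manifests applied above):
-
-```bash
-# List node addresses to substitute into the URL above
-kubectl get nodes -o wide
-
-# Confirm the NodePort exposed by the management UI service
-kubectl get svc --namespace=management-ui
-```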
-
-Once all the pods are started, they should have full connectivity. You can see this by visiting the UI. Each service is
-represented by a single node in the graph.
-
-- `backend` -> Node "B"
-- `frontend` -> Node "F"
-- `client` -> Node "C"
-
-### 2) Enable isolation
-
-Running the following commands will prevent all access to the frontend, backend, and client Services.
-
-```shell
-kubectl create -n stars -f {{tutorialFilesURL}}/default-deny.yaml
-kubectl create -n client -f {{tutorialFilesURL}}/default-deny.yaml
-```
-
-#### Confirm isolation
-
-Refresh the management UI (it may take up to 10 seconds for changes to be reflected in the UI).
-Now that we've enabled isolation, the UI can no longer access the pods, and so they will no longer show up in the UI.
-
-### 3) Allow the UI to access the services using network policy objects
-
-Apply the following YAML files to allow access from the management UI.
-
-```shell
-kubectl create -f {{tutorialFilesURL}}/allow-ui.yaml
-kubectl create -f {{tutorialFilesURL}}/allow-ui-client.yaml
-```
-
-After a few seconds, refresh the UI - it should now show the Services, but they should not be able to access each other anymore.
-
-### 4) Apply the backend-policy.yaml file to allow traffic from the frontend to the backend
-
-```shell
-kubectl create -f {{tutorialFilesURL}}/backend-policy.yaml
-```
-
-Refresh the UI. You should see the following:
-
-- The frontend can now access the backend (on TCP port 6379 only).
-- The backend cannot access the frontend at all.
-- The client cannot access the frontend, nor can it access the backend.
-
-### 5) Expose the frontend service to the client namespace
-
-```shell
-kubectl create -f {{tutorialFilesURL}}/frontend-policy.yaml
-```
-
-The client can now access the frontend, but not the backend. Neither the frontend nor the backend
-can initiate connections to the client. The frontend can still access the backend.
-
-To use {{prodname}} to enforce egress policy on Kubernetes pods, see [the advanced policy demo](kubernetes-policy-advanced.mdx).
-
-### 6) (Optional) Clean up the demo environment
-
-You can clean up the demo by deleting the demo Namespaces:
-
-```bash
-kubectl delete ns client stars management-ui
-```
diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-network-policy.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-network-policy.mdx
deleted file mode 100644
index 06a4be6807..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-network-policy.mdx
+++ /dev/null
@@ -1,179 +0,0 @@
----
-description: Learn Kubernetes policy syntax, rules, and features for controlling network traffic.
----
-
-# Get started with Kubernetes network policy
-
-## Big picture
-
-Kubernetes network policy lets administrators and developers enforce which network traffic is allowed using rules.
-
-## Value
-
-Kubernetes network policy lets developers secure access to and from their applications using the same simple language they use to deploy them. Developers can focus on their applications without understanding low-level networking concepts. Enabling developers to easily secure their applications using network policies supports a shift left DevOps environment.
-
-## Concepts
-
-The Kubernetes Network Policy API provides a standard way for users to define network policy for controlling network traffic. However, Kubernetes has no built-in capability to enforce the network policy. To enforce network policy, you must use a network plugin such as Calico.
-
-### Ingress and egress
-
-The bulk of securing network traffic typically revolves around defining egress and ingress rules. From the point of view of a Kubernetes pod, **ingress** is incoming traffic to the pod, and **egress** is outgoing traffic from the pod. In Kubernetes network policy, you create ingress and egress “allow” rules independently (egress, ingress, or both).
-
-### Default deny/allow behavior
-
-**Default allow** means all traffic is allowed by default, unless otherwise specified.
-**Default deny** means all traffic is denied by default, unless explicitly allowed.
-
-## How to
-
-Before you create your first Kubernetes network policy, you need to understand the default network policy behaviors. If no Kubernetes network policies apply to a pod, then all traffic to/from the pod is allowed (default-allow). As a result, if you do not create any network policies, then all pods are allowed to communicate freely with all other pods. If one or more Kubernetes network policies apply to a pod, then only the traffic specifically allowed by those policies is allowed (default-deny).
-
-You are now ready to start fine-tuning traffic that should be allowed.
-
-- [Create ingress policies](#create-ingress-policies)
-- [Allow ingress traffic from pods in the same namespace](#allow-ingress-traffic-from-pods-in-the-same-namespace)
-- [Allow ingress traffic from pods in a different namespace](#allow-ingress-traffic-from-pods-in-a-different-namespace)
-- [Create egress policies](#create-egress-policies)
-- [Allow egress traffic from pods in the same namespace](#allow-egress-traffic-from-pods-in-the-same-namespace)
-- [Allow egress traffic to IP addresses or CIDR range](#allow-egress-traffic-to-ip-addresses-or-cidr-range)
-- [Best practice: create deny-all default network policy](#best-practice-create-deny-all-default-network-policy)
-- [Create deny-all default ingress and egress network policy](#create-deny-all-default-ingress-and-egress-network-policy)
-
-### Create ingress policies
-
-Create ingress network policies to allow inbound traffic from other pods.
-
-Network policies apply to pods within a specific **namespace**. Policies can include one or more ingress rules. To specify which pods in the namespace the network policy applies to, use a **pod selector**. Within the ingress rule, use another pod selector to define which pods allow incoming traffic, and the **ports** field to define on which ports traffic is allowed.
-
-#### Allow ingress traffic from pods in the same namespace
-
-In the following example, incoming traffic to pods with the label **color=blue** is allowed only if it comes from a pod with **color=red**, on port **80**.
-
-```yaml
-kind: NetworkPolicy
-apiVersion: networking.k8s.io/v1
-metadata:
- name: allow-same-namespace
- namespace: default
-spec:
- podSelector:
- matchLabels:
- color: blue
- ingress:
- - from:
- - podSelector:
- matchLabels:
- color: red
- ports:
- - port: 80
-```
-
-#### Allow ingress traffic from pods in a different namespace
-
-To allow traffic from pods in a different namespace, use a namespace selector in the ingress policy rule. In the following policy, the namespace selector matches one or more Kubernetes namespaces and is combined with the pod selector that selects pods within those namespaces.
-
-:::note
-
-Namespace selectors can be used only in policy rules. The **spec.podSelector** applies to pods only in the same namespace as the policy.
-
-:::
-
-In the following example, incoming traffic is allowed only if it comes from a pod with the label **color=red**, in a namespace with the label **shape=square**, on port **80**.
-
-```yaml
-kind: NetworkPolicy
-apiVersion: networking.k8s.io/v1
-metadata:
- name: allow-different-namespace
- namespace: default
-spec:
- podSelector:
- matchLabels:
- color: blue
- ingress:
- - from:
- - podSelector:
- matchLabels:
- color: red
- namespaceSelector:
- matchLabels:
- shape: square
- ports:
- - port: 80
-```
-
-### Create egress policies
-
-Create egress network policies to allow outbound traffic from pods.
-
-#### Allow egress traffic from pods in the same namespace
-
-The following policy allows pod outbound traffic to other pods in the same namespace that match the pod selector. In the following example, outbound traffic is allowed only if it goes to a pod with the label **color=red**, on port **80**.
-
-```yaml
-kind: NetworkPolicy
-apiVersion: networking.k8s.io/v1
-metadata:
- name: allow-egress-same-namespace
- namespace: default
-spec:
- podSelector:
- matchLabels:
- color: blue
- egress:
- - to:
- - podSelector:
- matchLabels:
- color: red
- ports:
- - port: 80
-```
-
-#### Allow egress traffic to IP addresses or CIDR range
-
-Egress policies can also be used to allow traffic to specific IP addresses and CIDR ranges. Typically, IP addresses/ranges are used to handle traffic that is external to the cluster for static resources or subnets.
-
-The following policy allows egress traffic to the **172.18.0.0/24** CIDR range.
-
-```yaml
-kind: NetworkPolicy
-apiVersion: networking.k8s.io/v1
-metadata:
- name: allow-egress-external
- namespace: default
-spec:
- podSelector:
- matchLabels:
- color: red
- egress:
- - to:
- - ipBlock:
- cidr: 172.18.0.0/24
-```
-
-### Best practice: create deny-all default network policy
-
-To ensure that all pods in the namespace are secure, a best practice is to create a default network policy. This avoids accidentally exposing an app or version that doesn’t have policy defined.
-
-#### Create deny-all default ingress and egress network policy
-
-The following network policy implements a default **deny-all** ingress and egress policy, which prevents all traffic to/from pods in the **policy-demo** namespace. Note that the policy selects all pods in the policy-demo namespace but does not explicitly allow any traffic. Because the default behavior changes once pods are selected by a network policy, the result is to **deny all ingress and egress traffic** (unless the traffic is allowed by another network policy).
-
-```yaml
-kind: NetworkPolicy
-apiVersion: networking.k8s.io/v1
-metadata:
- name: default-deny
- namespace: policy-demo
-spec:
- podSelector:
- matchLabels: {}
- policyTypes:
- - Ingress
- - Egress
-```
-
-## Additional resources
-
-- [Kubernetes Network Policy API documentation](https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/)
diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx
deleted file mode 100644
index b0cf5131d9..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx
+++ /dev/null
@@ -1,342 +0,0 @@
----
-description: Learn how to create more advanced Kubernetes network policies (namespace, allow and deny all ingress and egress).
----
-
-# Kubernetes policy, advanced tutorial
-
-The Kubernetes `NetworkPolicy` API allows users to express ingress and egress policies for Kubernetes pods (starting with Kubernetes 1.8.0), based on labels and ports.
-
-This guide walks through using Kubernetes `NetworkPolicy` to define more complex network policies.
-
-## Requirements
-
-- A working Kubernetes cluster and access to it using kubectl
-- Your Kubernetes nodes have connectivity to the public internet
-- You are familiar with [Kubernetes NetworkPolicy](kubernetes-policy-basic.mdx)
-
-## Tutorial flow
-
-1. Create the Namespace and Nginx Service
-1. Deny all ingress traffic
-1. Allow ingress traffic to Nginx
-1. Deny all egress traffic
-1. Allow egress traffic to kube-dns
-1. Cleanup Namespace
-
-## 1. Create the namespace and nginx service
-
-We'll use a new namespace for this guide. Run the following commands to create it and a plain nginx service listening on port 80.
-
-```bash
-kubectl create ns advanced-policy-demo
-kubectl create deployment --namespace=advanced-policy-demo nginx --image=nginx
-kubectl expose --namespace=advanced-policy-demo deployment nginx --port=80
-```
-
-### Verify access - allowed all ingress and egress
-
-Open up a second shell session which has `kubectl` connectivity to the Kubernetes cluster and create a busybox pod to test policy access. This pod will be used throughout this tutorial to test policy access.
-
-```bash
-kubectl run --namespace=advanced-policy-demo access --rm -ti --image busybox /bin/sh
-```
-
-This should open up a shell session inside the `access` pod, as shown below.
-
-```
-Waiting for pod advanced-policy-demo/access-472357175-y0m47 to be running, status is Pending, pod ready: false
-
-If you don't see a command prompt, try pressing enter.
-/ #
-```
-
-Now from within the busybox "access" pod execute the following command to test access to the nginx service.
-
-```bash
-wget -q --timeout=5 nginx -O -
-```
-
-It should return the HTML of the nginx welcome page.
-
-Still within the busybox "access" pod, issue the following command to test access to google.com.
-
-```bash
-wget -q --timeout=5 google.com -O -
-```
-
-It should return the HTML of the google.com home page.
-
-## 2. Deny all ingress traffic
-
-Enable ingress isolation on the namespace by deploying a [default deny all ingress traffic policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/#default-deny-all-ingress-traffic).
-
-```bash
-kubectl create -f - <
-
-
-Welcome to nginx!...
-```
-
-After creating the policy, we can now access the nginx Service.
-
-## 4. Deny all egress traffic
-
-Enable egress isolation on the namespace by deploying a [default deny all egress traffic policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/#default-deny-all-egress-traffic).
-
-```bash
-kubectl create -f - <
-
-
-Welcome to nginx!...
-```
-
-Next, try to retrieve the home page of google.com.
-
-```bash
-wget -q --timeout=5 google.com -O -
-```
-
-It should return:
-
-```
-wget: download timed out
-```
-
-Access to `google.com` times out because it can resolve DNS but has no egress access to anything other than pods with labels matching `app: nginx` in the `advanced-policy-demo` namespace.
-
-## 7. Clean up namespace
-
-You can clean up after this tutorial by deleting the advanced policy demo namespace.
-
-```bash
-kubectl delete ns advanced-policy-demo
-```
diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx
deleted file mode 100644
index 8210d8d19f..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx
+++ /dev/null
@@ -1,207 +0,0 @@
----
-description: Learn how to use basic Kubernetes network policy to securely restrict traffic to/from pods.
----
-
-# Kubernetes policy, basic tutorial
-
-This guide provides a simple way to try out Kubernetes `NetworkPolicy` with {{prodname}}. It requires a Kubernetes cluster configured with {{prodname}} networking, and expects that you have `kubectl` configured to interact with the cluster.
-
-You can quickly and easily deploy such a cluster by following one of the [installation guides](../../../getting-started/kubernetes/index.mdx).
-
-## Configure namespaces
-
-This guide will deploy pods in a Kubernetes namespace. Let's create the `Namespace` object for this guide.
-
-```bash
-kubectl create ns policy-demo
-```
-
-## Create demo pods
-
-We'll use Kubernetes `Deployment` objects to easily create pods in the namespace.
-
-1. Create some nginx pods in the `policy-demo` namespace.
-
- ```bash
- kubectl create deployment --namespace=policy-demo nginx --image=nginx
- ```
-
-1. Expose them through a service.
-
- ```bash
- kubectl expose --namespace=policy-demo deployment nginx --port=80
- ```
-
-1. Ensure the nginx service is accessible.
-
- ```bash
- kubectl run --namespace=policy-demo access --rm -ti --image busybox /bin/sh
- ```
-
- This should open up a shell session inside the `access` pod, as shown below.
-
- ```
- Waiting for pod policy-demo/access-472357175-y0m47 to be running, status is Pending, pod ready: false
-
- If you don't see a command prompt, try pressing enter.
-
- / #
- ```
-
-1. From inside the `access` pod, attempt to reach the `nginx` service.
-
- ```bash
- wget -q nginx -O -
- ```
-
- You should see a response from `nginx`. Great! Our service is accessible. You can exit the pod now.
-
-## Enable isolation
-
-Let's turn on isolation in our `policy-demo` namespace. {{prodname}} will then prevent connections to pods in this namespace.
-
-Running the following command creates a NetworkPolicy which implements a default deny behavior for all pods in the `policy-demo` namespace.
-
-```bash
-kubectl create -f - <
diff --git a/calico_versioned_docs/version-3.25/network-policy/hosts/kubernetes-nodes.mdx b/calico_versioned_docs/version-3.25/network-policy/hosts/kubernetes-nodes.mdx
deleted file mode 100644
index 22cd391b74..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/hosts/kubernetes-nodes.mdx
+++ /dev/null
@@ -1,214 +0,0 @@
----
-description: Protect Kubernetes nodes with host endpoints managed by Calico.
----
-
-# Protect Kubernetes nodes
-
-## Big picture
-
-Secure Kubernetes nodes with host endpoints managed by {{prodname}}.
-
-## Value
-
-{{prodname}} can automatically create host endpoints for your Kubernetes nodes. This means {{prodname}} can manage the lifecycle of host endpoints as your cluster evolves, ensuring nodes are always protected by policy.
-
-## Concepts
-
-### Host endpoints
-
-Each host has one or more network interfaces that it uses to communicate externally. You can represent these interfaces in Calico using host endpoints and then use network policy to secure them.
-
-{{prodname}} host endpoints can have labels, and they work the same as labels on workload endpoints. The network policy rules can apply to both workload and host endpoints using label selectors.
-
-Automatic host endpoints secure all of the host's interfaces (i.e. in Linux, all the interfaces in the host network namespace). They are created by setting `interfaceName: "*"`.
-
-### Automatic host endpoints
-
-{{prodname}} creates a wildcard host endpoint for each node, with the host endpoint containing the same labels and IP addresses as its corresponding node.
-{{prodname}} keeps these managed host endpoints in sync with their nodes' labels and IP addresses through periodic syncs.
-This means that policy targeting these automatic host endpoints continues to select the intended nodes correctly, even if a node's IPs or labels change over time.
-
-Automatic host endpoints are differentiated from other host endpoints by the label `projectcalico.org/created-by: calico-kube-controllers`.
-Enable or disable automatic host endpoints by configuring the default KubeControllersConfiguration resource.
-
-## Before you begin...
-
-Have a running {{prodname}} cluster with `calicoctl` installed.
-
-## How to
-
-- [Enable automatic host endpoints](#enable-automatic-host-endpoints)
-- [Apply network policy to automatic host endpoints](#apply-network-policy-to-automatic-host-endpoints)
-
-### Enable automatic host endpoints
-
-To enable automatic host endpoints, edit the default KubeControllersConfiguration instance, and set `spec.controllers.node.hostEndpoint.autoCreate` to `Enabled`:
-
-```bash
-calicoctl patch kubecontrollersconfiguration default --patch='{"spec": {"controllers": {"node": {"hostEndpoint": {"autoCreate": "Enabled"}}}}}'
-```
-
-If successful, host endpoints are created for each of your cluster's nodes:
-
-```bash
-calicoctl get heps -owide
-```
-
-The output may look similar to this:
-
-```
-NAME NODE INTERFACE IPS PROFILES
-ip-172-16-101-147.us-west-2.compute.internal-auto-hep ip-172-16-101-147.us-west-2.compute.internal * 172.16.101.147,192.168.228.128 projectcalico-default-allow
-ip-172-16-101-54.us-west-2.compute.internal-auto-hep ip-172-16-101-54.us-west-2.compute.internal * 172.16.101.54,192.168.107.128 projectcalico-default-allow
-ip-172-16-101-79.us-west-2.compute.internal-auto-hep ip-172-16-101-79.us-west-2.compute.internal * 172.16.101.79,192.168.91.64 projectcalico-default-allow
-ip-172-16-101-9.us-west-2.compute.internal-auto-hep ip-172-16-101-9.us-west-2.compute.internal * 172.16.101.9,192.168.71.192 projectcalico-default-allow
-ip-172-16-102-63.us-west-2.compute.internal-auto-hep ip-172-16-102-63.us-west-2.compute.internal * 172.16.102.63,192.168.108.192 projectcalico-default-allow
-```
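-
-To turn automatic host endpoints off again later, patch the same field back to `Disabled` (a sketch against the same default resource):
-
-```bash
-calicoctl patch kubecontrollersconfiguration default --patch='{"spec": {"controllers": {"node": {"hostEndpoint": {"autoCreate": "Disabled"}}}}}'
-```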
-
-### Apply network policy to automatic host endpoints
-
-To apply policy that targets all Kubernetes nodes, first add a label to the nodes.
-The label will be synced to their automatic host endpoints.
-
-For example, to add the label **kubernetes-host** to all nodes and their host endpoints:
-
-```bash
-kubectl label nodes --all kubernetes-host=
-```
-
-And an example policy snippet:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: all-nodes-policy
-spec:
- selector: has(kubernetes-host)
-  # <rest of the policy>
-```
-
-To select a specific set of host endpoints (and their corresponding Kubernetes nodes), use a policy selector that selects a label unique to that set of host endpoints.
-For example, if we want to add the label **environment=dev** to nodes named node1 and node2:
-
-```bash
-kubectl label node node1 environment=dev
-kubectl label node node2 environment=dev
-```
-
-With the labels in place and automatic host endpoints enabled, host endpoints for node1 and node2 will be updated with the **environment=dev** label.
-We can write policy to select that set of nodes with a combination of selectors:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: some-nodes-policy
-spec:
- selector: has(kubernetes-host) && environment == 'dev'
-  # <rest of the policy>
-```
-
-## Tutorial
-
-This tutorial will lock down Kubernetes node ingress to only allow SSH and required ports for Kubernetes to function.
-We will apply two policies: one for the control plane nodes and one for the worker nodes.
-
-:::note
-
-This tutorial was tested on a cluster created with kubeadm v1.18.2 on AWS, using a "stacked etcd" [topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/), which means the etcd pods run on the masters; kubeadm uses stacked etcd by default.
-If your Kubernetes cluster is on a different platform, is running a variant of Kubernetes, or is running a topology with an external etcd cluster,
-please review the required ports for control plane and worker nodes in your cluster and adjust the policies in this tutorial as needed.
-
-:::
-
-First, let's restrict ingress traffic to the control plane nodes. The ingress policy below contains three rules.
-The first rule allows access to the API server port from anywhere. The second rule allows all traffic to localhost, which
-allows Kubernetes to access control plane processes. These control plane processes include the etcd server client API, the scheduler, and the controller-manager.
-This rule also allows localhost access to the kubelet API and calico/node health checks.
-And the final rule allows the etcd pods to peer with each other and allows the masters to access each other's kubelet API.
-
-If you have not modified the failsafe ports, you should still have SSH access to the nodes after applying this policy.
-Now apply the ingress policy for the Kubernetes masters:
-
-```bash
-calicoctl apply -f - << EOF
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: ingress-k8s-masters
-spec:
- selector: has(node-role.kubernetes.io/master)
- # This rule allows ingress to the Kubernetes API server.
- ingress:
- - action: Allow
- protocol: TCP
- destination:
- ports:
- # kube API server
- - 6443
- # This rule allows all traffic to localhost.
- - action: Allow
- destination:
- nets:
- - 127.0.0.0/8
- # This rule is required in multi-master clusters where etcd pods are colocated with the masters.
- # Allow the etcd pods on the masters to communicate with each other. 2380 is the etcd peer port.
- # This rule also allows the masters to access the kubelet API on other masters (including itself).
- - action: Allow
- protocol: TCP
- source:
- selector: has(node-role.kubernetes.io/master)
- destination:
- ports:
- - 2380
- - 10250
-EOF
-```
-
-Note that the above policy selects the standard **node-role.kubernetes.io/master** label that kubeadm sets on control plane nodes.
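-
-To confirm which nodes carry that label in your cluster, a quick optional check is:
-
-```bash
-kubectl get nodes -l 'node-role.kubernetes.io/master' -o name
-```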
-
-Next, we need to apply policy to restrict ingress to the Kubernetes workers.
-Before adding the policy we will add a label to all of our worker nodes, which then gets added to its automatic host endpoint.
-For this tutorial we will use **kubernetes-worker**. An example command to add the label to worker nodes:
-
-```bash
-kubectl get node -l '!node-role.kubernetes.io/master' -o custom-columns=NAME:.metadata.name | tail -n +2 | xargs -I{} kubectl label node {} kubernetes-worker=
-```
-
-The workers' ingress policy consists of two rules. The first rule allows all traffic to localhost. As with the masters,
-the worker nodes need to access their localhost kubelet API and calico/node healthcheck.
-The second rule allows the masters to access the workers' kubelet API. Now apply the policy:
-
-```bash
-calicoctl apply -f - << EOF
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: ingress-k8s-workers
-spec:
- selector: has(kubernetes-worker)
- # Allow all traffic to localhost.
- ingress:
- - action: Allow
- destination:
- nets:
- - 127.0.0.0/8
- # Allow only the masters access to the nodes kubelet API.
- - action: Allow
- protocol: TCP
- source:
- selector: has(node-role.kubernetes.io/master)
- destination:
- ports:
- - 10250
-EOF
-```
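-
-With both policies applied, you can list them to confirm they are in place (output will vary by cluster):
-
-```bash
-calicoctl get globalnetworkpolicy -o wide
-```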
-
-## Additional resources
-
-- [Protect hosts tutorial](protect-hosts-tutorial.mdx)
-- [Apply policy to Kubernetes node ports](../services/kubernetes-node-ports.mdx)
-- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
-- [Host endpoints](../../reference/resources/hostendpoint.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/hosts/protect-hosts-tutorial.mdx b/calico_versioned_docs/version-3.25/network-policy/hosts/protect-hosts-tutorial.mdx
deleted file mode 100644
index 518252af56..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/hosts/protect-hosts-tutorial.mdx
+++ /dev/null
@@ -1,192 +0,0 @@
----
-description: Learn how to secure incoming traffic from outside the cluster using Calico host endpoints with network policy, including allowing controlled access to specific Kubernetes services.
----
-
-# Protect hosts tutorial
-
-Imagine that the administrator of a Kubernetes cluster wants to secure it as much as
-possible against incoming traffic from outside the cluster. But suppose that
-the cluster provides various useful services that are exposed as Kubernetes
-NodePorts, i.e., as well-known TCP port numbers that appear to be available on
-any node in the cluster. The administrator does want to expose some
-of those NodePorts to traffic from outside.
-
-In this example we will use pre-DNAT policy applied to the external interfaces
-of each cluster node:
-
-- to disallow incoming traffic from outside, in general
-
-- but then to allow incoming traffic to particular NodePorts.
-
-We use pre-DNAT policy for these purposes, instead of normal host endpoint
-policy, because:
-
-1. We want the protection against general external traffic to apply regardless
- of where that traffic is destined for - for example, to a locally hosted
- pod, or to a pod on another node, or to a local server process running on
- the host itself. Pre-DNAT policy is enforced in all of those cases - as we
- want - whereas normal host endpoint policy is not enforced for traffic going
- to a local pod.
-
-2. We want to write this policy in terms of the advertised NodePorts, not in
- terms of whatever internal port numbers those may be transformed to.
- kube-proxy on the ingress node will use a DNAT to change a NodePort number
- and IP address to those of one of the pods that backs the relevant Service.
- Our policy therefore needs to take effect _before_ that DNAT - and that
- means that it must be a pre-DNAT policy.
-
-:::note
-
-This tutorial is intended to be used with named host endpoints, i.e. host endpoints with `interfaceName` set to a specific interface name.
-This tutorial does not work, as-is, with host endpoints with `interfaceName: "*"`.
-
-:::
-
-Here is the pre-DNAT policy that we need to disallow incoming external traffic
-in general:
-
-```bash
-calicoctl apply -f - <
-```
-
-and then using `host-endpoint==''` as the selector of the
-`allow-nodeport` policy, instead of `has(host-endpoint)`.
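-
-For illustration only, a general pre-DNAT deny policy selecting `has(host-endpoint)` could be sketched as follows; the name, order, and single rule are assumptions rather than the exact manifests used in this tutorial:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: drop-other-ingress
-spec:
-  order: 20
-  selector: has(host-endpoint)
-  preDNAT: true
-  applyOnForward: true
-  ingress:
-    - action: Deny
-```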
diff --git a/calico_versioned_docs/version-3.25/network-policy/hosts/protect-hosts.mdx b/calico_versioned_docs/version-3.25/network-policy/hosts/protect-hosts.mdx
deleted file mode 100644
index b4673b68fe..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/hosts/protect-hosts.mdx
+++ /dev/null
@@ -1,184 +0,0 @@
----
-description: Calico network policy not only protects workloads, but also hosts. Create Calico network policies to restrict traffic to/from hosts.
----
-
-# Protect hosts
-
-## Big picture
-
-Use {{prodname}} network policy to restrict traffic to/from hosts.
-
-## Value
-
-Restricting traffic between hosts and the outside world is not unique to {{prodname}}; many solutions provide this capability. However, the advantage of using {{prodname}} to protect the host is you can use the same {{prodname}} policy configuration as workloads. You only need to learn one tool. Write a cluster-wide policy, and it is immediately applied to every host.
-
-## Concepts
-
-### Hosts and workloads
-
-In the context of {{prodname}} configuration, a **workload** is a virtualized compute instance, like a VM or container. A **host** is the computer that runs the hypervisor (for VMs), or container runtime (for containers). We say it “hosts” the workloads as guests.
-
-### Host endpoints
-
-Each host has one or more network interfaces that it uses to communicate externally. You can use {{prodname}} network policy to secure these interfaces (called host endpoints). {{prodname}} host endpoints can have labels, and they work the same as labels on workload endpoints. The network policy rules can apply to both workload and host endpoints using label selectors.
-
-### Failsafe rules
-
-It is easy to inadvertently cut all host connectivity because of nonexistent or misconfigured network policy. To avoid this, {{prodname}} provides failsafe rules with default/configurable ports that are open on all host endpoints.
-
-### Default behavior of workload to host traffic
-
-By default, {{prodname}} blocks all connections from a workload to its local host. You can control whether connections from a workload endpoint to its local host are dropped, returned, or accepted using a simple parameter.
-
-{{prodname}} allows all connections from processes running on the host to guest workloads on the host. This allows host processes to run health checks and debug guest workloads.
-
-### Default behavior of external traffic to/from host
-
-If a host endpoint is added and network policy is not in place, the {{prodname}} default is to deny traffic to/from that endpoint (except for traffic allowed by failsafe rules). For host endpoints, {{prodname}} blocks traffic only to/from interfaces that it’s been explicitly told about in network policy. Traffic to/from other interfaces is ignored.
-
-### Other host protection
-
-In terms of design consistency in {{prodname}}, you may wonder about the following use cases.
-
-**Does {{prodname}} protect a local host from workloads?**
-Yes. DefaultEndpointToHostAction controls whether or not workloads can access their local host.
-
-**Does {{prodname}} protect a workload from the host it is running on?**
-No. {{prodname}} allows connections the host makes to the workloads running on that host. Some orchestrators like Kubernetes depend on this connectivity for health checking the workload. Moreover, processes running on the local host are often privileged enough to override local {{prodname}} policy. Be very cautious with the processes that you allow to run in the host's root network namespace.
-
-## Before you begin...
-
-If you are already running {{prodname}} for Kubernetes, you are good to go. If you want to install {{prodname}} on a non-cluster machine for host protection only, see [Non-cluster hosts](../../getting-started/bare-metal/index.mdx).
-
-## How to
-
-- [Avoid accidentally cutting all host connectivity ](#avoid-accidentally-cutting-all-host-connectivity)
-- [Use policy to restrict host traffic](#use-policy-to-restrict-host-traffic)
-- [Control default behavior of workload endpoint to host traffic](#control-default-behavior-of-workload-endpoint-to-host-traffic)
-
-### Avoid accidentally cutting all host connectivity
-
-To avoid inadvertently cutting all host connectivity because of nonexistent or misconfigured network policy, {{prodname}} uses failsafe rules that open specific ports and CIDRs on all host endpoints.
-
-Review the following table to determine if the defaults work for your implementation. If not, change the default ports using the parameters, **FailsafeInboundHostPorts** and **FailsafeOutboundHostPorts** in [Configuring Felix](../../reference/felix/configuration.mdx#environment-variables).
-
-| Port | Protocol | CIDR | Direction | Purpose |
-| ---- | -------- | --------- | ------------------ | ------------------------------------ |
-| 22 | TCP | 0.0.0.0/0 | Inbound | SSH access |
-| 53 | UDP | 0.0.0.0/0 | Outbound | DNS queries |
-| 67 | UDP | 0.0.0.0/0 | Outbound | DHCP access |
-| 68 | UDP | 0.0.0.0/0 | Inbound | DHCP access |
-| 179 | TCP | 0.0.0.0/0 | Inbound & Outbound | BGP access ({{prodname}} networking) |
-| 2379 | TCP | 0.0.0.0/0 | Inbound & Outbound | etcd access |
-| 2380 | TCP | 0.0.0.0/0 | Inbound & Outbound | etcd access |
-| 6443 | TCP | 0.0.0.0/0 | Inbound & Outbound | Kubernetes API server access |
-| 6666 | TCP | 0.0.0.0/0 | Inbound & Outbound | etcd self-hosted service access |
-| 6667 | TCP | 0.0.0.0/0 | Inbound & Outbound | etcd self-hosted service access |
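-
-If the defaults do not fit your environment, one way to override them is in the default **FelixConfiguration** resource (a sketch; the ports shown are illustrative, not recommendations):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: FelixConfiguration
-metadata:
-  name: default
-spec:
-  failsafeInboundHostPorts:
-    - protocol: TCP
-      port: 22
-    - protocol: TCP
-      port: 6443
-  failsafeOutboundHostPorts:
-    - protocol: UDP
-      port: 53
-    - protocol: TCP
-      port: 179
-```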
-
-### Use policy to restrict host traffic
-
-#### Step 1: Create policy to restrict host traffic
-
-Although failsafe rules provide protection from removing all connectivity to a host, you should create a GlobalNetworkPolicy policy that restricts host traffic.
-
-In the following example, we use a **GlobalNetworkPolicy** that applies to all worker nodes (defined by a label). Ingress SSH access is allowed from a defined "management" subnet.
-
-**Ingress traffic** is also allowed for ICMP, and on TCP port 10250 (default kubelet port). **Egress** traffic is allowed to etcd on a particular IP, and UDP on port 53 and 67 for DNS and DHCP.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: k8s-worker
-spec:
- selector: "role == 'k8s-worker'"
- order: 0
- ingress:
- - action: Allow
- protocol: TCP
- source:
- nets:
-          - '<your management CIDR>'
- destination:
- ports: [22]
- - action: Allow
- protocol: ICMP
- - action: Allow
- protocol: TCP
- destination:
- ports: [10250]
- egress:
- - action: Allow
- protocol: TCP
- destination:
- nets:
-          - '<your etcd IP>/32'
- ports: [2379]
- - action: Allow
- protocol: UDP
- destination:
- ports: [53, 67]
-```
-
-#### Step 2: Create host endpoints
-
-For each host endpoint that you want to secure with policy, you must create a **HostEndpoint** object. To do that, you need the name of the {{prodname}} node on the host that owns the interface; in most cases, it is the same as the hostname of the host.
-
-In the following example, we create a HostEndpoint for the host named **my-host** with the interface named **eth0**, with **IP 10.0.0.1**. Note that the value for **node:** must match the hostname used on the {{prodname}} node object.
-
-When the HostEndpoint is created, traffic to or from the interface is dropped unless policy is in place.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: HostEndpoint
-metadata:
- name: my-host-eth0
- labels:
- role: k8s-worker
- environment: production
-spec:
- interfaceName: eth0
- node: my-host
- expectedIPs: ['10.0.0.1']
-```
-
-### Control default behavior of workload endpoint to host traffic
-
-The default {{prodname}} behavior blocks all connections from workloads to their local host (after traffic passes any egress policy applied to the workload). You can change this behavior using the **DefaultEndpointToHostAction** parameter in Felix configuration.
-
-This parameter works at the iptables level, where you can specify packet behavior to **Drop** (default), **Accept**, or **Return**.
-
-To change this parameter for all hosts, edit the **FelixConfiguration** object named “default.”
-
-1. Get a copy of the object to edit.
-
- ```bash
- calicoctl get felixconfiguration default --export -o yaml > default-felix-config.yaml
- ```
-
-1. Open the file in a text editor and add the parameter, **defaultEndpointToHostAction**. For example:
-
- ```yaml
- apiVersion: projectcalico.org/v3
- kind: FelixConfiguration
- metadata:
- name: default
- spec:
- ipipEnabled: true
- logSeverityScreen: Info
- reportingInterval: 0s
- defaultEndpointToHostAction: Accept
- ```
-
-1. Update the FelixConfiguration on the cluster.
- ```bash
- calicoctl apply -f default-felix-config.yaml
- ```
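-
-Alternatively, the same change can be applied with a single patch command (a sketch; it assumes the default FelixConfiguration shown above):
-
-```bash
-calicoctl patch felixconfiguration default --patch '{"spec": {"defaultEndpointToHostAction": "Accept"}}'
-```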
-
-## Additional resources
-
-- [Apply policy to Kubernetes node ports](../services/kubernetes-node-ports.mdx)
-- [Protect Kubernetes nodes with host endpoints managed by {{prodname}}](kubernetes-nodes.mdx)
-- [Defend against DoS attacks](../extreme-traffic/defend-dos-attack.mdx)
-- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
-- [Host endpoint](../../reference/resources/hostendpoint.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/index.mdx b/calico_versioned_docs/version-3.25/network-policy/index.mdx
deleted file mode 100644
index c4222c19d8..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Calico Network Policy and Calico Global Network Policy are the fundamental resources to secure workloads and hosts, and to adopt a zero trust security model.
-hide_table_of_contents: true
----
-
-# Security
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/network-policy/istio/app-layer-policy.mdx b/calico_versioned_docs/version-3.25/network-policy/istio/app-layer-policy.mdx
deleted file mode 100644
index 2419bfbab8..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/istio/app-layer-policy.mdx
+++ /dev/null
@@ -1,200 +0,0 @@
----
-description: Enforce network policy for Istio service mesh including matching on HTTP methods and paths.
----
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-# Enforce network policy for Istio
-
-## Big picture
-
-{{prodname}} integrates seamlessly with Istio to enforce network policy within the Istio service mesh.
-
-## Value
-
-{{prodname}} network policy for Istio lets you enforce application layer attributes like HTTP methods or paths, and cryptographically secure identities for Istio-enabled apps.
-
-## Concepts
-
-### Benefits of the Istio integration
-
-The {{prodname}} support for Istio service mesh has the following benefits:
-
-- **Pod traffic controls**
-
- Lets you restrict ingress traffic inside and outside pods and mitigate common threats to Istio-enabled apps.
-
-- **Supports security goals**
-
- Enables adoption of a zero trust network model for security, including traffic encryption, multiple enforcement points, and multiple identity criteria for authentication.
-
-- **Familiar policy language**
-
- Kubernetes network policies and {{prodname}} network policies work as is; users do not need to learn another network policy model to adopt Istio.
-
-See [Enforce network policy using Istio tutorial](enforce-policy-istio.mdx) to learn how application layer policy provides second-factor authentication for the mythical Yao Bank.
-
-## Before you begin
-
-**Required**
-
-- [{{prodname}} is installed](../../getting-started/kubernetes/index.mdx)
-- [calicoctl is installed and configured](../../operations/calicoctl/install.mdx)
-
-**Istio support**
-
-The following Istio versions have been verified to work with application layer policies:
-
-- Istio v1.15.2
-- Istio v1.10.2
-
-Istio v1.9.x and lower are **not** supported.
-
-Although we expect future minor versions to work with the corresponding manifest below (for example, v1.10.2 or v1.15.2), manifest compatibility depends entirely on the upstream changes in the respective Istio release.
-
-## How to
-
-1. [Enable application layer policy](#enable-application-layer-policy)
-2. [Install Calico CSI Driver](#install-calico-csi-driver)
-3. [Install Istio](#install-istio)
-4. [Update Istio sidecar injector](#update-istio-sidecar-injector)
-5. [Add Calico authorization services to the mesh](#add-calico-authorization-services-to-the-mesh)
-6. [Add namespace labels](#add-namespace-labels)
-
-### Enable application layer policy
-
-To enable the application layer policy, you must enable the **Policy Sync API** on Felix cluster-wide.
-
-In the default **FelixConfiguration**, set the field, `policySyncPathPrefix` to `/var/run/nodeagent`:
-
-
-
-
-```bash
-calicoctl patch FelixConfiguration default --patch \
- '{"spec": {"policySyncPathPrefix": "/var/run/nodeagent"}}'
-```
-
-
-
-
-```bash
-kubectl patch FelixConfiguration default --type=merge --patch \
- '{"spec": {"policySyncPathPrefix": "/var/run/nodeagent"}}'
-```
-
-
-
-
-Additionally, if you have installed Calico via the operator, you can optionally disable flexvolumes.
-Flexvolumes were used in earlier implementations and have since been deprecated.
-
-```bash
-kubectl patch installation default --type=merge -p '{"spec": {"flexVolumePath": "None"}}'
-```
-
-### Install Calico CSI Driver
-
-{{prodname}} utilizes a Container Storage Interface (CSI) driver to help set up the policy sync API on every node.
-Apply the following manifest to install the Calico CSI driver:
-
-```bash
-kubectl apply -f {{manifestsUrl}}/manifests/csi-driver.yaml
-```
-
-### Install Istio
-
-1. Verify [application layer policy requirements](../../getting-started/kubernetes/requirements.mdx#application-layer-policy-requirements).
-2. Install Istio using [installation guide in the project documentation](https://istio.io/v1.15/docs/setup/install/).
-
-```bash
-curl -L https://git.io/getLatestIstio | ISTIO_VERSION=1.15.2 sh -
-cd $(ls -d istio-* --color=never)
-./bin/istioctl install
-```
-
-Next, create the following [PeerAuthentication](https://istio.io/v1.15/docs/reference/config/security/peer_authentication/) policy.
-
-Replace the `namespace` value below with your `rootNamespace` value, if it is customized in your environment.
-
-```bash
-kubectl create -f - <
-```
-
-### Update Istio sidecar injector
-
-```bash
-curl {{manifestsUrl}}/manifests/alp/istio-inject-configmap-1.15.yaml -o istio-inject-configmap.yaml
-kubectl patch configmap -n istio-system istio-sidecar-injector --patch "$(cat istio-inject-configmap.yaml)"
-```
-
-[View sample manifest]({{manifestsUrl}}/manifests/alp/istio-inject-configmap-1.15.yaml)
-
-
-
-
-```bash
-curl {{manifestsUrl}}/manifests/alp/istio-inject-configmap-1.10.yaml -o istio-inject-configmap.yaml
-kubectl patch configmap -n istio-system istio-sidecar-injector --patch "$(cat istio-inject-configmap.yaml)"
-```
-
-[View sample manifest]({{manifestsUrl}}/manifests/alp/istio-inject-configmap-1.10.yaml)
-
-
-
-
-### Add Calico authorization services to the mesh
-
-Apply the following manifest to configure Istio to query {{prodname}} for application layer policy authorization decisions.
-
-This applies to Istio v1.15.x and v1.10.x:
-
-```bash
-kubectl apply -f {{manifestsUrl}}/manifests/alp/istio-app-layer-policy-envoy-v3.yaml
-```
-
-[View sample manifest]({{manifestsUrl}}/manifests/alp/istio-app-layer-policy-envoy-v3.yaml)
-
-### Add namespace labels
-
-You can control enforcement of application layer policy on a per-namespace basis. However, this only works on pods that are started with the Envoy and {{prodname}} Dikastes sidecars (as noted in the step, Update Istio sidecar injector). Pods that do not have the {{prodname}} sidecars enforce only standard {{prodname}} network policy.
-
-To enable Istio and application layer policy in a namespace, add the label `istio-injection=enabled`.
-
-```bash
-kubectl label namespace <your namespace> istio-injection=enabled
-```
-
-If the namespace already has pods in it, you must recreate them for this to take effect.
-
-:::note
-
-Envoy must be able to communicate with the `istio-pilot.istio-system` service. If you apply any egress policies to your pods, you _must_ enable access.
-
-```bash
-kubectl apply -f {{tutorialFilesURL}}/allow-istio-pilot.yaml
-```
-
-:::
-
-## Additional resources
-
-- [Enforce network policy using Istio tutorial](enforce-policy-istio.mdx)
-- [Use HTTP methods and paths in policy rules](http-methods.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/istio/enforce-policy-istio.mdx b/calico_versioned_docs/version-3.25/network-policy/istio/enforce-policy-istio.mdx
deleted file mode 100644
index eb52f8465c..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/istio/enforce-policy-istio.mdx
+++ /dev/null
@@ -1,232 +0,0 @@
----
-description: Learn how Calico integrates with Istio to provide fine-grained access control using Calico network policies enforced within the service mesh and network layer.
----
-
-# Enforce Calico network policy using Istio (tutorial)
-
-This tutorial sets up a microservices application, then demonstrates how to use {{prodname}} application layer policy to mitigate some common threats.
-
-:::note
-
-This tutorial was verified using Istio v1.10.2. Some content may not apply to the latest Istio version.
-
-:::
-
-## Prerequisites
-
-1. Build a Kubernetes cluster.
-2. Install {{prodname}} on Kubernetes:
-
-- If {{prodname}} is not installed on Kubernetes, see [Calico on Kubernetes](../../getting-started/kubernetes/quickstart.mdx).
-- If {{prodname}} is already installed on Kubernetes, verify that [Calico networking](../../networking/index.mdx) (or a non-Calico CNI) and {{prodname}} network policy are installed.
-
-3. Install the [calicoctl command line tool](../../operations/calicoctl/install.mdx).
- **Note**: Ensure calicoctl is configured to connect with your datastore.
-4. [Enable application layer policy](app-layer-policy.mdx).
- **Note**: Label the default namespace for the Istio sidecar injection (`istio-injection=enabled`).
- `kubectl label namespace default istio-injection=enabled`
-
-### Install the demo application
-
-We will use a simple microservice application to demonstrate {{prodname}}
-application layer policy. The [YAO Bank](https://github.com/projectcalico/yaobank) application creates a
-customer-facing web application, a microservice that serves up account
-summaries, and an [etcd](https://github.com/coreos/etcd) datastore.
-
-```bash
-kubectl apply -f {{tutorialFilesURL}}/10-yaobank.yaml
-```
-
-:::note
-
-You can also
-[view the manifest in your browser](/files/10-yaobank.yaml).
-
-:::
-
-Verify that the application pods have been created and are ready.
-
-```bash
-kubectl get pods
-```
-
-When the demo application has come up, you will see three pods.
-
-```
-NAME READY STATUS RESTARTS AGE
-customer-2809159614-qqfnx 3/3 Running 0 21h
-database-1601951801-m4w70 3/3 Running 0 21h
-summary-2817688950-g1b3n 3/3 Running 0 21h
-```
-
-### Determining ingress IP and port
-
-You will use the `istio-ingressgateway` service to access the YAO Bank
-application. Determine your ingress host and port [following the Istio instructions](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#determining-the-ingress-ip-and-ports). Once you have the `INGRESS_HOST` and `INGRESS_PORT` variables set, you can
-set the `GATEWAY_URL` as follows.
-
-```bash
-export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT
-```
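-
-For reference, on clusters where the ingress gateway is exposed through a LoadBalancer service, those variables are typically populated along these lines (adapted from the upstream Istio guide; NodePort setups differ):
-
-```bash
-export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
-export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].port}')
-```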
-
-Point your browser to `http://$GATEWAY_URL/` to confirm the YAO Bank application is functioning
-correctly. It may take several minutes for all the services to come up and respond, during which
-time you may see 404 or 500 errors.
-
-### The need for policy
-
-Although {{prodname}} & Istio are running in the cluster, we have not defined any authentication
-policy. Istio was configured to mutually authenticate traffic between the pods in your application,
-so only connections with Istio-issued certificates are allowed, and all inter-pod traffic is encrypted with TLS. That's already a big step in the right direction.
-
-But, let's consider some deficiencies in this security architecture:
-
-- All incoming connections from workloads in the Istio mesh are equally trusted
-- Possession of a key & certificate pair is the _only_ access credential considered.
-
-To understand why these might be a problem, let's take them one at a time.
-
-#### Trusting workloads
-
-Trusting connections from any workload in the Istio mesh is a poor security architecture because,
-like Kubernetes, Istio is designed to host multiple applications. Some of those applications may
-not be as trusted as others. They may be operated by different users or teams with wildly different
-security requirements. We don't want our secure financial application microservices accessible from
-some hacky prototype another developer is cooking up.
-
-Even within our own application, the best practice is to limit access as much
-as possible. Only pods that need access to a service should get it. Consider
-the YAO Bank application. The customer web service does not need, and should
-not have direct access to the backend database. The customer web service needs
-to directly interact with clients outside the cluster, some of whom may be
-malicious. Unfortunately, vulnerabilities in web applications are all too
-common. For example, an [unpatched vulnerability in Apache Struts](https://nvd.nist.gov/vuln/detail/CVE-2017-5638) is what allowed
-attackers their initial access into the Equifax network where they then
-launched a devastating attack to steal millions of people's financial
-information.
-
-Imagine what would happen if an attacker were to gain control of the customer web pod in our
-application. Let's simulate this by executing a remote shell inside that pod.
-
-```bash
-kubectl exec -ti customer-<pod ID> -c customer -- bash
-```
-
-Notice that from here, we get direct access to the backend database. For example, we can list all the entries in the database like this:
-
-```bash
-curl http://database:2379/v2/keys?recursive=true | python -m json.tool
-```
-
-(Piping to `python -m json.tool` nicely formats the output.)
-
-#### Single-factor authentication
-
-The possession of a key and certificate pair is a very strong assertion that a
-connection is authentic because it is based on cryptographic proofs that are
-believed to be nearly impossible to forge. When we authenticate connections
-this way we can say with extremely high confidence that the party on the other
-end is in possession of the corresponding key. However, this is only a proxy
-for what we actually want to be confident of: that the party on the other end
-really is the authorized workload we want to communicate with. Keeping the
-private key a secret is vital to this confidence, and occasionally attackers
-can find ways to trick applications into giving up secrets they should not.
-For example, the [Heartbleed](https://owasp.org/www-community/vulnerabilities/Heartbleed_Bug) vulnerability in OpenSSL allowed attackers to
-trick an affected application into reading out portions of its memory,
-compromising private keys.
-
-#### Network policy
-
-We can mitigate both of the above deficiencies with a {{prodname}} policy.
-
-```bash
-wget {{tutorialFilesURL}}/30-policy.yaml
-calicoctl create -f 30-policy.yaml
-```
-
-:::note
-
-You can also
-[view the manifest in your browser](/files/30-policy.yaml).
-
-:::
-
-Let's examine this policy piece by piece. It consists of three policy objects, one for each
-microservice.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: customer
-spec:
- selector: app == 'customer'
- ingress:
- - action: Allow
- http:
- methods: ['GET']
- egress:
- - action: Allow
-```
-
-This policy protects the customer web app. Since this application is customer facing, we do not
-restrict what can communicate with it. We do, however, restrict its communications to HTTP `GET`
-requests.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: summary
-spec:
- selector: app == 'summary'
- ingress:
- - action: Allow
- source:
- serviceAccounts:
- names: ['customer']
- egress:
- - action: Allow
-```
-
-The second policy protects the account summary microservice. We know the only consumer of this
-service is the customer web app, so we restrict the source of incoming connections to the service
-account for the customer web app.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: database
-spec:
- selector: app == 'database'
- ingress:
- - action: Allow
- source:
- serviceAccounts:
- names: ["summary"]
- egress:
- - action: Allow
-```
-
-The third policy protects the database. Only the summary microservice should have direct access to
-the database.
-
-Let's verify our policy is working as intended. First, return to your browser and refresh to
-ensure policy enforcement has not broken the application.
-
-Next, return to the customer web app. Recall that we simulated an attacker gaining control of that
-pod by executing a remote shell inside it.
-
-```bash
-kubectl exec -ti customer-<pod ID> -c customer -- bash
-```
-
-Repeat our attempt to access the database.
-
-```bash
-curl -I http://database:2379/v2/keys?recursive=true
-```
-
-We have left out the JSON formatting because we do not expect to get a valid JSON response. This
-time we should get a `403 Forbidden` response. Only the account summary microservice has database
-access according to our policy.
diff --git a/calico_versioned_docs/version-3.25/network-policy/istio/http-methods.mdx b/calico_versioned_docs/version-3.25/network-policy/istio/http-methods.mdx
deleted file mode 100644
index 4d9b51bf66..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/istio/http-methods.mdx
+++ /dev/null
@@ -1,47 +0,0 @@
----
-description: Create a Calico network policy for Istio-enabled apps to restrict ingress traffic matching HTTP methods or paths.
----
-
-# Use HTTP methods and paths in policy rules
-
-## Big picture
-
-Use Calico network policy for Istio-enabled apps to restrict ingress traffic that matches HTTP methods or paths.
-
-## Value
-
-Istio is ideal for applying policy for operational goals and for security that operates at the application layer. However, for security goals inside and outside the cluster, Calico network policy is required. Using special Calico network policy designed for Istio-enabled apps, you can restrict ingress traffic inside and outside pods using HTTP methods (for example, GET requests).
-
-## Concepts
-
-### HTTP match criteria: ingress traffic only
-
-Calico network policy supports restricting traffic based on HTTP methods and paths only for ingress traffic.
-
-## Before you begin...
-
-[Enable application layer policy](app-layer-policy.mdx)
-
-## How to
-
-**Restrict ingress traffic using HTTP match criteria**
-
-In the following example, the trading app is allowed ingress traffic only for HTTP GET requests that match the exact path **/projects/calico**, or that begin with the prefix **/users**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: customer
-spec:
- selector: app == 'tradingapp'
- ingress:
- - action: Allow
- http:
- methods: ['GET']
- paths:
- - exact: '/projects/calico'
- - prefix: '/users'
- egress:
- - action: Allow
-```
diff --git a/calico_versioned_docs/version-3.25/network-policy/istio/index.mdx b/calico_versioned_docs/version-3.25/network-policy/istio/index.mdx
deleted file mode 100644
index 26453af30c..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/istio/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Configure the Calico "application layer policy" with application layer-specific attributes for Istio service mesh.
-hide_table_of_contents: true
----
-
-# Policy for Istio
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/network-policy/non-privileged.mdx b/calico_versioned_docs/version-3.25/network-policy/non-privileged.mdx
deleted file mode 100644
index bd3c30488b..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/non-privileged.mdx
+++ /dev/null
@@ -1,95 +0,0 @@
----
-description: Run long-lived Calico components without root or system admin privileges.
----
-
-# Run Calico node as non-privileged and non-root
-
-## Big picture
-
-Run long-lived {{prodname}} components in non-privileged and non-root containers.
-
-## Value
-
-Running {{prodname}} in non-privileged and non-root mode is an option for users who
-want to secure {{prodname}} as much as possible, and who do not care about
-{{prodname}} features beyond the basic {{prodname}} networking and network policy.
-The tradeoff for more security is the added overhead of managing {{prodname}} networking yourself.
-For example, {{prodname}} can no longer correct misconfigurations caused
-by other components within your cluster, and support for new features is limited.
-
-## Concepts
-
-To run {{prodname}} as securely as possible, long-running {{prodname}} components
-(for example calico/node), can be run without privileged and root permissions in their respective
-containers. Note that to set up these components, the init containers still need to run with
-privileged and root permissions, but the risk to cluster security is minimal because of the
-ephemeral nature of init containers.
-
-## Supported
-
-- Operator installation only.
-
-## Unsupported
-
-- {{prodname}} Enterprise
-- eBPF dataplane
-- WorkloadSourceSpoofing felix option and the related `cni.projectcalico.org/allowedSourcePrefixes` annotation
-
-:::note
-
-Support for features added after Calico v3.21 is not guaranteed.
-
-:::
-
-## How to
-
-1. Follow the Tigera {{prodname}} operator [installation instructions](../getting-started/kubernetes/quickstart.mdx).
- If you have already installed the operator, skip to the next step.
-
-1. Edit the {{prodname}} installation to set the `nonPrivileged` field to `Enabled`.
-
- ```
- kubectl edit installation default
- ```
-
- Your installation resource should look similar to the following:
-
- ```yaml
- apiVersion: operator.tigera.io/v1
- kind: Installation
- metadata:
- name: default
- spec:
- calicoNetwork:
- bgp: Enabled
- hostPorts: Enabled
- ipPools:
- - blockSize: 26
- cidr: 192.168.0.0/16
- encapsulation: VXLANCrossSubnet
- natOutgoing: Enabled
- nodeSelector: all()
- linuxDataplane: Iptables
- multiInterfaceMode: None
- nodeAddressAutodetectionV4:
- firstFound: true
- cni:
- ipam:
- type: Calico
- type: Calico
- controlPlaneReplicas: 2
- flexVolumePath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/
- nodeUpdateStrategy:
- rollingUpdate:
- maxUnavailable: 1
- type: RollingUpdate
- nonPrivileged: Enabled
- variant: Calico
- ```
-
-1. The `calico-node` pods in the `calico-system` namespace should now restart. Verify that they restart properly.
- ```
- watch kubectl get pods -n calico-system
- ```
-
-{{prodname}} should now be running `calico-node` in non-privileged and non-root containers.
diff --git a/calico_versioned_docs/version-3.25/network-policy/policy-rules/external-ips-policy.mdx b/calico_versioned_docs/version-3.25/network-policy/policy-rules/external-ips-policy.mdx
deleted file mode 100644
index fc5ceeefae..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/policy-rules/external-ips-policy.mdx
+++ /dev/null
@@ -1,112 +0,0 @@
----
-description: Limit egress and ingress traffic using IP address either directly within Calico network policy or managed as Calico network sets.
----
-
-# Use external IPs or networks rules in policy
-
-## Big picture
-
-Use {{prodname}} network policy to limit traffic to/from external non-{{prodname}} workloads or networks.
-
-## Value
-
-Modern applications often integrate with third-party APIs and SaaS services that live outside Kubernetes clusters. To securely enable access to those integrations, network security teams must be able to limit IP ranges for egress and ingress traffic to workloads. This includes using IP lists or ranges to deny-list bad actors or embargoed countries.
-
-Using {{prodname}} network policy, you can define IP addresses/CIDRs directly in policy to limit traffic to external networks. Or using {{prodname}} network sets, you can easily scale out by using the same set of IPs in multiple policies.
-
-## Concepts
-
-### IP addresses/CIDRs
-
-IP addresses and CIDRs can be specified directly in both Kubernetes and {{prodname}} network policy rules. {{prodname}} network policy supports IPV4 and IPV6 CIDRs.
-
-### Network sets
-
-A **network set** resource is an arbitrary set of IP subnetworks/CIDRs that can be matched by standard label selectors in Kubernetes or {{prodname}} network policy. This is useful to reference a set of IP addresses using a selector from a namespaced network policy resource. It is typically used when you want to scale/reuse the same set of IP addresses in policy.
-
-A **global network set** resource is similar, but can be selected only by {{prodname}} global network policies.
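-
-For example, a namespaced **NetworkSet** might look like the following (the name, namespace, label, and CIDRs are purely illustrative):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkSet
-metadata:
-  name: external-apis
-  namespace: production
-  labels:
-    role: external-apis
-spec:
-  nets:
-    - 198.51.100.0/28
-    - 203.0.113.64/26
-```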
-
-## How to
-
-- [Limit traffic to or from external networks, IPs in network policy](#limit-traffic-to-or-from-external-networks-ips-in-network-policy)
-- [Limit traffic to or from external networks, global network set](#limit-traffic-to-or-from-external-networks-global-network-set)
-
-### Limit traffic to or from external networks, IPs in network policy
-
-In the following example, a {{prodname}} NetworkPolicy allows egress traffic from pods with the label **color: red**, if it goes to an IP address in the 192.0.2.0/24 CIDR block.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
- name: allow-egress-external
- namespace: production
-spec:
- selector: color == 'red'
- types:
- - Egress
- egress:
- - action: Allow
- destination:
- nets:
- - 192.0.2.0/24
-```
-
-### Limit traffic to or from external networks, global network set
-
-In this example, we use a {{prodname}} **GlobalNetworkSet** and reference it in a **GlobalNetworkPolicy**.
-
-In the following example, a {{prodname}} **GlobalNetworkSet** deny-lists the CIDR ranges 192.0.2.55/32 and 203.0.113.0/24:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkSet
-metadata:
- name: ip-protect
- labels:
- ip-deny-list: 'true'
-spec:
- nets:
- - 192.0.2.55/32
- - 203.0.113.0/24
-```
-
-Next, we create two {{prodname}} **GlobalNetworkPolicy** objects. The first is a high “order” policy that allows traffic as a default for things that don’t match our second policy, which is low “order” and uses the **GlobalNetworkSet** label as a selector to deny ingress traffic (the **ip-deny-list** label added in the previous step). In the label selector, we also include the term **!has(projectcalico.org/namespace)**, which prevents this policy from matching pods or NetworkSets that also have this label. To more quickly enforce the denial of forwarded traffic to the host at the packet level, use the **doNotTrack** and **applyOnForward** options.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: forward-default-allow
-spec:
- selector: apply-ip-protect == 'true'
- order: 1000
- doNotTrack: true
- applyOnForward: true
- types:
- - Ingress
- ingress:
- - action: Allow
----
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: ip-protect
-spec:
- selector: apply-ip-protect == 'true'
- order: 0
- doNotTrack: true
- applyOnForward: true
- types:
- - Ingress
- ingress:
- - action: Deny
- source:
- selector: ip-deny-list == 'true' && !has(projectcalico.org/namespace)
-```
-
-## Additional resources
-
-- To understand how to use global network sets to mitigate common threats, see [Defend against DoS attacks](../extreme-traffic/defend-dos-attack.mdx)
-- [Global network sets](../../reference/resources/globalnetworkset.mdx)
-- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/policy-rules/icmp-ping.mdx b/calico_versioned_docs/version-3.25/network-policy/policy-rules/icmp-ping.mdx
deleted file mode 100644
index 22a18b75fa..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/policy-rules/icmp-ping.mdx
+++ /dev/null
@@ -1,130 +0,0 @@
----
-description: Control where ICMP/ping is used by creating a Calico network policy to allow and deny ICMP/ping messages for workloads and host endpoints.
----
-
-# Use ICMP/ping rules in policy
-
-## Big picture
-
-Use {{prodname}} network policy to allow and deny ICMP/ping messages.
-
-## Value
-
-The **Internet Control Message Protocol (ICMP)** provides valuable network diagnostic functions, but it can also be used maliciously. Attackers can use
-it to learn about your network, or for DoS attacks. Using {{prodname}} network policy, you can control where ICMP is used. For example, you can:
-
-- Allow ICMP ping, but only for workloads, host endpoints (or both)
-- Allow ICMP for pods launched by operators for diagnostic purposes, but block other uses
-- Temporarily enable ICMP to diagnose a problem, then disable it after the problem is resolved
-- Deny/allow ICMPv4 and/or ICMPv6
-
-## Concepts
-
-### ICMP packet type and code
-
-{{prodname}} network policy also lets you deny and allow ICMP traffic based on specific types and codes. For example, you can specify ICMP type 5, code 2 to match specific ICMP redirect packets.
-
-For details, see [ICMP type and code](https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages).
-
-## How to
-
-- [Deny all ICMP, all workloads and host endpoints](#deny-all-icmp-all-workloads-and-host-endpoints)
-- [Allow ICMP ping, all workloads and host endpoints](#allow-icmp-ping-all-workloads-and-host-endpoints)
-- [Allow ICMP matching protocol type and code, all Kubernetes pods](#allow-icmp-matching-protocol-type-and-code-all-kubernetes-pods)
-
-### Deny all ICMP, all workloads and host endpoints
-
-In this example, we introduce a "deny all ICMP" **GlobalNetworkPolicy**.
-
-This policy **selects all workloads and host endpoints**. It enables a default deny for all workloads and host endpoints, in addition to the explicit ICMP deny rules specified in the policy.
-
-If your ultimate goal is to allow some traffic, have your regular "allow" policies in place before applying a global deny-all ICMP traffic policy.
-
-In this example, all workloads and host endpoints are blocked from sending or receiving **ICMPv4** and **ICMPv6** messages.
-
-If **ICMPv6** messages are not used in your deployment, it is still good practice to deny them specifically as shown below.
-
-In any "deny-all" {{prodname}} network policy, be sure to specify a lower order (**order:200**) than regular policies that might allow traffic.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: block-icmp
-spec:
- order: 200
- selector: all()
- types:
- - Ingress
- - Egress
- ingress:
- - action: Deny
- protocol: ICMP
- - action: Deny
- protocol: ICMPv6
- egress:
- - action: Deny
- protocol: ICMP
- - action: Deny
- protocol: ICMPv6
-```
-
-### Allow ICMP ping, all workloads and host endpoints
-
-In this example, workloads and host endpoints can receive **ICMPv4 type 8** and **ICMPv6 type 128** ping requests that come from other workloads and host endpoints.
-
-All other traffic may be allowed by other policies. If traffic is not explicitly allowed, it will be denied by default.
-
-The policy applies only to **ingress** traffic. (Egress traffic is not affected, and default deny is not enforced for egress.)
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: allow-ping-in-cluster
-spec:
- selector: all()
- types:
- - Ingress
- ingress:
- - action: Allow
- protocol: ICMP
- source:
- selector: all()
- icmp:
-        type: 8 # Ping request
- - action: Allow
- protocol: ICMPv6
- source:
- selector: all()
- icmp:
-        type: 128 # Ping request
-```
-
-### Allow ICMP matching protocol type and code, all Kubernetes pods
-
-In this example, only Kubernetes pods that match the selector **projectcalico.org/orchestrator == 'kubernetes'** are allowed to receive ICMPv4 **type 3, code 1** (host unreachable) messages.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: allow-host-unreachable
-spec:
- selector: projectcalico.org/orchestrator == 'kubernetes'
- types:
- - Ingress
- ingress:
- - action: Allow
- protocol: ICMP
- icmp:
-        type: 3 # Destination unreachable
-        code: 1 # Host unreachable
-```
-
-## Additional resources
-
-For more on the ICMP match criteria, see:
-
-- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
-- [Network policy](../../reference/resources/networkpolicy.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/policy-rules/index.mdx b/calico_versioned_docs/version-3.25/network-policy/policy-rules/index.mdx
deleted file mode 100644
index c035f8e7de..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/policy-rules/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Control traffic to/from endpoints using Calico network policy rules.
-hide_table_of_contents: true
----
-
-# Policy rules
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/network-policy/policy-rules/namespace-policy.mdx b/calico_versioned_docs/version-3.25/network-policy/policy-rules/namespace-policy.mdx
deleted file mode 100644
index 26b83b6738..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/policy-rules/namespace-policy.mdx
+++ /dev/null
@@ -1,89 +0,0 @@
----
-description: Use namespaces and namespace selectors in Calico network policy to group or separate resources. Use network policies to allow or deny traffic to/from pods that belong to specific namespaces.
----
-
-# Use namespace rules in policy
-
-## Big picture
-
-Use {{prodname}} network policies to reference pods in other namespaces.
-
-## Value
-
-Kubernetes namespaces let you group/separate resources to meet a variety of use cases. For example, you can use namespaces to separate development, production, and QA environments, or allow different teams to use the same cluster. You can use namespace selectors in {{prodname}} network policies to allow or deny traffic to/from pods in specific namespaces.
-
-## How to
-
-- [Control traffic to/from endpoints in a namespace](#control-traffic-tofrom-endpoints-in-a-namespace)
-- [Use Kubernetes RBAC to control namespace label assignment](#use-kubernetes-rbac-to-control-namespace-label-assignment)
-
-### Control traffic to/from endpoints in a namespace
-
-In the following example, ingress traffic is allowed to endpoints in the **namespace: production** with label **color: red**, and only from a pod in the same namespace with **color: blue**, on **port 6379**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
- name: allow-tcp-6379
- namespace: production
-spec:
- selector: color == 'red'
- ingress:
- - action: Allow
- protocol: TCP
- source:
- selector: color == 'blue'
- destination:
- ports:
- - 6379
-```
-
-To allow ingress traffic from endpoints in other namespaces, use a **namespaceSelector** in the policy rule. A namespaceSelector matches one or more namespaces based on the labels that are applied on the namespace. In the following example, ingress traffic is also allowed from endpoints with **color: blue** in namespaces with **shape: circle**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
- name: allow-tcp-6379
- namespace: production
-spec:
- selector: color == 'red'
- ingress:
- - action: Allow
- protocol: TCP
- source:
- selector: color == 'blue'
- namespaceSelector: shape == 'circle'
- destination:
- ports:
- - 6379
-```
-
-### Use Kubernetes RBAC to control namespace label assignment
-
-Network policies can be applied to endpoints using selectors that match labels on the endpoint, the endpoint's namespace, or the endpoint's service account. By applying selectors based on the endpoint's namespace, you can use Kubernetes RBAC to control which users can assign labels to namespaces. This allows you to separate groups who can deploy pods from those who can assign labels to namespaces.
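-
-As a rough sketch of the RBAC side (the group and role names are hypothetical), you could reserve namespace writes, and therefore namespace label changes, for a dedicated group:
-
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: namespace-labeler
-rules:
-  - apiGroups: ['']
-    resources: ['namespaces']
-    verbs: ['get', 'list', 'watch', 'patch', 'update']
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: namespace-labeler-binding
-subjects:
-  - kind: Group
-    name: network-admins
-    apiGroup: rbac.authorization.k8s.io
-roleRef:
-  kind: ClusterRole
-  name: namespace-labeler
-  apiGroup: rbac.authorization.k8s.io
-```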
-
-In the following example, users in the development environment can communicate only with pods that have a namespace labeled, `environment == "development"`.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
- name: restrict-development-access
-spec:
- namespaceSelector: 'environment == "development"'
- ingress:
- - action: Allow
- source:
- namespaceSelector: 'environment == "development"'
- egress:
- - action: Allow
- destination:
- namespaceSelector: 'environment == "development"'
-```
-
-## Additional resources
-
-- For more network policies, see [Network policy](../../reference/resources/networkpolicy.mdx)
-- To apply policy to all namespaces, see [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/policy-rules/policy-rules-overview.mdx b/calico_versioned_docs/version-3.25/network-policy/policy-rules/policy-rules-overview.mdx
deleted file mode 100644
index c313a002b6..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/policy-rules/policy-rules-overview.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
----
-description: Define network connectivity for Calico endpoints using policy rules and label selectors.
----
-
-# Basic rules
-
-## Big picture
-
-Use Calico policy rules and label selectors that match Calico endpoints (pods, OpenStack VMs, and host interfaces) to define network connectivity.
-
-## Value
-
-Using label selectors to identify the endpoints (pods, OpenStack VMs, host interfaces) that a policy applies to, or that should be selected by policy rules, means you can define policy without knowing the IP addresses of the endpoints. This is ideal for handling dynamic workloads with ephemeral IPs (such as Kubernetes pods).
-
-## How to
-
-Read [Get started with Calico policy](../get-started/calico-policy/calico-network-policy.mdx) and [Kubernetes policy](../get-started/kubernetes-policy/kubernetes-network-policy.mdx), which cover all the basics of using label selectors in policies to select endpoints the policies apply to, or in policy rules.
-
-## Additional resources
-
-- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
-- [Network policy](../../reference/resources/networkpolicy.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/policy-rules/service-accounts.mdx b/calico_versioned_docs/version-3.25/network-policy/policy-rules/service-accounts.mdx
deleted file mode 100644
index bcd327eadf..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/policy-rules/service-accounts.mdx
+++ /dev/null
@@ -1,118 +0,0 @@
----
-description: Use Kubernetes service accounts in policies to validate cryptographic identities and/or manage RBAC controlled high-priority rules across teams.
----
-
-# Use service accounts rules in policy
-
-## Big picture
-
-Use {{prodname}} network policy to allow/deny traffic for Kubernetes service accounts.
-
-## Value
-
-Using {{prodname}} network policy, you can leverage Kubernetes service accounts with RBAC for flexible control over how policies are applied in a cluster. For example, the security team can have RBAC permissions to:
-
-- Control which service accounts the developer team can use within a namespace
-- Write high-priority network policies for those service accounts (that the developer team cannot override)
-
-The network security team can maintain full control of security, while selectively allowing developer operations where it makes sense.
-
-Using **Istio-enabled apps** with {{prodname}} network policy, the cryptographic identity associated with the service account is checked (along with the network identity) to achieve two-factor authentication.
-
-## Concepts
-
-### Use smallest set of permissions required
-
-Operations on service accounts are controlled by RBAC, so you can grant permissions only to trusted entities (code and/or people) to create, modify, or delete service accounts. To perform any operation in a workload, clients are required to authenticate with the Kubernetes API server.
-
-If you do not explicitly assign a service account to a pod, it uses the default ServiceAccount in the namespace.
-
-You should not grant broad permissions to the default service account for a namespace. If an application needs access to the Kubernetes API, create separate service accounts with the smallest set of permissions required.
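-
-For example, a minimal sketch of a dedicated service account assigned to a pod (the names and image are illustrative):
-
-```yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: api-service
-  namespace: prod-engineering
----
-apiVersion: v1
-kind: Pod
-metadata:
-  name: api-0
-  namespace: prod-engineering
-spec:
-  serviceAccountName: api-service
-  containers:
-    - name: api
-      image: example.com/api:1.0 # illustrative image
-```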
-
-### Service account labels
-
-Like all other Kubernetes objects, service accounts have labels. You can use labels to create ‘groups’ of service accounts. {{prodname}} network policy lets you select workloads by their service account using:
-
-- An exact match on service account name
-- A service account label selector expression
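-
-For example, assuming a service account named `web-frontend-sa` in the `prod-engineering` namespace, you might group it with a label like this:
-
-```bash
-kubectl label serviceaccount web-frontend-sa -n prod-engineering app=web-frontend
-```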
-
-## Before you begin...
-
-Configure unique Kubernetes service accounts for your applications.
-
-## How to
-
-- [Limit ingress traffic for workloads by service account name](#limit-ingress-traffic-for-workloads-by-service-account-name)
-- [Limit ingress traffic for workloads by service account label](#limit-ingress-traffic-for-workloads-by-service-account-label)
-- [Use Kubernetes RBAC to control service account label assignment](#use-kubernetes-rbac-to-control-service-account-label-assignment)
-
-### Limit ingress traffic for workloads by service account name
-
-In the following example, ingress traffic is allowed from any workload whose service account matches the names **api-service** or **user-auth-service**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: demo-calico
-  namespace: prod-engineering
-spec:
-  ingress:
-    - action: Allow
-      source:
-        serviceAccounts:
-          names:
-            - api-service
-            - user-auth-service
-  selector: 'app == "db"'
-```
-
-### Limit ingress traffic for workloads by service account label
-
-In the following example, ingress traffic is allowed from any workload whose service account matches the label selector, **app == web-frontend**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: allow-web-frontend
-  namespace: prod-engineering
-spec:
-  ingress:
-    - action: Allow
-      source:
-        serviceAccounts:
-          selector: 'app == "web-frontend"'
-  selector: 'app == "db"'
-```
-
-### Use Kubernetes RBAC to control service account label assignment
-
-Network policies can be applied to endpoints using selectors that match labels on the endpoint, the endpoint's namespace, or the endpoint's service account. By applying selectors based on the endpoint's service account, you can use Kubernetes RBAC to control which users can assign labels to service accounts. This allows you to separate groups who can deploy pods from those who can assign labels to service accounts.
-
-In the following example, pods with an intern service account can communicate only with pods with service accounts labeled, `role: intern`.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: restrict-intern-access
-  namespace: prod-engineering
-spec:
-  serviceAccountSelector: 'role == "intern"'
-  ingress:
-    - action: Allow
-      source:
-        serviceAccounts:
-          selector: 'role == "intern"'
-  egress:
-    - action: Allow
-      destination:
-        serviceAccounts:
-          selector: 'role == "intern"'
-```
-
-## Additional resources
-
-- [Network policy](../../reference/resources/networkpolicy.mdx)
-- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/policy-rules/service-policy.mdx b/calico_versioned_docs/version-3.25/network-policy/policy-rules/service-policy.mdx
deleted file mode 100644
index ba2220ae01..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/policy-rules/service-policy.mdx
+++ /dev/null
@@ -1,119 +0,0 @@
----
-description: Use Kubernetes Service names in policy rules.
----
-
-# Use service rules in policy
-
-## Big picture
-
-Use {{prodname}} network policy to allow/deny traffic for Kubernetes services.
-
-## Value
-
-Using {{prodname}} network policy, you can leverage Kubernetes Service names to easily define access to Kubernetes services. Using service names in policy enables you to:
-
-- Allow or deny access to the Kubernetes API service.
-- Reference port information already declared by the application, making it easier to keep policy up-to-date as application requirements change.
-
-## How to
-
-- [Allow access to the Kubernetes API for a specific namespace](#allow-access-to-the-kubernetes-api-for-a-specific-namespace)
-- [Allow access to Kubernetes DNS for the entire cluster](#allow-access-to-kubernetes-dns-for-the-entire-cluster)
-- [Allow access from a specified service](#allow-access-from-a-specified-service)
-
-### Allow access to the Kubernetes API for a specific namespace
-
-In the following example, egress traffic is allowed to the `kubernetes` service in the `default` namespace for all pods in the namespace `my-app`. This service is the typical
-access point for the Kubernetes API server.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: allow-api-access
-  namespace: my-app
-spec:
-  selector: all()
-  egress:
-    - action: Allow
-      destination:
-        services:
-          name: kubernetes
-          namespace: default
-```
-
-Endpoint addresses and ports to allow will be automatically detected from the service.
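-
-To confirm what the rule will resolve to, you can inspect the service and its endpoints directly (a quick check, not required by the policy):
-
-```bash
-kubectl get service kubernetes -n default
-kubectl get endpoints kubernetes -n default
-```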
-
-### Allow access to Kubernetes DNS for the entire cluster
-
-In the following example, a GlobalNetworkPolicy is used to select all pods in the cluster to apply a rule which ensures
-all pods can access the Kubernetes DNS service.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-kube-dns
-spec:
-  selector: all()
-  egress:
-    - action: Allow
-      destination:
-        services:
-          name: kube-dns
-          namespace: kube-system
-```
-
-:::note
-
-This policy also enacts a default-deny behavior for all pods, so make sure any other required application traffic is allowed by a policy.
-
-:::
-
-### Allow access from a specified service
-
-In the following example, ingress traffic is allowed from the `frontend-service` service in the `frontend` namespace for all pods in the namespace `backend`.
-This allows all pods that back the `frontend-service` service to send traffic to all pods in the `backend` namespace.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: allow-frontend-service-access
-  namespace: backend
-spec:
-  selector: all()
-  ingress:
-    - action: Allow
-      source:
-        services:
-          name: frontend-service
-          namespace: frontend
-```
-
-We can also further specify the ports that the `frontend-service` service is allowed to access. The following example limits access from the `frontend-service`
-service to port 80.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: allow-frontend-service-access
-  namespace: backend
-spec:
-  selector: all()
-  ingress:
-    - action: Allow
-      protocol: TCP
-      source:
-        services:
-          name: frontend-service
-          namespace: frontend
-      destination:
-        ports: [80]
-```
-
-## Additional resources
-
-- [Network policy](../../reference/resources/networkpolicy.mdx)
-- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/services/index.mdx b/calico_versioned_docs/version-3.25/network-policy/services/index.mdx
deleted file mode 100644
index 9a8084c99b..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/services/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Apply Calico policy to Kubernetes node ports, and to services that are exposed externally as cluster IPs.
-hide_table_of_contents: true
----
-
-# Policy for Kubernetes services
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/network-policy/services/kubernetes-node-ports.mdx b/calico_versioned_docs/version-3.25/network-policy/services/kubernetes-node-ports.mdx
deleted file mode 100644
index c870f71de1..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/services/kubernetes-node-ports.mdx
+++ /dev/null
@@ -1,135 +0,0 @@
----
-description: Restrict access to Kubernetes node ports using Calico global network policy. Follow the steps to secure the host, the node ports, and the cluster.
----
-
-# Apply Calico policy to Kubernetes node ports
-
-## Big picture
-
-Restrict access to node ports to specific external clients.
-
-## Value
-
-Exposing services to external clients using node ports is a standard Kubernetes feature. However, if you want to restrict access to node ports to specific external clients, you need to use Calico global network policy.
-
-## Concepts
-
-### Network policy with preDNAT field
-
-In a Kubernetes cluster, kube-proxy will DNAT a request to the node's port and IP address to one of the pods that backs the service. For Calico global network policy to both allow normal ingress cluster traffic and deny other general ingress traffic, it must take effect before DNAT. To do this, you simply add a **preDNAT** field to a Calico global network policy. The preDNAT field:
-
-- Applies before DNAT
-- Applies only to ingress rules
-- Enforces all ingress traffic through a host endpoint, regardless of destination
- The destination can be a locally hosted pod, a pod on another node, or a process running on the host.
-
-## Before you begin...
-
-For services that you want to expose to external clients, configure Kubernetes services with type **NodePort**.
-
-## How to
-
-To securely expose a Kubernetes service to external clients, you must implement all of the following steps.
-
-- [Allow cluster ingress traffic, but deny general ingress traffic](#allow-cluster-ingress-traffic-but-deny-general-ingress-traffic)
-- [Allow local host egress traffic](#allow-local-host-egress-traffic)
-- [Create host endpoints with appropriate network policy](#create-host-endpoints-with-appropriate-network-policy)
-- [Allow ingress traffic to specific node ports](#allow-ingress-traffic-to-specific-node-ports)
-
-### Allow cluster ingress traffic but deny general ingress traffic
-
-In the following example, we create a global network policy (**allow-cluster-internal-ingress-only**) to allow cluster ingress traffic for the nodes’ IP addresses (**1.2.3.4/16**) and for pod IP addresses assigned by Kubernetes (**100.100.100.0/16**). By adding a preDNAT field, Calico global network policy is applied before regular DNAT on the Kubernetes cluster.
-
-In this example, we use the **selector: has(kubernetes-host)** -- so the policy is applicable to any endpoint with a **kubernetes-host** label (but you can easily specify particular nodes).
-
-Finally, when you specify a preDNAT field, you must also add the **applyOnForward: true** field.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-cluster-internal-ingress-only
-spec:
-  order: 20
-  preDNAT: true
-  applyOnForward: true
-  ingress:
-    - action: Allow
-      source:
-        nets: [1.2.3.4/16, 100.100.100.0/16]
-    - action: Deny
-  selector: has(kubernetes-host)
-```
-
-### Allow local host egress traffic
-
-We also need a global network policy to allow egress traffic through each node's external interface. Otherwise, when we define host endpoints for those interfaces, no egress traffic will be allowed from local processes (except for traffic that is allowed by the [Failsafe rules](../../reference/host-endpoints/failsafe.mdx)).
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-outbound-external
-spec:
-  order: 10
-  egress:
-    - action: Allow
-  selector: has(kubernetes-host)
-```
-
-### Create host endpoints with appropriate network policy
-
-In this example, we assume that you have already defined Calico host endpoints with network policy that is appropriate for the cluster. (For example, you wouldn’t want a host endpoint with a “default deny all traffic to/from this host” network policy because that is counter to the goal of allowing/denying specific traffic.) For help, see [host endpoints](../../reference/resources/hostendpoint.mdx).
-
-All of our previously defined global network policies have a selector that makes them applicable to any endpoint with a **kubernetes-host** label, so we will include that label in our definitions. For example, for **eth0** on **node1**:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: HostEndpoint
-metadata:
-  name: node1-eth0
-  labels:
-    kubernetes-host: ingress
-spec:
-  interfaceName: eth0
-  node: node1
-  expectedIPs:
-    - INSERT_IP_HERE
-```
-
-When creating each host endpoint, replace `INSERT_IP_HERE` with the IP address on eth0. The `expectedIPs` field is required so that any selectors within ingress or egress rules can properly match the host endpoint.
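-
-One way to find the address to substitute, assuming you have shell access to the node and eth0 is its external interface:
-
-```bash
-ip -4 addr show dev eth0
-```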
-
-### Allow ingress traffic to specific node ports
-
-Now we can allow external access to the node ports by creating a global network policy with the preDNAT field. In this example, **ingress traffic is allowed** for any host endpoint with **port: 31852**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-nodeport
-spec:
-  preDNAT: true
-  applyOnForward: true
-  order: 10
-  ingress:
-    - action: Allow
-      protocol: TCP
-      destination:
-        selector: has(kubernetes-host)
-        ports: [31852]
-  selector: has(kubernetes-host)
-```
-
-To make the NodePort accessible only through particular nodes, give the nodes a particular label. For example:
-
-```yaml
-nodeport-external-ingress: true
-```
-
-Then, use **nodeport-external-ingress: true** as the selector of the **allow-nodeport** policy, instead of **has(kubernetes-host)**.
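-
-For example, the modified policy might look like the following sketch (the host endpoints you want to expose must carry the `nodeport-external-ingress: true` label):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-nodeport
-spec:
-  preDNAT: true
-  applyOnForward: true
-  order: 10
-  ingress:
-    - action: Allow
-      protocol: TCP
-      destination:
-        selector: nodeport-external-ingress == 'true'
-        ports: [31852]
-  selector: nodeport-external-ingress == 'true'
-```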
-
-## Additional resources
-
-- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
-- [Host endpoints](../../reference/resources/hostendpoint.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/services/services-cluster-ips.mdx b/calico_versioned_docs/version-3.25/network-policy/services/services-cluster-ips.mdx
deleted file mode 100644
index ccb10b829c..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/services/services-cluster-ips.mdx
+++ /dev/null
@@ -1,193 +0,0 @@
----
-description: Expose Kubernetes service cluster IPs over BGP using Calico, and restrict who can access them using Calico network policy.
----
-
-# Apply Calico policy to services exposed externally as cluster IPs
-
-## Big picture
-
-Control access to services exposed through clusterIPs that are advertised outside the cluster using BGP.
-
-## Value
-
-{{prodname}} network policy works with standard Kubernetes Services, which allow you to expose services within clusters to external clients in the following ways:
-
-- [Apply policy to Kubernetes nodeports](kubernetes-node-ports.mdx)
-- Using cluster IPs over BGP (described in this article)
-
-## Concepts
-
-### Advertise cluster IPs outside the cluster
-
-A **cluster IP** is a virtual IP address that represents a Kubernetes Service. Kube Proxy on each host translates the clusterIP into a pod IP for one of the pods backing the service, acting as a reverse proxy and load balancer.
-
-Cluster IPs were originally designed for use within the Kubernetes cluster. {{prodname}} allows you to advertise Cluster IPs externally -- so external clients can use them to access services hosted inside the cluster. This means that {{prodname}} ingress policy can be applied at **one or both** of the following locations:
-
-- Host interface, when the traffic destined for the clusterIP first ingresses the cluster
-- Pod interface of the backend pod
-
-### Traffic routing: local versus cluster modes
-
-{{prodname}} implements [Kubernetes service external traffic policy](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip), which controls whether external traffic is routed to node-local or cluster-wide endpoints. The following table summarizes key differences between these settings. The default is **cluster mode**.
-
-| **Service setting** | **Traffic is load balanced...** | **Pros and cons** | **Required service type** |
-| ------------------------------------------- | --------------------------------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- |
-| **externalTrafficPolicy: Cluster** (default) | Across all nodes in the cluster | Equal distribution of traffic among all pods running a service.<br/>Possible unnecessary network hops between nodes for ingress external traffic.<br/>When packets are rerouted to pods on another node, traffic is SNAT’d (source network address translation).<br/>Destination pod can see the proxying node’s IP address rather than the actual client IP. | **ClusterIP** |
-| **externalTrafficPolicy: Local** | Across the nodes with the endpoints for the service | Avoids extra hops so better for apps that ingress a lot of external traffic.<br/>Traffic is not SNAT’d so actual client IPs are preserved.<br/>Traffic distributed among pods running a service may be imbalanced. | **LoadBalancer** (for cloud providers), or **NodePort** (for node’s static port) |
-
-## Before you begin...
-
-[Configure Calico to advertise cluster IPs over BGP](../../networking/configuring/advertise-service-ips.mdx).
-
-## How to
-
-Selecting which mode to use depends on your goals and resources. At an operational level, **local mode** simplifies policy, but load balancing may be uneven in certain scenarios. **Cluster mode** requires more work to manage clusterIPs, SNAT, and create policies that reference specific IP addresses, but you always get even load balancing.
-
-- [Secure externally exposed cluster IPs, local mode](#secure-externally-exposed-cluster-ips-local-mode)
-- [Secure externally exposed cluster IPs, cluster mode](#secure-externally-exposed-cluster-ips-cluster-mode)
-
-### Secure externally exposed cluster IPs, local mode
-
-Using **local mode**, the original source address of external traffic is preserved, and you can define policy directly using standard {{prodname}} network policy.
-
-1. Create {{prodname}} **NetworkPolicies** or **GlobalNetworkPolicies** that select the same set of pods as your Kubernetes Service.
-1. Add rules to allow the external traffic.
-1. If desired, add rules to allow in-cluster traffic.
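-
-For example, a minimal sketch for a service whose backing pods are labeled `app: frontend` (the label, namespace, and external CIDR below are illustrative):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: allow-external-to-frontend
-  namespace: frontend
-spec:
-  selector: app == 'frontend'
-  ingress:
-    - action: Allow
-      protocol: TCP
-      source:
-        nets:
-          - 50.60.0.0/16 # external clients
-      destination:
-        ports: [80]
-```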
-
-### Secure externally exposed cluster IPs, cluster mode
-
-In the following steps, we define **GlobalNetworkPolicy** and **HostEndpoints**.
-
-#### Step 1: Verify Kubernetes Service manifest
-
-Ensure that your Kubernetes Service manifest explicitly lists the clusterIP; do not allow Kubernetes to automatically assign the clusterIP because you need it for your policies in the following steps.
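-
-For example, a Service manifest along these lines pins the cluster IP that the later policies reference (the name, selector, and address are illustrative; the IP must fall within your cluster's service CIDR):
-
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: svc-a
-  namespace: default
-spec:
-  clusterIP: 10.20.30.40 # referenced as "Cluster IP A" below
-  selector:
-    k8s-svc: svc-a
-  ports:
-    - name: http
-      port: 80
-      protocol: TCP
-    - name: https
-      port: 443
-      protocol: TCP
-```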
-
-#### Step 2: Create global network policy at the host interface
-
-In this step, you create a **GlobalNetworkPolicy** that selects all **host endpoints**. It controls access to the cluster IP and prevents unauthorized clients outside the cluster from accessing it. The hosts then forward only authorized traffic.
-
-**Set policy to allow external traffic for cluster IPs**
-
-Add rules to allow the external traffic for each clusterIP. The following example allows connections to two cluster IPs. Make sure you add the **applyOnForward** and **preDNAT** fields.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-cluster-ips
-spec:
-  selector: k8s-role == 'node'
-  types:
-    - Ingress
-  applyOnForward: true
-  preDNAT: true
-  ingress:
-    # Allow 50.60.0.0/16 to access Cluster IP A
-    - action: Allow
-      source:
-        nets:
-          - 50.60.0.0/16
-      destination:
-        nets:
-          - 10.20.30.40/32 # Cluster IP A
-    # Allow 70.80.90.0/24 to access Cluster IP B
-    - action: Allow
-      source:
-        nets:
-          - 70.80.90.0/24
-      destination:
-        nets:
-          - 10.20.30.41/32 # Cluster IP B
-```
-
-**Add a rule to allow traffic destined for the pod CIDR**
-
-Without this rule, normal pod-to-pod traffic is blocked because the policy applies to forwarded traffic.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-to-pods
-spec:
-  selector: k8s-role == 'node'
-  types:
-    - Ingress
-  applyOnForward: true
-  preDNAT: true
-  ingress:
-    # Allow traffic forwarded to pods
-    - action: Allow
-      destination:
-        nets:
-          - 192.168.0.0/16 # Pod CIDR
-```
-
-**Add a rule to allow traffic destined for all host endpoints**
-
-Or, you can add rules that allow specific host traffic including Kubernetes and {{prodname}}. Without this rule, normal host traffic is blocked.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-traffic-hostendpoints
-spec:
-  selector: k8s-role == 'node'
-  types:
-    - Ingress
-  ingress:
-    # Allow traffic to the node (not nodePorts, TCP)
-    - action: Allow
-      protocol: TCP
-      destination:
-        selector: k8s-role == 'node'
-        notPorts: ["30000:32767"] # nodePort range
-    # Allow traffic to the node (not nodePorts, UDP)
-    - action: Allow
-      protocol: UDP
-      destination:
-        selector: k8s-role == 'node'
-        notPorts: ["30000:32767"] # nodePort range
-```
-
-#### Step 3: Create a global network policy that selects pods
-
-In this step, you create a **GlobalNetworkPolicy** that selects the **same set of pods as your Kubernetes Service**. Add rules that allow host endpoints to access the service ports.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-nodes-svc-a
-spec:
-  selector: k8s-svc == 'svc-a'
-  types:
-    - Ingress
-  ingress:
-    - action: Allow
-      protocol: TCP
-      source:
-        selector: k8s-role == 'node'
-      destination:
-        ports: [80, 443]
-    - action: Allow
-      protocol: UDP
-      source:
-        selector: k8s-role == 'node'
-      destination:
-        ports: [80, 443]
-```
-
-#### Step 4: (Optional) Create network polices or global network policies that allow in-cluster traffic to access the service
-
-#### Step 5: Create HostEndpoints
-
-Create HostEndpoints for the interface of each host that will receive traffic for the clusterIPs. Be sure to label them so they are selected by the policy in Step 2 (Add a rule to allow traffic destined for the pod CIDR), and the rules in Step 3.
-
-In the previous example policies, the label **k8s-role: node** is used to identify these HostEndpoints.
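-
-For example, a host endpoint for eth0 on node1 carrying that label might look like the following sketch (substitute the interface's real IP address):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: HostEndpoint
-metadata:
-  name: node1-eth0
-  labels:
-    k8s-role: node
-spec:
-  interfaceName: eth0
-  node: node1
-  expectedIPs:
-    - INSERT_IP_HERE
-```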
-
-## Additional resources
-
-- [Enable service IP advertisement](../../networking/configuring/advertise-service-ips.mdx)
-- [Defend against DoS attacks](../extreme-traffic/defend-dos-attack.mdx)
-- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
diff --git a/calico_versioned_docs/version-3.25/networking/configuring/advertise-service-ips.mdx b/calico_versioned_docs/version-3.25/networking/configuring/advertise-service-ips.mdx
deleted file mode 100644
index b0217ce8d7..0000000000
--- a/calico_versioned_docs/version-3.25/networking/configuring/advertise-service-ips.mdx
+++ /dev/null
@@ -1,246 +0,0 @@
----
-description: Configure Calico to advertise Kubernetes service cluster IPs and external IPs outside the cluster using BGP.
----
-
-# Advertise Kubernetes service IP addresses
-
-## Big picture
-
-Enable {{prodname}} to advertise Kubernetes service IPs outside a cluster. {{prodname}} supports advertising a service’s cluster IPs and external IPs.
-
-## Value
-
-Typically, Kubernetes service cluster IPs are accessible only within the cluster, so external access to the service requires a dedicated load balancer or ingress controller. In cases where a service’s cluster IP is not routable, the service can be accessed using its external IP.
-
-Just as {{prodname}} supports advertising **pod IPs** over BGP, it also supports advertising Kubernetes **service IPs** outside a cluster over BGP. This avoids the need for a dedicated load balancer. This feature also supports equal cost multi-path (ECMP) load balancing across nodes in the cluster, as well as source IP address preservation for local services when you need more control.
-
-## Concepts
-
-### BGP makes it easy
-
-In Kubernetes, all requests for a service are redirected to an appropriate endpoint (pod) backing that service. Because {{prodname}} uses BGP, external traffic can be routed directly to Kubernetes services by advertising Kubernetes service IPs into the BGP network.
-
-If your deployment is configured to peer with BGP routers outside the cluster, those routers (plus any other upstream places the routers propagate to) can send traffic to a Kubernetes service IP for routing to one of the available endpoints for that service.
-
-### Advertising service IPs: quick glance
-
-{{prodname}} implements the Kubernetes **externalTrafficPolicy** using kube-proxy to direct incoming traffic to a correct pod. Advertisement is handled differently based on the service type that you configure for your service.
-
-| **Service mode** | **Cluster IP advertisement** | **Traffic is...** | Source IP address is... |
-| ----------------- | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------- |
-| Cluster (default) | All nodes in the cluster statically advertise a route to the service CIDR. | Load balanced across nodes in the cluster using ECMP, then forwarded to appropriate pod in the service using SNAT. May incur second hop to another node, but good overall load balancing. | Obscured by SNAT |
-| Local | The nodes with a pod backing the service advertise a specific route (/32 or /128) to the service's IP. | Load balanced across nodes with endpoints for the service. Avoids second hop for LoadBalancer and NodePort type services, traffic may be unevenly load balanced. (Other traffic is load balanced across nodes in the cluster.) | Preserved |
-
-If your {{prodname}} deployment is configured to peer with BGP routers outside the cluster, those routers - plus any further upstream places that those routers propagate to - will be able to send traffic to a Kubernetes service cluster IP, and that traffic is routed to one of the available endpoints for that service.
-
-### Tips for success
-
-- Generally, we recommend using “Local” for the following reasons:
- - If any of your network policy uses rules to match by specific source IP addresses, using Local is the obvious choice because the source IP address is not altered, and the policy will still work.
- - Return traffic is routed directly to the source IP because “Local” services do not require undoing the source NAT (unlike “Cluster” services).
-- Cluster IP advertisement works best with a ToR that supports ECMP. Otherwise, all traffic for a given route is directed to a single node.
-
-## Before you begin...
-
-**Required**
-
-- [Configure BGP peering](bgp.mdx) between {{prodname}} and your network infrastructure
-- For ECMP load balancing to services, the upstream routers must be configured to use BGP multipath.
-- You need at least one external node outside the cluster that acts as a router, route reflector, or ToR that is peered with calico nodes inside the cluster.
-- Services must be configured with the correct service mode (“Cluster” or “Local”) for your implementation. For `externalTrafficPolicy: Local`, the service must be type `LoadBalancer` or `NodePort`.
-
-**Limitations**
-
-- OpenShift, versions 4.5 and 4.6
- There is a [bug](https://github.com/kubernetes/kubernetes/issues/91374) where the source IP is not preserved by NodePort services or traffic via a Service ExternalIP with externalTrafficPolicy:Local.
-
- OpenShift users on v4.5 or v4.6 can use this [workaround to avoid SNAT with ExternalIP](https://docs.openshift.com/container-platform/4.7/nodes/clusters/nodes-cluster-enabling-features.html):
-
- ```
- oc edit featuregates.config.openshift.io cluster
-  spec:
-    customNoUpgrade:
-      enabled:
-        - ExternalPolicyForExternalIP
- ```
-
- Kubernetes users on version v1.18 or v1.19 can enable source IP preservation for NodePort services using the ExternalPolicyForExternalIP feature gate.
-
-  Source IP preservation for NodePort services and ExternalIPs is enabled by default in OpenShift v4.7+ and Kubernetes v1.20+.
-
-## How to
-
-- [Advertise service cluster IP addresses](#advertise-service-cluster-ip-addresses)
-- [Advertise service external IP addresses](#advertise-service-external-ip-addresses)
-- [Advertise service load balancer IP addresses](#advertise-service-load-balancer-ip-addresses)
-- [Exclude certain nodes from advertisement](#exclude-certain-nodes-from-advertisement)
-
-### Advertise service cluster IP addresses
-
-1. Determine the service cluster IP range. (Or ranges, if your cluster is [dual stack](../ipam/ipv6.mdx).)
-
- The range(s) for your cluster can be inferred from the `--service-cluster-ip-range` option passed to the Kubernetes API server. For help, see the [Kubernetes API server reference guide](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/).
-
-1. Check to see if you have a default BGPConfiguration.
-
- ```bash
- calicoctl get bgpconfig default
- ```
-
-1. Based on above results, update or create a BGPConfiguration.
-
- **Update default BGPConfiguration**
- Patch the BGPConfiguration using the following command, using your own service cluster IP CIDR in place of "10.0.0.0/24":
-
- ```bash
- calicoctl patch bgpconfig default --patch \
- '{"spec": {"serviceClusterIPs": [{"cidr": "10.0.0.0/24"}]}}'
- ```
-
- **Create default BGPConfiguration**
- Use the following sample command to create a default BGPConfiguration. Add your CIDR blocks, covering the cluster IPs to be advertised, in the `serviceClusterIPs` field, for example:
-
- ```bash
- calicoctl create -f - < 100).
-
-For a deeper look at common on-premises deployment models, see [Calico over IP Fabrics](../../reference/architecture/design/l2-interconnect-fabric.mdx).
-
-## Before you begin...
-
-[calicoctl](../../operations/calicoctl/install.mdx) must be installed and configured.
-
-## How to
-
-:::note
-
-Significantly changing {{prodname}}'s BGP topology, such as changing from full-mesh to peering with ToRs, may result in temporary loss of pod network connectivity during the reconfiguration process. It is recommended to only make such changes during a maintenance window.
-
-:::
-
-- [Configure a global BGP peer](#configure-a-global-bgp-peer)
-- [Configure a per-node BGP peer](#configure-a-per-node-bgp-peer)
-- [Configure a node to act as a route reflector](#configure-a-node-to-act-as-a-route-reflector)
-- [Disable the default BGP node-to-node mesh](#disable-the-default-bgp-node-to-node-mesh)
-- [Change from node-to-node mesh to route reflectors without any traffic disruption](#change-from-node-to-node-mesh-to-route-reflectors-without-any-traffic-disruption)
-- [View BGP peering status for a node](#view-bgp-peering-status-for-a-node)
-- [Change the default global AS number](#change-the-default-global-as-number)
-- [Change AS number for a particular node](#change-as-number-for-a-particular-node)
-
-### Configure a global BGP peer
-
-Global BGP peers apply to all nodes in your cluster. This is useful if your network topology includes BGP speakers that will be peered with every {{prodname}} node in your deployment.
-
-The following example creates a global BGP peer that configures every {{prodname}} node to peer with **192.20.30.40** in AS **64567**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: my-global-peer
-spec:
-  peerIP: 192.20.30.40
-  asNumber: 64567
-```
-
-### Configure a per-node BGP peer
-
-Per-node BGP peers apply to one or more nodes in the cluster. You can choose which nodes by specifying the node’s name exactly, or using a label selector.
-
-The following example creates a BGPPeer that configures every {{prodname}} node with the label, **rack: rack-1** to peer with **192.20.30.40** in AS **64567**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: rack1-tor
-spec:
-  peerIP: 192.20.30.40
-  asNumber: 64567
-  nodeSelector: rack == 'rack-1'
-```
-
-### Configure a node to act as a route reflector
-
-{{prodname}} nodes can be configured to act as route reflectors. To do this, each node that you want to act as a route reflector must have a cluster ID - typically an unused IPv4 address.
-
-To configure a node to be a route reflector with cluster ID 244.0.0.1, run the following command.
-
-
-
-
-```bash
-kubectl annotate node my-node projectcalico.org/RouteReflectorClusterID=244.0.0.1
-```
-
-
-
-
-```bash
-calicoctl patch node my-node -p '{"spec": {"bgp": {"routeReflectorClusterID": "244.0.0.1"}}}'
-```
-
-
-
-
-Typically, you will want to label this node to indicate that it is a route reflector, allowing it to be easily selected by a BGPPeer resource. You can do this with kubectl. For example:
-
-```bash
-kubectl label node my-node route-reflector=true
-```
-
-Now it is easy to configure route reflector nodes to peer with each other and other non-route-reflector nodes using label selectors. For example:
-
-```yaml
-kind: BGPPeer
-apiVersion: projectcalico.org/v3
-metadata:
-  name: peer-with-route-reflectors
-spec:
-  nodeSelector: all()
-  peerSelector: route-reflector == 'true'
-```
-
-:::note
-
-Adding `routeReflectorClusterID` to a node spec will remove it from the node-to-node mesh immediately, tearing down the
-existing BGP sessions. Adding the BGP peering will bring up new BGP sessions. This will cause a short (about 2 seconds)
-disruption to dataplane traffic of workloads running in the nodes where this happens. To avoid this, make sure no
-workloads are running on the nodes, by provisioning new nodes or by running `kubectl drain` on the node (which may
-itself cause a disruption as workloads are drained).
-
-:::
-
-### Disable the default BGP node-to-node mesh
-
-The default **node-to-node BGP mesh** may be turned off to enable other BGP topologies. To do this, modify the default **BGP configuration** resource.
-
-Run the following command to disable the BGP full-mesh:
-
-```bash
-calicoctl patch bgpconfiguration default -p '{"spec": {"nodeToNodeMeshEnabled": false}}'
-```
-
-:::note
-
-If the default BGP configuration resource does not exist, you need to create it first. See [BGP configuration](../../reference/resources/bgpconfig.mdx) for more information.
-
-:::
-
-:::note
-
-Disabling the node-to-node mesh will break pod networking until/unless you configure replacement BGP peerings using BGPPeer resources.
-You may configure the BGPPeer resources before disabling the node-to-node mesh to avoid pod networking breakage.
-
-:::
-
-### Change from node-to-node mesh to route reflectors without any traffic disruption
-
-Switching from node-to-node BGP mesh to BGP route reflectors involves tearing down BGP sessions and bringing up new ones. This causes a short
-dataplane network disruption (of about 2 seconds) for workloads running on the nodes in the cluster. To avoid this, you may provision
-route reflector nodes and bring their BGP sessions up before tearing down the node-to-node mesh sessions.
-
-Follow these steps to do so:
-
-1. [Provision new nodes to be route reflectors.](#configure-a-node-to-act-as-a-route-reflector) The nodes [should not be schedulable](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)
- and they should have `routeReflectorClusterID` in their spec. These won't be part of the existing
- node-to-node BGP mesh, and will be the route reflectors when the mesh is disabled. These nodes should also have a label like
- `route-reflector` to select them for the BGP peerings. [Alternatively](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/),
- you can drain workloads from existing nodes in your cluster by running `kubectl drain ` to configure them to be route reflectors,
- but this will cause a disruption on the workloads on those nodes as they are drained.
-
-2. Also set up a [BGPPeer](#configure-a-node-to-act-as-a-route-reflector) spec to configure route reflector nodes to peer with each other and other non-route-reflector nodes
- using label selectors.
-
-3. Wait for these peerings to be established. This can be [verified](#view-bgp-peering-status-for-a-node) by running `sudo calicoctl node status` on the nodes. Alternatively, you can create a [`CalicoNodeStatus` resource](../../reference/resources/caliconodestatus.mdx) to get BGP session status for the node.
-
-4. [Disable the BGP node-to-node mesh for the cluster.](#disable-the-default-bgp-node-to-node-mesh)
-
-5. If you did drain workloads from the nodes or created them as unschedulable, mark the nodes as schedulable again (e.g. by running `kubectl uncordon `).
-
-### View BGP peering status for a node
-
-Create a [CalicoNodeStatus resource](../../reference/resources/caliconodestatus.mdx) to monitor BGP session status for the node.
-
-Alternatively, you can run the `calicoctl node status` command on a given node to learn more about its BGP status.
-
-:::note
-
-This command communicates with the local {{prodname}} agent, so you must execute it on the node whose status you are attempting to view.
-
-:::
-
-### Change the default global AS number
-
-By default, all Calico nodes use the 64512 autonomous system, unless a per-node AS has been specified for the node. You can change the global default for all nodes by modifying the default **BGPConfiguration** resource. The following example command sets the global default AS number to **64513**.
-
-```bash
-calicoctl patch bgpconfiguration default -p '{"spec": {"asNumber": "64513"}}'
-```
-
-:::note
-
-If the default BGP configuration resource does not exist, you need to create it first. See [BGP configuration](../../reference/resources/bgpconfig.mdx) for more information.
-
-:::
-
-### Change AS number for a particular node
-
-You can configure an AS for a particular node by modifying the node object using `calicoctl`. For example, the following command changes the node named **node-1** to belong to **AS 64514**.
-
-```bash
-calicoctl patch node node-1 -p '{"spec": {"bgp": {"asNumber": "64514"}}}'
-```
-
-## Additional resources
-
-- [Node resource](../../reference/resources/node.mdx)
-- [BGP configuration resource](../../reference/resources/bgpconfig.mdx)
-- [BGP peer resource](../../reference/resources/bgppeer.mdx)
diff --git a/calico_versioned_docs/version-3.25/networking/configuring/index.mdx b/calico_versioned_docs/version-3.25/networking/configuring/index.mdx
deleted file mode 100644
index d37320632e..0000000000
--- a/calico_versioned_docs/version-3.25/networking/configuring/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Configure Calico networking options, including overlay, non-overlay, BGP, service advertisement, MTU, NAT, and using kube-proxy in IPVS mode.
-hide_table_of_contents: true
----
-
-# Configure Networking
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/networking/configuring/mtu.mdx b/calico_versioned_docs/version-3.25/networking/configuring/mtu.mdx
deleted file mode 100644
index aded7da66e..0000000000
--- a/calico_versioned_docs/version-3.25/networking/configuring/mtu.mdx
+++ /dev/null
@@ -1,142 +0,0 @@
----
-description: Optimize network performance for workloads by configuring the MTU in Calico to best suit your underlying network.
----
-
-# Configure MTU to maximize network performance
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Configure the maximum transmission unit (MTU) for your {{prodname}} environment.
-
-## Value
-
-Optimize network performance for workloads by configuring the MTU in {{prodname}} to best suit your underlying network.
-
-Increasing the MTU can improve performance, and decreasing the MTU can resolve packet loss and fragmentation problems when it is too high.
-
-## Concepts
-
-### MTU and {{prodname}} defaults
-
-The maximum transmission unit (MTU) setting determines the largest packet size that can be transmitted through your network. MTU is configured on the veth attached to each workload, and tunnel devices (if you enable IP in IP, VXLAN, or WireGuard).
-
-In general, maximum performance is achieved by using the highest MTU value that does not cause fragmentation or dropped packets on the path. Maximum bandwidth increases and CPU consumption may drop for a given traffic rate. The improvement is often more significant when pod to pod traffic is being encapsulated (IP in IP, VXLAN, or WireGuard), and splitting and combining such traffic cannot be offloaded to your NICs.
-
-By default, {{prodname}} will auto-detect the correct MTU for your cluster based on node configuration and enabled networking modes. This guide explains how you can override auto-detection
-of MTU by providing an explicit value if needed.
-
-To ensure auto-detection of MTU works correctly, make sure that the correct encapsulation modes are set in your [felix configuration](../../reference/resources/felixconfig.mdx). Disable any unused encapsulations (`vxlanEnabled`, `ipipEnabled`, `wireguardEnabled` and `wireguardEnabledV6`) in your felix configuration to ensure that auto-detection can pick the optimal MTU for your cluster.
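-
-For example, if you are certain VXLAN is not used anywhere in your cluster, you might disable it in the felix configuration with a patch like the following (a sketch only; do not disable an encapsulation your cluster actually relies on):
-
-```bash
-calicoctl patch felixconfiguration default --patch '{"spec": {"vxlanEnabled": false}}'
-```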
-
-## Before you begin...
-
-For help on using IP in IP and/or VXLAN overlays, see [Configure overlay networking](vxlan-ipip.mdx).
-
-For help on using WireGuard encryption, see [Configure WireGuard encryption](../../network-policy/encrypt-cluster-pod-traffic.mdx).
-
-## How to
-
-- [Determine MTU size](#determine-mtu-size)
-- [Configure MTU](#configure-mtu)
-- [View current tunnel MTU values](#view-current-tunnel-mtu-values)
-
-### Determine MTU size
-
-The following table lists common MTU sizes for {{prodname}} environments. Because MTU is a global property of the network path between endpoints, you should set the MTU to the minimum MTU of any path that packets may take.
-
-**Common MTU sizes**
-
-| Network MTU | {{prodname}} MTU | {{prodname}} MTU with IP-in-IP (IPv4) | {{prodname}} MTU with VXLAN (IPv4) | {{prodname}} MTU with VXLAN (IPv6) | {{prodname}} MTU with WireGuard (IPv4) | {{prodname}} MTU with WireGuard (IPv6) |
-| ---------------------- | ---------------- | ------------------------------------- | ---------------------------------- | ---------------------------------- | -------------------------------------- | -------------------------------------- |
-| 1500 | 1500 | 1480 | 1450 | 1430 | 1440 | 1420 |
-| 9000 | 9000 | 8980 | 8950 | 8930 | 8940 | 8920 |
-| 1500 (AKS) | 1500 | 1480 | 1450 | 1430 | 1340 | 1320 |
-| 1460 (GCE) | 1460 | 1440 | 1410 | 1390 | 1400 | 1380 |
-| 9001 (AWS Jumbo) | 9001 | 8981 | 8951 | 8931 | 8941 | 8921 |
-| 1450 (OpenStack VXLAN) | 1450 | 1430 | 1400 | 1380 | 1390 | 1370 |
-
-**Recommended MTU for overlay networking**
-
-The extra overlay header used by the IP in IP, VXLAN, and WireGuard protocols reduces the usable MTU by the size of the header. (IP in IP uses a 20-byte header, IPv4 VXLAN uses a 50-byte header, IPv6 VXLAN uses a 70-byte header, IPv4 WireGuard uses a [60-byte header](https://lists.zx2c4.com/pipermail/wireguard/2017-December/002201.html), and IPv6 WireGuard uses an 80-byte header).
-
-When using AKS, the underlying network has an [MTU of 1400](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-tcpip-performance-tuning#azure-and-vm-mtu), even though the network interface will have an MTU of 1500.
-WireGuard sets the Don't Fragment (DF) bit on its packets, and so the MTU for WireGuard on AKS needs to be set to 60 bytes below (or 80 bytes for IPv6) the 1400 MTU of the underlying network to avoid dropped packets.
-
-If you have a mix of WireGuard and either IP in IP or VXLAN in your cluster, you should configure the MTU to be the smallest of the values of each encap type. The reason for this is that only WireGuard encapsulation will be used between any nodes where both have WireGuard enabled, and IP in IP or VXLAN will then be used between any nodes where both do not have WireGuard enabled. This could be the case if, for example, you are in the process of installing WireGuard on your nodes.
-
-Therefore, we recommend the following:
-
-- If you use IPv4 WireGuard encryption anywhere in your pod network, configure MTU size as “physical network MTU size minus 60”.
-- If you use IPv6 WireGuard encryption anywhere in your pod network, configure MTU size as “physical network MTU size minus 80”.
-- If you don't use WireGuard, but use IPv4 VXLAN anywhere in your pod network, configure MTU size as “physical network MTU size minus 50”.
-- If you don't use WireGuard, but use IPv6 VXLAN anywhere in your pod network, configure MTU size as “physical network MTU size minus 70”.
-- If you don't use WireGuard, but use only IP in IP, configure MTU size as “physical network MTU size minus 20”
-- Set the workload endpoint MTU and the tunnel MTUs to the same value (so all paths have the same MTU)
-
-**eBPF mode**
-
-The eBPF implementation of NodePorts uses a VXLAN tunnel to hand off packets from one node to another, so the VXLAN MTU setting is used to set the MTU of workloads (veths) and should be “physical network MTU size minus 50” (see above).
-
-**MTU for flannel networking**
-
-When using flannel for networking, the MTU for network interfaces should match the MTU of the flannel interface.
-
-- If using flannel with VXLAN, use the “{{prodname}} MTU with VXLAN” column in the table above for common sizes.
-
-### Configure MTU
-
-:::note
-
-The updated MTU used by {{prodname}} only applies to new workloads.
-
-:::
-
-Instructions for configuring MTU vary based on install method.
-
-
-
-
-For Operator installations, edit the {{prodname}} operator `Installation` resource to set the `mtu`
-field in the `calicoNetwork` section of the `spec`. For example:
-
-```bash
-kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"mtu":1440}}}'
-```
-
-Similarly, for OpenShift:
-
-```bash
-oc patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"mtu":1440}}}'
-```
-
-
-
-
-For manifest based installations (i.e. ones that do not use the operator) edit the `calico-config` ConfigMap. For example:
-
-```bash
-kubectl patch configmap/calico-config -n kube-system --type merge \
- -p '{"data":{"veth_mtu": "1440"}}'
-```
-
-After updating the ConfigMap, perform a rolling restart of all calico/node pods. For example:
-
-```bash
-kubectl rollout restart daemonset calico-node -n kube-system
-```
-
-
-
-
-### View current tunnel MTU values
-
-To view the current tunnel size, use the following command:
-
-`ip link show`
-
-The IP in IP tunnel appears as tunlx (for example, tunl0), along with the MTU size. For example:
-
-![Tunnel MTU](/img/calico/tunnel.png)
diff --git a/calico_versioned_docs/version-3.25/networking/configuring/pod-mac-address.mdx b/calico_versioned_docs/version-3.25/networking/configuring/pod-mac-address.mdx
deleted file mode 100644
index eafb428d66..0000000000
--- a/calico_versioned_docs/version-3.25/networking/configuring/pod-mac-address.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
----
-description: Specify the MAC address for a pod instead of allowing the operating system to assign one
----
-
-# Use a specific MAC address for a pod
-
-## Big picture
-
-Choose the MAC address for a pod instead of allowing the operating system to assign one.
-
-## Value
-
-Some applications bind software licenses to networking interface MAC addresses.
-
-## Concepts
-
-### Container MAC address
-
-The MAC address configured by the annotation described here will be visible from within the container on the eth0 interface. Since it is isolated to the container it will not collide with any other MAC addresses assigned to other pods on the same node.
-
-## Before you begin...
-
-Your cluster must be using Calico CNI to use this feature.
-
-[Configuring the Calico CNI Plugins](../../reference/configure-cni-plugins.mdx)
-
-## How to
-
-Annotate the pod with `cni.projectcalico.org/hwAddr` set to the desired MAC address. For example:
-
-```
- "cni.projectcalico.org/hwAddr": "1c:0c:0a:c0:ff:ee"
-```
-
-The annotation must be present when the pod is created; adding it later has no effect.
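-
-For example, a pod manifest using the annotation might look like the following sketch (the pod name and image are illustrative):
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: licensed-app
-  annotations:
-    cni.projectcalico.org/hwAddr: '1c:0c:0a:c0:ff:ee'
-spec:
-  containers:
-    - name: app
-      image: example.com/licensed-app:1.0 # illustrative image
-```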
diff --git a/calico_versioned_docs/version-3.25/networking/configuring/sidecar-acceleration.mdx b/calico_versioned_docs/version-3.25/networking/configuring/sidecar-acceleration.mdx
deleted file mode 100644
index cfcb69775c..0000000000
--- a/calico_versioned_docs/version-3.25/networking/configuring/sidecar-acceleration.mdx
+++ /dev/null
@@ -1,72 +0,0 @@
----
-description: Use Calico to accelerate network performance of traffic through the Istio Envoy sidecar using eBPF.
----
-
-# Accelerate Istio network performance
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Use Calico to accelerate network performance of routing network traffic via Istio Envoy sidecar.
-
-:::caution
-
-This feature is experimental and should not be used in production clusters. It uses a recent Linux kernel feature (eBPF SOCKMAP), which our testing confirms requires upstream kernel enhancements to reliably and securely support production clusters. We are contributing fixes to the kernel where needed.
-
-:::
-
-## Value
-
-Istio directs all application network traffic through an Envoy sidecar in each pod, which introduces network overhead for all traffic. Calico can greatly reduce this network overhead by automatically optimizing the Linux network path for this traffic.
-
-## Concepts
-
-### Sidecar acceleration
-
-The Sidecar acceleration process bypasses several layers of kernel networking, allowing data to flow between the sockets unobstructed. This makes the Envoy proxy (sidecar) to container network path as fast and efficient as possible.
-
-## Before you begin...
-
-- [Enable application layer policy](../../network-policy/istio/app-layer-policy.mdx)
-- Verify that hosts installed with Calico are using Linux kernel 4.19 and above
-
-### Sidecar acceleration: experimental technology
-
-The sidecar app acceleration feature is disabled by default in Calico because the technology is currently not production ready. Use only in test environments until the technology is hardened for production security.
-
-## How to
-
-To enable sidecar acceleration for Istio-enabled apps using Calico:
-
-
-
-
-```bash
-kubectl patch felixconfiguration default --type merge --patch '{"spec":{"sidecarAccelerationEnabled": true}}'
-```
-
-You should see an output like below:
-
-```
-felixconfiguration.projectcalico.org/default patched
-```
-
-
-
-
-```bash
-calicoctl patch felixconfiguration default --patch '{"spec":{"sidecarAccelerationEnabled": true}}'
-```
-
-You should see an output like below:
-
-```
-Successfully patched 1 'FelixConfiguration' resource
-```
-
-
-
-
-That’s it! Network traffic that is routed between apps and the Envoy sidecar is automatically accelerated at this point. Note that if you have an existing Istio/Calico implementation and you enable sidecar acceleration, existing connections do not benefit from acceleration.
diff --git a/calico_versioned_docs/version-3.25/networking/configuring/use-ipvs.mdx b/calico_versioned_docs/version-3.25/networking/configuring/use-ipvs.mdx
deleted file mode 100644
index c1d1dd029c..0000000000
--- a/calico_versioned_docs/version-3.25/networking/configuring/use-ipvs.mdx
+++ /dev/null
@@ -1,55 +0,0 @@
----
-description: Use IPVS kube-proxy for performance improvements.
----
-
-# Use IPVS kube-proxy
-
-## Big picture
-
-Use IPVS kube-proxy mode for load balancing traffic across pods.
-
-## Value
-
-No matter where you are on your journey with container networking, iptables will serve you well. However, if you are scaling above 1,000 services, it’s worth looking at potential performance improvements using kube-proxy IPVS mode.
-
-## Concepts
-
-### Kubernetes kube-proxy
-
-The kube-proxy process handles everything related to Services on each node. It ensures that connections to the service cluster IP and port go to a pod that backs the service. If a service is backed by more than one pod, kube-proxy load-balances traffic across those pods.
-
-Kube-proxy runs in three modes: **userspace**, **iptables**, and **ipvs**. (Userspace is old, slow and not recommended.) Here’s a quick summary of iptables and ipvs modes.
-
-| **kube-proxy mode** | **Designed to be...** | **Linux kernel hooks** | **Connection processing overhead...** |
-| ------------------- | ---------------------------------------------------------------------------------------------------------- | -------------------------------------- | ------------------------------------------- |
-| iptables | An efficient firewall | NAT pre-routing using sequential rules | Grows proportional to cluster size |
-| ipvs | A load balancer with scheduling options like round-robin, shortest-expected delay, least connections, etc. | Optimized lookup routine | Stays constant, independent of cluster size |
-
-If you are wondering about the performance differences between iptables and ipvs, the answers are definitely not straightforward. For a comparison between iptables (including {{prodname}}’s own use of iptables) and ipvs modes, see [Comparing kube-proxy modes: iptables or IPVS?](https://www.projectcalico.org/comparing-kube-proxy-modes-iptables-or-ipvs/).
-
-### IPVS mode and NodePort ranges
-
-Kube-proxy IPVS mode supports NodePort services and cluster IPs. {{prodname}} also uses NodePorts for routing traffic to the cluster, including the same default Kubernetes NodePort range (30000:32767). If you change your default NodePort range in Kubernetes, you must also change it on {{prodname}} to maintain ipvs coverage.
-
-### iptables: when to change mark bits
-
-To police traffic in IPVS mode, {{prodname}} uses additional iptables mark bits to store an ID for each local {{prodname}} endpoint. If you are planning to run more than 1,022 pods per host with IPVS enabled, you may need to adjust the mark bit size using the `IptablesMarkMask` parameter in {{prodname}} [FelixConfiguration](../../reference/felix/configuration.mdx#ipvs-bits).
-
-### {{prodname}} auto detects ipvs mode
-
-When {{prodname}} detects that kube-proxy is running in IPVS mode (during or after installation), IPVS support is automatically activated. Detection happens when calico-node starts up, so if you change kube-proxy's mode in a running cluster, you will need to restart your calico-node instances.
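-
-For example, on a manifest-based installation where {{prodname}} runs as the `calico-node` DaemonSet in the `kube-system` namespace, a rolling restart might look like:
-
-```bash
-kubectl rollout restart daemonset calico-node -n kube-system
-```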
-
-## Before you begin...
-
-**Required**
-
-- kube-proxy is configured to use IPVS mode
-- Services for IPVS mode are of type NodePort
-
-## How to
-
-As previously discussed, there is nothing you need to do in {{prodname}} to use IPVS mode; if enabled, the mode is automatically detected. However, if your default Kubernetes NodePort range changes, use the following instructions to update {{prodname}} nodeport ranges to stay in sync.
-
-### Change {{prodname}} default nodeport range
-
-In the FelixConfiguration resource, change the configuration parameter for the default node port range (`KubeNodePortRanges`) in {{prodname}} to match your new default range in Kubernetes. For help, see [FelixConfiguration](../../reference/felix/configuration.mdx).
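-
-For example, a minimal sketch of the change with `calicoctl`, assuming the FelixConfiguration resource is named `default`, the spec field is `kubeNodePortRanges`, and `30000:33000` is your (illustrative) new Kubernetes range:
-
-```bash
-# Patch the default FelixConfiguration so Calico treats the new range as node ports
-calicoctl patch felixconfiguration default -p '{"spec": {"kubeNodePortRanges": ["30000:33000"]}}'
-```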
diff --git a/calico_versioned_docs/version-3.25/networking/configuring/vxlan-ipip.mdx b/calico_versioned_docs/version-3.25/networking/configuring/vxlan-ipip.mdx
deleted file mode 100644
index 6277ddbdbf..0000000000
--- a/calico_versioned_docs/version-3.25/networking/configuring/vxlan-ipip.mdx
+++ /dev/null
@@ -1,156 +0,0 @@
----
-description: Configure Calico to use IP in IP or VXLAN overlay networking so the underlying network doesn’t need to understand pod addresses.
----
-
-# Overlay networking
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Enable inter workload communication across networks that are not aware of workload IPs.
-
-## Value
-
-In general, we recommend running Calico without network overlay/encapsulation. This gives you the highest performance and simplest network; the packet that leaves your workload is the packet that goes on the wire.
-
-However, selectively using overlays/encapsulation can be useful when running on top of an underlying network that cannot easily be made aware of workload IPs. A common example is if you are using Calico networking in AWS across multiple VPCs/subnets. In this case, Calico can selectively encapsulate only the traffic that is routed between the VPCs/subnets, and run without encapsulation within each VPC/subnet. You might also decide to run your entire Calico network with encapsulation as an overlay network -- as a quick way to get started without setting up BGP peering or other routing information in your underlying network.
-
-## Concepts
-
-### Routing workload IP addresses
-
-Networks become aware of workload IP addresses through layer 3 routing techniques like static routes or BGP route distribution, or layer 2 address learning. As such, they can route unencapsulated traffic to the right host for the endpoint that is the ultimate destination. However, not all networks are able to route workload IP addresses. For example, public cloud environments where you don’t own the hardware, AWS across VPC subnet boundaries, and other scenarios where you cannot peer Calico over BGP to the underlay, or easily configure static routes. This is why Calico supports encapsulation, so you can send traffic between workloads without requiring the underlying network to be aware of workload IP addresses.
-
-### Encapsulation types
-
-Calico supports two types of encapsulation: VXLAN and IP in IP. VXLAN is supported in some environments where IP in IP is not (for example, Azure). VXLAN has a slightly higher per-packet overhead because the header is larger, but unless you are running very network intensive workloads the difference is not something you would typically notice. The other small difference between the two types of encapsulation is that Calico's VXLAN implementation does not use BGP, whereas Calico's IP in IP implementation uses BGP between Calico nodes.
-
-### Cross-subnet
-
-Encapsulation of workload traffic is typically required only when traffic crosses a router that is unable to route workload IP addresses on its own. Calico can perform encapsulation on: all traffic, no traffic, or only on traffic that crosses a subnet boundary.
-
-## Before you begin
-
-**Not supported**
-
-- OpenStack
-
-**Limitations**
-
-- IP in IP supports only IPv4 addresses
-- VXLAN in IPv6 is only supported for kernel versions ≥ 4.19.1, or Red Hat kernel versions ≥ 4.18.0
-
-## How to
-
-- [Configure default IP pools at install time](#configure-default-ip-pools-at-install-time)
-- [Configure IP in IP encapsulation for only cross-subnet traffic](#configure-ip-in-ip-encapsulation-for-only-cross-subnet-traffic)
-- [Configure IP in IP encapsulation for all inter workload traffic](#configure-ip-in-ip-encapsulation-for-all-inter-workload-traffic)
-- [Configure VXLAN encapsulation for only cross-subnet traffic](#configure-vxlan-encapsulation-for-only-cross-subnet-traffic)
-- [Configure VXLAN encapsulation for all inter workload traffic](#configure-vxlan-encapsulation-for-all-inter-workload-traffic)
-
-### Best practice
-
-Calico has an option to selectively encapsulate only traffic that crosses subnet boundaries. We recommend using the **cross-subnet** option with IP in IP or VXLAN to minimize encapsulation overhead. Cross-subnet mode provides better performance in AWS multi-AZ deployments, Azure VNETs, and on networks where routers are used to connect pools of nodes with L2 connectivity.
-
-Be aware that switching encapsulation modes can cause disruption to in-progress connections. Plan accordingly.
-
-### Configure default IP pools at install time
-
-{{prodname}} configures default IP pools automatically at install time. How you configure these default IP pools depends on your install method.
-
-
-
-
-For operator managed clusters, you can configure encapsulation in the IP pools section of the default Installation. For example, the following installation snippet will enable VXLAN across subnets.
-
-```yaml
-kind: Installation
-apiVersion: operator.tigera.io/v1
-metadata:
- name: default
-spec:
- calicoNetwork:
- ipPools:
- - cidr: 192.168.0.0/16
- encapsulation: VXLANCrossSubnet
-```
-
-
-
-
-For manifest installations of Calico, you can control the default IP pool encapsulation mode using the `CALICO_IPV4POOL_VXLAN` and `CALICO_IPV4POOL_IPIP` (and `CALICO_IPV6POOL_VXLAN` for IPv6) environment variables in the environment of the `calico-node` daemon set.
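-
-For example, a sketch of the relevant environment variables on the `calico-node` container for VXLAN across subnets, assuming the usual `Always`/`CrossSubnet`/`Never` values (set only the variables that apply to your pool):
-
-```yaml
-# In the calico-node DaemonSet container spec (manifest install):
-env:
-  - name: CALICO_IPV4POOL_IPIP
-    value: 'Never'
-  - name: CALICO_IPV4POOL_VXLAN
-    value: 'CrossSubnet'
-```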
-
-
-
-
-### Configure IP in IP encapsulation for only cross-subnet traffic
-
-IP in IP encapsulation can be performed selectively, and only for traffic crossing subnet boundaries.
-
-To enable this feature, set `ipipMode` to `CrossSubnet`.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
- name: ippool-ipip-cross-subnet-1
-spec:
- cidr: 192.168.0.0/16
- ipipMode: CrossSubnet
- natOutgoing: true
-```
-
-### Configure IP in IP encapsulation for all inter workload traffic
-
-With `ipipMode` set to `Always`, Calico routes traffic using IP in IP for all traffic originating from a Calico enabled-host, to all Calico networked containers and VMs within the IP pool.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
- name: ippool-ipip-1
-spec:
- cidr: 192.168.0.0/16
- ipipMode: Always
- natOutgoing: true
-```
-
-### Configure VXLAN encapsulation for only cross-subnet traffic
-
-VXLAN encapsulation can be performed selectively, and only for traffic crossing subnet boundaries.
-
-To enable this feature, set `vxlanMode` to `CrossSubnet`.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
- name: ippool-vxlan-cross-subnet-1
-spec:
- cidr: 192.168.0.0/16
- vxlanMode: CrossSubnet
- natOutgoing: true
-```
-
-### Configure VXLAN encapsulation for all inter workload traffic
-
-With `vxlanMode` set to `Always`, Calico routes traffic using VXLAN for all traffic originating from a Calico enabled host, to all Calico networked containers and VMs within the IP pool.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
- name: ippool-vxlan-1
-spec:
- cidr: 192.168.0.0/16
- vxlanMode: Always
- natOutgoing: true
-```
-
-If you use only VXLAN pools, BGP networking is not required. You can disable BGP to reduce the moving parts in your cluster by [Customizing the manifests](../../getting-started/kubernetes/self-managed-onprem/config-options.mdx). Set the `calico_backend` setting to `vxlan`, and disable the BGP readiness check.
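-
-As a sketch, for manifest-based installs that use the standard `calico-config` ConfigMap (names assume the standard manifests):
-
-```yaml
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  name: calico-config
-  namespace: kube-system
-data:
-  # Use the VXLAN (non-BGP) backend
-  calico_backend: 'vxlan'
-```
-
-With BGP disabled, you would also remove the BGP checks (typically `-bird-ready` and `-bird-live`) from the calico-node readiness and liveness probes; check your manifest for the exact flags.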
-
-## Additional resources
-
-For details on IP pool resource options, see [IP pool](../../reference/resources/ippool.mdx).
diff --git a/calico_versioned_docs/version-3.25/networking/configuring/workloads-outside-cluster.mdx b/calico_versioned_docs/version-3.25/networking/configuring/workloads-outside-cluster.mdx
deleted file mode 100644
index 7fe18c0fe3..0000000000
--- a/calico_versioned_docs/version-3.25/networking/configuring/workloads-outside-cluster.mdx
+++ /dev/null
@@ -1,64 +0,0 @@
----
-description: Configure networking to perform outbound NAT for connections from pods to outside of the cluster.
----
-
-# Configure outgoing NAT
-
-## Big picture
-
-Configure {{prodname}} networking to perform outbound NAT for connections from pods to outside of the cluster. {{prodname}} optionally source NATs the pod IP to the node IP.
-
-## Value
-
-The {{prodname}} NAT outbound connection option is flexible; it can be enabled, disabled, and applied to {{prodname}} IP pools with public IPs, private IPs, or a specific range of IP addresses. This article describes some use cases for enabling and disabling outgoing NAT.
-
-## Concepts
-
-### {{prodname}} IP pools and NAT
-
-When a pod with an IP address in the pool initiates a network connection to an IP address outside of {{prodname}}’s IP pools, the outgoing packets will have their source IP address changed from the pod IP address to the node IP address using SNAT (Source Network Address Translation). Any return packets on the connection automatically get this change reversed before being passed back to the pod.
-
-### Enable NAT: for pods with IP addresses that are not routable beyond the cluster
-
-A common use case for enabling NAT outgoing is to allow pods in an overlay network to connect to IP addresses outside of the overlay, or pods with private IP addresses to connect to public IP addresses outside the cluster or on the internet (subject to network policy allowing the connection, of course). When NAT is enabled, traffic is NATed from pods in that pool to any destination outside of all other {{prodname}} IP pools.
-
-### Disable NAT: for on-premises deployments using physical infrastructure
-
-If you choose to implement {{prodname}} networking with [BGP peered with your physical network infrastructure](bgp.mdx), you can use your own infrastructure to NAT traffic from pods to the internet. In this case, you should disable the {{prodname}} `natOutgoing` option. For example, if you want your pods to have public internet IPs, you should:
-
-- Configure {{prodname}} to peer with your physical network infrastructure
-- Create an IP pool with public IP addresses for those pods that are routed to your network with NAT disabled (`natOutgoing: false`); see the sketch after this list
-- Verify that other network equipment does not NAT the pod traffic
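-
-For example, a sketch of such a pool, using the documentation range 203.0.113.0/24 as an illustrative public CIDR (substitute your own routable addresses and pool name):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: public-ippool-no-nat # illustrative name
-spec:
-  cidr: 203.0.113.0/24 # example public range; use your own
-  natOutgoing: false
-```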
-
-## How to
-
-- [Create an IP pool with NAT outgoing enabled](#create-an-ip-pool-with-nat-outgoing-enabled)
-- [Use additional IP pools to specify addresses that can be reached without NAT](#use-additional-ip-pools-to-specify-addresses-that-can-be-reached-without-nat)
-
-### Create an IP pool with NAT outgoing enabled
-
-In the following example, we create a {{prodname}} IPPool with natOutgoing enabled. Outbound NAT is performed locally on the node where each workload in the pool is hosted.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
- name: default-ipv4-ippool
-spec:
- cidr: 192.168.0.0/16
- natOutgoing: true
-```
-
-### Use additional IP pools to specify addresses that can be reached without NAT
-
-Because {{prodname}} performs outgoing NAT only when connecting to an IP address that is not in a {{prodname}} IPPool, you can create additional IPPools that are not used for pod IP addresses, but prevent NAT to certain CIDR blocks. This is useful if you want nodes to NAT traffic to the internet, but not to IPs in certain internal ranges. For example, if you did not want to NAT traffic from pods to 10.0.0.0/8, you could create the following pool. You must ensure that the network between the cluster and 10.0.0.0/8 can route pod IPs.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
- name: no-nat-10.0.0.0-8
-spec:
- cidr: 10.0.0.0/8
- disabled: true
-```
diff --git a/calico_versioned_docs/version-3.25/networking/determine-best-networking.mdx b/calico_versioned_docs/version-3.25/networking/determine-best-networking.mdx
deleted file mode 100644
index 3a0a8f1d56..0000000000
--- a/calico_versioned_docs/version-3.25/networking/determine-best-networking.mdx
+++ /dev/null
@@ -1,288 +0,0 @@
----
-description: Learn about the different networking options Calico supports so you can choose the best option for your needs.
----
-
-# Determine best networking option
-
-## Big picture
-
-Learn about the different networking options {{prodname}} supports so you can choose the best option for your needs.
-
-## Value
-
-{{prodname}}’s flexible modular architecture supports a wide range of deployment options, so you can select the best networking approach for your specific environment and needs. This includes the ability to run with a variety of CNI and IPAM plugins, and underlying network types, in non-overlay or overlay modes, with or without BGP.
-
-## Concepts
-
-If you want to fully understand the network choices available to you, we recommend you make sure you are familiar with and understand the following concepts. If you would prefer to skip the learning and get straight to the choices and recommendations, you can jump ahead to [Networking Options](#networking-options).
-
-### Kubernetes networking basics
-
-The Kubernetes network model defines a “flat” network in which:
-
-- Every pod gets its own IP address.
-- Pods on any node can communicate with all pods on all other nodes without NAT.
-
-This creates a clean, backwards-compatible model where pods can be treated much like VMs or physical hosts from the perspectives of port allocation, naming, service discovery, load balancing, application configuration, and migration. Network segmentation can be defined using network policies to restrict traffic within these base networking capabilities.
-
-Within this model there’s quite a lot of flexibility for supporting different networking approaches and environments. The details of exactly how the network is implemented depend on the combination of CNI, network, and cloud provider plugins being used.
-
-### CNI plugins
-
-CNI (Container Network Interface) is a standard API which allows different network implementations to plug into Kubernetes. Kubernetes calls the API any time a pod is being created or destroyed. There are two types of CNI plugins:
-
-- CNI network plugins: responsible for adding or deleting pods to/from the Kubernetes pod network. This includes creating/deleting each pod’s network interface and connecting/disconnecting it to the rest of the network implementation.
-- CNI IPAM plugins: responsible for allocating and releasing IP addresses for pods as they are created or deleted. Depending on the plugin, this may include allocating one or more ranges of IP addresses (CIDRs) to each node, or obtaining IP addresses from an underlying public cloud’s network to allocate to pods.
-
-### Cloud provider integrations
-
-Kubernetes cloud provider integrations are cloud-specific controllers that can configure the underlying cloud network to help provide Kubernetes networking. Depending on the cloud provider, this could include automatically programming routes into the underlying cloud network so it knows natively how to route pod traffic.
-
-### Kubenet
-
-Kubenet is an extremely basic network plugin built into Kubernetes. It does not implement cross-node networking or network policy. It is typically used together with a cloud provider integration that sets up routes in the cloud provider network for communication between nodes, or in single node environments. Kubenet is not compatible with {{prodname}}.
-
-### Overlay networks
-
-An overlay network is a network that is layered on top of another network. In the context of Kubernetes, an overlay network can be used to handle pod-to-pod traffic between nodes on top of an underlying network that is not aware of pod IP addresses or which pods are running on which nodes. Overlay networks work by encapsulating network packets that an underlying network doesn’t know how to handle (for example using pod IP addresses) within an outer packet which the underlying network does know how to handle (for example node IP addresses). Two common network protocols used for encapsulation are VXLAN and IP-in-IP.
-
-The main advantage of using an overlay network is that it reduces dependencies on the underlying network. For example, you can run a VXLAN overlay on top of almost any underlying network, without needing to integrate with or make any changes to the underlying network.
-
-The main disadvantages of using an overlay network are:
-
-- A slight performance impact. The process of encapsulating packets takes a small amount of CPU, and the extra bytes required in the packet to encode the encapsulation (VXLAN or IP-in-IP headers) reduces the maximum size of inner packet that can be sent, which in turn can mean needing to send more packets for the same amount of total data.
-- The pod IP addresses are not routable outside of the cluster. More on this below!
-
-### Cross-subnet overlays
-
-In addition to standard VXLAN or IP-in-IP overlays, {{prodname}} also supports “cross-subnet” modes for VXLAN and IP-in-IP. In this mode, within each subnet, the underlying network acts as an L2 network. Packets sent within a single subnet are not encapsulated, so you get the performance of a non-overlay network. Packets sent across subnets are encapsulated, like a normal overlay network, reducing dependencies on the underlying network (without the need to integrate with or make any changes to the underlying network).
-
-Just like with a standard overlay network, the underlying network is not aware of pod IP addresses and the pod IP addresses are not routable outside of the cluster.
-
-### Pod IP routability outside of the cluster
-
-An important distinguishing feature of different Kubernetes network implementations is whether or not pod IP addresses are routable outside of the cluster across the broader network.
-
-**Not routable**
-
-If the pod IP addresses are not routable outside of the cluster then when a pod tries to establish a network connection to an IP address that is outside of the cluster, Kubernetes uses a technique called SNAT (Source Network Address Translation) to change the source IP address from the IP address of the pod, to the IP address of the node hosting the pod. Any return packets on the connection get automatically mapped back to the pod IP address. So the pod is unaware the SNAT is happening, the destination for the connection sees the node as the source of the connection, and the underlying broader network never sees pod IP addresses.
-
-For connections in the opposite direction, where something outside of the cluster needs to connect to a pod, this can only be done via Kubernetes services or Kubernetes ingress. Nothing outside of the cluster can directly connect to a pod IP address, because the broader network doesn’t know how to route packets to pod IP addresses.
-
-**Routable**
-
-If the pod IP addresses are routable outside of the cluster then pods can connect to the outside world without SNAT, and the outside world can connect directly to pods without going via a Kubernetes service or Kubernetes ingress.
-
-The advantages of pod IP addresses that are routable outside the cluster are:
-
-- Avoiding SNAT for outbound connections may be essential for integrating with existing broader security requirements. It can also simplify debugging and understandability of operation logs.
-- If you have specialized workloads that mean some pods need to be directly accessible without going via Kubernetes services or Kubernetes ingress, then routable pod IPs can be operationally simpler than the alternative of using host networked pods.
-
-The main disadvantage of pod IP addresses that are routable outside the cluster is that the pod IPs must be unique across the broader network. So for example, if running multiple clusters you will need to use a different IP address range (CIDR) for pods in each cluster. This in turn can lead to IP address range exhaustion challenges when running at scale, or if there are other significant existing enterprise demands on IP address space.
-
-**What determines routability?**
-
-If you are using an overlay network for your cluster, then pod IPs are not normally routable outside of the cluster.
-
-If you aren’t using an overlay network, then whether pod IPs are routable outside of the cluster depends on what combination of CNI plugins, cloud provider integrations, or (for on-prem) BGP peering with the physical network, is being used.
-
-### BGP
-
-BGP (Border Gateway Protocol) is a standards based networking protocol for sharing routes across a network. It’s one of the fundamental building blocks of the internet, with exceptional scaling characteristics.
-
-{{prodname}} has built-in support for BGP. In an on-prem deployment, this allows {{prodname}} to peer with the physical network (typically to Top of Rack routers) to exchange routes, creating a non-overlay network where pod IP addresses are routable across the broader network, just like any other workload attached to the network.
-
-## About {{prodname}} Networking
-
-{{prodname}}’s flexible modular architecture for networking includes the following.
-
-**{{prodname}} CNI network plugin**
-
-The {{prodname}} CNI network plugin connects pods to the host network namespace’s L3 routing using a pair of virtual Ethernet devices (veth pair). This L3 architecture avoids the unnecessary complexity and performance overheads of additional L2 bridges that feature in many other Kubernetes networking solutions.
-
-**{{prodname}} CNI IPAM plugin**
-
-The {{prodname}} CNI IPAM plugin allocates IP addresses for pods out of one or more configurable IP address ranges, dynamically allocating small blocks of IPs per node as required. The result is a more efficient IP address space usage compared to many other CNI IPAM plugins, including the host local IPAM plugin which is used in many networking solutions.
-
-**Overlay network modes**
-
-{{prodname}} can provide both VXLAN or IP-in-IP overlay networks, including cross-subnet only modes.
-
-**Non-overlay network modes**
-
-{{prodname}} can provide non-overlay networks running on top of any underlying L2 network, or an L3 network that is either a public cloud network with appropriate cloud provider integration, or a BGP capable network (typically an on-prem network with standard Top-of-Rack routers).
-
-**Network policy enforcement**
-
-{{prodname}}’s network policy enforcement engine implements the full range of Kubernetes Network Policy features, plus the extended features of {{prodname}} Network Policy. This works in conjunction with {{prodname}}’s built in networking modes, or any other {{prodname}} compatible network plugins and cloud provider integrations.
-
-## {{prodname}} compatible CNI plugins and cloud provider integrations
-
-In addition to the {{prodname}} CNI plugins and built in networking modes, {{prodname}} is also compatible with a number of third party CNI plugins and cloud provider integrations.
-
-**Amazon VPC CNI**
-
-The Amazon VPC CNI plugin allocates pod IPs from the underlying AWS VPC and uses AWS elastic network interfaces to provide VPC native pod networking (pod IPs that are routable outside of the cluster). It is the default networking used in [Amazon EKS](https://aws.amazon.com/eks/), with Calico for network policy enforcement.
-
-**Azure CNI**
-
-The Azure CNI plugin allocates pod IPs from the underlying Azure VNET and configures the Azure virtual network to provide VNET native pod networking (pod IPs that are routable outside of the cluster). It is the default networking used in [Microsoft AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/), with Calico for network policy enforcement.
-
-**Azure cloud provider**
-
-The Azure cloud provider integration can be used as an alternative to the Azure CNI plugin. It uses the host-local IPAM CNI plugin to allocate pod IPs, and programs the underlying Azure VNET subnet with corresponding routes. Pod IPs are only routable within the VNET subnet (which often equates to meaning they are not routable outside of the cluster).
-
-**Google cloud provider**
-
-The Google cloud provider integration uses the host-local IPAM CNI plugin to allocate pod IPs, and programs the Google cloud network Alias IP ranges to provide VPC native pod networking on Google cloud (pod IPs that are routable outside of the cluster). It is the default for Google Kubernetes Engine (GKE), with Calico for network policy enforcement.
-
-**Host local IPAM**
-
-The host local CNI IPAM plugin is a commonly used IP address management CNI plugin, which allocates a fixed size IP address range (CIDR) to each node, and then allocates pod IP addresses from within that range. The default address range size is 256 IP addresses (a /24), though two of those IP addresses are reserved for special purposes and not assigned to pods. The simplicity of host local CNI IPAM plugin makes it easy to understand, but results in less efficient IP address space usage compared to {{prodname}} CNI IPAM plugin.
-
-**Flannel**
-
-Flannel routes pod traffic using static per-node CIDRs obtained from the host-local IPAM CNI plugin. Flannel provides a number of networking backends, but is predominantly used with its VXLAN overlay backend. {{prodname}} CNI and {{prodname}} network policy can be combined with flannel and the host-local IPAM plugin to provide a VXLAN network with policy enforcement. This combination is sometimes referred to as “Canal”.
-
-:::note
-
-{{prodname}} now has built in support for VXLAN, which we generally recommend for simplicity in preference to using the Calico+Flannel combination.
-
-:::
-
-## Networking Options
-
-### On-prem
-
-The most common network setup for {{prodname}} on-prem is non-overlay mode using [BGP to peer](configuring/bgp.mdx) with the physical network (typically top of rack routers) to make pod IPs routable outside of the cluster. (You can of course configure the rest of your on-prem network to limit the scope of pod IP routing outside of the cluster if desired.) This setup provides a rich range of advanced {{prodname}} features, including the ability to advertise Kubernetes service IPs (cluster IPs or external IPs), and the ability to control IP address management at the pod, namespace, or node level, to support a wide range of possibilities for integrating with existing enterprise network and security requirements.
-
-
-
-If peering BGP to the physical network is not an option, you can also run non-overlay mode if the cluster is within a single L2 network, with Calico just peering BGP between the nodes in the cluster. Even though this is not strictly an overlay network, the pod IPs are not routable outside of the cluster, because the broader network does not have routes for the pod IPs.
-
-
-
-Alternatively you can run {{prodname}} in either VXLAN or IP-in-IP overlay mode, with cross-subnet overlay mode to optimize performance within each L2 subnet.
-
-_Recommended:_
-
-
-
-_Alternative:_
-
-
-
-### AWS
-
-If you would like pod IP addresses to be routable outside of the cluster then you must use the Amazon VPC CNI plugin. This is the default networking mode for [EKS](https://aws.amazon.com/eks/), with Calico for network policy. Pod IP addresses are allocated from the underlying VPC and the maximum number of pods per node is dependent on the [instance type](https://github.com/aws/amazon-vpc-cni-k8s#eni-allocation).
-
-
-
-If you prefer to avoid dependencies on a specific cloud provider, or allocating pod IPs from the underlying VPC is problematic due to IP address range exhaustion challenges, or if the maximum number of pods supported per node by the Amazon VPC CNI plugin is not sufficient for your needs, we recommend using {{prodname}} networking in cross-subnet overlay mode. Pod IPs will not be routable outside of the cluster, but you can scale the cluster up to the limits of Kubernetes with no dependencies on the underlying cloud network.
-
-
-
-You can learn more about Kubernetes Networking on AWS, including how each of the above options works under the covers, in this short video: [Everything you need to know about Kubernetes networking on AWS](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-pod-networking-on-aws/).
-
-### Azure
-
-If you would like pod IP addresses to be routable outside of the cluster then you must use the Azure CNI plugin. This is supported by [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/), with Calico for network policy. Pod IP addresses are allocated from the underlying VNET.
-
-
-
-If you want to use AKS but allocating pod IPs from the underlying VNET is problematic due to IP address range exhaustion challenges, you can use {{prodname}} in conjunction with the Azure cloud provider integration. This uses host-local IPAM to allocate a /24 per node, and programs routes within the cluster’s underlying VNET subnet for those /24s. Pod IPs are not routable outside of the cluster / VNET subnet, so the same pod IP address range (CIDR) can be used across multiple clusters if desired.
-
-:::note
-
-This is referred to as kubenet + Calico in some AKS docs, but it is actually Calico CNI with Azure cloud provider, and does not use the kubenet plugin.
-
-:::
-
-
-
-If you aren’t using AKS, and prefer to avoid dependencies on a specific cloud provider or allocating pod IPs from the underlying VNET is problematic due to IP address range exhaustion challenges, we recommend using {{prodname}} networking in cross-subnet overlay mode. Pod IPs will not be routable outside of the cluster, but you can scale the cluster up to the limits of Kubernetes with no dependencies on the underlying cloud network.
-
-
-
-You can learn more about Kubernetes Networking on Azure, including how each of the above options works under the covers, in this short video: [Everything you need to know about Kubernetes networking on Azure](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-azure/).
-
-### Google Cloud
-
-If you would like pod IP addresses to be routable outside of the cluster then you must use the Google cloud provider integration in conjunction with the host-local IPAM CNI plugin. This is supported by [GKE](https://cloud.google.com/kubernetes-engine), with Calico for network policy. Pod IP addresses are allocated from the underlying VPC, and corresponding Alias IP addresses are automatically assigned to nodes.
-
-
-
-If you prefer to avoid dependencies on a specific cloud provider, or allocating pod IPs from the underlying VPC is problematic due to IP address range exhaustion challenges, we recommend using {{prodname}} networking in overlay mode. As the Google cloud network is a pure L3 network, cross-subnet mode is not supported. Pod IPs will not be routable outside of the cluster, but you can scale the cluster up to the limits of Kubernetes with no dependencies on the underlying cloud network.
-
-_Recommended:_
-
-
-
-_Alternative:_
-
-
-
-You can learn more about Kubernetes Networking on Google cloud, including how each of the above options works under the covers, in this short video: [Everything you need to know about Kubernetes networking on Google cloud](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-google-cloud/).
-
-### IBM Cloud
-
-If you are using IBM Cloud then we recommend using [IKS](https://www.ibm.com/products/kubernetes-service/), which has Calico built in to provide a cross-subnet IP-in-IP overlay. In addition to providing network policy for pods, IKS also uses Calico network policies to [secure the host nodes](https://cloud.ibm.com/docs/containers?topic=containers-network_policies#default_policy) within the cluster.
-
-
-
-### Anywhere
-
-The above list of environments is obviously not exhaustive. Understanding the concepts and explanations in this guide has hopefully helped you figure out what is right for your environment. If you still aren't sure, you can ask for advice through the Calico Users Slack or Discourse forum. And remember, you can run Calico in VXLAN overlay mode in almost any environment if you want to get started without worrying too deeply about the different options.
-
-
-
-## Additional resources
-
-- [Video playlist: Everything you need to know about Kubernetes networking](https://www.youtube.com/playlist?list=PLoWxE_5hnZUZMWrEON3wxMBoIZvweGeiq)
-- [Configure BGP peering](configuring/bgp.mdx)
-- [Configure overlay networking](configuring/vxlan-ipip.mdx)
-- [Advertise Kubernetes service IP addresses](configuring/advertise-service-ips.mdx)
-- [Customize IP address management](ipam/index.mdx)
-- [Interoperate with legacy firewalls using IP ranges](ipam/legacy-firewalls.mdx)
diff --git a/calico_versioned_docs/version-3.25/networking/index.mdx b/calico_versioned_docs/version-3.25/networking/index.mdx
deleted file mode 100644
index 6f9d67e775..0000000000
--- a/calico_versioned_docs/version-3.25/networking/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Calico's flexible networking options reduce the barriers to adopting a CaaS platform solution. Determine the best networking option for your implementation.
-hide_table_of_contents: true
----
-
-# Networking
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/add-floating-ip.mdx b/calico_versioned_docs/version-3.25/networking/ipam/add-floating-ip.mdx
deleted file mode 100644
index d119e2fc14..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/add-floating-ip.mdx
+++ /dev/null
@@ -1,117 +0,0 @@
----
-description: Configure one or more floating IPs to use as additional IP addresses for reaching a Kubernetes pod.
----
-
-# Add a floating IP to a pod
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Configure one or more floating IPs that can be used as additional IP addresses for reaching a Kubernetes pod.
-
-## Value
-
-Like Kubernetes Services, a floating IP provides a stable IP address to reach some network service that might be backed by different pods at different times. The primary advantage over Kubernetes services is that floating IPs work on all protocols: not just TCP, UDP, and SCTP. Unlike Kubernetes services, a floating IP fronts a single pod at a time and cannot be used for load balancing.
-
-## Concepts
-
-A **floating IP** is an additional IP address assigned to a workload endpoint. These IPs “float” in the sense that they can be moved around the cluster and front different workload endpoints at different times. The workload itself is generally unaware of the floating IP; the host uses network address translation (NAT) on incoming traffic to change the floating IP to the workload’s real IP before delivering packets to the workload.
-
-A Kubernetes Service assigns a **cluster IP** (and may also assign a nodePort and/or an external load balancer IP) that allows other endpoints on the network to access a set of pods, using network address translation. In many circumstances, a Kubernetes Service can handle similar use cases as a floating IP, and is generally recommended for Kubernetes users because it is a native Kubernetes concept. One thing you cannot do with Kubernetes Services is use protocols other than UDP, TCP, and SCTP (use of such protocols is fairly rare).
-
-## Before you begin...
-
-The features in this How to require:
-
-- {{prodname}} CNI plugin
-
-To verify, ssh to one of the Kubernetes nodes and look at the CNI plugin configuration, usually located at `/etc/cni/net.d/`. If you see the file `10-calico.conflist`, you are using the {{prodname}} CNI plugin.
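-
-For example, a quick check (the path assumes a default install):
-
-```bash
-# List CNI configs on the node; a Calico CNI install typically writes 10-calico.conflist
-ls /etc/cni/net.d/
-```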
-
-## How to
-
-- [Enable floating IPs](#enable-floating-ips)
-- [Configure a pod to use a floating IP](#configure-a-pod-to-use-a-floating-ip)
-
-### Enable floating IPs
-
-
-
-
-Floating IPs for Kubernetes pods are not currently supported for operator-managed Calico clusters.
-
-
-
-
-By default, floating IPs are disabled. To enable floating IPs, follow these steps.
-
-Modify the calico-config ConfigMap in the kube-system namespace. In the `cni_network_config` section, add the following stanza to the “calico” plugin config section.
-
-```
- "feature_control": {
- "floating_ips": true
- }
-```
-
-For example, your `cni_network_config` will look similar to the following after the update.
-
-```
- cni_network_config: |-
- {
- "name": "k8s-pod-network",
- "cniVersion": "0.3.0",
- "plugins": [
- {
- "type": "calico",
- "log_level": "info",
- "datastore_type": "kubernetes",
- "nodename": "__KUBERNETES_NODE_NAME__",
- "mtu": __CNI_MTU__,
- "ipam": {
- "type": "calico-ipam"
- },
- "policy": {
- "type": "k8s"
- },
- "kubernetes": {
- "kubeconfig": "__KUBECONFIG_FILEPATH__"
- },
- "feature_control": {
- "floating_ips": true
- }
- },
- {
- "type": "portmap",
- "snat": true,
- "capabilities": {"portMappings": true}
- }
- ]
- }
-```
-
-
-
-
-### Configure a pod to use a floating IP
-
-
-
-
-Floating IPs for Kubernetes pods are not currently supported for operator-managed Calico clusters.
-
-
-
-
-Annotate the pod with the key `cni.projectcalico.org/floatingIPs` and the value set to a list of IP addresses enclosed in square brackets. For correct advertisement to the rest of the cluster, all floating IPs must be within the range of a configured [IP pool](../../reference/resources/ippool.mdx).
-
-For example:
-
-```
-"cni.projectcalico.org/floatingIPs": "[\"10.0.0.1\"]"
-```
-
-Note the use of the escaped `\"` for the inner double quotes around the addresses.
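-
-For reference, a sketch of the annotation in a pod manifest (pod name and image are illustrative). In YAML you can single-quote the JSON list instead of escaping the inner double quotes:
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: app-with-floating-ip # illustrative
-  annotations:
-    cni.projectcalico.org/floatingIPs: '["10.0.0.1"]'
-spec:
-  containers:
-    - name: app
-      image: nginx
-```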
-
-
-
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/assign-ip-addresses-topology.mdx b/calico_versioned_docs/version-3.25/networking/ipam/assign-ip-addresses-topology.mdx
deleted file mode 100644
index 9d6d498eee..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/assign-ip-addresses-topology.mdx
+++ /dev/null
@@ -1,181 +0,0 @@
----
-description: Configure Calico to use specific IP pools for different topologies including zone, rack, or region.
----
-
-# Assign IP addresses based on topology
-
-## Big picture
-
-Assign blocks of IP addresses from an IP pool for different topological areas.
-
-## Value
-
-If you have workloads in different regions, zones, or racks, you may want workloads in the same topological area to get IP addresses from the same IP pool. This strategy is useful for reducing the number of routes that are required in the network, or to meet requirements imposed by an external firewall device or policy. {{prodname}} makes it easy to do this using an IP pool resource with node labels and node selectors.
-
-## Concepts
-
-### IP address assignment
-
-Topology-based IP address assignment requires addresses to be per-host (node).
-As such, Kubernetes annotations cannot be used because annotations are only per-namespace and per-pod. And although you can configure IP addresses for nodes in the CNI configuration, you are making changes within the host’s file system. The best option is to use node-selection IP address assignment using IP pools.
-
-### Node-selection IP address management
-
-Node selection-based IP address assignment is exactly what it sounds like: node labels are set, and Calico uses node selectors to decide which IP pools to use when assigning IP addresses to the node.
-
-### Best practice
-
-Nodes only assign workload addresses from IP pools which select them. To avoid having a workload not get an IP and fail to start, it is important to ensure that all nodes are selected by at least one IP pool.
-
-## How to
-
-### Create an IP pool, specific nodes
-
-In the following example, we create an IP pool that only allocates IP addresses for nodes with the label, **zone=west**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
- name: zone-west-ippool
-spec:
- cidr: 192.168.0.0/24
- ipipMode: Always
- natOutgoing: true
- nodeSelector: zone == "west"
-```
-
-Then, we label a node with zone=west. For example:
-
-```bash
-kubectl label nodes kube-node-0 zone=west
-```
-
-## Tutorial
-
-In this tutorial, we create a cluster with four nodes across two racks (two nodes/rack).
-
-```
- -------------------
- | router |
- -------------------
- | |
---------------- ---------------
-| rack-0 | | rack-1 |
---------------- ---------------
-| kube-node-0 | | kube-node-2 |
-- - - - - - - - - - - - - - - -
-| kube-node-1 | | kube-node-3 |
-- - - - - - - - - - - - - - - -
-```
-
-Using the pod IP range `192.168.0.0/16`, we target the following setup: reserve
-the `192.168.0.0/24` pool for `rack-0` and the `192.168.1.0/24` pool for `rack-1`.
-Let's get started.
-
-Because {{prodname}} was installed without setting the default IP pool to match,
-running `calicoctl get ippool -o wide` shows that {{prodname}} created its
-default IP pool of `192.168.0.0/16`:
-
-```
-NAME CIDR NAT IPIPMODE DISABLED SELECTOR
-default-ipv4-ippool 192.168.0.0/16 true Always false all()
-```
-
-1. Delete the default IP pool.
-
- Since the `default-ipv4-ippool` IP pool resource already exists and accounts
- for the entire `/16` block, we will have to delete this first:
-
- ```bash
- calicoctl delete ippools default-ipv4-ippool
- ```
-
-2. Label the nodes.
-
- To assign IP pools to specific nodes, these nodes must be labelled
- using [kubectl label](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/#add-a-label-to-a-node).
-
- ```bash
- kubectl label nodes kube-node-0 rack=0
- kubectl label nodes kube-node-1 rack=0
- kubectl label nodes kube-node-2 rack=1
- kubectl label nodes kube-node-3 rack=1
- ```
-
-3. Create an IP pool for each rack.
-
-   Create one `IPPool` per rack, following the same format as the `zone-west-ippool`
-   example above: `192.168.0.0/24` with `nodeSelector: rack == "0"`, and
-   `192.168.1.0/24` with `nodeSelector: rack == "1"`. A sketch of the two pools is
-   included after this tutorial's steps.
-
-4. Verify that workloads get addresses from the pool that matches their rack.
-
-   After deploying some workloads (an nginx deployment in this example), running
-   `kubectl get pods -o wide` shows output similar to:
-
-   ```
-   nginx-5c7588df-s7qw6 1/1 Running 0 6m7s 192.168.0.129 kube-node-1
-   nginx-5c7588df-w7r7g 1/1 Running 0 6m3s 192.168.1.65 kube-node-2
-   nginx-5c7588df-62lnf 1/1 Running 0 6m3s 192.168.1.1 kube-node-3
-   nginx-5c7588df-pnsvv 1/1 Running 0 6m3s 192.168.1.64 kube-node-2
-   ```
-
-   The grouping of the IP addresses assigned to the workloads differs based on
-   the node that they were scheduled to. Additionally, the assigned address for
-   each workload falls within the IP pool that selects the rack that it runs on.
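-
-For reference, a minimal sketch of the two per-rack pools (the pool names are illustrative; `ipipMode` and `natOutgoing` follow the earlier example and may need adjusting for your environment):
-
-```bash
-calicoctl create -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: rack-0-ippool
-spec:
-  cidr: 192.168.0.0/24
-  ipipMode: Always
-  natOutgoing: true
-  nodeSelector: rack == "0"
-EOF
-
-calicoctl create -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: rack-1-ippool
-spec:
-  cidr: 192.168.1.0/24
-  ipipMode: Always
-  natOutgoing: true
-  nodeSelector: rack == "1"
-EOF
-```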
-
-:::note
-
-{{prodname}} IPAM will not reassign IP addresses to workloads
-that are already running. To update running workloads with IP addresses from
-a newly configured IP pool, they must be recreated. We recommend doing this
-before going into production or during a maintenance window.
-
-:::
-
-## Additional resources
-
-[Calico IPAM](../../reference/configure-cni-plugins.mdx)
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/change-block-size.mdx b/calico_versioned_docs/version-3.25/networking/ipam/change-block-size.mdx
deleted file mode 100644
index 03481cd23e..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/change-block-size.mdx
+++ /dev/null
@@ -1,260 +0,0 @@
----
-description: Expand or shrink the IP pool block size to efficiently manage IP pool addresses.
----
-
-# Change IP pool block size
-
-import DetermineIpam from '@site/calico_versioned_docs/version-3.25/_includes/content/_determine-ipam.mdx';
-
-## Big picture
-
-Change the IP pool block size to efficiently manage IP pool addresses.
-
-## Value
-
-Changing IP pool block size after installation requires ordered steps to minimize pod connectivity disruption.
-
-## Concepts
-
-### About IP pools
-
-By default, {{prodname}} uses an IPAM block size of 64 addresses – /26 for IPv4, and /122 for IPv6. However, the block size can be changed depending on the IP pool address family.
-
-- IPv4: 20-32, inclusive
-- IPv6: 116-128, inclusive
-
-You can have **only one default IP pool per protocol** in your installation manifest. In this example, there is one IP pool for IPv4 (/26), and one IP pool for IPv6 (/122).
-
-```yaml
-apiVersion: operator.tigera.io/v1
-kind: Installation
-metadata:
- name: default
-spec:
- # Configures Calico networking.
- calicoNetwork:
- # Note: The ipPools section cannot be modified post-install.
- ipPools:
- - blockSize: 26
- cidr: 10.48.0.0/21
- encapsulation: IPIP
- natOutgoing: Enabled
- nodeSelector: all()
- - blockSize: 122
- cidr: 2001::00/64
- encapsulation: None
- natOutgoing: Enabled
- nodeSelector: all()
-```
-
-However, the following is invalid because it has two IP pools for IPv4.
-
-```yaml
-apiVersion: operator.tigera.io/v1
-kind: Installation
-metadata:
- name: default
-spec:
- # Configures Calico networking.
- calicoNetwork:
- # Note: The ipPools section cannot be modified post-install.
- ipPools:
- - blockSize: 26
- cidr: 10.48.0.0/21
- encapsulation: IPIP
- natOutgoing: Enabled
- nodeSelector: all()
- - blockSize: 31
- cidr: 10.48.8.0/21
- encapsulation: IPIP
- natOutgoing: Enabled
- nodeSelector: all()
-```
-
-### Expand or shrink IP pool block sizes
-
-By default, the {{prodname}} IPAM block size for an IP pool is /26. To expand from the default size /26, lower the `blockSize` (for example, /24). To shrink the `blockSize` from the default /26, raise the number (for example, /28).
-
-### Best practice: change IP pool block size before installation
-
-Because the `blockSize` field cannot be edited directly after {{prodname}} installation, it is best to change the IP pool block size before installation to minimize disruptions to pod connectivity.
-
-## Before you begin...
-
-**Required**
-
-Verify that you are using {{prodname}} IPAM.
-
-
-
-## How to
-
-:::note
-
-Follow the steps to minimize pod connectivity disruption. Pods may lose connectivity when they are redeployed, and may lose external connectivity while in the temporary pool. Also, when pods are deleted, applications may be temporarily unavailable (depending on the type of application). Plan your changes accordingly.
-
-:::
-
-The high-level steps to follow are:
-
-1. [Create a temporary IP pool](#create-a-temporary-ip-pool)
- **Note**: The temporary IP pool must not overlap with the existing one.
-1. [Disable the existing IP pool](#disable-the-existing-ip-pool)
- **Note**: When you disable an IP pool, only new IP address allocations are prevented; networking of existing pods are not affected.
-1. [Delete pods from the existing IP pool](#delete-pods-from-the-existing-ip-pool)
- This includes any new pods that may have been created with the existing IP pool prior to disabling the pool. Verify that new pods get an address from the temporary IP pool.
-1. [Delete the existing IP pool](#delete-the-existing-ip-pool)
-1. [Create a new IP pool with the desired block size](#create-a-new-ip-pool-with-the-desired-block-size)
-1. [Disable the temporary IP pool](#disable-the-temporary-ip-pool)
-1. [Delete pods from the temporary IP pool](#delete-pods-from-the-temporary-ip-pool)
-1. [Delete the temporary IP pool](#delete-the-temporary-ip-pool)
-
-## Tutorial
-
-In the following steps, our Kubernetes cluster has a default CIDR block size of /26. We want to shrink the block size to /28 to use the pool more efficiently.
-
-### Create a temporary IP pool
-
-We add a new IPPool with the CIDR range, 10.0.0.0/16.
-
-Create a temporary-pool.yaml.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
- name: temporary-pool
-spec:
- cidr: 10.0.0.0/16
- ipipMode: Always
- natOutgoing: true
-```
-
-Apply the changes.
-
-```bash
-calicoctl apply -f temporary-pool.yaml
-```
-
-Let’s verify the temporary IP pool.
-
-```bash
-calicoctl get ippool -o wide
-```
-
-```
-NAME CIDR NAT IPIPMODE DISABLED
-default-ipv4-ippool 192.168.0.0/16 true Always false
-temporary-pool 10.0.0.0/16 true Always false
-```
-
-### Disable the existing IP pool
-
-Disable allocations in the default pool.
-
-```bash
-calicoctl patch ippool default-ipv4-ippool -p '{"spec": {"disabled": true}}'
-```
-
-Verify the changes.
-
-```bash
-calicoctl get ippool -o wide
-```
-
-```
-NAME CIDR NAT IPIPMODE DISABLED
-default-ipv4-ippool 192.168.0.0/16 true Always true
-temporary-pool 10.0.0.0/16 true Always false
-```
-
-### Delete pods from the existing IP pool
-
-In our example, **coredns** is our only pod; for multiple pods you would trigger a deletion for all pods in the cluster.
-
-```bash
-kubectl delete pod -n kube-system coredns-6f4fd4bdf-8q7zp
-```
-
-Restart all pods with just one command.
-
-:::caution
-
-The following command is disruptive and may take several minutes depending on the number of pods deployed.
-
-:::
-
-```bash
-kubectl delete pod -A --all
-```
-
-### Delete the existing IP pool
-
-Now that you’ve verified that pods are getting IPs from the new range, you can safely delete the existing pool.
-
-```bash
-calicoctl delete ippool default-ipv4-ippool
-```
-
-### Create a new IP pool with the desired block size
-
-In this step, we create a pool.yaml that recreates the default IP pool with the new block size (/28).
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
- name: default-ipv4-ippool
-spec:
- blockSize: 28
-  cidr: 192.168.0.0/16
- ipipMode: Always
- natOutgoing: true
-```
-
-Apply the changes.
-
-```bash
-calicoctl apply -f pool.yaml
-```
-
-### Disable the temporary IP pool
-
-```bash
-calicoctl patch ippool temporary-pool -p '{"spec": {"disabled": true}}'
-```
-
-### Delete pods from the temporary IP pool
-
-In our example, **coredns** is our only pod; for multiple pods you would trigger a deletion for all pods in the cluster.
-
-```bash
-kubectl delete pod -n kube-system coredns-6f4fd4bdf-8q7zp
-```
-
-Restart all pods with just one command.
-
-:::caution
-
-The following command is disruptive and may take several minutes depending on the number of pods deployed.
-
-:::
-
-```bash
-kubectl delete pod -A --all
-```
-
-Validate that your pods and block size are correct by running the following commands:
-
-```bash
-kubectl get pods --all-namespaces -o wide
-calicoctl ipam show --show-blocks
-```
-
-### Delete the temporary IP pool
-
-Clean up the IP pools by deleting the temporary IP pool.
-
-```bash
-calicoctl delete ippool temporary-pool
-```
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/get-started-ip-addresses.mdx b/calico_versioned_docs/version-3.25/networking/ipam/get-started-ip-addresses.mdx
deleted file mode 100644
index 08f9be302a..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/get-started-ip-addresses.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-description: Configure Calico to use Calico IPAM or host-local IPAM, and when to use one or the other.
----
-
-# Get started with IP address management
-
-## Big picture
-
-Understand how IP address management (IPAM) functions in a Kubernetes cluster using Calico.
-
-## Value
-
-Different IPAM techniques provide different feature sets. Calico’s IPAM provides additional IP allocation efficiency and flexibility compared to other address management approaches.
-
-## Concepts
-
-### IPAM in Kubernetes
-
-Kubernetes uses IPAM plugins to allocate and manage IP addresses assigned to pods. Different IPAM plugins provide different feature sets. Calico provides its own IPAM plugin called **calico-ipam** which is designed to work well with Calico and includes a number of features.
-
-### Calico IPAM
-
-The **calico-ipam** plugin uses Calico’s IP pool resource to control how IP addresses are allocated to pods within the cluster. This is the default plugin used by most Calico installations.
-
-By default, Calico uses a single IP pool for the entire Kubernetes pod CIDR, but you can divide the pod CIDR into several pools. You can assign separate IP pools to particular selections of **nodes**, or to teams, users, or applications within a cluster using **namespaces**.
-
-You can control which pools Calico uses for each pod using any of the following (a sketch of the namespace annotation follows this list):
-
-- node selectors
-- an annotation on the pod’s namespace, or
-- an annotation on the pod
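-
-For example, a sketch of the namespace annotation using the `cni.projectcalico.org/ipv4pools` annotation, assuming an IP pool named `dev-ipv4-ippool` already exists (the namespace and pool names are illustrative; see [the CNI plugin reference](../../reference/configure-cni-plugins.mdx) for details):
-
-```yaml
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: dev # illustrative
-  annotations:
-    # Pods created in this namespace get addresses from the named Calico IP pool
-    cni.projectcalico.org/ipv4pools: '["dev-ipv4-ippool"]'
-```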
-
-Calico also supports the **host-local** IPAM plugin. However, when using the host-local IPAM plugin some Calico features are not available.
-
-### Calico IPAM blocks
-
-In Calico IPAM, IP pools are subdivided into blocks -- smaller chunks that are associated with a particular node in the cluster. Each node in the cluster can have one or more blocks associated with it. Calico will automatically create and destroy blocks as needed as the number of nodes and pods in the cluster grows or shrinks.
-
-Blocks allow Calico to efficiently aggregate addresses assigned to pods on the same node, reducing the size of the routing table. By default Calico will try to allocate IP addresses from within an associated block, creating a new block if necessary. Calico can also assign addresses to pods on a node that are not within a block associated with that node. This allows for IP allocations independent of the node on which a pod is launched.
-
-By default, Calico creates blocks with room for 64 addresses (a /26), but you can control block sizes for each IP pool.
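-
-For example, a sketch of a pool with a non-default block size (the pool name and CIDR are illustrative):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: pool-with-small-blocks # illustrative
-spec:
-  cidr: 10.100.0.0/16
-  blockSize: 28 # 16 addresses per block instead of the default /26 (64)
-  natOutgoing: true
-```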
-
-### Host-local IPAM
-
-The host-local plugin is a simple IP address management plugin. It uses predetermined CIDRs statically allocated to each node to choose addresses for pods. Once set, the CIDR for a node cannot be modified. Pods can be assigned addresses only from within the CIDR allocated to the node.
-
-Calico can use the host-local IPAM plugin, using the **Node.Spec.PodCIDR** field in the Kubernetes API to determine the CIDR to use for each node. However, per-node, per-pod, and per-namespace IP allocation features are not available using the host-local plugin.
-
-The host-local IPAM plugin is primarily used by other methods of routing pod traffic from one host to another. For example, it is used when installing Calico for policy enforcement with flannel networking, as well as when using Calico in Google Kubernetes Engine (GKE).
-
-## How to
-
-### Install Calico with calico-ipam
-
-Follow one of the [getting started guides](../../getting-started/index.mdx) to install Calico.
-
-### Install Calico with host-local IPAM
-
-Follow one of the [getting started guides](../../getting-started/index.mdx) to install Calico with flannel networking, or on GKE.
-
-Or, see the [reference documentation on host-local IPAM](../../reference/configure-cni-plugins.mdx#using-host-local-ipam).
-
-## Tutorial
-
-For a blog/tutorial on IP pools, see [Calico IPAM: Explained and Enhanced](https://www.tigera.io/blog/calico-ipam-explained-and-enhanced/).
-
-## Additional resources
-
-- [IP Pool](../../reference/resources/ippool.mdx)
-
-There are several other ways to leverage Calico IPAM including:
-
-- [Assign addresses based on topology](assign-ip-addresses-topology.mdx)
-- [Use a specific address for a pod](use-specific-ip.mdx)
-- [Migrate from one IP pool to another](migrate-pools.mdx)
-- [Interoperate with legacy firewalls using IP ranges](legacy-firewalls.mdx)
-- [View IP address utilization](../../reference/calicoctl/ipam/show.mdx)
-- [Change IP address block size](../../reference/resources/ippool.mdx)
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/index.mdx b/calico_versioned_docs/version-3.25/networking/ipam/index.mdx
deleted file mode 100644
index 834d48a7fd..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Calico IPAM is flexible and efficient. Learn how to interoperate with legacy firewalls using IP address ranges, advertise Kubernetes service IPs, and more.
-hide_table_of_contents: true
----
-
-# IP address management
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/ip-autodetection.mdx b/calico_versioned_docs/version-3.25/networking/ipam/ip-autodetection.mdx
deleted file mode 100644
index 8d5df010f7..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/ip-autodetection.mdx
+++ /dev/null
@@ -1,308 +0,0 @@
----
-description: Calico IP autodetection ensures the correct IP address is used for routing. Learn how to customize it.
----
-
-# Configure IP autodetection
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Configure IP autodetection for {{prodname}} nodes to ensure the correct IP address is used for routing.
-
-## Value
-
-When you install {{prodname}} on a node, an IP address and subnet are automatically detected. {{prodname}} provides several ways to configure IP/subnet autodetection, and supports configuring specific IPs for:
-
-- Hosts with multiple external interfaces
-- Host interfaces with multiple IP addresses
-- [Changes to cross subnet packet encapsulation](../configuring/vxlan-ipip.mdx)
-- Changes to host IP address
-
-## Concepts
-
-### Autodetecting node IP address and subnet
-
-For internode routing, each {{prodname}} node must be configured with an IPv4 address and/or an IPv6 address. When you install {{prodname}} on a node, a node resource is automatically created using routing information that is detected from the host. For some deployments, you may want to update autodetection to ensure nodes get the correct IP address.
-
-**Sample default node resource after installation**
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: Node
-metadata:
- name: node-hostname
-spec:
- bgp:
- asNumber: 64512
- ipv4Address: 10.244.0.1/24
- ipv6Address: 2000:db8:85a3::8a2e:370:7335/120
- ipv4IPIPTunnelAddr: 192.168.0.1
-```
-
-### Autodetection methods
-
-By default, {{prodname}} uses the **first-found** method, which selects the first valid IP address on the first valid interface (excluding local interfaces such as the docker bridge). However, you can change the default method to any of the following:
-
-- Address assigned to Kubernetes node (**kubernetes-internal-ip**)
-- Address used by the node to reach a particular IP or domain (**can-reach**)
-- Regex to include matching interfaces (**interface**)
-- Regex to exclude matching interfaces (**skip-interface**)
-- A list of IP ranges in CIDR format to determine valid IP addresses on the node to choose from (**cidrs**)
-
-For more details on autodetection methods, see [node configuration](../../reference/configure-calico-node.mdx) reference.
-
-## How to
-
-- [Change the autodetection method](#change-the-autodetection-method)
-- [Manually configure IP address and subnet for a node](#manually-configure-ip-address-and-subnet-for-a-node)
-
-### Change the autodetection method
-
-
-
-
-As noted previously, the default autodetection method is **first valid interface found** (first-found). To use a different autodetection method, edit the default [Installation](../../reference/installation/api.mdx#operator.tigera.io/v1.Installation) custom resource, specifying the method. Below are examples of the supported autodetection methods:
-
-:::note
-
-To configure the default autodetection method for IPv6 for any of the below methods, use the field `nodeAddressAutodetectionV6`.
-
-:::
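-
-For example, a minimal sketch that applies the interface-based method to IPv6 (the regex is an assumption for illustration):
-
-```yaml
-kind: Installation
-apiVersion: operator.tigera.io/v1
-metadata:
-  name: default
-spec:
-  calicoNetwork:
-    nodeAddressAutodetectionV6:
-      interface: eth.*
-```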
-
-- **Kubernetes Node IP**
-
- {{prodname}} will select the first internal IP address listed in the Kubernetes node's `Status.Addresses` field.
-
- ```yaml
- kind: Installation
- apiVersion: operator.tigera.io/v1
- metadata:
- name: default
- spec:
- calicoNetwork:
- nodeAddressAutodetectionV4:
- kubernetes: NodeInternalIP
- ```
-
-- **Source address used to reach an IP or domain name**
-
- {{prodname}} will choose the IP address that is used to reach the given "can reach" IP address or domain. For example:
-
- ```yaml
- kind: Installation
- apiVersion: operator.tigera.io/v1
- metadata:
- name: default
- spec:
- calicoNetwork:
- nodeAddressAutodetectionV4:
- canReach: 8.8.8.8
- ```
-
-- **Including matching interfaces**
-
- {{prodname}} will choose an address on each node from an interface that matches the given [regex](https://pkg.go.dev/regexp).
- For example:
-
- ```yaml
- kind: Installation
- apiVersion: operator.tigera.io/v1
- metadata:
- name: default
- spec:
- calicoNetwork:
- nodeAddressAutodetectionV4:
- interface: eth.*
- ```
-
-- **Excluding matching interfaces**
-
- {{prodname}} will choose an address on each node from an interface that does not match the given [regex](https://pkg.go.dev/regexp).
- For example:
-
- ```yaml
- kind: Installation
- apiVersion: operator.tigera.io/v1
- metadata:
- name: default
- spec:
- calicoNetwork:
- nodeAddressAutodetectionV4:
- skipInterface: eth.*
- ```
-
-- **Including CIDRs**
-
- {{prodname}} will select any IP address from the node that falls within the given CIDRs. For example:
-
- ```yaml
- kind: Installation
- apiVersion: operator.tigera.io/v1
- metadata:
- name: default
- spec:
- calicoNetwork:
- nodeAddressAutodetectionV4:
- cidrs:
- - "192.168.200.0/24"
- ```
-
-
-
-
-As noted previously, the default autodetection method is **first valid interface found** (first-found). To use a different autodetection method, use the following `kubectl set env` command, specifying the method:
-
-- **IPv4**
-
- ```bash
-  kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=<autodetection-method>
- ```
-
-- **IPv6**
-
- ```bash
-  kubectl set env daemonset/calico-node -n kube-system IP6_AUTODETECTION_METHOD=<autodetection-method>
- ```
-
-Where autodetection methods are based on:
-
-- **Kubernetes Node IP**
-
- {{prodname}} will select the first internal IP address listed in the Kubernetes node's `Status.Addresses` field.
-
- ```bash
- kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=kubernetes-internal-ip
- ```
-
-- **Source address used to reach an IP or domain name**
-
- {{prodname}} will choose the IP address that is used to reach the given "can reach" IP address or domain. For example:
-
- ```bash
- kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=can-reach=www.google.com
- ```
-
-- **Including matching interfaces**
-
- {{prodname}} will choose an address on each node from an interface that matches the given [regex](https://pkg.go.dev/regexp).
- For example:
-
- ```bash
- kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=eth.*
- ```
-
-- **Excluding matching interfaces**
-
- {{prodname}} will choose an address on each node from an interface that does not match the given [regex](https://pkg.go.dev/regexp).
- For example:
-
- ```bash
- kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=skip-interface=eth.*
- ```
-
-- **Including CIDRs**
-
- {{prodname}} will select any IP address from the node that falls within the given CIDRs. For example:
-
- ```bash
- kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=cidr=192.168.200.0/24,172.15.0.0/24
- ```
-
-
-
-
-### Manually configure IP address and subnet for a node
-
-In the following scenarios, you may want to configure a specific IP and subnet:
-
-- Hosts with multiple external interfaces
-- Host interfaces with multiple IP addresses
-- Changes to cross subnet packet encapsulation
-- Changes to host IP address
-
-
-
-
-You can configure a specific IP address and subnet for a node by disabling IP autodetection and then updating the [Node resource](../../reference/resources/node.mdx).
-
-#### Disable autodetection
-
-To disable autodetection, set the relevant `nodeAddressAutodetectionV4` and/or `nodeAddressAutodetectionV6` field in the Installation resource to an empty value:
-
-```yaml
-apiVersion: operator.tigera.io/v1
-kind: Installation
-metadata:
- name: default
-spec:
- calicoNetwork:
- nodeAddressAutodetectionV4: {}
- nodeAddressAutodetectionV6: {}
-```
-
-#### Configure IP and subnet using node resource
-
-You can configure the IP address and subnet on a Node resource.
-
-Use `calicoctl patch` to update the current node configuration. For example:
-
-```bash
-calicoctl patch node kind-control-plane \
- --patch='{"spec":{"bgp": {"ipv4Address": "10.0.2.10/24", "ipv6Address": "fd80:24e2:f998:72d6::/120"}}}'
-```
-
-
-
-
-You can configure a specific IP address and subnet for a node using environment variables or by updating the [Node resource](../../reference/resources/node.mdx). Because you can configure the IP address and subnet using either environment variables or the node resource, the following table describes how values are synchronized.
-
-| **If this environment variable...** | **Is...** | **Then...** |
-| ----------------------------------- | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- |
-| IP/IP6 | Explicitly set | The specified values are used, and the Node resource is updated. |
-| | Set to autodetect | The requested method is used (first-found, can-reach, interface, skip-interface, kubernetes-internal-ip), and the Node resource is updated. |
-| | Not set, but Node resource has IP/IP6 values | Node resource value is used. |
-| IP | Not set, and there is no IP value in Node resource | Autodetects an IPv4 address and subnet, and updates Node resource. |
-| IP6                                 | Not set, and there is no IP6 value in Node resource    | No IP6 routing is performed on the node. |
-
-#### Configure IP and subnet using environment variables
-
-To configure IP and subnet values using environment variables, use a `kubectl set env` command. For example:
-
-```bash
-kubectl set env daemonset/calico-node -n kube-system IP=10.0.2.10/24 IP6=fd80:24e2:f998:72d6::/120
-```
-
-:::note
-
-If the subnet is omitted, the defaults are: /32 (IPv4) and /128 (IPv6). We recommend that you include the subnet information for clarity when specifying IP addresses.
-
-:::
-
-#### Configure IP and subnet using node resource
-
-You can also configure the IP address and subnet on a Node resource.
-
-:::note
-
-When configuring the IP address on a Node resource, you may want to remove the IP address environment variables from the {{nodecontainer}} container, because IP options set on the container take precedence and will overwrite the values you configure on the Node resource.
-
-:::
-
-Use `calicoctl patch` to update the current node configuration. For example:
-
-```bash
-calicoctl patch node kind-control-plane \
- --patch='{"spec":{"bgp": {"ipv4Address": "10.0.2.10/24", "ipv6Address": "fd80:24e2:f998:72d6::/120"}}}'
-```
-
-
-
-
-## Additional resources
-
-- For details on autodetection methods, see the [node configuration](../../reference/configure-calico-node.mdx) reference.
-- For calicoctl environment variables, see [Configuring {{nodecontainer}}](../../reference/configure-calico-node.mdx)
-- [Node resource](../../reference/resources/node.mdx)
-- [Reference documentation for calicoctl patch](../../reference/calicoctl/patch.mdx)
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/ipv6-control-plane.mdx b/calico_versioned_docs/version-3.25/networking/ipam/ipv6-control-plane.mdx
deleted file mode 100644
index 4b0a6c0556..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/ipv6-control-plane.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
----
-description: Configure the Kubernetes control plane to operate over IPv6 for dual stack or IPv6 only.
----
-
-# Configure Kubernetes control plane to operate over IPv6
-
-## Big picture
-
-If you have IPv6 connectivity between your nodes and workloads, you may also want to configure the Kubernetes control plane to operate over IPv6, instead of IPv4.
-
-## How to
-
-To configure Kubernetes components for IPv6 only, set the following flags.
-
-| Component | **Flag** | **Value/Content** |
-| --------------------------- | --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
-| **kube-apiserver** | `--bind-address` or `--insecure-bind-address` | Set to the appropriate IPv6 address or `::` for all IPv6 addresses on the host. |
-| | `--advertise-address` | Set to the IPv6 address that nodes should use to access the `kube-apiserver`. |
-| **kube-controller-manager** | `--master` | Set with the IPv6 address where the `kube-apiserver` can be accessed. |
-| **kube-scheduler** | `--master` | Set with the IPv6 address where the `kube-apiserver` can be accessed. |
-| **kubelet** | `--address` | Set to the appropriate IPv6 address or `::` for all IPv6 addresses. |
-| | `--cluster-dns` | Set to the IPv6 address that will be used for the service DNS; this must be in the range used for `--service-cluster-ip-range`. |
-| | `--node-ip` | Set to the IPv6 address of the node. |
-| **kube-proxy** | `--bind-address` | Set to the appropriate IPv6 address or `::` for all IPv6 addresses on the host. |
-| | `--master` | Set with the IPv6 address where the `kube-apiserver` can be accessed. |
-
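-For illustration, a minimal sketch of kubelet and kube-proxy invocations using the flags above (all addresses are placeholders, not values from this guide):
-
-```bash
-kubelet --address=:: \
-  --node-ip=2001:db8::10 \
-  --cluster-dns=2001:db8:96::a
-
-kube-proxy --bind-address=:: \
-  --master=https://[2001:db8::1]:6443
-```
-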
-For dual stack settings, see [Enable IPv4/IPv6 dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#prerequisites).
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/ipv6.mdx b/calico_versioned_docs/version-3.25/networking/ipam/ipv6.mdx
deleted file mode 100644
index 7311ce7b96..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/ipv6.mdx
+++ /dev/null
@@ -1,233 +0,0 @@
----
-description: Configure dual stack or IPv6 only for workloads.
----
-
-# Configure dual stack or IPv6 only
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Configure {{prodname}} IP address allocation to use dual stack or IPv6 only for workload communications.
-
-## Value
-
-Workload communication over IPv6, either alongside or instead of IPv4, is increasingly desirable. {{prodname}} supports:
-
-- **IPv4 only** (default)
-
- Each workload gets an IPv4 address, and can communicate over IPv4.
-
-- **Dual stack**
-
- Each workload gets an IPv4 and an IPv6 address, and can communicate over IPv4 or IPv6.
-
-- **IPv6 only**
-
- Each workload gets an IPv6 address, and can communicate over IPv6.
-
-## Before you begin
-
-**{{prodname}} requirements**
-
-- {{prodname}} IPAM
-
-**Kubernetes version requirements**
-
-- For dual stack, 1.16 and later
-- For one IP stack at a time (IPv4 or IPv6), any Kubernetes version
-
-**Kubernetes IPv6 host requirements**
-
-- An IPv6 address that is reachable from the other hosts
-- The sysctl setting, `net.ipv6.conf.all.forwarding`, is set to `1`.
-  This ensures both Kubernetes service traffic and {{prodname}} traffic are forwarded appropriately.
-- A default IPv6 route
-
-**Kubernetes IPv4 host requirements**
-
-- An IPv4 address that is reachable from the other hosts
-- The sysctl setting, `net.ipv4.conf.all.forwarding`, is set to `1`.
-  This ensures both Kubernetes service traffic and {{prodname}} traffic are forwarded appropriately.
-- A default IPv4 route
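-
-For example, to enable forwarding at runtime (a sketch; persist the settings with your distribution's sysctl configuration for production use):
-
-```bash
-sudo sysctl -w net.ipv4.conf.all.forwarding=1
-sudo sysctl -w net.ipv6.conf.all.forwarding=1
-```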
-
-## How to
-
-:::note
-
-The following tasks are only for new clusters.
-
-:::
-
-- [Enable IPv6 only](#enable-ipv6-only)
-- [Enable dual stack](#enable-dual-stack)
-
-### Enable IPv6 only
-
-
-
-
-To configure an IPv6-only cluster using the operator, edit your default Installation at install time to include a single IPv6 pool, and no IPv4 pools. For example:
-
-```yaml
-apiVersion: operator.tigera.io/v1
-kind: Installation
-metadata:
- name: default
-spec:
- calicoNetwork:
- # Note: The ipPools section cannot be modified post-install.
- ipPools:
- - blockSize: 122
- cidr: 2001::00/64
- encapsulation: None
- natOutgoing: Enabled
- nodeSelector: all()
-```
-
-
-
-
-1. Set up a new Kubernetes cluster with an IPv6 pod CIDR and service IP range.
-
-1. Using the [{{prodname}} Kubernetes install guide](../../getting-started/kubernetes/self-managed-onprem/onpremises.mdx), download the correct {{prodname}} manifest for the cluster and datastore type.
-
-1. Edit the CNI config (calico-config ConfigMap in the manifest) to disable IPv4 assignments and enable IPv6 assignments.
-
- ```
- "ipam": {
- "type": "calico-ipam",
- "assign_ipv4": "false",
- "assign_ipv6": "true"
- },
- ```
-
-1. Configure IPv6 support by adding the following variable settings to the environment for the `calico-node` container:
-
- | Variable name | Value |
- | ------------------- | ------------ |
- | `IP6` | `autodetect` |
- | `FELIX_IPV6SUPPORT` | `true` |
-
- :::note
-
- If your IPv6 IP pools include private IP addresses, pods that are assigned private IP addresses cannot perform outbound NAT by default.
-
- To enable outbound NAT for these pods, add `CALICO_IPV6POOL_NAT_OUTGOING: true` to the environment of the `calico-node` container.
-
- :::
-
-1. For clusters **not** provisioned with kubeadm (see note below), configure the default IPv6 IP pool by adding the following variable setting to the environment for the `calico-node` container:
-
- | Variable name | Value |
- | ---------------------- | ------------------------------------------------------------------------------------------------------- |
- | `CALICO_IPV6POOL_CIDR` | the same as the IPv6 range you configured as the cluster CIDR to kube-controller-manager and kube-proxy |
-
- :::note
-
- For clusters provisioned with kubeadm, {{prodname}} autodetects the IPv4 and IPv6 pod CIDRs and does not require configuration.
-
- :::
-
-1. Apply the edited manifest with `kubectl apply -f`.
-
- New pods will get IPv6 addresses, and can communicate with each other and the outside world over IPv6.
-
-**(Optional) Update host to not look for IPv4 addresses**
-
-If you want your workloads to have IPv6 addresses only, because you do not have IPv4 addresses or connectivity
-between your nodes, complete these additional steps to tell {{prodname}} not to look for any IPv4 addresses.
-
-1. Disable [IP autodetection of IPv4](ip-autodetection.mdx) by setting `IP` to `none`.
-1. Calculate the {{prodname}} BGP router ID for IPv6 using either of the following methods.
- - Set the environment variable `CALICO_ROUTER_ID=hash` on {{nodecontainer}}.
- This configures {{prodname}} to calculate the router ID based on the hostname.
- - Pass a unique value for `CALICO_ROUTER_ID` to each node individually.
-
-
-
-
-### Enable dual stack
-
-1. Set up a new cluster following the Kubernetes [prerequisites](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#prerequisites) and [enablement steps](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#enable-ipv4-ipv6-dual-stack).
-
-
-
-
-To configure a dual-stack cluster using the operator, edit your default Installation at install time to include both an IPv4 and an IPv6 pool. For example:
-
-```yaml
-apiVersion: operator.tigera.io/v1
-kind: Installation
-metadata:
- name: default
-spec:
- # Configures Calico networking.
- calicoNetwork:
- # Note: The ipPools section cannot be modified post-install.
- ipPools:
- - blockSize: 26
- cidr: 10.48.0.0/21
- encapsulation: IPIP
- natOutgoing: Enabled
- nodeSelector: all()
- - blockSize: 122
- cidr: 2001::00/64
- encapsulation: None
- natOutgoing: Enabled
- nodeSelector: all()
-```
-
-
-
-
-1. Using the [{{prodname}} Kubernetes install guide](../../getting-started/kubernetes/self-managed-onprem/onpremises.mdx), download the correct {{prodname}} manifest for the cluster and datastore type.
-
-1. Edit the CNI config (`calico-config` ConfigMap in the manifest), and enable IPv4 and IPv6 address allocation by setting both fields to true.
-
- ```
- "ipam": {
- "type": "calico-ipam",
- "assign_ipv4": "true",
- "assign_ipv6": "true"
- },
- ```
-
-1. Configure IPv6 support by adding the following variable settings to the environment for the `calico-node` container:
-
- | Variable name | Value |
- | ------------------- | ------------ |
- | `IP6` | `autodetect` |
- | `FELIX_IPV6SUPPORT` | `true` |
-
- :::note
-
- If your IPv6 IP pools include private IP addresses, pods that are assigned private IP addresses cannot perform outbound NAT by default.
-
- To enable outbound NAT for these pods, add `CALICO_IPV6POOL_NAT_OUTGOING: true` to the environment of the `calico-node` container.
-
- :::
-
-1. For clusters **not** provisioned with kubeadm (see note below), configure the default IPv6 IP pool by adding the following variable setting to the environment for the `calico-node` container:
-
- | Variable name | Value |
- | ---------------------- | ------------------------------------------------------------------------------------------------------------ |
- | `CALICO_IPV6POOL_CIDR` | the same as the IPv6 range you configured as the IPv6 cluster CIDR to kube-controller-manager and kube-proxy |
-
- :::note
-
- For clusters provisioned with kubeadm, {{prodname}} autodetects the IPv4 and IPv6 pod CIDRs and does not require configuration.
-
- :::
-
-1. Apply the edited manifest with `kubectl apply -f`.
-
- New pods will get both IPv4 and IPv6 addresses, and can communicate with each other and the outside world over IPv4 or IPv6.
-
-
-
-
-## Additional resources
-
-- [Configure Kubernetes control plane to operate over IPv6](ipv6-control-plane.mdx)
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/legacy-firewalls.mdx b/calico_versioned_docs/version-3.25/networking/ipam/legacy-firewalls.mdx
deleted file mode 100644
index ccd25c9dd7..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/legacy-firewalls.mdx
+++ /dev/null
@@ -1,66 +0,0 @@
----
-description: Restrict the IP address chosen for a pod to a specific range of IP addresses.
----
-
-# Restrict a pod to use an IP address in a specific range
-
-import DetermineIpam from '@site/calico_versioned_docs/version-3.25/_includes/content/_determine-ipam.mdx';
-
-## Big picture
-
-Restrict the IP address chosen for a pod to a specific range of IP addresses.
-
-## Value
-
-When Kubernetes pods interact with external systems that make decisions based on IP ranges (for example legacy firewalls), it can be useful to define several IP ranges and explicitly assign pods to those ranges. Using {{prodname}} IP Address Management (IPAM), you can restrict a pod to use an address from within a specific range.
-
-## Concepts
-
-### Kubernetes pod CIDR
-
-The **Kubernetes pod CIDR** is the range of IPs Kubernetes expects pod IPs to be assigned from. It is defined for the entire cluster and is used by various Kubernetes components to determine whether an IP belongs to a pod. For example, kube-proxy treats traffic differently if that traffic is from a pod than if it is not. All pod IPs must be in the CIDR range for Kubernetes to function correctly.
-
-### IP Pool
-
-**IP pools** are ranges of IP addresses from which {{prodname}} assigns pod IPs. By default, {{prodname}} creates an IP pool for the entire Kubernetes pod CIDR, but you can change this to break the pod CIDR up into several pools. You can control which pool {{prodname}} uses for each pod using node selectors, or annotations on the pod or the pod’s namespace.
-
-## Before you begin...
-
-The features in this How to guide require:
-
-- {{prodname}} IPAM
-
-
-
-Additionally, cluster administrators must have [configured IP pools](../../reference/resources/ippool.mdx) to define the valid IP ranges to use for allocating pod IP addresses.
-
-## How to
-
-### Restrict a pod to use an IP address range
-
-Annotate the pod with key `cni.projectcalico.org/ipv4pools` and/or `cni.projectcalico.org/ipv6pools` and value set to a list of IP pool names, enclosed in brackets. For example:
-
-```
-cni.projectcalico.org/ipv4pools: '["pool-1", "pool-2"]'
-```
-
-The annotation value is a JSON list of pool names. If you wrap the value in double quotes rather than the single quotes shown above, escape the inner double quotes as `\"`.
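-
-For illustration, a minimal sketch of a pod manifest carrying the annotation (the pod name, image, and pool name are assumptions):
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: legacy-range-pod # hypothetical name
-  annotations:
-    cni.projectcalico.org/ipv4pools: '["pool-1"]'
-spec:
-  containers:
-    - name: app
-      image: nginx # placeholder image
-```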
-
-### Restrict all pods within a namespace to use an IP address range
-
-Annotate the namespace with key `cni.projectcalico.org/ipv4pools` and/or `cni.projectcalico.org/ipv6pools` and value set to a list of IP pool names, enclosed in brackets. For example:
-
-```
-cni.projectcalico.org/ipv4pools: '["pool-1", "pool-2"]'
-```
-
-The annotation value is a JSON list of pool names. If you wrap the value in double quotes rather than the single quotes shown above, escape the inner double quotes as `\"`.
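-
-For example, a sketch of setting the annotation on an existing namespace with `kubectl` (the namespace and pool names are assumptions):
-
-```bash
-kubectl annotate namespace dev \
-  'cni.projectcalico.org/ipv4pools=["pool-1", "pool-2"]'
-```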
-
-If both the pod and the pod’s namespace have the annotation, the pod annotation takes precedence.
-
-The annotation must be present at the time the pod is created. Adding it to an existing pod has no effect.
-
-## Additional resources
-
-For help configuring {{prodname}} IPAM, see [Configuring the {{prodname}} CNI Plugins](../../reference/configure-cni-plugins.mdx).
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/migrate-pools.mdx b/calico_versioned_docs/version-3.25/networking/ipam/migrate-pools.mdx
deleted file mode 100644
index 1b55341884..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/migrate-pools.mdx
+++ /dev/null
@@ -1,227 +0,0 @@
----
-description: Migrate pods from one IP pool to another on a running cluster without network disruption.
----
-
-# Migrate from one IP pool to another
-
-import DetermineIpam from '@site/calico_versioned_docs/version-3.25/_includes/content/_determine-ipam.mdx';
-
-## Big picture
-
-Migrate pods from one IP pool to another on a running cluster without network disruption.
-
-## Value
-
-Pods are assigned IP addresses from IP pools that you configure in {{prodname}}. As the number of pods increase, you may need to increase the number of addresses available for pods to use. Or, you may need to move pods from a CIDR that was used by mistake. {{prodname}} lets you migrate from one IP pool to another one on a running cluster without network disruption.
-
-## Concepts
-
-### IP pools and cluster CIDRs
-
-{{prodname}} supports using multiple disjoint IP pool CIDRs within the cluster. However, Kubernetes expects that all pods have addresses within the same cluster CIDR. This means that although it is technically feasible to create an IP pool outside of the cluster CIDR, we do not recommend it. Pods allocated addresses outside of the Kubernetes cluster CIDR will lose network connectivity.
-
-## Before you begin...
-
-**Verify that you are using {{prodname}} IPAM**.
-
-
-
-**Verify orchestrator support for changing the pod network CIDR**.
-
-Although Kubernetes supports changing the pod network CIDR, not all orchestrators do. Check your orchestrator documentation to verify.
-
-## How to
-
-### Migrate from one IP pool to another
-
-Follow these steps to migrate pods from one IP pool to another pool.
-
-:::note
-
-If you follow these steps, existing pod connectivity will not be affected. (If you delete the old IP pool before you create and verify the new pool, existing pods will be affected.) When pods are deleted, applications may be temporarily unavailable (depending on the type of application); plan accordingly.
-
-:::
-
-1. Add a new IP pool.
-
-:::note
-
-It is highly recommended that your Calico IP pools are within the Kubernetes cluster CIDR. If pod IPs are allocated
-from outside of the Kubernetes cluster CIDR, some traffic flows may have NAT applied unnecessarily causing unexpected behavior.
-
-:::
-
-1. Disable the old IP pool.
-
-:::note
-
-Disabling an IP pool only prevents new IP address allocations; it does not affect the networking of existing pods.
-
-:::
-
-1. Delete pods from the old IP pool. This includes any new pods that may have been created with the old IP pool prior to disabling the pool.
-
-1. Verify that new pods get an address from the new IP pool.
-
-1. Delete the old IP pool.
-
-## Tutorial
-
-In the following example, we created a Kubernetes cluster using **kubeadm**. But the IP pool CIDR we configured (192.168.0.0/16) doesn't match the
-Kubernetes cluster CIDR. Let's change the CIDR to **10.0.0.0/16**, which for the purposes of this example falls within the cluster CIDR.
-
-Let’s run `calicoctl get ippool -o wide` to see the IP pool, **default-ipv4-ippool**.
-
-```
-NAME CIDR NAT IPIPMODE VXLANMODE DISABLED
-default-ipv4-ippool 192.168.0.0/16 true Always Never false
-```
-
-When we run `calicoctl get wep --all-namespaces`, we see that a pod is created using the default range (192.168.52.130/32).
-
-```
-NAMESPACE WORKLOAD NODE NETWORKS INTERFACE
-kube-system coredns-6f4fd4bdf-8q7zp vagrant 192.168.52.130/32 cali800a63073ed
-```
-
-Let’s get started changing this pod to the new IP pool (10.0.0.0/16).
-
-### Step 1: Add a new IP pool
-
-We add a new **IPPool** with the CIDR range, **10.0.0.0/16**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
- name: new-pool
-spec:
- cidr: 10.0.0.0/16
- ipipMode: Always
- natOutgoing: true
-```
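-
-To create the pool, save the manifest to a file (for example `new-pool.yaml`, an assumed filename) and apply it:
-
-```bash
-calicoctl apply -f new-pool.yaml
-```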
-
-Let’s verify the new IP pool.
-
-```bash
-calicoctl get ippool -o wide
-```
-
-```
-NAME CIDR NAT IPIPMODE DISABLED
-default-ipv4-ippool 192.168.0.0/16 true Always false
-new-pool 10.0.0.0/16 true Always false
-```
-
-### Step 2: Disable the old IP pool
-
-List the existing IP pool definition.
-
-```bash
-calicoctl get ippool -o yaml > pools.yaml
-```
-
-```yaml
-apiVersion: projectcalico.org/v3
-items:
- - apiVersion: projectcalico.org/v3
- kind: IPPool
- metadata:
- name: default-ipv4-ippool
- spec:
-      cidr: 192.168.0.0/16
- ipipMode: Always
- natOutgoing: true
- - apiVersion: projectcalico.org/v3
- kind: IPPool
- metadata:
- name: new-pool
- spec:
- cidr: 10.0.0.0/16
- ipipMode: Always
- natOutgoing: true
-```
-
-Edit pools.yaml.
-
-Disable this IP pool by setting: `disabled: true`
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
- name: default-ipv4-ippool
-spec:
-  cidr: 192.168.0.0/16
- ipipMode: Always
- natOutgoing: true
- disabled: true
-```
-
-Apply the changes.
-
-Remember, disabling a pool only affects new IP allocations; networking for existing pods is not affected.
-
-```bash
-calicoctl apply -f pools.yaml
-```
-
-Verify the changes.
-
-```bash
-calicoctl get ippool -o wide
-```
-
-```
-NAME CIDR NAT IPIPMODE DISABLED
-default-ipv4-ippool 192.168.0.0/16 true Always true
-new-pool 10.0.0.0/16 true Always false
-```
-
-### Step 3: Delete pods from the old IP pool
-
-Next, we delete all of the existing pods from the old IP pool. (In our example, **coredns** is our only pod; for multiple pods you would trigger a deletion for all pods in the cluster.)
-
-```bash
-kubectl delete pod -n kube-system coredns-6f4fd4bdf-8q7zp
-```
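-
-If your cluster is running many pods, one possible way to recreate them all (a sketch; this restarts every pod in every namespace, so plan for the disruption) is:
-
-```bash
-for ns in $(kubectl get ns -o jsonpath='{.items[*].metadata.name}'); do
-  kubectl delete pods --all -n "$ns"
-done
-```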
-
-### Step 4: Verify that new pods get an address from the new IP pool
-
-1. Create a test namespace.
-
- ```bash
- kubectl create ns ippool-test
- ```
-
-1. Create an nginx pod.
-
- ```bash
- kubectl -n ippool-test create deployment nginx --image nginx
- ```
-
-1. Verify that the new pod gets an IP address from the new range.
-
- ```bash
- kubectl -n ippool-test get pods -l app=nginx -o wide
- ```
-
-1. Clean up the ippool-test namespace.
-
- ```bash
- kubectl delete ns ippool-test
- ```
-
-### Step 5: Delete the old IP pool
-
-Now that you've verified that pods are getting IPs from the new range, you can safely delete the old pool.
-
-```bash
-calicoctl delete pool default-ipv4-ippool
-```
-
-## Additional resources
-
-- [IP pools reference](../../reference/resources/ippool.mdx)
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/use-specific-ip.mdx b/calico_versioned_docs/version-3.25/networking/ipam/use-specific-ip.mdx
deleted file mode 100644
index a83d62b044..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/use-specific-ip.mdx
+++ /dev/null
@@ -1,65 +0,0 @@
----
-description: Specify the IP address for a pod instead of allowing Calico to automatically choose one.
----
-
-# Use a specific IP address with a pod
-
-import DetermineIpam from '@site/calico_versioned_docs/version-3.25/_includes/content/_determine-ipam.mdx';
-
-## Big picture
-
-Choose the IP address for a pod instead of allowing Calico to choose automatically.
-
-## Value
-
-Some applications require the use of stable IP addresses. Also, you may want to create entries in external DNS servers that point directly to pods, and this requires static IPs.
-
-## Concepts
-
-### Kubernetes pod CIDR
-
-The **Kubernetes pod CIDR** is the range of IPs Kubernetes expects pod IPs to be assigned from. It is defined for the entire cluster and is used by various Kubernetes components to determine whether an IP belongs to a pod. For example, kube-proxy treats traffic differently if an IP is from a pod than if it is not. All pod IPs must be in the CIDR range for Kubernetes to function correctly.
-
-**IP Pools**
-
-IP pools are ranges of IP addresses from which Calico assigns pod IPs. Static IPs must be in an IP pool.
-
-## Before you begin...
-
-Your cluster must be using Calico IPAM to use this feature.
-
-
-
-## How to
-
-Annotate the pod with `cni.projectcalico.org/ipAddrs` set to a list of IP addresses to assign, enclosed in brackets. For example:
-
-```
- "cni.projectcalico.org/ipAddrs": "[\"192.168.0.1\"]"
-```
-
-Note the use of the escaped `\"` for the inner double quotes around the addresses.
-
-The address must be within a configured Calico IP pool and not currently in use. The annotation must be present when the pod is created; adding it later has no effect.
-
-Note that currently only a single IP address per pod is supported with this annotation.
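-
-For illustration, a minimal sketch of a pod manifest carrying the annotation (the pod name and image are assumptions; the address must belong to one of your IP pools):
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: static-ip-pod # hypothetical name
-  annotations:
-    cni.projectcalico.org/ipAddrs: '["192.168.0.1"]'
-spec:
-  containers:
-    - name: app
-      image: nginx # placeholder image
-```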
-
-### Reserving IPs for manual assignments
-
-The `cni.projectcalico.org/ipAddrs` annotation requires the IP address to be within an IP pool. This means that,
-by default, {{prodname}} may decide to use the IP address that you select for another workload or for an internal
-tunnel address. To prevent this, there are several options:
-
-- To reserve a whole IPPool for manual allocations, you can set its [node selector](../../reference/resources/ippool.mdx) to `"!all()"`. Since the `!all()`
- cannot match any nodes, the IPPool will not be used for any automatic assignments.
-
-- To reserve part of a pool, you can create an [`IPReservation` resource](../../reference/resources/ipreservation.mdx). This allows for certain IPs to be reserved so
- that Calico IPAM will not use them automatically. However, manual assignments (using the annotation) can still use
- IPs that are "reserved".
-
-- To prevent {{prodname}} from using IPs from a certain pool for internal IPIP and/or VXLAN tunnel addresses, you
- can set the `allowedUses` field on the [IPPool](../../reference/resources/ippool.mdx) to `["Workload"]`.
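-
-For example, a sketch of an IPPool that combines the first and third options so it is only ever used for manual workload assignments (the pool name and CIDR are assumptions):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: manual-assignments # hypothetical pool name
-spec:
-  cidr: 10.123.0.0/26 # assumed CIDR within the cluster CIDR
-  nodeSelector: '!all()' # matches no nodes, so no automatic assignments
-  allowedUses:
-    - Workload # not used for IPIP/VXLAN tunnel addresses
-```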
-
-## Additional resources
-
-For help configuring Calico CNI and Calico IPAM, see [Configuring the Calico CNI Plugins](../../reference/configure-cni-plugins.mdx).
diff --git a/calico_versioned_docs/version-3.25/networking/openstack/configuration.mdx b/calico_versioned_docs/version-3.25/networking/openstack/configuration.mdx
deleted file mode 100644
index 2ede08d76f..0000000000
--- a/calico_versioned_docs/version-3.25/networking/openstack/configuration.mdx
+++ /dev/null
@@ -1,87 +0,0 @@
----
-description: Configure OpenStack components for Calico.
----
-
-# Configure systems for use with Calico
-
-When running {{prodname}} with OpenStack, you also need to configure various
-OpenStack components, as follows.
-
-## Nova (/etc/nova/nova.conf)
-
-{{prodname}} uses the Nova metadata service to provide metadata to VMs,
-without any proxying by Neutron. To make that work:
-
-- An instance of the Nova metadata API must run on every compute node.
-- `/etc/nova/nova.conf` must not set `service_neutron_metadata_proxy`
- or `service_metadata_proxy` to `True`. (The default `False` value is
- correct for a {{prodname}} cluster.)
-
-## Neutron server (/etc/neutron/neutron.conf)
-
-In `/etc/neutron/neutron.conf` you need the following settings to
-configure the Neutron service.
-
-| Setting | Value | Meaning |
-| -------------------- | --------- | ---------------------------------------- |
-| core_plugin | calico | Use the {{prodname}} core plugin |
-
-{{prodname}} can operate either as a core plugin or as an ML2 mechanism driver. The
-function is the same both ways, except that floating IPs are only supported
-when operating as a core plugin; hence the recommended setting here.
-
-However, if you don't need floating IPs and have other reasons for using ML2,
-you can, instead, set
-
-| Setting | Value | Meaning |
-| -------------------- | -------------------------------------- | ---------------------- |
-| core_plugin | neutron.plugins.ml2.plugin.ML2Plugin | Use ML2 plugin |
-
-and then the further ML2-specific configuration as covered below.
-
-The following options in the `[calico]` section of `/etc/neutron/neutron.conf` govern how
-the {{prodname}} plugin/driver and DHCP agent connect to the {{prodname}} etcd
-datastore. You should set `etcd_host` to the IP of your etcd server, and `etcd_port` if
-that server is using a non-standard port. If the etcd server is TLS-secured, also set:
-
-- `etcd_cert_file` to a client certificate, which must be signed by a Certificate
- Authority that the server trusts
-
-- `etcd_key_file` to the corresponding private key file
-
-- `etcd_ca_cert_file` to a file containing data for the Certificate Authorities that you
- trust to sign the etcd server's certificate.
-
-| Setting | Default Value | Meaning |
-| ----------------- | ------------- | ------------------------------------------------------------ |
-| etcd_host | 127.0.0.1 | The hostname or IP of the etcd server |
-| etcd_port | 2379 | The port to use for the etcd node/proxy |
-| etcd_key_file | | The path to the TLS key file to use with etcd |
-| etcd_cert_file | | The path to the TLS client certificate file to use with etcd |
-| etcd_ca_cert_file | | The path to the TLS CA certificate file to use with etcd |
-
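-For example, a minimal sketch of these settings in `/etc/neutron/neutron.conf` (the etcd address and certificate paths are assumptions):
-
-```
-[DEFAULT]
-core_plugin = calico
-
-[calico]
-etcd_host = 192.0.2.10
-etcd_port = 2379
-etcd_cert_file = /etc/calico/ssl/client.crt
-etcd_key_file = /etc/calico/ssl/client.key
-etcd_ca_cert_file = /etc/calico/ssl/ca.crt
-```
-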
-In a [multi-region deployment](multiple-regions.mdx),
-`[calico] openstack_region` configures the name of the region that the local compute or controller
-node belongs to.
-
-| Setting | Default Value | Meaning |
-| ------------------ | ------------- | ---------------------------------------------------------------------------- |
-| `openstack_region` | none          | The name of the region that the local compute or controller node belongs to. |
-
-When specified, the value of `openstack_region` must be a string of lower case alphanumeric
-characters or '-', starting and ending with an alphanumeric character, and must match the value of
-[`OpenStackRegion`](../../reference/felix/configuration.mdx#openstack-specific-configuration)
-configured for the Felixes in the same region.
-
-## ML2 (.../ml2_conf.ini)
-
-In `/etc/neutron/plugins/ml2/ml2_conf.ini` you need the following
-settings to configure the ML2 plugin.
-
-| Setting | Value | Meaning |
-| -------------------- | ----------- | --------------------------------- |
-| mechanism_drivers | calico | Use {{prodname}} |
-| type_drivers | local, flat | Allow 'local' and 'flat' networks |
-| tenant_network_types | local, flat | Allow 'local' and 'flat' networks |
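-
-For example, a sketch of the corresponding lines in `/etc/neutron/plugins/ml2/ml2_conf.ini`:
-
-```
-[ml2]
-mechanism_drivers = calico
-type_drivers = local,flat
-tenant_network_types = local,flat
-```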
diff --git a/calico_versioned_docs/version-3.25/networking/openstack/connectivity.mdx b/calico_versioned_docs/version-3.25/networking/openstack/connectivity.mdx
deleted file mode 100644
index 6eae1bf1c9..0000000000
--- a/calico_versioned_docs/version-3.25/networking/openstack/connectivity.mdx
+++ /dev/null
@@ -1,255 +0,0 @@
----
-description: Configure OpenStack networking for Calico.
----
-
-# IP addressing and connectivity
-
-An OpenStack deployment is of limited use if its VMs cannot reach and be
-reached by the outside world. This document will explain how to
-configure your {{prodname}}-based OpenStack deployment to ensure that you have
-the desired connectivity with the outside world.
-
-## Major differences from standard OpenStack
-
-If you've deployed OpenStack before you'll be thinking in terms of
-routers, floating IPs, and external networks. {{prodname}}'s focus on
-simplicity means that it doesn't use any of these concepts. This section
-is mostly a warning: even if you think you know what you're doing,
-please read the rest of this article. You might be surprised!
-
-## Setting up connectivity
-
-### Part 0: Deciding your address ranges
-
-For {{prodname}}, it's best to pick up to three address ranges from the
-following options, and to use all three if possible.
-
-The first option is an IPv6 address range, assuming you want your VMs to
-have IPv6 connectivity. Note that you can only use this range if your
-data center network can route IPv6 traffic. All IPv6 addresses should be
-considered 'externally reachable', so this needs to be a range that will
-be routed to your gateway router: ideally globally scoped.
-
-The second option is a 'private' IPv4 range, assuming you want your VMs
-to have IPv4 connectivity. This is the most likely range for you to
-configure. This range will contain all VMs that cannot be reached by
-traffic that originates from outside the data center.
-
-The third option is a 'public' IPv4 range, assuming you want your VMs to
-have IPv4 connectivity. This range will contain all the VMs that want to
-be reachable by traffic that originates from outside the data center.
-Make sure that traffic destined for this range from outside the data
-center will be routed to your gateway, or nothing will work!
-
-The minimum requirement is one of those address ranges.
-
-### Part 1: Configuring the fabric
-
-Your {{prodname}} deployment will require a gateway router. In most
-non-trivial cases this will be a heavy-duty router, but if you're
-deploying a smaller network (maybe for testing purposes) and don't have
-access to one you can use a Linux server in the role.
-
-The gateway router needs to be on the default route for all of your
-compute hosts. This is to ensure that all traffic destined to leave the
-data center goes via the gateway. That means that in a flat L3 topology
-the gateway router needs to be set as the next hop. In a more complex
-setup such as a multi-tier L3 topology the next hop may need to be
-slightly shorter, for example to a top-of-rack router, which will in
-turn need to route towards the gateway router.
-
-Then, the gateway router needs to be a BGP peer of the {{prodname}} network.
-This could be a peer of one or more route reflectors, or in smaller
-topologies directly peering with the compute hosts. This is to ensure it
-knows the routes to all the VMs, so that it knows which way to route
-traffic destined for them. Instructions for configuring your gateway
-(and potentially BGP route reflectors) are beyond the scope of this
-document. If you don't know how to do this or want to know how {{prodname}}
-fits into your existing deployment, please get in touch on our mailing
-list: it is difficult to add a generic solution to this problem to this
-article.
-
-If your gateway uses eBGP to advertise routes externally, you'll need to
-configure the BGP policy on the gateway to ensure that it does not
-export routes to the private IPv4 address range you configured above.
-Otherwise, in smaller deployments, you just need to make sure that
-external traffic destined for your VMs will get routed to the gateway.
-How you do this is outside the scope of this document: please ask for
-assistance on our mailing list.
-
-Finally, configure your gateway to do stateful PNAT for any traffic
-coming from the IPv4 internal range. This ensures that even VMs that
-cannot be directly reached from the external network can still contact
-servers themselves, to do things like request software updates.
-Again, the actual manner in which this is configured depends on your
-router.
-
-### Part 2: Set up OpenStack
-
-In OpenStack, you want to set up two shared Neutron networks. For the
-first, add one IPv4 subnet containing the 'external' IPv4 range. Make
-sure the subnet has a gateway IP, and that DHCP is enabled.
-Additionally, add one IPv6 subnet containing half your IPv6 range, again
-with a gateway IP and DHCP enabled. Make sure this network has a name
-that makes it clear that it's for your 'externally accessible' VMs.
-Maybe even mark it an 'external' network, though that has no effect on
-what {{prodname}} does.
-
-For the second network, add one IPv4 subnet containing the 'private'
-IPv4 range and one IPv6 subnet containing the other half of your IPv6
-range, both with gateway IPs and DHCP enabled. Make sure this network
-has a name that makes it clear that it's for your 'private' VMs. Note
-that if you give this network part of your IPv6 range these VMs will all
-be reachable over IPv6. It is expected that all users will want to
-deploy in this way, but if you don't, either don't give these VMs IPv6
-addresses or give them private ones that are not advertised by your
-gateway.
-
-Then, configure the default network, subnet, router and floating IP
-quota for all tenants to be 0 to prevent them from creating more
-networks and confusing themselves!
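-
-One possible way to create these networks and subnets from the CLI (a sketch; the names and CIDRs match the sample output below, the gateway addresses are assumptions, and the exact flags may vary with your Neutron CLI version):
-
-```bash
-neutron net-create external --shared --router:external True
-neutron subnet-create --name external4 --gateway 172.18.208.1 --enable-dhcp external 172.18.208.0/24
-neutron subnet-create --name external6 --ip-version 6 --gateway 2001:db8:a41:2::1 --enable-dhcp external 2001:db8:a41:2::/64
-
-neutron net-create internal --shared
-neutron subnet-create --name internal4 --gateway 10.65.0.1 --enable-dhcp internal 10.65.0.0/16
-neutron subnet-create --name internal6 --ip-version 6 --gateway 2001:db8:a41:3::1 --enable-dhcp internal 2001:db8:a41:3::/64
-```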
-
-A sample configuration is below, showing the networks and two of the
-four subnets (as they differ only in their address ranges, all other
-configuration is the same).
-
-From the controller, issue the following Neutron CLI command.
-
-```bash
-neutron net-list
-```
-
-It returns a list of the networks.
-
-```
-+--------------------------------------+----------+----------------------------------------------------------+
-| id | name | subnets |
-+--------------------------------------+----------+----------------------------------------------------------+
-| 8d5dec25-a6aa-4e18-8706-a51637a428c2 | external | 54db559c-5e1d-4bdc-83b0-c479ef2a0ead 172.18.208.0/24 |
-| | | cf6ceea0-dde0-4018-ab9a-f8f68935622b 2001:db8:a41:2::/64 |
-| fa52b704-7b3c-4c83-8698-244807352711 | internal | 301b3e63-5324-4d62-8e22-ed8dddd50689 10.65.0.0/16 |
-| | | bf94ccb1-c57c-4c9a-a873-c20cbfa4ecaf 2001:db8:a41:3::/64 |
-+--------------------------------------+----------+----------------------------------------------------------+
-```
-
-Next, check the details of the `external` network.
-
-```bash
-neutron net-show external
-```
-
-It should return something like the following.
-
-```
-+---------------------------+--------------------------------------+
-| Field | Value |
-+---------------------------+--------------------------------------+
-| admin_state_up | True |
-| id | 8d5dec25-a6aa-4e18-8706-a51637a428c2 |
-| name | external |
-| provider:network_type | local |
-| provider:physical_network | |
-| provider:segmentation_id | |
-| router:external | True |
-| shared | True |
-| status | ACTIVE |
-| subnets | 54db559c-5e1d-4bdc-83b0-c479ef2a0ead |
-| | cf6ceea0-dde0-4018-ab9a-f8f68935622b |
-| tenant_id | ed34337f935745bb911eeb741bc4374b |
-+---------------------------+--------------------------------------+
-```
-
-Check the details of the `internal` network.
-
-```bash
-neutron net-show internal
-```
-
-It should return something like the following.
-
-```
-+---------------------------+--------------------------------------+
-| Field | Value |
-+---------------------------+--------------------------------------+
-| admin_state_up | True |
-| id | fa52b704-7b3c-4c83-8698-244807352711 |
-| name | internal |
-| provider:network_type | local |
-| provider:physical_network | |
-| provider:segmentation_id | |
-| router:external | False |
-| shared | True |
-| status | ACTIVE |
-| subnets | 301b3e63-5324-4d62-8e22-ed8dddd50689 |
-| | bf94ccb1-c57c-4c9a-a873-c20cbfa4ecaf |
-| tenant_id | ed34337f935745bb911eeb741bc4374b |
-+---------------------------+--------------------------------------+
-```
-
-Check the `external4` subnet.
-
-```bash
-neutron subnet-show external4
-```
-
-It should return something like the following.
-
-```
-+------------------+----------------------------------------------------+
-| Field | Value |
-+------------------+----------------------------------------------------+
-| allocation_pools | {"start": "172.18.208.2", "end": "172.18.208.255"} |
-| cidr | 172.18.208.0/24 |
-| dns_nameservers | |
-| enable_dhcp | True |
-| gateway_ip | 172.18.208.1 |
-| host_routes | |
-| id | 54db559c-5e1d-4bdc-83b0-c479ef2a0ead |
-| ip_version | 4 |
-| name | external4 |
-| network_id | 8d5dec25-a6aa-4e18-8706-a51637a428c2 |
-| tenant_id | ed34337f935745bb911eeb741bc4374b |
-+------------------+----------------------------------------------------+
-```
-
-Check the `external6` subnet.
-
-```bash
-neutron subnet-show external6
-```
-
-It should return something like the following.
-
-```
-+------------------+-----------------------------------------------------------------------------+
-| Field | Value |
-+------------------+-----------------------------------------------------------------------------+
-| allocation_pools | {"start": "2001:db8:a41:2::2", "end": "2001:db8:a41:2:ffff:ffff:ffff:fffe"} |
-| cidr | 2001:db8:a41:2::/64 |
-| dns_nameservers | |
-| enable_dhcp | True |
-| gateway_ip | 2001:db8:a41:2::1 |
-| host_routes | |
-| id | cf6ceea0-dde0-4018-ab9a-f8f68935622b |
-| ip_version | 6 |
-| name | external6 |
-| network_id | 8d5dec25-a6aa-4e18-8706-a51637a428c2 |
-| tenant_id | ed34337f935745bb911eeb741bc4374b |
-+------------------+-----------------------------------------------------------------------------+
-```
-
-### Part 3: Start using your networks
-
-At this stage, all configuration is done! When you spin up a new VM, you
-have to decide if you want it to be contactable from outside the data
-center. If you do, give it a network interface on the `external`
-network: otherwise, give it one on the `internal` network. Obviously, a
-machine that originally wasn't going to be reachable can be made
-reachable by plugging a new interface into it on the `external` network.
-
-Right now we don't support address mobility, so an address is tied to a
-single port until that port is no longer in use. We plan to address this
-in the future.
-
-The next step in configuring your OpenStack deployment is to configure
-security. We'll have a document addressing this shortly.
diff --git a/calico_versioned_docs/version-3.25/networking/openstack/dev-machine-setup.mdx b/calico_versioned_docs/version-3.25/networking/openstack/dev-machine-setup.mdx
deleted file mode 100644
index 9b7882ce1e..0000000000
--- a/calico_versioned_docs/version-3.25/networking/openstack/dev-machine-setup.mdx
+++ /dev/null
@@ -1,133 +0,0 @@
----
-description: Configure Calico networking for OpenStack VMs.
----
-
-# Set up a development machine
-
-In this example, a user wants to spin up a machine to use as a Linux
-development environment. This user has a straightforward use-case: they
-want a GUI and SSH access, but relatively little else.
-
-This user is provisioned with a single OpenStack user and single
-OpenStack tenant. Neutron will automatically provision them with a
-single security group, `default`, that contains the following rules:
-
-- allow all inbound traffic from machines in the `default` security
- group
-- allow all outbound traffic to anywhere
-
-Per the instructions in [IP addressing and connectivity](connectivity.mdx), this user cannot create
-Neutron networks or subnets, but they do have access to the networks
-created by the administrator: `external` and `internal`.
-
-Because the user wants to be able to reach the machine from their own
-laptop, they need the machine to be reachable from outside the data
-center. In vanilla Neutron, this would mean provisioning it with a
-floating IP, but in {{prodname}} they instead want to make sure the VM is
-attached to the `external` network. To add themselves to this network,
-the user needs to find out the UUID for it.
-
-```bash
-neutron net-list
-```
-
-This should return something like the following.
-
-```
-+--------------------------------------+----------+----------------------------------------------------------+
-| id | name | subnets |
-+--------------------------------------+----------+----------------------------------------------------------+
-| 8d5dec25-a6aa-4e18-8706-a51637a428c2 | external | 54db559c-5e1d-4bdc-83b0-c479ef2a0ead 172.18.208.0/24 |
-| | | cf6ceea0-dde0-4018-ab9a-f8f68935622b 2001:db8:a41:2::/64 |
-| fa52b704-7b3c-4c83-8698-244807352711 | internal | 301b3e63-5324-4d62-8e22-ed8dddd50689 10.65.0.0/16 |
-| | | bf94ccb1-c57c-4c9a-a873-c20cbfa4ecaf 2001:db8:a41:3::/64 |
-+--------------------------------------+----------+----------------------------------------------------------+
-```
-
-In the example above, the `external` network has the UUID
-`8d5dec25-a6aa-4e18-8706-a51637a428c2`. Thus, the machine can be created
-with the following `nova boot` command.
-
-```bash
-nova boot --flavor m1.medium \
- --image debian-wheezy-amd64 \
- --security-groups default \
- --nic "netid=8d5dec25-a6aa-4e18-8706-a51637a428c2" \
- development-server
-```
-
-This places the VM with a single NIC in the `external` network. The VM
-starts to boot, and Neutron allocates it an IP address in the `external`
-network: in this case, both an IPv4 and IPv6 address, as you can see
-below:
-
-```
-+--------------------------------------+-----------------------------------------------------------+
-| Property | Value |
-+--------------------------------------+-----------------------------------------------------------+
-| external network | 2001:db8:a41:2::1c, 172.18.208.85 |
-| flavor | m1.medium (3) |
-| hostId | b80247c27400fc9048ca569c8635f00801654bf676a00d8f08887215 |
-| id | e36f4e62-0efa-4188-87b8-8c96dc6e6028 |
-| name | development-server |
-| security_groups | default |
-+--------------------------------------+-----------------------------------------------------------+
-```
-
-While the machine boots, the security group can be configured. It needs
-four extra rules: one for SSH and three for VNC. In this example,
-the developer's personal machine has the IPv4 address 191.64.52.12, and
-that is the only machine that should be able to access the development server.
-For that reason, they add the four security group rules as follows.
-
-To add the SSH ingress rule:
-
-```bash
-neutron security-group-rule-create --protocol tcp \
- --port-range-min 22 \
- --port-range-max 22 \
- --direction ingress \
- --remote-ip-prefix 191.64.52.12/32 \
- --ethertype IPv4 \
- default
-```
-
-To add the first VNC rule:
-
-```bash
-neutron security-group-rule-create --protocol tcp \
- --port-range-min 5800 \
- --port-range-max 5801 \
- --direction ingress \
- --remote-ip-prefix 191.64.52.12/32 \
- --ethertype IPv4 \
- default
-```
-
-To add the second VNC rule:
-
-```bash
-neutron security-group-rule-create --protocol tcp \
- --port-range-min 5900 \
- --port-range-max 5901 \
- --direction ingress \
- --remote-ip-prefix 191.64.52.12/32 \
- --ethertype IPv4 \
- default
-```
-
-To add the third VNC rule:
-
-```bash
-neutron security-group-rule-create --protocol tcp \
- --port-range-min 6000 \
- --port-range-max 6001 \
- --direction ingress \
- --remote-ip-prefix 191.64.52.12/32 \
- --ethertype IPv4 \
- default
-```
-
-At this stage, the developer's machine is up and running. It can be
-reached on its public IP (172.18.208.85), and the developer confirms
-this by SSHing into their box. They're now ready to go.
diff --git a/calico_versioned_docs/version-3.25/networking/openstack/floating-ips.mdx b/calico_versioned_docs/version-3.25/networking/openstack/floating-ips.mdx
deleted file mode 100644
index 468bda0bfc..0000000000
--- a/calico_versioned_docs/version-3.25/networking/openstack/floating-ips.mdx
+++ /dev/null
@@ -1,129 +0,0 @@
----
-description: Configure floating IPs in Calico for OpenStack.
----
-
-# Floating IPs
-
-networking-calico includes beta support for floating IPs. Currently this
-requires running {{prodname}} as a Neutron core plugin (i.e. `core_plugin = calico`) instead of as an ML2 mechanism driver.
-
-:::note
-
-We would like it to work as an ML2 mechanism driver too—patches
-and/or advice welcome!
-
-:::
-
-To set up a floating IP, you need the same pattern of Neutron data model
-objects as you do for Neutron in general, which means:
-
-- a tenant network, with an instance attached to it, that will be the target of
- the floating IP
-
-- a Neutron router, with the tenant network connected to it
-
-- a provider network with `router:external True` that is set as the
- router's gateway (e.g. with `neutron router-gateway-set`), and with a
- subnet with a CIDR that floating IPs will be allocated from
-
-- a floating IP, allocated from the provider network subnet, that maps onto the
- instance attached to the tenant network.
-
-For example:
-
-1. Create tenant network and subnet.
-
- ```bash
- neutron net-create --shared calico
- neutron subnet-create --gateway 10.65.0.1 --enable-dhcp --ip-version 4 --name calico-v4 calico 10.65.0.0/24
- ```
-
-1. Boot a VM on that network.
-
- ```bash
- nova boot [...]
- ```
-
-1. Find its Neutron port ID.
-
- ```bash
- neutron port-list
- ```
-
-1. Create an external network and subnet; this is where floating
- IPs will be allocated from.
-
- ```bash
- neutron net-create public --router:external True
- neutron subnet-create public 172.16.1.0/24
- ```
-
-1. Create a router connecting the tenant and external networks.
-
- ```bash
- neutron router-create router1
-   neutron router-interface-add router1 calico-v4
- neutron router-gateway-set router1 public
- ```
-
-1. Create a floating IP and associate it with the target VM.
-
- ```bash
- neutron floatingip-create public
-   neutron floatingip-associate <floatingip-id> <port-id>
- ```
-
- Then the {{prodname}} agents will arrange that the floating IP is routed to the
- instance's compute host, and then DNAT'd to the instance's fixed IP address.
-
-1. From a compute node, issue the following command.
-
- ```bash
- ip r
- ```
-
- It should return the routing table.
-
- ```
- default via 10.240.0.1 dev eth0
- 10.65.0.13 dev tap9a7e0868-da scope link
- 10.65.0.14 via 192.168.8.4 dev l2tpeth8-3 proto bird
- 10.65.0.23 via 192.168.8.4 dev l2tpeth8-3 proto bird
- 10.240.0.1 dev eth0 scope link
- 172.16.1.3 dev tap9a7e0868-da scope link
- 192.168.8.0/24 dev l2tpeth8-3 proto kernel scope link src 192.168.8.3
- 192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1
- ```
-
-1. Issue the following command to review iptables.
-
- ```bash
- sudo iptables -L -n -v -t nat
- ```
-
- It should return something like the following.
-
- ```
- [...]
- Chain felix-FIP-DNAT (2 references)
- pkts bytes target prot opt in out source destination
- 0 0 DNAT all -- * * 0.0.0.0/0 172.16.1.3 to:10.65.0.13
-
- Chain felix-FIP-SNAT (1 references)
- pkts bytes target prot opt in out source destination
- 0 0 SNAT all -- * * 10.65.0.13 10.65.0.13 to:172.16.1.3
-
- Chain felix-OUTPUT (1 references)
- pkts bytes target prot opt in out source destination
- 1 60 felix-FIP-DNAT all -- * * 0.0.0.0/0 0.0.0.0/0
-
- Chain felix-POSTROUTING (1 references)
- pkts bytes target prot opt in out source destination
- 1 60 felix-FIP-SNAT all -- * * 0.0.0.0/0 0.0.0.0/0
-
- Chain felix-PREROUTING (1 references)
- pkts bytes target prot opt in out source destination
- 0 0 felix-FIP-DNAT all -- * * 0.0.0.0/0 0.0.0.0/0
- 0 0 DNAT tcp -- * * 0.0.0.0/0 169.254.169.254 tcp dpt:80 to:127.0.0.1:8775
- [...]
- ```
diff --git a/calico_versioned_docs/version-3.25/networking/openstack/host-routes.mdx b/calico_versioned_docs/version-3.25/networking/openstack/host-routes.mdx
deleted file mode 100644
index 233b2a0009..0000000000
--- a/calico_versioned_docs/version-3.25/networking/openstack/host-routes.mdx
+++ /dev/null
@@ -1,85 +0,0 @@
----
-description: Options for host routing with Calico.
----
-
-# Host routes
-
-Neutron allows "host routes" to be configured on a subnet, with each host route
-comprising
-
-- an IP destination prefix
-- a next hop IP for routing to that prefix.
-
-When an instance is launched and gets an IP from that subnet, Neutron arranges,
-via DHCP, that the instance's routing table gets those routes.
-
-## With {{prodname}}, a host route's next hop IP should be the local host
-
-networking-calico supports host routes, but it's important to note that a host
-route is only consistent with {{prodname}} when its next hop IP represents the local
-hypervisor. This is because the local hypervisor, in a {{prodname}} setup, _always_
-routes all data from an instance and so is always the next hop IP for data to
-any destination. If the instance's routing table has a route with some other
-next hop IP, that next hop IP address will effectively be ignored, and the data
-will likely _not_ pass through the implied router; instead the data will go
-first to the hypervisor, and then the hypervisor's routing table will determine
-its next IP hop from there.
-
-Specifically, each host route's next hop IP should be the gateway IP of the
-subnet that the desired instance NIC is attached to, and from which it got its
-IP address, where 'desired instance NIC' means the one that you want data for
-that host route to go through. In networking-calico's usage, subnet gateway
-IPs represent the local hypervisor, because data sent by an instance is always
-routed there.
-
-:::note
-
-networking-calico avoids unnecessary IP usage by using the subnet
-gateway IP to represent the local compute host, on every compute host where
-that subnet is being used. Although that might initially sound odd, it works
-because no data is ever sent to or from the gateway IP address; the gateway
-IP is only used as the next hop address for the first IP hop from an instance
-to its compute host, and then the compute host routes the data again,
-according to its routing table, to wherever it needs to go. This also means
-that the gateway IP address really is functioning as each instance's default
-gateway, in the generally understood sense.
-
-:::
-
-## When are host routes useful with {{prodname}}?
-
-Host routes are useful with {{prodname}} when an instance has multiple NICs and you
-want to specify which NIC should be used for data to particular prefixes.
-
-When an instance has multiple NICs, it should have a default route through only
-one of those NICs, and use non-default routes to direct appropriate traffic
-through the other NICs. Neutron host routes can be used to establish those
-non-default routes; alternatively they can also be programmed manually in the
-instance.
-
-For example, suppose an instance has eth0 attached to a subnet with gateway
-10.65.0.1, eth1 attached to a subnet with gateway 11.8.0.1, and a default route
-via eth0. Then a host route like
-
-```bash
-11.11.0.0/16,11.8.0.1
-```
-
-can be configured for the subnet, to say that data to 11.11.0.0/16 should go
-out through eth1. The instance's routing table will then be:
-
-```bash
-default via 10.65.0.1 dev eth0
-10.65.0.0/24 dev eth0
-11.8.0.0/24 dev eth1
-11.11.0.0/16 via 11.8.0.1 dev eth1
-```
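-
-With the legacy neutron client, a host route like this can be attached to the relevant subnet along the lines of the following sketch, where `<eth1-subnet>` stands for the name or ID of the subnet that eth1 is attached to (exact option syntax varies between client versions):
-
-```bash
-# Attach the 11.11.0.0/16 host route, with next hop 11.8.0.1, to the eth1 subnet.
-neutron subnet-update <eth1-subnet> \
-  --host-routes type=dict list=true destination=11.11.0.0/16,nexthop=11.8.0.1
-```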
-
-When an instance only has a single network attachment, and so a single NIC,
-host routes cannot make any difference to how data is routed, so it is
-unhelpful (although also harmless) to configure them. Regardless of what the
-instance's routing table says, data must exit over the single NIC, and is
-always layer-2-terminated and rerouted by the host according to the host's
-routing table. It's required for the host's routing table to cover whatever
-destinations instances may want to send to, and host routes don't add anything
-to that.
diff --git a/calico_versioned_docs/version-3.25/networking/openstack/index.mdx b/calico_versioned_docs/version-3.25/networking/openstack/index.mdx
deleted file mode 100644
index 372f82ee43..0000000000
--- a/calico_versioned_docs/version-3.25/networking/openstack/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Configure Calico networking in an OpenStack deployment.
-hide_table_of_contents: true
----
-
-# Calico networking for OpenStack
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/networking/openstack/ipv6.mdx b/calico_versioned_docs/version-3.25/networking/openstack/ipv6.mdx
deleted file mode 100644
index be335c5f1d..0000000000
--- a/calico_versioned_docs/version-3.25/networking/openstack/ipv6.mdx
+++ /dev/null
@@ -1,56 +0,0 @@
----
-description: Prepare a VM guest OS for IPv6.
----
-
-# Prepare a VM guest OS for IPv6
-
-## Big picture
-
-Prepare a VM guest OS for IPv6.
-
-## How to
-
-OpenStack (not {{prodname}}) controls whether a VM gets IPv4, IPv6, or both addresses. {{prodname}} simply honors the addresses that OpenStack specifies. The following extra steps are required for **IPv6 only** and **dual stack** deployments so that the guest OS can learn its IPv6 address (if assigned by OpenStack).
-
-1. Verify that the guest VM image meets these requirements for IPv6 connectivity.
-
- - When booting up, the VM must issue a DHCPv6 request for each of its interfaces, so that it can learn the IPv6 addresses that OpenStack allocates for it. If the VM uses the widely-deployed **DHCP client from ISC**, it must have a fix/workaround for [this known issue](https://kb.isc.org/docs/aa-01141).
- - The VM must be configured to accept router advertisements.
-
- Although not all common cloud images meet these requirements yet, it is easy to remedy by launching an image, making appropriate changes to its configuration files, taking a snapshot, and then using the snapshot thereafter instead of the original image.
-
- For example, starting from an **Ubuntu cloud image**, the following changes meet the requirements listed.
-
- - In `/etc/network/interfaces.d/eth0.cfg`, add:
- ```bash
- iface eth0 inet6 dhcp
- accept_ra 1
- ```
-
- - In `/sbin/dhclient-script`, add at the start of the script:
- ```bash
-    new_ip6_prefixlen=128
- ```
-
- - In `/etc/sysctl.d`, create a file named `30-eth0-rs-delay.conf` with
- contents:
- ```bash
-    net.ipv6.conf.eth0.router_solicitation_delay = 10
- ```
-
- For **CentOS**, these additions to a cloud-init script have been reported to be effective:
-
- runcmd:
-
- - `sed -i -e '$a'"IPV6INIT=yes" /etc/sysconfig/network-scripts/ifcfg-eth0`
- - `sed -i -e '$a'"DHCPV6C=yes" /etc/sysconfig/network-scripts/ifcfg-eth0`
- - `sed -i '/PATH/i\new_ip6_prefixlen=128' /sbin/dhclient-script`
- - `systemctl restart network`
-
-1. Configure IPv6 support in {{prodname}} by defining an IPv6 subnet in each Neutron network with:
-
- - The IPv6 address range that you want your VMs to use
- - DHCP enabled
- - IPv6 address mode set to DHCPv6 stateful
-
- We suggest that you initially configure both IPv4 and IPv6 subnets in each network. This allows handling VM images that support only IPv4 alongside those that support both IPv4 and IPv6, and allows a VM to be accessed over IPv4 in case this is needed to troubleshoot any issues with its IPv6 configuration. In principle, though, we are not aware of any problems with configuring and using IPv6-only networks in OpenStack.
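-
-   For example, assuming a Neutron network named `calico`, an IPv6 subnet with those properties might be created along these lines (a sketch; the subnet name and the fd5f:... prefix are illustrative):
-
-   ```bash
-   neutron subnet-create --ip-version 6 --ipv6-address-mode dhcpv6-stateful \
-     --enable-dhcp --name calico-v6 calico fd5f:5d21:845:1c2e::/64
-   ```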
diff --git a/calico_versioned_docs/version-3.25/networking/openstack/kuryr.mdx b/calico_versioned_docs/version-3.25/networking/openstack/kuryr.mdx
deleted file mode 100644
index da12609925..0000000000
--- a/calico_versioned_docs/version-3.25/networking/openstack/kuryr.mdx
+++ /dev/null
@@ -1,40 +0,0 @@
----
-description: Use Kuryr with Calico networking.
----
-
-# Kuryr
-
-networking-calico works with Kuryr; this means using Neutron, with the {{prodname}}
-ML2 driver, to provide networking for container workloads.
-
-You can use DevStack to install a single node {{prodname}}/Kuryr system, with a
-`local.conf` file like this:
-
-```bash
-[[local|localrc]]
-ADMIN_PASSWORD=015133ea2bdc46ed434c
-DATABASE_PASSWORD=d0060b07d3f3631ece78
-RABBIT_PASSWORD=6366743536a8216bde26
-SERVICE_PASSWORD=91eb72bcafb4ddf246ab
-SERVICE_TOKEN=c5680feca5e2c9c8f820
-
-enable_plugin networking-calico git://git.openstack.org/openstack/networking-calico
-enable_plugin kuryr git://git.openstack.org/openstack/kuryr
-enable_service kuryr
-enable_service etcd-server
-enable_service docker-engine
-
-LOGFILE=stack.log
-LOG_COLOR=False
-```
-
-Please follow general Kuryr instructions for creating a Docker network that
-uses Kuryr as its backend, and for launching containers on that network. Then
-if you look at the IP routing table and iptables, you will see {{prodname}} routes to
-the containers.
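-
-As a rough sketch (the driver and IPAM names below assume kuryr-libnetwork defaults, and the subnet is illustrative; consult the Kuryr documentation for your release):
-
-```bash
-# Create a Docker network backed by Kuryr/Neutron, then run a container on it.
-docker network create --driver kuryr --ipam-driver kuryr \
-  --subnet 10.10.0.0/24 --gateway 10.10.0.1 kuryr-demo
-docker run --rm -d --net kuryr-demo --name kuryr-test busybox sleep 3600
-
-# On the host, the container's IP should now appear as a Calico-managed route.
-ip route
-```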
-
-## {{prodname}} for containers without Kuryr
-
-The {{prodname}} project also provides networking for containers more directly,
-without Neutron and Kuryr as intermediaries. Please see [Getting Started](../../getting-started/index.mdx)
- for details.
diff --git a/calico_versioned_docs/version-3.25/networking/openstack/labels.mdx b/calico_versioned_docs/version-3.25/networking/openstack/labels.mdx
deleted file mode 100644
index 5ee51ed09b..0000000000
--- a/calico_versioned_docs/version-3.25/networking/openstack/labels.mdx
+++ /dev/null
@@ -1,141 +0,0 @@
----
-description: Use Calico labels to define policy for OpenStack VMs.
----
-
-# Endpoint labels and operator policy
-
-When {{prodname}} represents an OpenStack VM as a {{prodname}} WorkloadEndpoint,
-it puts labels on the WorkloadEndpoint to identify the project, security groups and
-namespace that the VM belongs to. The deployment operator can use these labels to
-configure {{prodname}} policy that is additional to the policy defined by OpenStack
-security groups, and that cannot be overridden by user-level security group config.
-
-## VM endpoint labels
-
-For the VM's OpenStack project (previously known as 'tenant'), those labels are:
-
-| Label Name                                      | Value                                 |
-| ----------------------------------------------- | ------------------------------------- |
-| `projectcalico.org/openstack-project-id`        | `<the VM's project ID>`               |
-| `projectcalico.org/openstack-project-name`      | `<the VM's project name>`             |
-| `projectcalico.org/openstack-project-parent-id` | `<the parent ID of the VM's project>` |
-
-For each security group that the VM belongs to, those labels are:
-
-| Label Name                                                   | Value                   |
-| ------------------------------------------------------------ | ----------------------- |
-| `sg.projectcalico.org/openstack-<security group ID>`         | `<security group name>` |
-| `sg-name.projectcalico.org/openstack-<security group name>`  | `<security group ID>`   |
-
-For the VM's {{prodname}} namespace, the label is:
-
-| Label Name                    | Value                  |
-| ----------------------------- | ---------------------- |
-| `projectcalico.org/namespace` | `<the VM's namespace>` |
-
-When `[calico] openstack_region` has been configured in `/etc/neutron/neutron.conf` (as
-recommended for [multiple region deployments](multiple-regions.mdx)) the namespace will be
-"openstack-region-" followed by the configured region name. Otherwise it is simply
-"openstack".
-
-:::note
-
-To allow {{prodname}} to provide the project name and parent ID labels,
-you must give Neutron the 'admin' role within your cluster:
-
-```
-openstack role add --project service --user neutron admin
-```
-
-or some equivalent privilege that allows the Neutron server to do admin-level queries of
-the Keystone database. This is because {{prodname}}'s driver runs as part of the
-Neutron server, and needs to query the Keystone database for the information for those
-labels. If Neutron isn't sufficiently privileged, {{prodname}} will fall back to
-not generating those labels.
-
-:::
-
-:::note
-
-{{prodname}} only allows certain characters in label names and values
-(alphanumerics, '-', '\_', '.' and '/'), so if a project or security group name normally
-has other characters, those will be replaced here by '\_'. Also there is a length
-limit, so particularly long names may be truncated.
-
-:::
-
-:::note
-
-{{prodname}} does not support changing project name or security group
-name for a given ID associated with a VM after the VM has been created. It is
-recommended that operators avoid any possible confusion here by not changing the project
-name for a particular project ID, or the security group name for a particular security group
-ID, after creation.
-
-:::
-
-## Configuring operator policy
-
-Configuring operator policy requires the `calicoctl` executable, so you should
-[install](../../operations/calicoctl/install.mdx) and
-[configure calicoctl](../../operations/calicoctl/configure/overview.mdx)
- if you
-haven't done so already.
-
-- Calico for OpenStack deployments use an etcd datastore, so you should follow the
- instructions for an etcd datastore.
-
-- The settings you need for etcd endpoints, and TLS credentials if your deployment uses
- those, should match what you have in your
- [`neutron.conf`](configuration.mdx)
- and [Felix](../../reference/felix/configuration.mdx)
- configurations.
-
-## Example
-
-Now you can configure {{prodname}} operator policy that will apply before the policy
-that is derived from OpenStack security groups. For example, to prevent any possible
-communication between the "superman" and "lexluthor" projects, you could configure the
-following.
-
-```bash
-calicoctl apply -f - <
- ```
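-
-A pair of GlobalNetworkPolicies along the following lines would achieve that, keyed on the project-name labels described above (the policy names and order value are illustrative):
-
-```bash
-calicoctl apply -f - <<EOF
-- apiVersion: projectcalico.org/v3
-  kind: GlobalNetworkPolicy
-  metadata:
-    name: deny-lexluthor-to-superman
-  spec:
-    order: 10
-    selector: "projectcalico.org/openstack-project-name == 'superman'"
-    types:
-      - Ingress
-    ingress:
-      - action: Deny
-        source:
-          selector: "projectcalico.org/openstack-project-name == 'lexluthor'"
-- apiVersion: projectcalico.org/v3
-  kind: GlobalNetworkPolicy
-  metadata:
-    name: deny-superman-to-lexluthor
-  spec:
-    order: 10
-    selector: "projectcalico.org/openstack-project-name == 'lexluthor'"
-    types:
-      - Ingress
-    ingress:
-      - action: Deny
-        source:
-          selector: "projectcalico.org/openstack-project-name == 'superman'"
-EOF
-```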
-
-   where `<region-name>` is the name of the region that the compute host belongs to.
-
-1. In `/etc/neutron/neutron.conf` on each controller and compute node, add
-
- ```conf
- [calico]
-   openstack_region = <region-name>
- ```
-
-   where `<region-name>` is the name of the region that the node belongs to.
-
-:::note
-
-The value specified for `OpenStackRegion` and `openstack_region`
-must be a string of lower case alphanumeric characters or '-', starting and
-ending with an alphanumeric character.
-
-:::
-
-:::caution
-
-If the Felix and Neutron values here do not match, OpenStack
-will not be able to launch any VMs in that region, because the Neutron server
-for the region will think that there are no working compute nodes.
-
-:::
-
-### Configuring OpenStack
-
-You should now create networks in your OpenStack regions as normal, for example:
-
-```bash
- neutron net-create --shared calico
- neutron subnet-create --gateway 10.65.0.1 --enable-dhcp --ip-version 4 --name calico-v4 calico 10.65.0.0/24
-```
-
-:::note
-
-{{prodname}} provides a flat L3 network,
-so _subnets across all regions must not overlap_.
-For example, having 10.1.0.0/16 in one region and 10.2.0.0/16 in another
-would be fine, but 10.1.0.0/16 and 10.1.200.0/24 would not.
-
-:::
-
-## Configuring cross-region policy
-
-Suppose that:
-
-- you have two regions
-
-- you have a set of VMs in one region belonging to security group
- a7734e61-b545-452d-a3cd-0189cbd9747a
-
-- you have a set of VMs in another region belonging to security group
- 85cc3048-abc3-43cc-89b3-377341426ac5
-
-- you want to allow the second set of VMs to connect to port 80 of the first
- set.
-
-You need to have [calicoctl installed and configured for your cluster](labels.mdx#configuring-operator-policy)
-. Once that is in place,
-you could achieve the desired connectivity by using calicoctl to
-configure this {{prodname}} policy:
-
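-For example, a policy along the following lines (a sketch; the policy name and order value are illustrative, and the selectors use the security-group labels described in [Endpoint labels and operator policy](labels.mdx)):
-
-```bash
-calicoctl apply -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-port-80-across-regions
-spec:
-  order: 10
-  # Applies to the VMs in security group a7734e61-b545-452d-a3cd-0189cbd9747a.
-  selector: "has(sg.projectcalico.org/openstack-a7734e61-b545-452d-a3cd-0189cbd9747a)"
-  types:
-    - Ingress
-  ingress:
-    - action: Allow
-      protocol: TCP
-      # ...from the VMs in security group 85cc3048-abc3-43cc-89b3-377341426ac5.
-      source:
-        selector: "has(sg.projectcalico.org/openstack-85cc3048-abc3-43cc-89b3-377341426ac5)"
-      destination:
-        ports:
-          - 80
-EOF
-```
-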
-```bash
-calicoctl apply -f - < mtu 16436 qdisc noqueue
- link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
- inet 127.0.0.1/8 scope host lo
- inet6 ::1/128 scope host
- valid_lft forever preferred_lft forever
- 2: eth0: mtu 1500 qdisc pfifo_fast qlen 1000
- link/ether fa:16:3e:28:a9:a4 brd ff:ff:ff:ff:ff:ff
- inet 10.28.0.13/16 brd 10.28.255.255 scope global eth0
- inet6 fe80::f816:3eff:fe28:a9a4/64 scope link
- valid_lft forever preferred_lft forever
- ```
-
-1. Next, issue the following command.
-
- ```bash
- sudo ip a a 10.28.0.23/16 dev eth0
- ```
-
-1. List the interfaces again.
-
- ```bash
- ip a
- ```
-
- The interfaces should now look more like the following.
-
- ```
- 1: lo: mtu 16436 qdisc noqueue
- link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
- inet 127.0.0.1/8 scope host lo
- inet6 ::1/128 scope host
- valid_lft forever preferred_lft forever
- 2: eth0: mtu 1500 qdisc pfifo_fast qlen 1000
- link/ether fa:16:3e:28:a9:a4 brd ff:ff:ff:ff:ff:ff
- inet 10.28.0.13/16 brd 10.28.255.255 scope global eth0
- inet 10.28.0.23/16 scope global secondary eth0
- inet6 fe80::f816:3eff:fe28:a9a4/64 scope link
- valid_lft forever preferred_lft forever
- ```
-
-1. Exit the SSH session.
-
- ```
- Connection to 10.28.0.13 closed.
- ```
-
-1. And now we can access the VM on its service IP, as shown below.
-
- ```bash
- core@access-node$ ssh cirros@10.28.0.23
- The authenticity of host '10.28.0.23 (10.28.0.23)' can't be established.
- RSA key fingerprint is 65:a5:b0:0c:e2:c4:ac:94:2a:0c:64:b8:bc:5a:aa:66.
- Are you sure you want to continue connecting (yes/no)? yes
-
- Warning: Permanently added '10.28.0.23' (RSA) to the list of known hosts.
- cirros@10.28.0.23's password:
- $
- ```
-
- Note that we already have security set up that allows SSH to the instance from
- our access machine (`192.168.8.1`).
-
-1. You can check this by listing the security groups.
-
- ```bash
- neutron security-group-list
- ```
-
- It should return something like the following.
-
- ```
- +--------------------------------------+---------+----------------------------------------------------------------------+
- | id | name | security_group_rules |
- +--------------------------------------+---------+----------------------------------------------------------------------+
- | 75fccd0a-ef3d-44cd-91ec-ef22941f50f5 | default | egress, IPv4 |
- | | | egress, IPv6 |
- | | | ingress, IPv4, 22/tcp, remote_ip_prefix: 192.168.8.1/32 |
- | | | ingress, IPv4, remote_group_id: 75fccd0a-ef3d-44cd-91ec-ef22941f50f5 |
- | | | ingress, IPv6, remote_group_id: 75fccd0a-ef3d-44cd-91ec-ef22941f50f5 |
- | 903d9936-ce72-4756-a2cc-7c95a846e7e5 | default | egress, IPv4 |
- | | | egress, IPv6 |
- | | | ingress, IPv4, 22/tcp, remote_ip_prefix: 192.168.8.1/32 |
- | | | ingress, IPv4, remote_group_id: 903d9936-ce72-4756-a2cc-7c95a846e7e5 |
- | | | ingress, IPv6, remote_group_id: 903d9936-ce72-4756-a2cc-7c95a846e7e5 |
- +--------------------------------------+---------+----------------------------------------------------------------------+
- ```
-
-## Moving the service IP to another VM
-
-Service IPs are often used for HA, so they need to be moved to target a different VM
-if the first one fails for some reason (or if the HA system just decides to
-cycle the active VM).
-
-1. To demonstrate this, we create a second test VM.
-
- ```bash
- nova boot --flavor m1.tiny --image cirros-0.3.2-x86_64 --nic net-name=demo-net testvm2
- ```
-
-1. List the VMs.
-
- ```bash
- nova list
- ```
-
- You should see the new VM in the list.
-
- ```
- +--------------------------------------+---------+--------+------------+-------------+----------------------------------------------+
-   | ID                                   | Name    | Status | Task State | Power State | Networks                                     |
-   +--------------------------------------+---------+--------+------------+-------------+----------------------------------------------+
- | b6d8a3c4-9674-4972-9151-11107b60d622 | testvm1 | ACTIVE | - | Running | demo-net=10.28.0.13, 10.28.0.23 |
- | bb4ef5e3-dc77-472e-af6f-3f0d8c3e5a6d | testvm2 | ACTIVE | - | Running | demo-net=10.28.0.14, fd5f:5d21:845:1c2e:2::e |
- +--------------------------------------+---------+--------+------------+-------------+----------------------------------------------+
- ```
-
-1. Check the ports.
-
- ```bash
- neutron port-list
- ```
-
- It should return something like the following.
-
- ```
- +--------------------------------------+------+-------------------+------------------------------------------------------------------------------------------------+
- | id | name | mac_address | fixed_ips |
- +--------------------------------------+------+-------------------+------------------------------------------------------------------------------------------------+
- | 656b3617-570d-473e-a5dd-90b61cb0c49f | | fa:16:3e:4d:d5:25 | |
- | 7627a298-a2db-4a1a-bc07-9f0f10f58363 | | fa:16:3e:8e:dc:33 | {"subnet_id": "0a1221f2-e6ed-413d-a040-62a266bd0d8f", "ip_address": "10.28.0.14"} |
- | | | | {"subnet_id": "345fec2e-6493-44de-a489-97b755c16dd4", "ip_address": "fd5f:5d21:845:1c2e:2::e"} |
- | 9a7e0868-da7a-419e-a7ad-9d37e11091b8 | | fa:16:3e:28:a9:a4 | {"subnet_id": "0a1221f2-e6ed-413d-a040-62a266bd0d8f", "ip_address": "10.28.0.13"} |
- | | | | {"subnet_id": "0a1221f2-e6ed-413d-a040-62a266bd0d8f", "ip_address": "10.28.0.23"} |
- | a4b26bcc-ba94-4033-a9fc-edaf151c0c20 | | fa:16:3e:74:46:bd | |
- | a772a5e1-2f13-4fc3-96d5-fa1c29717637 | | fa:16:3e:c9:c6:8f | |
- +--------------------------------------+------+-------------------+------------------------------------------------------------------------------------------------+
- ```
-
-1. Remove the service IP from the first VM.
-
- ```bash
- neutron port-update --fixed-ip subnet_id=0a1221f2-e6ed-413d-a040-62a266bd0d8f,ip_address=10.28.0.13 9a7e0868-da7a-419e-a7ad-9d37e11091b8
- ```
-
-1. And add it to the second.
-
- ```bash
- neutron port-update --fixed-ip subnet_id=0a1221f2-e6ed-413d-a040-62a266bd0d8f,ip_address=10.28.0.14 \
- --fixed-ip subnet_id=0a1221f2-e6ed-413d-a040-62a266bd0d8f,ip_address=10.28.0.23 7627a298-a2db-4a1a-bc07-9f0f10f58363
- ```
-
-1. SSH into `testvm2`.
-
- ```bash
- core@access-node$ ssh cirros@10.28.0.14
- The authenticity of host '10.28.0.14 (10.28.0.14)' can't be established.
- RSA key fingerprint is 6a:02:7f:3a:bf:0c:91:de:c4:d6:e7:f6:81:3f:6a:85.
- Are you sure you want to continue connecting (yes/no)? yes
-
- Warning: Permanently added '10.28.0.14' (RSA) to the list of known hosts.
- cirros@10.28.0.14's password:
- ```
-
-1. Tell `testvm2` that it now has the service IP `10.28.0.23`.
-
- ```bash
- sudo ip a a 10.28.0.23/16 dev eth0
- ```
-
-1. Now connections to `10.28.0.23` go to `testvm2`:
-
- ```bash
- core@access-node$ ssh cirros@10.28.0.23
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!
- Someone could be eavesdropping on you right now (man-in-the-middle attack)!
- It is also possible that a host key has just been changed.
- The fingerprint for the RSA key sent by the remote host is
- 6a:02:7f:3a:bf:0c:91:de:c4:d6:e7:f6:81:3f:6a:85.
- Please contact your system administrator.
- Add correct host key in /home/core/.ssh/known_hosts to get rid of this message.
- Offending RSA key in /home/core/.ssh/known_hosts:4
- RSA host key for 10.28.0.23 has changed and you have requested strict checking.
- Host key verification failed.
- ```
-
-1. Remove the `known_hosts` files.
-
- ```bash
- rm ~/.ssh/known_hosts
- ```
-
-1. Try again to SSH into the VM.
-
- ```bash
- core@access-node$ ssh cirros@10.28.0.23
- The authenticity of host '10.28.0.23 (10.28.0.23)' can't be established.
- RSA key fingerprint is 6a:02:7f:3a:bf:0c:91:de:c4:d6:e7:f6:81:3f:6a:85.
- Are you sure you want to continue connecting (yes/no)? yes
-
- Warning: Permanently added '10.28.0.23' (RSA) to the list of known hosts.
- cirros@10.28.0.23's password:
- ```
-
-1. Check the host name.
-
- ```bash
- hostname
- ```
-
- It should return:
-
- ```
- testvm2
- ```
-
-1. Check the interfaces.
-
- ```
- ip a
- ```
-
- They should look something like the following.
-
- ```
- 1: lo: mtu 16436 qdisc noqueue
- link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
- inet 127.0.0.1/8 scope host lo
- inet6 ::1/128 scope host
- valid_lft forever preferred_lft forever
- 2: eth0: mtu 1500 qdisc pfifo_fast qlen 1000
- link/ether fa:16:3e:8e:dc:33 brd ff:ff:ff:ff:ff:ff
- inet 10.28.0.14/16 brd 10.28.255.255 scope global eth0
- inet 10.28.0.23/16 scope global secondary eth0
- inet6 fe80::f816:3eff:fe8e:dc33/64 scope link
- valid_lft forever preferred_lft forever
- $
- ```
diff --git a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/etcd.mdx b/calico_versioned_docs/version-3.25/operations/calicoctl/configure/etcd.mdx
deleted file mode 100644
index 25be361af4..0000000000
--- a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/etcd.mdx
+++ /dev/null
@@ -1,190 +0,0 @@
----
-description: Sample configuration files etcd.
----
-
-# Configure calicoctl to connect to an etcd datastore
-
-## Big picture
-
-Learn how to configure the calicoctl CLI tool for an etcd cluster.
-
-## Value
-
-The `calicoctl` CLI tool provides helpful administrative commands for interacting with a {{prodname}} cluster.
-
-## Concepts
-
-### calicoctl vs kubectl
-
-In previous releases, calicoctl has been required to manage Calico API resources in the `projectcalico.org/v3` API group. The calicoctl CLI tool provides important validation and defaulting on these APIs.
-
-In newer releases, the Calico API server performs that defaulting and validation server-side, exposing the same API semantics without a dependency on calicoctl. For this reason, we recommend
-[installing the Calico API server](../../install-apiserver.mdx) and using `kubectl` instead of `calicoctl` for most operations.
-
-calicoctl is still required for the following subcommands:
-
-- [calicoctl node](../../../reference/calicoctl/node/index.mdx)
-- [calicoctl ipam](../../../reference/calicoctl/ipam/index.mdx)
-- [calicoctl convert](../../../reference/calicoctl/convert.mdx)
-- [calicoctl version](../../../reference/calicoctl/version.mdx)
-
-calicoctl is also required for non-Kubernetes platforms such as OpenStack.
-
-## How to
-
-### Complete list of etcd configuration options
-
-| Configuration file option | Environment variable | Description | Schema |
-| ------------------------- | -------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- |
-| `datastoreType` | `DATASTORE_TYPE` | Indicates the datastore to use. If unspecified, defaults to `kubernetes`. (optional) | `kubernetes`, `etcdv3` |
-| `etcdEndpoints` | `ETCD_ENDPOINTS` | A comma-separated list of etcd endpoints. Example: `http://127.0.0.1:2379,http://127.0.0.2:2379` (required) | string |
-| `etcdDiscoverySrv` | `ETCD_DISCOVERY_SRV` | Domain name to discover etcd endpoints via SRV records. Mutually exclusive with `etcdEndpoints`. Example: `example.com` (optional) | string |
-| `etcdUsername` | `ETCD_USERNAME` | User name for RBAC. Example: `user` (optional) | string |
-| `etcdPassword` | `ETCD_PASSWORD` | Password for the given user name. Example: `password` (optional) | string |
-| `etcdKeyFile` | `ETCD_KEY_FILE` | Path to the file containing the private key matching the `calicoctl` client certificate. Enables `calicoctl` to participate in mutual TLS authentication and identify itself to the etcd server. Example: `/etc/calicoctl/key.pem` (optional) | string |
-| `etcdCertFile` | `ETCD_CERT_FILE` | Path to the file containing the client certificate issued to `calicoctl`. Enables `calicoctl` to participate in mutual TLS authentication and identify itself to the etcd server. Example: `/etc/calicoctl/cert.pem` (optional) | string |
-| `etcdCACertFile` | `ETCD_CA_CERT_FILE` | Path to the file containing the root certificate of the certificate authority (CA) that issued the etcd server certificate. Configures `calicoctl` to trust the CA that signed the root certificate. The file may contain multiple root certificates, causing `calicoctl` to trust each of the CAs included. Example: `/etc/calicoctl/ca.pem` (optional) | string |
-| `etcdKey` | | The private key matching the `calicoctl` client certificate. Enables `calicoctl` to participate in mutual TLS authentication and identify itself to the etcd server. For example, please see below.(optional) | string |
-| `etcdCert` | | The client certificate issued to `calicoctl`. Enables `calicoctl` to participate in mutual TLS authentication and identify itself to the etcd server. For example, please see below.(optional) | string |
-| `etcdCACert` | | The root certificate of the certificate authority (CA) that issued the etcd server certificate. Configures `calicoctl` to trust the CA that signed the root certificate. The config file may contain multiple root certificates, causing `calicoctl` to trust each of the CAs included. For example, please see below.(optional) | string |
-
-:::note
-
-- If you are running with TLS enabled, ensure your endpoint addresses use HTTPS.
-- When specifying through environment variables, the `DATASTORE_TYPE` environment
-  variable is required for etcdv3.
-- All environment variables may also be prefixed with `CALICO_`; for example,
-  `CALICO_DATASTORE_TYPE` and `CALICO_ETCD_ENDPOINTS` may be used instead.
-  This is useful if the non-prefixed names clash with existing environment
-  variables defined on your system.
-- The configuration file options `etcdCACert`, `etcdCert` and `etcdKey` do not have
-  corresponding environment variables.
-- Previous versions of `calicoctl` supported `ETCD_SCHEME` and `ETCD_AUTHORITY` environment
- variables as a mechanism for specifying the etcd endpoints. These variables are
- no longer supported. Use `ETCD_ENDPOINTS` instead.
-
-:::
-
-### Example configuration file
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: CalicoAPIConfig
-metadata:
-spec:
- etcdEndpoints: https://etcd1:2379,https://etcd2:2379,https://etcd3:2379
- etcdKeyFile: /etc/calico/key.pem
- etcdCertFile: /etc/calico/cert.pem
- etcdCACertFile: /etc/calico/ca.pem
-```
-
-### Example configuration file with inline CA certificate, client certificate and key
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: CalicoAPIConfig
-metadata:
-spec:
- datastoreType: etcdv3
- etcdEndpoints: 'https://127.0.0.1:2379'
- etcdCACert: |
- -----BEGIN CERTIFICATE-----
- MIICKzCCAZSgAwIBAgIBAzANBgkqhkiG9w0BAQQFADA3MQswCQYDVQQGEwJVUzER
- MA8GA1UEChMITmV0c2NhcGUxFTATBgNVBAsTDFN1cHJpeWEncyBDQTAeFw05NzEw
- MTgwMTM2MjVaFw05OTEwMTgwMTM2MjVaMEgxCzAJBgNVBAYTAlVTMREwDwYDVQQK
- EwhOZXRzY2FwZTENMAsGA1UECxMEUHViczEXMBUGA==
- -----END CERTIFICATE-----
- etcdCert: |
- -----BEGIN CERTIFICATE-----
- gI6iLXgMsp2EOlD56I6FA1jrCtNb01XQvX3eyFuA6g5T1jWGYBDtvQb0WRVkdUy9
- L/uK+sHQwtloCSuakcQAsWV9bajCQtHX8XGu25Yz56kpJ/OJjcishxT6pc/sthum
- A5PX739JsNUi/p5aG+H/6eNx+ukJP7QaM646YCfS5i8S9DJUvim+/BSlKi2ZiOCd
- 0MYH4Xb7lmAOTNmTvSYpKo9J2fZ9erw0MYSBTyjh6F7PRbHBiivgUnJfGQ==
- -----END CERTIFICATE-----
- etcdKey: |
- -----BEGIN RSA PRIVATE KEY-----
- k0dWj16h9P6TvfcNl2iwT4VIwx0uy2faWBED1DrCJcuQCy5nPrts2ZIaAWPi1t3t
- VbDKQvs+KXBEeqh0qYcYkejUXqIF0uKUFLjiQmZssjpL5RHqqWuYKbO87n+Jod1L
- TjGRHdbP0zF2U0LdjM17rc2hpJ3qrmgJ7pOLzbXMcOr+NP1ojRCArXhQ4iLs7D8T
- eHw9QH4luJYtnmk7x03izLMQdLWcKnUbqh/xOVPyazgJHXwRxwNXpMsBVGY=
- -----END RSA PRIVATE KEY-----
-```
-
-### Example using environment variables
-
-```bash
-ETCD_ENDPOINTS=http://myhost1:2379 calicoctl get bgppeers
-```
-
-### Example using etcd DNS discovery
-
-```bash
-ETCD_DISCOVERY_SRV=example.com calicoctl get nodes
-```
-
-### Example using IPv6
-
-Create a single node etcd cluster listening on IPv6 localhost `[::1]`.
-
-```bash
-etcd --listen-client-urls=http://[::1]:2379 --advertise-client-urls=http://[::1]:2379
-```
-
-Use the etcd IPv6 cluster:
-
-```bash
-ETCD_ENDPOINTS=http://[::1]:2379 calicoctl get bgppeers
-```
-
-### Example using mixed IPv4/IPv6
-
-Create a single node etcd cluster listening on IPv4 and IPv6 localhost `[::1]`.
-
-```bash
-etcd --listen-client-urls=http://[::1]:2379,http://127.0.0.1:2379 --advertise-client-urls=http://[::1]:2379
-```
-
-Use the IPv6 endpoint:
-
-```bash
-ETCD_ENDPOINTS=http://[::1]:2379 calicoctl get bgppeers
-```
-
-Use the IPv4 endpoint:
-
-```bash
-ETCD_ENDPOINTS=http://127.0.0.1:2379 calicoctl get bgppeers
-```
-
-### {{nodecontainer}}
-
-It is important to note that not only will calicoctl use the specified keys directly
-on the host to access etcd, **it will also pass on these environment variables
-and volume mount the keys into the started `{{noderunning}}` container.**
-
-Therefore, configuring `{{nodecontainer}}` for etcd is easily accomplished by running
-`calicoctl node run` with the parameters set correctly.
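-
-For example, a sketch with TLS files (the endpoint addresses and file paths are illustrative):
-
-```bash
-export ETCD_ENDPOINTS=https://etcd1:2379,https://etcd2:2379
-export ETCD_CA_CERT_FILE=/etc/calico/ca.pem
-export ETCD_CERT_FILE=/etc/calico/cert.pem
-export ETCD_KEY_FILE=/etc/calico/key.pem
-
-# calicoctl reads these settings, passes them on to the calico/node container it
-# starts, and volume mounts the referenced TLS files into that container.
-sudo -E calicoctl node run
-```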
-
-### Checking the configuration
-
-Here is a simple command to check that the installation and configuration is
-correct.
-
-```bash
-calicoctl get nodes
-```
-
-A correct setup will yield a list of the nodes that have registered. If an
-empty list is returned, either you are pointing at the wrong datastore or no
-nodes have registered. If an error is returned, attempt to correct the
-issue and then try again.
-
-## Next steps
-
-Now you are ready to read and configure most aspects of {{prodname}}. You can
-find the full list of commands in the
-[Command Reference](../../../reference/calicoctl/overview.mdx).
-
-The full list of resources that can be managed, including a description of each,
-can be found in the
-[Resource Definitions](../../../reference/resources/overview.mdx).
diff --git a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/index.mdx b/calico_versioned_docs/version-3.25/operations/calicoctl/configure/index.mdx
deleted file mode 100644
index 6aabb3c936..0000000000
--- a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Configure the calicoctl to access your datastore.
-hide_table_of_contents: true
----
-
-# Configure calicoctl
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/kdd.mdx b/calico_versioned_docs/version-3.25/operations/calicoctl/configure/kdd.mdx
deleted file mode 100644
index 2237e96501..0000000000
--- a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/kdd.mdx
+++ /dev/null
@@ -1,124 +0,0 @@
----
-description: Sample configuration files for kdd.
----
-
-# Configure calicoctl to connect to the Kubernetes API datastore
-
-## Big picture
-
-Learn how to configure the calicoctl CLI tool for your Kubernetes cluster.
-
-## Value
-
-The `calicoctl` CLI tool provides helpful administrative commands for interacting with a {{prodname}} cluster.
-
-## Concepts
-
-### calicoctl vs kubectl
-
-In previous releases, calicoctl has been required to manage Calico API resources in the `projectcalico.org/v3` API group. The calicoctl CLI tool provides important validation and defaulting on these APIs.
-
-In newer releases, the Calico API server performs that defaulting and validation server-side, exposing the same API semantics without a dependency on calicoctl. For this reason, we recommend
-[installing the Calico API server](../../install-apiserver.mdx) and using `kubectl` instead of `calicoctl` for most operations.
-
-calicoctl is still required for the following subcommands:
-
-- [calicoctl node](../../../reference/calicoctl/node/index.mdx)
-- [calicoctl ipam](../../../reference/calicoctl/ipam/index.mdx)
-- [calicoctl convert](../../../reference/calicoctl/convert.mdx)
-- [calicoctl version](../../../reference/calicoctl/version.mdx)
-
-### Default configuration
-
-By default, calicoctl will attempt to read from the Kubernetes API using the default kubeconfig located at `$(HOME)/.kube/config`.
-
-If the default kubeconfig does not exist, or you would like to specify alternative API access information, you can do so using the following configuration options.
-
-## How to
-
-### Complete list of Kubernetes API connection configuration
-
-| Configuration file option | Environment variable | Description | Schema |
-| ------------------------- | -------------------- | --------------------------------------------------------------------------------------------------------- | ---------------------- |
-| `datastoreType` | `DATASTORE_TYPE` | Indicates the datastore to use. [Default: `kubernetes`] | `kubernetes`, `etcdv3` |
-| `kubeconfig` | `KUBECONFIG` | When using the Kubernetes datastore, the location of a kubeconfig file to use, e.g. /path/to/kube/config. | string |
-| `k8sAPIEndpoint` | `K8S_API_ENDPOINT` | Location of the Kubernetes API. Not required if using kubeconfig. [Default: `https://kubernetes-api:443`] | string |
-| `k8sCertFile` | `K8S_CERT_FILE` | Location of a client certificate for accessing the Kubernetes API, e.g., `/path/to/cert`. | string |
-| `k8sKeyFile` | `K8S_KEY_FILE` | Location of a client key for accessing the Kubernetes API, e.g., `/path/to/key`. | string |
-| `k8sCAFile` | `K8S_CA_FILE` | Location of a CA for accessing the Kubernetes API, e.g., `/path/to/ca`. | string |
-| `k8sToken` | | Token to be used for accessing the Kubernetes API. | string |
-
-:::note
-
-All environment variables may also be prefixed with `CALICO_`; for example,
-`CALICO_DATASTORE_TYPE` and `CALICO_KUBECONFIG` may be used instead.
-This is useful if the non-prefixed names clash with existing environment
-variables defined on your system.
-
-:::
-
-### Kubernetes command line
-
-```bash
-DATASTORE_TYPE=kubernetes KUBECONFIG=~/.kube/config calicoctl get nodes
-```
-
-### Example configuration file
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: CalicoAPIConfig
-metadata:
-spec:
- datastoreType: 'kubernetes'
- kubeconfig: '/path/to/.kube/config'
-```
-
-### Example using environment variables
-
-```bash
-export DATASTORE_TYPE=kubernetes
-export KUBECONFIG=~/.kube/config
-calicoctl get workloadendpoints
-```
-
-And using `CALICO_` prefixed names:
-
-```bash
-export CALICO_DATASTORE_TYPE=kubernetes
-export CALICO_KUBECONFIG=~/.kube/config
-calicoctl get workloadendpoints
-```
-
-With multiple `kubeconfig` files:
-
-```bash
-export DATASTORE_TYPE=kubernetes
-export KUBECONFIG=~/.kube/main:~/.kube/auxy
-calicoctl get --context main workloadendpoints
-calicoctl get --context auxy workloadendpoints
-```
-
-### Checking the configuration
-
-Here is a simple command to check that the installation and configuration is
-correct.
-
-```bash
-calicoctl get nodes
-```
-
-A correct setup will yield a list of the nodes that have registered. If an
-empty list is returned, either you are pointing at the wrong datastore or no
-nodes have registered. If an error is returned, attempt to correct the
-issue and then try again.
-
-## Next steps
-
-Now you are ready to read and configure most aspects of {{prodname}}. You can
-find the full list of commands in the
-[Command Reference](../../../reference/calicoctl/overview.mdx).
-
-The full list of resources that can be managed, including a description of each,
-can be found in the
-[Resource Definitions](../../../reference/resources/overview.mdx).
diff --git a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/overview.mdx b/calico_versioned_docs/version-3.25/operations/calicoctl/configure/overview.mdx
deleted file mode 100644
index 9fe78ed2e3..0000000000
--- a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/overview.mdx
+++ /dev/null
@@ -1,74 +0,0 @@
----
-description: Configure calicoctl for datastore access.
----
-
-# Configure calicoctl
-
-## Big picture
-
-Learn how to configure the calicoctl CLI tool for your cluster.
-
-## Value
-
-The `calicoctl` CLI tool provides helpful administrative commands for interacting with a {{prodname}} cluster.
-
-## Concepts
-
-### calicoctl vs kubectl
-
-In previous releases, calicoctl has been required to manage Calico API resources in the `projectcalico.org/v3` API group. The calicoctl CLI tool provides important validation and defaulting on these APIs.
-
-In newer releases, the Calico API server performs that defaulting and validation server-side, exposing the same API semantics without a dependency on calicoctl. For this reason, we recommend
-[installing the Calico API server](../../install-apiserver.mdx) and using `kubectl` instead of `calicoctl` for most operations.
-
-calicoctl is still required for the following subcommands:
-
-- [calicoctl node](../../../reference/calicoctl/node/index.mdx)
-- [calicoctl ipam](../../../reference/calicoctl/ipam/index.mdx)
-- [calicoctl convert](../../../reference/calicoctl/convert.mdx)
-- [calicoctl version](../../../reference/calicoctl/version.mdx)
-
-calicoctl is also required for non-Kubernetes platforms such as OpenStack.
-
-### Default calicoctl behavior
-
-Most `calicoctl` commands require access to the {{prodname}} datastore. By default, calicoctl
-will attempt to read from the Kubernetes API based on the default kubeconfig.
-
-## How to
-
-### Configure access using a Configuration file
-
-By default, `calicoctl` will look for a configuration file at `/etc/calico/calicoctl.cfg`. You can override this using the `--config` option with commands that require datastore access.
-The file can be in either YAML or JSON format. It must be valid and readable by `calicoctl`. For example:
-
-```yaml noValidation
-apiVersion: projectcalico.org/v3
-kind: CalicoAPIConfig
-metadata:
-spec:
- datastoreType: "etcdv3"
- etcdEndpoints: "http://etcd1:2379,http://etcd2:2379"
- ...
-```
-
-### Configure access using environment variables
-
-If `calicoctl` cannot locate, read, or access a configuration file, it will check a specific set of environment variables.
-
-Refer to the section that corresponds to your datastore type for a full set of options
-and examples.
-
-- [Kubernetes API datastore](kdd.mdx)
-
-- [etcd datastore](etcd.mdx)
-
-:::note
-
-When running `calicoctl` inside a container, any environment variables and
-configuration files must be passed to the container so they are available to
-the process inside. It can be useful to keep a running container (that sleeps) configured
-for your datastore; you can then `exec` into the container and work in an
-already configured environment.
-
-:::
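-
-A minimal sketch of running `calicoctl` in a container with the datastore environment passed in, assuming the Kubernetes datastore (the image tag is illustrative):
-
-```bash
-docker run --rm \
-  -e DATASTORE_TYPE=kubernetes \
-  -e KUBECONFIG=/kubeconfig \
-  -v ~/.kube/config:/kubeconfig:ro \
-  calico/ctl:v3.25.0 get nodes
-```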
diff --git a/calico_versioned_docs/version-3.25/operations/calicoctl/index.mdx b/calico_versioned_docs/version-3.25/operations/calicoctl/index.mdx
deleted file mode 100644
index ab048e5496..0000000000
--- a/calico_versioned_docs/version-3.25/operations/calicoctl/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Install and configure the Calico CLI for managing resources.
-hide_table_of_contents: true
----
-
-# calicoctl
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/operations/calicoctl/install.mdx b/calico_versioned_docs/version-3.25/operations/calicoctl/install.mdx
deleted file mode 100644
index 4bb8954a57..0000000000
--- a/calico_versioned_docs/version-3.25/operations/calicoctl/install.mdx
+++ /dev/null
@@ -1,410 +0,0 @@
----
-description: Install the CLI for Calico.
----
-
-# Install calicoctl
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-import CodeBlock from '@theme/CodeBlock';
-
-import { releaseTitle } from '../../variables.js';
-
-export function buildUrl() {
- const url =
- releaseTitle === 'master'
- ? 'https://github.com/projectcalico/calico/releases/latest/download'
- : `https://github.com/projectcalico/calico/releases/download/${releaseTitle}`;
- return url;
-}
-
-export const url = buildUrl();
-
-## Big picture
-
-This guide helps you install the `calicoctl` command line tool to manage {{prodname}} resources
-and perform administrative functions.
-
-## Value
-
-The `calicoctl` command line tool is required to use many of {{prodname}}'s features. It
-is used to manage {{prodname}} policies and configuration, as well as view detailed cluster status.
-
-## Concepts
-
-### API groups
-
-All Kubernetes resources belong to an API group. The API group is indicated by the resource's `apiVersion`. For example, {{prodname}}
-uses resources in the `projectcalico.org/v3` API group for configuration, and the operator uses resources in the `operator.tigera.io/v1` API group.
-
-You can read more about API groups in [the Kubernetes documentation](https://kubernetes.io/docs/reference/using-api/#api-groups).
-
-### calicoctl and kubectl
-
-To manage {{prodname}} APIs in the `projectcalico.org/v3` API group, you should use `calicoctl`. This is because
-`calicoctl` provides important validation and defaulting for these resources that is not available in `kubectl`. However, `kubectl`
-should still be used to manage other Kubernetes resources.
-
-:::note
-
-If you would like to use `kubectl` to manage `projectcalico.org/v3` API resources, you can use the
-[Calico API server](../install-apiserver.mdx).
-
-:::
-
-:::caution
-
-Never modify resources in the `crd.projectcalico.org` API group directly. These are internal data representations
-and modifying them directly may result in unexpected behavior.
-
-:::
-
-In addition to resource management, `calicoctl` also enables other {{prodname}} administrative tasks such as viewing IP pool utilization
-and BGP status.
-
-### Datastore
-
-{{prodname}} objects are stored in one of two datastores, either etcd or Kubernetes. The choice of datastore is determined at the time {{prodname}}
-is installed. Typically for Kubernetes installations the Kubernetes datastore is the default.
-
-You can run `calicoctl` on any host with network access to the {{prodname}} datastore as either a binary or a container.
-For step-by-step instructions, refer to the section that corresponds to your desired deployment.
-
-
-
-
-
-## How to
-
-:::note
-
-Make sure you always install the version of `calicoctl` that matches the version of {{prodname}} running on your cluster.
-
-:::
-
-- [Install calicoctl as a binary on a single host](#install-calicoctl-as-a-binary-on-a-single-host)
-- [Install calicoctl as a kubectl plugin on a single host](#install-calicoctl-as-a-kubectl-plugin-on-a-single-host)
-- [Install calicoctl as a container on a single host](#install-calicoctl-as-a-container-on-a-single-host)
-
-### Install calicoctl as a binary on a single host
-
-
-
-
-1. Log into the host, open a terminal prompt, and navigate to the location where
- you want to install the binary.
-
- :::tip
-
- Consider navigating to a location that's in your `PATH`. For example,
- `/usr/local/bin/`.
-
- :::
-
-1. Use the following command to download the `calicoctl` binary.
-
- curl -L {url}/calicoctl-linux-amd64 -o calicoctl
-
-1. Set the file to be executable.
-
- ```bash
- chmod +x ./calicoctl
- ```
-
- :::note
-
- If the location of `calicoctl` is not already in your `PATH`, move the file
- to one that is or add its location to your `PATH`. This will allow you to invoke it
- without having to prepend its location.
-
- :::
-
-
-
-
-1. Log into the host, open a terminal prompt, and navigate to the location where
- you want to install the binary.
-
- :::tip
-
- Consider navigating to a location that's in your `PATH`. For example,
- `/usr/local/bin/`.
-
- :::
-
-1. Use the following command to download the `calicoctl` binary.
-
- curl -L {url}/calicoctl-darwin-amd64 -o calicoctl
-
-1. Set the file to be executable.
-
- ```bash
- chmod +x calicoctl
- ```
-
- :::note
-
-   If you see a `cannot be opened because the developer cannot be verified` error when using `calicoctl` for the first time,
-   go to `Applications > System Preferences > Security & Privacy` and, in the `General` tab at the bottom of the window, click `Allow anyway`.
-
- :::
-
- :::note
-
- If the location of `calicoctl` is not already in your `PATH`, move the file
- to one that is or add its location to your `PATH`. This will allow you to invoke it
- without having to prepend its location.
-
- :::
-
-
-
-
-1. Use the following PowerShell command to download the `calicoctl` binary.
-
- :::tip
-
- Consider running PowerShell as administrator and navigating
- to a location that's in your `PATH`. For example, `C:\Windows`.
-
- :::
-
-Invoke-WebRequest -Uri "{url}/calicoctl-windows-amd64.exe" -OutFile "calicoctl.exe"
-
-
-
-
-1. Log into the host, open a terminal prompt, and navigate to the location where
- you want to install the binary.
-
- :::tip
-
- Consider navigating to a location that's in your `PATH`. For example,
- `/usr/local/bin/`.
-
- :::
-
-1. Use the following command to download the `calicoctl` binary.
-
- curl -L {url}/calicoctl-linux-ppc64le -o calicoctl
-
-1. Set the file to be executable.
-
- ```bash
- chmod +x calicoctl
- ```
-
- :::note
-
- If the location of `calicoctl` is not already in your `PATH`, move the file
- to one that is or add its location to your `PATH`. This will allow you to invoke it
- without having to prepend its location.
-
- :::
-
-
-
-
-1. Log into the host, open a terminal prompt, and navigate to the location where
- you want to install the binary.
-
- :::tip
-
- Consider navigating to a location that's in your `PATH`. For example,
- `/usr/local/bin/`.
-
- :::
-
-1. Use the following command to download the `calicoctl` binary.
-
- curl -L {url}/calicoctl-linux-arm64 -o calicoctl
-
-1. Set the file to be executable.
-
- ```bash
- chmod +x calicoctl
- ```
-
- :::note
-
- If the location of `calicoctl` is not already in your `PATH`, move the file
- to one that is or add its location to your `PATH`. This will allow you to invoke it
- without having to prepend its location.
-
- :::
-
-
-
-
-### Install calicoctl as a kubectl plugin on a single host
-
-
-
-
-1. Log into the host, open a terminal prompt, and navigate to the location where
- you want to install the binary.
-
- :::tip
-
- Consider navigating to a location that's in your `PATH`. For example,
- `/usr/local/bin/`.
-
- :::
-
-1. Use the following command to download the `calicoctl` binary.
-
- curl -L {url}/calicoctl-linux-amd64 -o kubectl-calico
-
-1. Set the file to be executable.
-
- ```bash
- chmod +x kubectl-calico
- ```
-
- :::note
-
- If the location of `kubectl-calico` is not already in your `PATH`, move the file
- to one that is or add its location to your `PATH`. This is required in order for
- kubectl to detect the plugin and allow you to use it.
-
- :::
-
-
-
-
-1. Log into the host, open a terminal prompt, and navigate to the location where
- you want to install the binary.
-
- :::tip
-
- Consider navigating to a location that's in your `PATH`. For example,
- `/usr/local/bin/`.
-
- :::
-
-1. Use the following command to download the `calicoctl` binary.
-
- curl -L {url}/calicoctl-darwin-amd64 -o kubectl-calico
-
-1. Set the file to be executable.
-
- ```bash
- chmod +x kubectl-calico
- ```
-
- :::note
-
-   If you see a `cannot be opened because the developer cannot be verified` error when using `calicoctl` for the first time,
-   go to `Applications > System Preferences > Security & Privacy` and, in the `General` tab at the bottom of the window, click `Allow anyway`.
-
- > If the location of `kubectl-calico` is not already in your `PATH`, move the file
- > to one that is or add its location to your `PATH`. This is required in order for
- > kubectl to detect the plugin and allow you to use it.
-
- :::
-
-
-
-
-1. Use the following PowerShell command to download the `calicoctl` binary.
-
- :::tip
-
- Consider running PowerShell as administrator and navigating
- to a location that's in your `PATH`. For example, `C:\Windows`.
-
- :::
-
-Invoke-WebRequest -Uri "{url}/calicoctl-windows-amd64.exe" -OutFile kubectl-calico.exe
-
-
-
-
-1. Log into the host, open a terminal prompt, and navigate to the location where
- you want to install the binary.
-
- :::tip
-
- Consider navigating to a location that's in your `PATH`. For example,
- `/usr/local/bin/`.
-
- :::
-
-1. Use the following command to download the `calicoctl` binary.
-
- curl -L {url}/calicoctl-linux-ppc64le -o kubectl-calico
-
-1. Set the file to be executable.
-
- ```bash
- chmod +x kubectl-calico
- ```
-
- :::note
-
- If the location of `kubectl-calico` is not already in your `PATH`, move the file
- to one that is or add its location to your `PATH`. This is required in order for
- kubectl to detect the plugin and allow you to use it.
-
- :::
-
-
-
-
-1. Log into the host, open a terminal prompt, and navigate to the location where
- you want to install the binary.
-
- :::tip
-
- Consider navigating to a location that's in your `PATH`. For example,
- `/usr/local/bin/`.
-
- :::
-
-1. Use the following command to download the `calicoctl` binary.
-
- curl -L {url}/calicoctl-linux-arm64 -o kubectl-calico
-
-1. Set the file to be executable.
-
- ```bash
- chmod +x kubectl-calico
- ```
-
- :::note
-
- If the location of `kubectl-calico` is not already in your `PATH`, move the file
- to one that is or add its location to your `PATH`. This is required in order for
- kubectl to detect the plugin and allow you to use it.
-
- :::
-
-
-
-
-Verify the plugin works.
-
-```
-kubectl calico -h
-```
-
-You can now run any `calicoctl` subcommands through `kubectl calico`.
-
-:::note
-
-If you run these commands from your local machine (instead of a host node), some of
-the node related subcommands will not work (like node status).
-
-:::
-
-### Install calicoctl as a container on a single host
-
-To install `calicoctl` as a container on a single host, log into the
-target host and issue the following command.
-
-```bash
-docker pull {{registry}}{{imageNames.calicoctl}}:{{releaseTitle}}
-```
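-
-You can then run `calicoctl` subcommands through the container, for example by mounting a host configuration file (a sketch; datastore configuration itself is covered in the next step, and `/etc/calico/calicoctl.cfg` is the default configuration location):
-
-```bash
-docker run --rm \
-  -v /etc/calico/calicoctl.cfg:/etc/calico/calicoctl.cfg:ro \
-  {{registry}}{{imageNames.calicoctl}}:{{releaseTitle}} get nodes
-```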
-
-**Next step**:
-
-[Configure `calicoctl` to connect to your datastore](configure/index.mdx).
diff --git a/calico_versioned_docs/version-3.25/operations/certificate-management.mdx b/calico_versioned_docs/version-3.25/operations/certificate-management.mdx
deleted file mode 100644
index 6831aa2c76..0000000000
--- a/calico_versioned_docs/version-3.25/operations/certificate-management.mdx
+++ /dev/null
@@ -1,136 +0,0 @@
----
-description: Control the issuer of certificates used by Calico
----
-
-# Manage TLS certificates used by Calico
-
-## Big picture
-
-Enable custom workflows for issuing and signing certificates used to secure communication between {{prodname}} components.
-
-## Value
-
-Some deployments have security requirements that strictly minimize or eliminate access to private keys, or that
-require control over the trusted certificates throughout the cluster. Using the Kubernetes Certificates API to automate
-certificate issuance, {{prodname}} provides a simple configuration option that you add to your installation.
-
-## Before you begin
-
-**Supported algorithms**
-
-- Private Key Pair: RSA (size: 2048, 4096, 8192), ECDSA (curve: 256, 384, 521)
-- Certificate Signature: RSA (sha: 256, 384, 512), ECDSA (sha: 256, 384, 512)
-
-## How to
-
-- [Enable certificate management](#enable-certificate-management)
-- [Verify and monitor](#verify-and-monitor)
-- [Implement your own signing/approval process](#implement-your-own-signing-and-approval-process)
-
-### Enable certificate management
-
-1. Modify your [Installation](../reference/installation/api.mdx#operator.tigera.io/v1.Installation)
-   resource to add the `certificateManagement` section, then apply the following change to your cluster.
-
-```yaml
-apiVersion: operator.tigera.io/v1
-kind: Installation
-metadata:
- name: default
-spec:
- certificateManagement:
-    caCert: <your CA certificate>
-    signerName: <your-domain>/<signer-name>
- signatureAlgorithm: SHA512WithRSA
- keyAlgorithm: RSAWithSize4096
-```
-
-Done! If you have an automatic signer and approver, there is nothing left to do. The next section explains in more detail
-how to verify and monitor the status.
-
-### Verify and monitor
-
-1. Monitor your pods as they come up:
-
-```
-kubectl get pod -n calico-system -w
-NAMESPACE NAME READY STATUS RESTARTS AGE
-calico-system calico-node-5ckvq 0/1 Pending 0 0s
-calico-system calico-typha-688c9957f5-h9c5w 0/1 Pending 0 0s
-calico-system calico-node-5ckvq 0/1 Init:0/3 0 1s
-calico-system calico-typha-688c9957f5-h9c5w 0/1 Init:0/1 0 1s
-calico-system calico-node-5ckvq 0/1 PodInitializing 0 2s
-calico-system calico-typha-688c9957f5-h9c5w 0/1 PodInitializing 0 2s
-calico-system calico-node-5ckvq 1/1 Running 0 3s
-calico-system calico-typha-688c9957f5-h9c5w 1/1 Running 0 3s
-```
-
-During the `Init` phase, an init container in the pod creates a certificate signing request (CSR). The pod stays in the
-`Init` phase until the CSR has been approved and signed by the certificate authority, after which it continues with `PodInitializing`
-and eventually `Running`.
-
-1. Monitor certificate signing requests:
-
-```
-kubectl get csr -w
-NAME AGE REQUESTOR CONDITION
-calico-system:calico-node-5ckvq:9a3a10 0s system:serviceaccount:calico-system:calico-node Pending
-calico-system:calico-node-5ckvq:9a3a10 0s system:serviceaccount:calico-system:calico-node Pending,Issued
-calico-system:calico-node-5ckvq:9a3a10 0s system:serviceaccount:calico-system:calico-node Approved,Issued
-calico-system:typha-688c9957f5-h9c5w:2b0d82 0s system:serviceaccount:calico-system:calico-typha Pending
-calico-system:typha-688c9957f5-h9c5w:2b0d82 0s system:serviceaccount:calico-system:calico-typha Pending,Issued
-calico-system:typha-688c9957f5-h9c5w:2b0d82 0s system:serviceaccount:calico-system:calico-typha Approved,Issued
-```
-
-A CSR will be `Pending` until it has been `Issued` and `Approved`. The name of a CSR is based on the namespace, the pod
-name and the first 6 characters of the pod's UID. The pod will be `Pending` until the CSR has been `Approved`.
-
-1. Monitor the status of this feature using the `TigeraStatus`:
-
-```
-kubectl get tigerastatus
-NAME AVAILABLE PROGRESSING DEGRADED SINCE
-calico True False False 2m40s
-```
-
-### Implement your own signing and approval process
-
-**Required steps**
-
-This feature uses API version `certificates.k8s.io/v1beta1` for [certificate signing requests](https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/).
-To automate the signing and approval process, run a server that performs the following actions:
-
-1. Watch `CertificateSigningRequests` resources with status `Pending` and a `spec.signerName` that matches your signer.
-
- :::note
-
-   You can skip this step if you are using a Kubernetes version before v1.18 (the signerName field was not available).
-
- :::
-
-1. For each `Pending` CSR, perform (security) checks (see next heading)
-1. Issue a certificate and update `.status.certificate`
-1. Approve the CSR and update `.status.conditions`
-
-**Security requirements**
-
-Based on your requirements you may want to implement custom checks to make sure that no certificates are issued for a malicious user.
-When a CSR is created, the kube-apiserver adds immutable fields to the spec to help you perform checks:
-
-- `.spec.username`: username of the requester
-- `.spec.groups`: user groups of the requester
-- `.spec.request`: certificate request in PEM format
-
-Verify that the user and/or group match with the requested certificate subject (alt) names.
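-
-A minimal sketch of inspecting those fields with `kubectl` (the CSR name is hypothetical); the request is base64-encoded PEM, so it is decoded before being parsed with `openssl`:
-
-```bash
-CSR=calico-system:calico-node-5ckvq:9a3a10
-
-# Requester identity recorded by the kube-apiserver
-kubectl get csr "$CSR" -o jsonpath='{.spec.username}{"\n"}{.spec.groups}{"\n"}'
-
-# Subject and subject alternative names inside the certificate request
-kubectl get csr "$CSR" -o jsonpath='{.spec.request}' | base64 -d | openssl req -noout -text | grep -A1 'Subject'
-```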
-
-**Implement your signer and approver using golang**
-
-- Use [client-go](https://github.com/kubernetes/client-go) to create a clientset
-- To watch CSRs, use `clientset.CertificatesV1().CertificateSigningRequests().Watch(..)`
-- To issue the certificate use `clientset.CertificatesV1().CertificateSigningRequests().UpdateStatus(...)`
-- To approve the CSR use `clientset.CertificatesV1().CertificateSigningRequests().UpdateApproval(...)`
-
-### Additional resources
-
-- Read [kubernetes certificate signing requests](https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/) for more information on CSRs
-- Use [client-go](https://github.com/kubernetes/client-go) to implement a controller to sign and approve a CSR
diff --git a/calico_versioned_docs/version-3.25/operations/datastore-migration.mdx b/calico_versioned_docs/version-3.25/operations/datastore-migration.mdx
deleted file mode 100644
index ca16821038..0000000000
--- a/calico_versioned_docs/version-3.25/operations/datastore-migration.mdx
+++ /dev/null
@@ -1,146 +0,0 @@
----
-description: Migrate your cluster from using an etcdv3 datastore to a Kubernetes datastore.
----
-
-# Migrate Calico data from an etcdv3 datastore to a Kubernetes datastore
-
-## Big picture
-
-Switch your {{prodname}} datastore from etcdv3 to Kubernetes on a live cluster.
-
-## Value
-
-Using Kubernetes as your datastore provides a number of benefits over using etcdv3
-directly, including fewer components and better support for role based access
-control. For most users, using the Kubernetes data store will provide a better
-experience. We provide a seamless way to migrate your data from an existing
-cluster with an etcdv3 datastore to a Kubernetes datastore. For the
-complete set of advantages of using a Kubernetes datastore over an etcd datastore, see
-[{{prodname}} Datastore](../getting-started/kubernetes/hardway/the-calico-datastore.mdx#using-kubernetes-as-the-datastore)
-documentation.
-
-## Before you begin
-
-- Ensure that your Calico installation is configured to use an etcdv3 datastore. This guide does not apply to clusters installed using the Kubernetes API datastore.
-
-- The **latest version of calicoctl** must be [installed and configured to access etcd](calicoctl/install.mdx).
-
- :::note
-
- Since the steps below require modifying calicoctl configuration, we do not recommend using calicoctl installed
- as a Kubernetes pod for this procedure. Instead, install the binary directly on a host with access to etcd and the Kubernetes API.
-
- :::
-
-## How To
-
-### Migrate the datastore
-
-To migrate contents of the datastore, we will be using the `calicoctl datastore migrate`
-command and subcommands. For more information, see the
-[calicoctl datastore migrate](../reference/calicoctl/datastore/migrate/overview.mdx)
-documentation.
-
-1. Lock the etcd datastore for migration. This prevents any changes to the data from
- affecting the cluster.
-
- ```
- calicoctl datastore migrate lock
- ```
-
- :::note
-
- After running the above command, you cannot make changes to the configuration of your cluster until the
- migration is complete. New pods will not be started until after the migration.
-
- :::
-
-1. Export the datastore contents to a file.
-
- ```
- calicoctl datastore migrate export > etcd-data
- ```
-
-1. Configure `calicoctl` to access the
- [Kubernetes datastore](calicoctl/configure/kdd.mdx).
-
-1. Import the datastore contents from your exported file.
-
- ```
- calicoctl datastore migrate import -f etcd-data
- ```
-
-1. Verify that the datastore was properly imported by using `calicoctl` to query for
-   {{prodname}} resources that exist in the etcd datastore (e.g. networkpolicy).
-   A broader spot-check sketch appears after these steps.
-
- ```
- calicoctl get networkpolicy
- ```
-
-1. Configure {{prodname}} to read from the Kubernetes datastore. Follow the
- directions to install {{prodname}} with the Kubernetes datastore. The
- installation instructions contain the relevant version of the
- `calico.yaml` file to apply.
-
- ```
- kubectl apply -f calico.yaml
- ```
-
-1. Wait for Calico to complete its rolling update before continuing. You can monitor progress with the following command:
-
- ```
- kubectl rollout status daemonset calico-node -n kube-system
- ```
-
-1. Unlock the datastore. This allows the {{prodname}} resources to affect the cluster again.
-
- ```
- calicoctl datastore migrate unlock
- ```
-
- :::note
-
- Once the Kubernetes datastore is unlocked, the datastore migration
- cannot be rolled back. Make sure that the Kubernetes datastore is populated with
- all of the expected {{prodname}} resources prior to unlocking the datastore.
-
- :::
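-
-Once the migration is unlocked, a quick spot-check that other resource types survived the move can be reassuring; which resources exist depends on your cluster, so treat this as a sketch:
-
-```bash
-# A few common resource types to compare against the original etcd datastore
-calicoctl get ippools -o wide
-calicoctl get bgppeers
-calicoctl get globalnetworkpolicies
-```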
-
-### Roll back the datastore migration
-
-Rolling back the datastore migration can only be done if the original etcd datastore still exists
-and the Kubernetes datastore was not unlocked after the datastore resources were imported. The
-following steps delete the {{prodname}} resources imported into the Kubernetes datastore and
-configure the cluster to once again read from the original etcd datastore.
-
-1. Lock the Kubernetes datastore.
-
- ```
- calicoctl datastore migrate lock
- ```
-
-1. Delete all of the {{prodname}} CRDs. This will remove all of the data imported into
- the Kubernetes datastore.
-
- ```
- kubectl delete $(kubectl get crds -o name | grep projectcalico.org)
- ```
-
-1. Configure {{prodname}} to read from the etcd datastore. Follow the
- directions to install {{prodname}} with the etcd datastore. The
- installation instructions contain the relevant version of the
- `calico.yaml` file to apply.
-
- ```
- kubectl apply -f calico.yaml
- ```
-
-1. Configure `calicoctl` to access the
- [etcd datastore](calicoctl/configure/etcd.mdx).
-
-1. Unlock the etcd datastore. This allows the {{prodname}} resources to affect the cluster again.
-
- ```
- calicoctl datastore migrate unlock
- ```
diff --git a/calico_versioned_docs/version-3.25/operations/decommissioning-a-node.mdx b/calico_versioned_docs/version-3.25/operations/decommissioning-a-node.mdx
deleted file mode 100644
index bdc0a56f94..0000000000
--- a/calico_versioned_docs/version-3.25/operations/decommissioning-a-node.mdx
+++ /dev/null
@@ -1,94 +0,0 @@
----
-description: Manually remove a node from a cluster that is installed with Calico.
----
-
-# Decommission a node
-
-## About decommissioning nodes
-
-If you are running the [node controller](../reference/kube-controllers/configuration.mdx)
-or using the Kubernetes API datastore in policy-only mode, you do not need to manually decommission nodes.
-
-In other configurations, you may need to manually decommission a node for one
-of the following reasons.
-
-- You are decommissioning a host running `{{nodecontainer}}` or removing it from your
- cluster.
-- You are renaming a node.
-- You are receiving an error about an IP address already in use.
-- Readiness checks are failing due to unreachable peers that are no longer in the
- cluster.
-- Hosts are regularly added and removed from your cluster.
-
-## Purpose of this page
-
-Provide guidance on how to remove a host that is part of a {{prodname}} cluster
-and clean up the associated [node resource reference](../reference/resources/node.mdx)
-information.
-
-## Prerequisites
-
-- Before removing any Node resource from the datastore, stop the `{{nodecontainer}}`
-  container on the corresponding host and ensure that it will not be restarted.
-- You must have [calicoctl configured](calicoctl/install.mdx) and operational to run
- the commands listed here.
-
-## Removing a node resource
-
-Removing a Node resource will also remove the Workload Endpoint, Host
-Endpoint, and IP Address resources and any other sub configuration items
-associated with that Node.
-
-:::note
-
-- Deleting a Node resource may be service impacting if the host is still in
- service. Ensure that the host is no longer in service before deleting the
- Node resource.
-- Any configuration specific to the node will be removed. This would be
- configuration like node BGP peerings or custom Felix configs.
-
-:::
-
-## Removing a single node resource
-
-See the example below for how to remove a node with the `calicoctl` command.
-
-Before deleting, review the cautions in [Removing a Node resource](#removing-a-node-resource) above.
-
-```bash
-calicoctl delete node <nodeName>
-```
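-
-Afterwards, a quick sanity check that the node no longer appears (output depends on your cluster):
-
-```bash
-calicoctl get nodes
-```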
-
-## Removing multiple node resources
-
-To remove several Nodes, a file can be created with several Node resources and
-then be passed to the `calicoctl delete` command with the `-f` flag.
-Below is an example of how to create a file of Nodes and delete them.
-
-1. Create a file with the [Node resources](../reference/resources/node.mdx) that need
- to be removed. For example:
-
- ```yaml
- - apiVersion: projectcalico.org/v3
- kind: Node
- metadata:
- name: node-02
- - apiVersion: projectcalico.org/v3
- kind: Node
- metadata:
- name: node-03
- ```
-
-2. To delete the nodes listed in the file, pass it to `calicoctl delete` as shown below.
-
- :::caution
-
- See [Removing a Node resource](#removing-a-node-resource) above.
-
- :::
-
- ```bash
- calicoctl delete -f nodes_to_delete.yaml
- ```
-
diff --git a/calico_versioned_docs/version-3.25/operations/ebpf/enabling-ebpf.mdx b/calico_versioned_docs/version-3.25/operations/ebpf/enabling-ebpf.mdx
deleted file mode 100644
index f65fd76972..0000000000
--- a/calico_versioned_docs/version-3.25/operations/ebpf/enabling-ebpf.mdx
+++ /dev/null
@@ -1,381 +0,0 @@
----
-description: Step-by-step instructions for enabling the eBPF dataplane.
----
-
-# Enable the eBPF dataplane
-
-import EbpfValue from '@site/calico_versioned_docs/version-3.25/_includes/content/_ebpf-value.mdx';
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Enable the eBPF dataplane on an existing cluster.
-
-## Value
-
-
-
-## Concepts
-
-### eBPF
-
-eBPF (or "extended Berkeley Packet Filter") is a technology that allows safe mini programs to be attached to various low-level hooks in the Linux kernel. eBPF has a wide variety of uses, including networking, security, and tracing. You’ll see a lot of non-networking projects leveraging eBPF, but for {{prodname}} our focus is on networking, and in particular, pushing the networking capabilities of the latest Linux kernels to the limit.
-
-## Before you begin
-
-### Supported
-
-- x86-64
-- ARM64 (community supported, not actively regression tested by the {{prodname}} team)
-
-- Distributions:
-
- - Generic or kubeadm
- - kOps
- - OpenShift
- - EKS
- - AKS with limitations:
- - [AKS with Azure CNI and Calico network policy](../../getting-started/kubernetes/managed-public-cloud/aks.mdx#install-aks-with-calico-for-network-policy) works, but it is not possible to disable kube-proxy resulting in wasted resources and suboptimal performance.
- - [AKS with {{prodname}} networking](../../getting-started/kubernetes/managed-public-cloud/aks.mdx#install-aks-with-calico-networking) is in testing with the eBPF dataplane. This should be a better solution overall but, at time of writing, the testing was not complete.
- - RKE (RKE2 recommended because it supports disabling `kube-proxy`)
-
-- Linux distribution/kernel:
-
- - Ubuntu 20.04.
- - Red Hat v8.2 with Linux kernel v4.18.0-193 or above (Red Hat have backported the required features to that build).
- - Another [supported distribution](../../getting-started/kubernetes/requirements.mdx) with Linux kernel v5.3 or above. Kernel v5.8 or above with CO-RE enabled is recommended for better performance.
-
-- An underlying network fabric that allows VXLAN traffic between hosts. In eBPF mode, VXLAN is used to forward Kubernetes NodePort traffic.
-
-### Not supported
-
-- Other processor architectures.
-
-- Distributions:
-
- - GKE. This is because of an incompatibility with the GKE CNI plugin.
-
-- Clusters with some eBPF nodes and some standard dataplane and/or Windows nodes.
-- IPv6.
-- Floating IPs.
-- SCTP (either for policy or services). This is due to lack of kernel support for the SCTP checksum in BPF.
-- `Log` action in policy rules. This is because the `Log` action maps to the iptables `LOG` action and BPF programs cannot access that log.
-- VLAN-based traffic.
-
-### Performance
-
-For best pod-to-pod performance, we recommend using an underlying network that doesn't require Calico to use an overlay. For example:
-
-- A cluster within a single AWS subnet.
-- A cluster using a compatible cloud provider's CNI (such as the AWS VPC CNI plugin).
-- An on-prem cluster with BGP peering configured.
-
-If you must use an overlay, we recommend that you use VXLAN, not IPIP. VXLAN has much better performance than IPIP in
-eBPF mode due to various kernel optimisations.
-
-## How to
-
-- [Verify that your cluster is ready for eBPF mode](#verify-that-your-cluster-is-ready-for-ebpf-mode)
-- [Configure {{prodname}} to talk directly to the API server](#configure-calico-to-talk-directly-to-the-api-server)
-- [Configure kube-proxy](#configure-kube-proxy)
-- [Enable eBPF mode](#enable-ebpf-mode)
-- [Try out DSR mode](#try-out-dsr-mode)
-- [Reversing the process](#reversing-the-process)
-
-### Verify that your cluster is ready for eBPF mode
-
-This section explains how to make sure your cluster is suitable for eBPF mode.
-
-To check that the kernel on a node is suitable, you can run
-
-```bash
-uname -rv
-```
-
-The output should look like this:
-
-```
-5.4.0-42-generic #46-Ubuntu SMP Fri Jul 10 00:24:02 UTC 2020
-```
-
-In this case the kernel version is v5.4, which is suitable.
-
-On Red Hat-derived distributions, you may see something like this:
-
-```
-4.18.0-193.el8.x86_64 (mockbuild@x86-vm-08.build.eng.bos.redhat.com)
-```
-
-Since the Red Hat kernel is v4.18 with at least build number 193, this kernel is suitable.
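-
-To check every node at once, one convenient (if optional) approach is to read the kernel version that each kubelet reports:
-
-```bash
-# Prints one row per node with its kernel version
-kubectl get nodes -o custom-columns=NAME:.metadata.name,KERNEL:.status.nodeInfo.kernelVersion
-```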
-
-### Configure {{prodname}} to talk directly to the API server
-
-In eBPF mode, {{prodname}} implements Kubernetes service networking directly (rather than relying on `kube-proxy`).
-Of course, this makes it highly desirable to disable `kube-proxy` when running in eBPF mode to save resources
-and avoid confusion over which component is handling services.
-
-To be able to disable `kube-proxy`, {{prodname}} needs to communicate to the API server _directly_ rather than
-going through `kube-proxy`. To make _that_ possible, we need to find a persistent, static way to reach the API server.
-The best way to do that varies by Kubernetes distribution:
-
-- If you created a cluster manually (for example by using `kubeadm`) then the right address to use depends on whether you
- opted for a high-availability cluster with multiple API servers or a simple one-node API server.
-
- - If you opted to set up a high availability cluster then you should use the address of the load balancer that you
- used in front of your API servers. As noted in the Kubernetes documentation, a load balancer is required for a
- HA set-up but the precise type of load balancer is not specified.
- - If you opted for a single control plane node then you can use the address of the control plane node itself. However,
- it's important that you use a _stable_ address for that node such as a dedicated DNS record, or a static IP address.
- If you use a dynamic IP address (such as an EC2 private IP) then the address may change when the node is restarted
- causing {{prodname}} to lose connectivity to the API server.
-
-- `kops` typically sets up a load balancer of some sort in front of the API server. You should use
-  the FQDN and port of the API load balancer, for example `api.internal.<clustername>` as the `KUBERNETES_SERVICE_HOST`
-  below and 443 as the `KUBERNETES_SERVICE_PORT`.
-- OpenShift requires various DNS records to be created for the cluster; one of these is exactly what we need:
-  `api-int.<cluster_name>.<base_domain>` should point to the API server or to the load balancer in front of the
-  API server. Use that (filling in the `<cluster_name>` and `<base_domain>` as appropriate for your cluster) for the
-  `KUBERNETES_SERVICE_HOST` below. OpenShift uses 6443 for the `KUBERNETES_SERVICE_PORT`.
-- For AKS and EKS clusters you should use the FQDN of the API server's load balancer. This can be found with
- ```
- kubectl cluster-info
- ```
- which gives output like the following:
- ```
- Kubernetes master is running at https://60F939227672BC3D5A1B3EC9744B2B21.gr7.us-west-2.eks.amazonaws.com
- ...
- ```
- In this example, you would use `60F939227672BC3D5A1B3EC9744B2B21.gr7.us-west-2.eks.amazonaws.com` for
- `KUBERNETES_SERVICE_HOST` and `443` for `KUBERNETES_SERVICE_PORT` when creating the config map.
-- MKE and Rancher neither allow `kube-proxy` to be disabled nor provide a stable address for the
- API server that is suitable for the next step. The best option on these platforms is to
-
-  - Let {{prodname}} connect to the API server through `kube-proxy` as normal (by skipping the step below to create the
- `kubernetes-services-endpoint` config map).
- - Then, follow the instructions in [Avoiding conflicts with kube-proxy](#avoiding-conflicts-with-kube-proxy) below,
- or connectivity will fail when eBPF mode is enabled.
-
-**The next step depends on whether you installed {{prodname}} using the operator, or a manifest:**
-
-
-
-
-If you installed {{prodname}} using the operator, create the following config map in the `tigera-operator` namespace using the host and port determined above:
-
-```yaml
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: kubernetes-services-endpoint
- namespace: tigera-operator
-data:
-  KUBERNETES_SERVICE_HOST: "<API server host>"
-  KUBERNETES_SERVICE_PORT: "<API server port>"
-```
-
-The operator will pick up the change to the config map automatically and do a rolling update of {{prodname}} to pass on the change. Confirm that pods restart and then reach the `Running` state with the following command:
-
-```
-watch kubectl get pods -n calico-system
-```
-
-If you do not see the pods restart, then it's possible that the `ConfigMap` wasn't picked up; Kubernetes is sometimes slow to propagate `ConfigMap`s (see Kubernetes [issue #30189](https://github.com/kubernetes/kubernetes/issues/30189)). You can try restarting the operator.
-
-
-
-
-If you installed {{prodname}} using a manifest, create the following config map in the `kube-system` namespace using the host and port determined above:
-
-```yaml
-kind: ConfigMap
-apiVersion: v1
-metadata:
- name: kubernetes-services-endpoint
- namespace: kube-system
-data:
-  KUBERNETES_SERVICE_HOST: "<API server host>"
-  KUBERNETES_SERVICE_PORT: "<API server port>"
-```
-
-Wait 60s for kubelet to pick up the `ConfigMap` (see Kubernetes [issue #30189](https://github.com/kubernetes/kubernetes/issues/30189)); then, restart the {{prodname}} pods to pick up the change:
-
-```
-kubectl delete pod -n kube-system -l k8s-app=calico-node
-kubectl delete pod -n kube-system -l k8s-app=calico-kube-controllers
-```
-
-And, if using Typha:
-
-```
-kubectl delete pod -n kube-system -l k8s-app=calico-typha
-```
-
-Confirm that pods restart and then reach the `Running` state with the following command:
-
-```
-watch "kubectl get pods -n kube-system | grep calico"
-```
-
-You can verify that the change was picked up by checking the logs of one of the {{nodecontainer}} pods.
-
-```
-kubectl get po -n kube-system -l k8s-app=calico-node
-```
-
-Should show one or more pods:
-
-```
-NAME READY STATUS RESTARTS AGE
-{{noderunning}}-d6znw 1/1 Running 0 48m
-...
-```
-
-Then, to search the logs, choose a pod and run:
-
-```
-kubectl logs -n kube-system <pod name> | grep KUBERNETES_SERVICE_HOST
-```
-
-You should see the following log, with the correct `KUBERNETES_SERVICE_...` values.
-
-```
-2020-08-26 12:26:29.025 [INFO][7] daemon.go 182: Kubernetes server override env vars. KUBERNETES_SERVICE_HOST="172.16.101.157" KUBERNETES_SERVICE_PORT="6443"
-```
-
-
-
-
-### Configure kube-proxy
-
-In eBPF mode, {{prodname}} replaces `kube-proxy`, so running both wastes resources and reduces performance.
-This section explains how to disable `kube-proxy` in some common environments.
-
-#### Clusters that run `kube-proxy` with a `DaemonSet` (such as `kubeadm`)
-
-For a cluster that runs `kube-proxy` in a `DaemonSet` (such as a `kubeadm`-created cluster), you can disable `kube-proxy` reversibly by adding a node selector to `kube-proxy`'s `DaemonSet` that matches no nodes, for example:
-
-```
-kubectl patch ds -n kube-system kube-proxy -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}'
-```
-
-Then, should you want to start `kube-proxy` again, you can simply remove the node selector.
-
-:::note
-
-This approach is not suitable for AKS with Azure CNI since that platform makes use of the Kubernetes add-on manager;
-the change will be reverted by the system. For AKS, you should follow [Avoiding conflicts with kube-proxy](#avoiding-conflicts-with-kube-proxy)
-below.
-
-:::
-
-#### OpenShift
-
-If you are running OpenShift, you can disable `kube-proxy` as follows:
-
-```
-kubectl patch networks.operator.openshift.io cluster --type merge -p '{"spec":{"deployKubeProxy": false}}'
-```
-
-To re-enable it:
-
-```
-kubectl patch networks.operator.openshift.io cluster --type merge -p '{"spec":{"deployKubeProxy": true}}'
-```
-
-### Avoiding conflicts with kube-proxy
-
-If you cannot disable `kube-proxy` (for example, because it is managed by your Kubernetes distribution), then you _must_ change Felix configuration parameter `BPFKubeProxyIptablesCleanupEnabled` to `false`. This can be done with `kubectl` as follows:
-
-```
-kubectl patch felixconfiguration default --patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
-```
-
-If both `kube-proxy` and `BPFKubeProxyIptablesCleanupEnabled` are enabled, then `kube-proxy` will write its iptables rules and Felix will try to clean them up, resulting in iptables rules flapping between the two.
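-
-A quick way to confirm the setting was applied; this assumes, as in the patch above, that `kubectl` can address the `FelixConfiguration` resource:
-
-```bash
-# Should print "false" once the patch has taken effect
-kubectl get felixconfiguration default -o jsonpath='{.spec.bpfKubeProxyIptablesCleanupEnabled}{"\n"}'
-```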
-
-### Enable eBPF mode
-
-**The next step depends on whether you installed {{prodname}} using the operator, or a manifest:**
-
-
-
-
-To enable eBPF mode, change the `spec.calicoNetwork.linuxDataplane` parameter in the operator's `Installation` resource to `"BPF"`.
-
-```bash
-kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxDataplane":"BPF"}}}'
-```
-
-:::note
-
-The operator rolls out the change with a rolling update, which means that some nodes will be in eBPF mode
-before others. This can disrupt the flow of traffic through node ports. We plan to improve this in an upcoming release
-by having the operator do the update in two phases.
-
-:::
-
-
-
-
-If you installed {{prodname}} using a manifest, change Felix configuration parameter `BPFEnabled` to `true`. This can be done with `calicoctl`, as follows:
-
-```
-calicoctl patch felixconfiguration default --patch='{"spec": {"bpfEnabled": true}}'
-```
-
-
-
-
-
-When enabling eBPF mode, preexisting connections continue to use the non-BPF datapath; such connections should
-not be disrupted, but they do not benefit from eBPF mode’s advantages.
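-
-A couple of optional checks that the new dataplane setting is in place; use whichever matches your install method above (the manifest check assumes `calicoctl` is configured for your datastore):
-
-```bash
-# Operator installs: the Installation resource should now request the BPF dataplane
-kubectl get installation.operator.tigera.io default -o jsonpath='{.spec.calicoNetwork.linuxDataplane}{"\n"}'
-
-# Manifest installs: the Felix setting patched above should read true
-calicoctl get felixconfiguration default -o yaml | grep bpfEnabled
-```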
-
-### Try out DSR mode
-
-Direct return mode skips a hop through the network for traffic to services (such as node ports) from outside the cluster. This reduces latency and CPU overhead but it requires the underlying network to allow nodes to send traffic with each other's IPs. In AWS, this requires all your nodes to be in the same subnet and for the source/dest check to be disabled.
-
-DSR mode is disabled by default; to enable it, set the `BPFExternalServiceMode` Felix configuration parameter to `"DSR"`. This can be done with `calicoctl`:
-
-```
-calicoctl patch felixconfiguration default --patch='{"spec": {"bpfExternalServiceMode": "DSR"}}'
-```
-
-To switch back to tunneled mode, set the configuration parameter to `"Tunnel"`:
-
-```
-calicoctl patch felixconfiguration default --patch='{"spec": {"bpfExternalServiceMode": "Tunnel"}}'
-```
-
-Switching external traffic mode can disrupt in-progress connections.
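-
-A quick way to see which mode is currently configured (an empty result means the field is unset and the default `Tunnel` behaviour applies):
-
-```bash
-calicoctl get felixconfiguration default -o yaml | grep bpfExternalServiceMode
-```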
-
-### Reversing the process
-
-To revert to standard Linux networking:
-
-1. (Depending on whether you installed Calico with the operator or with a manifest) reverse the changes to the operator's `Installation` or the `FelixConfiguration` resource:
-
-
-
-
- ```bash
- kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxDataplane":"Iptables"}}}'
- ```
-
-
-
-
- ```
- calicoctl patch felixconfiguration default --patch='{"spec": {"bpfEnabled": false}}'
- ```
-
-
-
-
-1. If you disabled `kube-proxy`, re-enable it (for example, by removing the node selector added above).
-
- ```
- kubectl patch ds -n kube-system kube-proxy --type merge -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": null}}}}}'
- ```
-
-1. Since disabling eBPF mode is disruptive to existing connections, monitor existing workloads to make sure they re-establish any connections that were disrupted by the switch.
diff --git a/calico_versioned_docs/version-3.25/operations/ebpf/index.mdx b/calico_versioned_docs/version-3.25/operations/ebpf/index.mdx
deleted file mode 100644
index 4350e69323..0000000000
--- a/calico_versioned_docs/version-3.25/operations/ebpf/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Documentation for eBPF dataplane mode, including how to enable eBPF dataplane mode.
-hide_table_of_contents: true
----
-
-# eBPF dataplane mode
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/operations/ebpf/install.mdx b/calico_versioned_docs/version-3.25/operations/ebpf/install.mdx
deleted file mode 100644
index d6a76aacda..0000000000
--- a/calico_versioned_docs/version-3.25/operations/ebpf/install.mdx
+++ /dev/null
@@ -1,426 +0,0 @@
----
-description: Install Calico in eBPF mode.
----
-
-# Install in eBPF mode
-
-import EbpfValue from '@site/calico_versioned_docs/version-3.25/_includes/content/_ebpf-value.mdx';
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Install the eBPF dataplane during the initial installation of {{prodname}}.
-
-## Value
-
-
-
-## Concepts
-
-### eBPF
-
-eBPF (or "extended Berkeley Packet Filter") is a technology that allows safe mini programs to be attached to various
-low-level hooks in the Linux kernel. eBPF has a wide variety of uses, including networking, security, and tracing.
-You’ll see a lot of non-networking projects leveraging eBPF, but for {{prodname}} our focus is on networking,
-and in particular, pushing the networking capabilities of the latest Linux kernels to the limit.
-
-## Before you begin
-
-### Supported
-
-- x86-64
-
-- Distributions:
-
- - Generic or kubeadm
- - kOps
- - OpenShift
- - EKS
- - AKS
-
-- Linux distribution/kernel:
-
- - Ubuntu 20.04.
- - Red Hat v8.2 with Linux kernel v4.18.0-193 or above (Red Hat have backported the required features to that build).
- - Another [supported distribution](../../getting-started/kubernetes/requirements.mdx) with Linux kernel v5.3 or above.
-
-- An underlying network fabric that allows VXLAN traffic between hosts. In eBPF mode, VXLAN is used to forward Kubernetes NodePort traffic.
-
-### Not supported
-
-- Other processor architectures.
-
-- Distributions:
-
- - GKE. This is because of an incompatibility with the GKE CNI plugin.
-
- - RKE: eBPF mode cannot be enabled at install time because RKE doesn't provide
- a stable address for the API server. However, by following [these instructions](enabling-ebpf.mdx),
- it can be enabled as a post-install step.
-
-- Clusters with some eBPF nodes and some standard dataplane and/or Windows nodes.
-- IPv6.
-- Host endpoint `doNotTrack` policy (other policy types are supported).
-- Floating IPs.
-- SCTP (either for policy or services).
-- `Log` action in policy rules.
-- Tagged VLAN devices.
-
-### Performance
-
-For best pod-to-pod performance, we recommend using an underlying network that doesn't require Calico to use an overlay. For example:
-
-- A cluster within a single AWS subnet.
-- A cluster using a compatible cloud provider's CNI (such as the AWS VPC CNI plugin).
-- An on-prem cluster with BGP peering configured.
-
-If you must use an overlay, we recommend that you use VXLAN, not IPIP. VXLAN has better performance than IPIP in
-eBPF mode due to various kernel optimisations.
-
-## How to
-
-To install in eBPF mode, we recommend using the Tigera Operator to install {{prodname}}, so these instructions
-use the operator. Installing {{prodname}} normally consists of the following stages, which are covered by the
-main installation guides:
-
-- Create a cluster suitable to run {{prodname}}.
-- Install the Tigera Operator (possibly via a Helm chart), and the associated Custom Resource Definitions.
-- Apply a set of Custom Resources to tell the operator what to install.
-- Wait for the operator to provision all the associated resources and report back via its status resource.
-
-Installing directly in eBPF mode is very similar; this guide explains the differences:
-
-- [Create a cluster](#create-a-suitable-cluster) suitable to run {{prodname}} **with the added requirement that the nodes must use a recent
- enough kernel**.
-- [**Create a config map with the "real" address of the API server.**](#create-kubernetes-service-endpoint-config-map) This allows the operator to install {{prodname}}
- with a direct connection to the API server so that it can take over from `kube-proxy`.
-- [Install the Tigera Operator](#install-the-tigera-operator) (possibly via a Helm chart), and the associated Custom Resource Definitions.
-- **[Download and tweak the installation Custom Resource](#tweak-and-apply-installation-custom-resources) to tell the operator to use eBPF mode.**
-- [Apply a set of Custom Resources](#tweak-and-apply-installation-custom-resources) to tell the operator what to install.
-- [Wait for the operator to provision all the associated resources and report back via its status resource](#monitor-the-progress-of-the-installation).
-- [Disable kube-proxy or avoid conflicts.](#disable-kube-proxy-or-avoid-conflicts)
-
-These steps are explained in more detail below.
-
-### Create a suitable cluster
-
-The basic requirement for eBPF mode is to have a recent-enough kernel (see [above](#supported)).
-
-Select the appropriate tab below for distribution-specific instructions:
-
-
-
-
-`kubeadm` supports a number of base OSes; as long as the base OS chosen (such as Ubuntu 20.04) meets the kernel
-requirements, `kubeadm`-provisioned clusters are supported.
-
-Since `kube-proxy` is not required in eBPF mode, you may wish to disable `kube-proxy` at install time. With `kubeadm`
-you can do that by passing the `--skip-phases=addon/kube-proxy` flag to `kubeadm init`:
-
-```
-kubeadm init --skip-phases=addon/kube-proxy
-```
-
-
-
-
-`kops` supports a number of base OSes; as long as the base OS chosen (such as Ubuntu 20.04 or RHEL 8.2) meets the kernel
-requirements, `kops`-provisioned clusters are supported.
-
-Since `kube-proxy` is not required in eBPF mode, you may wish to disable `kube-proxy` at install time. With `kops` you
-can do that by setting the following in your `kops` configuration:
-
-```yaml
-kubeProxy:
- enabled: false
-```
-
-
-
-
-OpenShift supports a number of base OSes; as long as the base OS chosen has a recent enough kernel, OpenShift clusters are
-fully supported. Since Red Hat have backported the eBPF features required by {{prodname}} the Red Hat kernel
-version required is lower than the mainline: v4.18.0-193 or above.
-
-
-
-
-Azure Kubernetes Service (AKS) supports a number of base OSes. The most recent Ubuntu 18.04 image has a recent enough
-kernel to use with eBPF mode.
-
-AKS does not support disabling `kube-proxy` so it's necessary to tell {{prodname}} not to try to clean up
-`kube-proxy`'s iptables rules at a later stage.
-
-
-
-
-Amazon's Elastic Kubernetes Service (EKS) supports a number of base OSes for nodes. At the time of writing, the
-default kernel used by Amazon Linux is recent enough to run eBPF mode, as is the Bottlerocket kernel. The Ubuntu
-18.04 image did not have a recent-enough kernel (but that may have changed by the time you read this).
-
-
-
-
-### Create kubernetes-service-endpoint config map
-
-In eBPF mode, {{prodname}} takes over from `kube-proxy`. This means that, like `kube-proxy`, it needs to be able
-to reach the API server _directly_ rather than by using the API server's `ClusterIP`. To tell {{prodname}} how
-to reach the API server we create a `ConfigMap` with the API server's "real" address. In this guide we do that before
-installing the Tigera Operator. That means that the operator itself can also use the direct connection and hence
-it doesn't require `kube-proxy` to be running.
-
-The tabs below explain how to find the "real" address of the API server for a range of distributions.
-**Note:** In all cases it's important that the address used is stable even if your API server is restarted or
-scaled up/down. If you have multiple API servers, with DNS or other load balancing in front, it's important to use
-the address of the load balancer. This prevents {{prodname}} from being disconnected if the API server's IP changes.
-
-
-
-
-If you created a cluster manually (for example by using `kubeadm`) then the right address to use depends on whether you
-opted for a high-availability cluster with multiple API servers or a simple one-node API server.
-
-- If you opted to set up a high availability cluster then you should use the address of the load balancer that you
- used in front of your API servers. As noted in the Kubernetes documentation, a load balancer is required for a
- HA set-up but the precise type of load balancer is not specified.
-- If you opted for a single control plane node then you can use the address of the control plane node itself. However,
- it's important that you use a _stable_ address for that node such as a dedicated DNS record, or a static IP address.
- If you use a dynamic IP address (such as an EC2 private IP) then the address may change when the node is restarted
- causing {{prodname}} to lose connectivity to the API server.
-
-
-
-
-`kops` typically sets up a load balancer of some sort in front of the API server. You should use
-the FQDN and port of the API load balancer: `api.internal.<clustername>`.
-
-
-
-
-OpenShift requires various DNS records to be created for the cluster; one of these is exactly what we need:
-`api.<cluster_name>.<base_domain>` should point to the API server or to the load balancer in front of the
-API server. Use that (filling in the `<cluster_name>` and `<base_domain>` as appropriate for your cluster) for the
-`KUBERNETES_SERVICE_HOST` below. OpenShift uses 6443 for the `KUBERNETES_SERVICE_PORT`.
-
-
-
-
-For AKS clusters, you should use the FQDN of your API server. This can be found by running the following command:
-
-```
-kubectl cluster-info
-```
-
-which should give output similar to the following:
-
-```
-Kubernetes master is running at https://mycalicocl-calicodemorg-03a087-36558dbb.hcp.canadaeast.azmk8s.io:443
-```
-
-In this example, you would use `mycalicocl-calicodemorg-03a087-36558dbb.hcp.canadaeast.azmk8s.io` for
-`KUBERNETES_SERVICE_HOST` and `443` for `KUBERNETES_SERVICE_PORT` when creating the config map.
-
-
-
-
-For an EKS cluster, it's important to use the domain name of the EKS-provided load balancer that is in front of the API
-server. This can be found by running the following command:
-
-```
-kubectl cluster-info
-```
-
-which should give output similar to the following:
-
-```
-Kubernetes master is running at https://60F939227672BC3D5A1B3EC9744B2B21.gr7.us-west-2.eks.amazonaws.com
-...
-```
-
-In this example, you would use `60F939227672BC3D5A1B3EC9744B2B21.gr7.us-west-2.eks.amazonaws.com` for
-`KUBERNETES_SERVICE_HOST` and `443` for `KUBERNETES_SERVICE_PORT` when creating the config map.
-
-
-
-
-Create the following config map in the `tigera-operator` namespace using the host and port determined above:
-
-```bash
-kubectl apply -f - <<EOF
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  name: kubernetes-services-endpoint
-  namespace: tigera-operator
-data:
-  KUBERNETES_SERVICE_HOST: "<API server host>"
-  KUBERNETES_SERVICE_PORT: "<API server port>"
-EOF
-```
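-
-A quick check that the `ConfigMap` landed where the operator expects it:
-
-```bash
-kubectl get configmap kubernetes-services-endpoint -n tigera-operator -o yaml
-```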
-
-:::tip
-
-If you forget to create the config map before installing the operator you can create it afterwards and
-then wait 60 seconds (for the config map to propagate) before restarting the operator:
-
-```
-kubectl delete pod -n tigera-operator -l k8s-app=tigera-operator
-```
-
-:::
-
-### Install the Tigera Operator
-
-Follow the steps in the main install guide for your platform to install the Tigera Operator (and possibly the
-Prometheus Operator). However, **stop** before applying the `custom-resources.yaml`; we'll customise that file
-to enable eBPF mode in the next step...
-
-### Tweak and apply installation Custom Resources
-
-When the main install guide tells you to apply the `custom-resources.yaml`, typically by running `kubectl create` with
-the URL of the file directly, you should instead download the file, so that you can edit it:
-
-```bash
- curl -o custom-resources.yaml {{manifestsUrl}}/manifests/custom-resources.yaml
-```
-
-Edit the file in your editor of choice and find the `Installation` resource, which should be at the top of the file.
-To enable eBPF mode, we need to add a new `calicoNetwork` section inside the `spec` of the Installation resource,
-including the `linuxDataplane` field. For EKS Bottlerocket OS only, you should also add the `flexVolumePath` setting
-as shown below.
-
- For example:
-
- ```yaml
- # This section includes base {{prodname}} installation configuration.
- apiVersion: operator.tigera.io/v1
- kind: Installation
- metadata:
-   name: default
- spec:
-   # Added calicoNetwork section with linuxDataplane field
-   calicoNetwork:
-     linuxDataplane: BPF
-
-   # EKS with Bottlerocket as node image only:
-   # flexVolumePath: /var/lib/kubelet/plugins
-
-   # Install Calico Open Source
-   variant: Calico
-
-   # ... remainder of the Installation resource varies by platform ...
-
- ---
- # This section configures the Calico API server.
- apiVersion: operator.tigera.io/v1
- kind: APIServer
- metadata:
-   name: default
- spec: {}
- ```
-
-Then apply the edited file:
-
-```bash
-kubectl create -f custom-resources.yaml
-```
-
-:::tip
-
-If you already created the custom resources, you can switch your cluster over to eBPF mode by updating the
-installation resource. The operator will automatically roll out the change.
-
-```bash
-kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxDataplane":"BPF", "hostPorts":null}}}'
-```
-
-:::
-
-### Monitor the progress of the installation
-
-You can monitor progress of the installation with the following command:
-
-```bash
-watch kubectl get tigerastatus
-```
-
-### Disable `kube-proxy` (or avoid conflicts)
-
-In eBPF mode, to avoid conflicts with `kube-proxy` it's necessary to either disable `kube-proxy` or to configure
-{{prodname}} not to clean up `kube-proxy`'s iptables rules. If you didn't disable `kube-proxy` when starting
-your cluster then follow the steps below to avoid conflicts:
-
-
-
-
-For a cluster that runs `kube-proxy` in a `DaemonSet` (such as a `kubeadm`-created cluster), you can disable
-`kube-proxy`, reversibly, by adding a node selector to `kube-proxy`'s `DaemonSet` that matches no nodes, for example:
-
-```
-kubectl patch ds -n kube-system kube-proxy -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}'
-```
-
-Then, should you want to start `kube-proxy` again, you can simply remove the node selector.
-
-
-
-
-`kops` allows `kube-proxy` to be disabled by setting
-
-```yaml
-kubeProxy:
- enabled: false
-```
-
-in its configuration. You will need to do `kops update cluster` to roll out the change.
-
-
-
-
-In OpenShift, you can disable `kube-proxy` as follows:
-
-```
-kubectl patch networks.operator.openshift.io cluster --type merge -p '{"spec":{"deployKubeProxy": false}}'
-```
-
-If you need to re-enable it later:
-
-```
-kubectl patch networks.operator.openshift.io cluster --type merge -p '{"spec":{"deployKubeProxy": true}}'
-```
-
-
-
-
-AKS with Azure CNI does not allow `kube-proxy` to be disabled; `kube-proxy` is deployed by the add-on manager, which will reconcile
-away any manual changes made to its configuration. To ensure `kube-proxy` and {{prodname}} don't fight, set
-the Felix configuration parameter `bpfKubeProxyIptablesCleanupEnabled` to false. This can be done with
-`kubectl` as follows:
-
-```
-kubectl patch felixconfiguration default --type merge --patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
-```
-
-
-
-
-In EKS, you can disable `kube-proxy`, reversibly, by adding a node selector that doesn't match any nodes to
-`kube-proxy`'s `DaemonSet`, for example:
-
-```
-kubectl patch ds -n kube-system kube-proxy -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}'
-```
-
-Then, should you want to start `kube-proxy` again, you can simply remove the node selector.
-
-
-
-
-## Next steps
-
-**Recommended**
-
-- [Learn more about eBPF](use-cases-ebpf.mdx)
diff --git a/calico_versioned_docs/version-3.25/operations/ebpf/troubleshoot-ebpf.mdx b/calico_versioned_docs/version-3.25/operations/ebpf/troubleshoot-ebpf.mdx
deleted file mode 100644
index c7991eef45..0000000000
--- a/calico_versioned_docs/version-3.25/operations/ebpf/troubleshoot-ebpf.mdx
+++ /dev/null
@@ -1,247 +0,0 @@
----
-description: How to troubleshoot when running in eBPF mode.
----
-
-# Troubleshoot eBPF mode
-
-This document gives some general troubleshooting guidance for the eBPF dataplane.
-
-To understand basic concepts, we recommend the following video by Tigera Engineers: [Opening the Black Box: Understanding and troubleshooting Calico's eBPF Data Plane](https://www.youtube.com/watch?v=Mh43sNBu208).
-
-## Troubleshoot access to services
-
-If pods or hosts within your cluster have trouble accessing services, check the following:
-
-- Either {{prodname}}'s eBPF mode or `kube-proxy` must be active on a host for services to function. If you
- disabled `kube-proxy` when enabling eBPF mode, verify that eBPF mode is actually functioning. If {{prodname}}
- detects that the kernel is not supported, it will fall back to standard dataplane mode (which does not support
- services).
-
- To verify that eBPF mode is correctly enabled, examine the log for a `{{noderunning}}` container; if
- eBPF mode is not supported it will log an `ERROR` log that says
-
- ```bash
- BPF dataplane mode enabled but not supported by the kernel. Disabling BPF mode.
- ```
-
- If BPF mode is correctly enabled, you should see an `INFO` log that says
-
- ```bash
- BPF enabled, starting BPF endpoint manager and map manager.
- ```
-
-- In eBPF mode, external client access to services (typically NodePorts) is implemented using VXLAN encapsulation.
- If NodePorts time out when the backing pod is on another node, check your underlying network fabric allows
- VXLAN traffic between the nodes. VXLAN is a UDP protocol; by default it uses port 4789.
-- In DSR mode, {{prodname}} requires that the underlying network fabric allows one node to respond on behalf of
- another.
-
- - In AWS, to allow this, the Source/Dest check must be disabled on the node's NIC. However, note that DSR only
- works within AWS; it is not compatible with external traffic through a load balancer. This is because the load
- balancer is expecting the traffic to return from the same host.
-
- - In GCP, the "Allow forwarding" option must be enabled. As with AWS, traffic through a load balancer does not
- work correctly with DSR because the load balancer is not consulted on the return path from the backing node.
-
-## The `calico-bpf` tool
-
-Since BPF maps contain binary data, the {{prodname}} team wrote a tool to examine {{prodname}}'s BPF maps.
-The tool is embedded in the {{nodecontainer}} container image. To run the tool:
-
-- Find the name of the {{nodecontainer}} Pod on the host of interest using
-
- ```bash
- kubectl get pod -o wide -n calico-system
- ```
-
- for example, `calico-node-abcdef`
-
-- Run the tool as follows:
-
- ```bash
- kubectl exec -n calico-system calico-node-abcdef -- calico-node -bpf ...
- ```
-
- For example, to show the tool's help:
-
- ```bash
- kubectl exec -n calico-system calico-node-abcdef -- calico-node -bpf help
-
- Usage:
- calico-bpf [command]
-
- Available Commands:
- arp Manipulates arp
- connect-time Manipulates connect-time load balancing programs
- conntrack Manipulates connection tracking
- counters Show and reset counters
- help Help about any command
- ipsets Manipulates ipsets
- nat Manipulates network address translation (nat)
- routes Manipulates routes
- version Prints the version and exits
-
- Flags:
- --config string config file (default is $HOME/.calico-bpf.yaml)
- -h, --help help for calico-bpf
- --log-level string Set log level (default "warn")
- -t, --toggle Help message for toggle
- ```
-
- (Since the tool is embedded in the main `calico-node` binary the `--help` option is not available, but running
- `calico-node -bpf help` does work.)
-
- To dump the BPF conntrack table:
-
- ```
- kubectl exec -n calico-system calico-node-abcdef -- calico-node -bpf conntrack dump
- ...
- ```
-
-  Also, it is possible to fetch various counters, like packets dropped by a policy or different errors, from the BPF dataplane using the same tool.
- For example, to dump the BPF counters of `eth0` interface:
-
- ```
- kubectl exec -n calico-system calico-node-abcdef -- calico-node -bpf counters dump --iface=eth0
- +----------+--------------------------------+---------+--------+-----+
- | CATEGORY | TYPE | INGRESS | EGRESS | XDP |
- +----------+--------------------------------+---------+--------+-----+
- | Accepted | by another program | 0 | 0 | 0 |
- | | by failsafe | 0 | 2 | 23 |
- | | by policy | 1 | 0 | 0 |
- | Dropped | by policy | 0 | 0 | 0 |
- | | failed decapsulation | 0 | 0 | 0 |
- | | failed encapsulation | 0 | 0 | 0 |
- | | incorrect checksum | 0 | 0 | 0 |
- | | malformed IP packets | 0 | 0 | 0 |
- | | packets with unknown route | 0 | 0 | 0 |
- | | packets with unknown source | 0 | 0 | 0 |
- | | packets with unsupported IP | 0 | 0 | 0 |
- | | options | | | |
- | | too short packets | 0 | 0 | 0 |
- | Total | packets | 27 | 124 | 41 |
- +----------+--------------------------------+---------+--------+-----+
- dumped eth0 counters.
- ```
-
-## Check if a program is dropping packets
-
-To check if an eBPF program is dropping packets, you can use either the `calico-bpf` or `tc` command-line tool. For example, if you
-are worried that the eBPF program attached to `eth0` is dropping packets, you can use `calico-bpf` to fetch BPF counters as described
-in the previous section and look for one of the `Dropped` counters or you can run the following command:
-
-```
-tc -s qdisc show dev eth0
-```
-
-The output should look like the following; find the `clsact` qdisc, which is the attachment point for eBPF programs.
-The `-s` option to `tc` causes `tc` to display the count of dropped packets, which amounts to the count of packets
-dropped by the eBPF programs.
-
-```
-...
-qdisc clsact 0: dev eth0 root refcnt 2
- sent 1340 bytes 10 pkt (dropped 10, overlimits 0 requeues 0)
- backlog 0b 0p requeues 0
-...
-```
-
-## Debug high CPU usage
-
-If you notice `{{noderunning}}` using high CPU:
-
-- Check if `kube-proxy` is still running. If `kube-proxy` is still running, you must either disable `kube-proxy` or
- ensure that the Felix configuration setting `bpfKubeProxyIptablesCleanupEnabled` is set to `false`. If the setting
- is set to `true` (its default), then Felix will attempt to remove `kube-proxy`'s iptables rules. If `kube-proxy` is
- still running, it will fight with `Felix`.
-- If your cluster is very large, or your workload involves significant service churn, you can increase the interval
-  at which Felix updates the services dataplane by increasing the `bpfKubeProxyMinSyncPeriod` setting (see the sketch
-  after this list). The default is 1 second. Increasing the value has the trade-off that service updates will happen more slowly.
-- {{prodname}} supports endpoint slices, similarly to `kube-proxy`. If your Kubernetes cluster supports endpoint
- slices and they are enabled, then you can enable endpoint slice support in {{prodname}} with the
- `bpfKubeProxyEndpointSlicesEnabled` configuration flag.
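-
-For the service-churn case above, a sketch of raising `bpfKubeProxyMinSyncPeriod`; the `5s` value is illustrative, not a recommendation:
-
-```bash
-kubectl patch felixconfiguration default --type merge --patch='{"spec":{"bpfKubeProxyMinSyncPeriod":"5s"}}'
-```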
-
-## eBPF program debug logs
-
-{{prodname}}'s eBPF programs contain optional detailed debug logging. Although the logs can be very verbose (because
-the programs will log every packet), they can be invaluable to diagnose eBPF program issues. To enable the log, set the
-`bpfLogLevel` Felix configuration setting to `Debug` (a patch sketch follows the caution below).
-
-:::caution
-
-Enabling logs in this way has a significant impact on eBPF program performance.
-
-:::
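-
-A sketch of toggling the setting with `kubectl`; this assumes `kubectl` can address the `FelixConfiguration` resource, as in the examples above:
-
-```bash
-# Enable verbose per-packet logging
-kubectl patch felixconfiguration default --type merge --patch='{"spec":{"bpfLogLevel":"Debug"}}'
-
-# Revert to the default by clearing the field when finished
-kubectl patch felixconfiguration default --type merge --patch='{"spec":{"bpfLogLevel":null}}'
-```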
-
-The logs are emitted to the kernel trace buffer, and they can be examined using the following command:
-
-```
-tc exec bpf debug
-```
-
-Logs have the following format:
-
-```
- <...>-84582 [000] .Ns1 6851.690474: 0: ens192---E: Final result=ALLOW (-1). Program execution time: 7366ns
-```
-
-The parts of the log are explained below:
-
-- `<...>-84582` gives an indication about what program (or kernel process) was handling the
- packet. For packets that are being sent, this is usually the name and PID of the program that is actually sending
- the packet. For packets that are received, it is typically a kernel process, or an unrelated program that happens to
- trigger the processing.
-- `6851.690474` is the log timestamp.
-
-- `ens192---E` is the {{prodname}} log tag. For programs attached to interfaces, the first part contains the
- first few characters of the interface name. The suffix is either `-I` or `-E` indicating "Ingress" or "Egress".
- "Ingress" and "Egress" have the same meaning as for policy:
-
- - A workload ingress program is executed on the path from the host network namespace to the workload.
- - A workload egress program is executed on the workload to host path.
- - A host endpoint ingress program is executed on the path from external node to the host.
- - A host endpoint egress program is executed on the path from host to external host.
-
-- `Final result=ALLOW (-1). Program execution time: 7366ns` is the message. In this case, logging the final result of
- the program. Note that the timestamp is massively distorted by the time spent logging.
-
-## Poor performance
-
-A number of problems can reduce the performance of the eBPF dataplane.
-
-- Verify that you are using the best networking mode for your cluster. If possible, avoid using an overlay network;
- a routed network with no overlay is considerably faster. If you must use one of {{prodname}}'s overlay modes,
- use VXLAN, not IPIP. IPIP performs poorly in eBPF mode due to kernel limitations.
-- If you are not using an overlay, verify that the [Felix configuration parameters](../../reference/felix/configuration.mdx)
-  `ipInIpEnabled` and `vxlanEnabled` are set to `false`. Those parameters control whether Felix configures itself to
-  allow IPIP or VXLAN, even if you have no IP pools that use an overlay. The parameters also disable certain eBPF
-  mode optimisations for compatibility with IPIP and VXLAN. (A patch sketch appears after this list.)
-
- To examine the configuration:
-
- ```bash
- kubectl get felixconfiguration -o yaml
- ```
-
- ```yaml noValidation
- apiVersion: projectcalico.org/v3
- items:
- - apiVersion: projectcalico.org/v3
- kind: FelixConfiguration
- metadata:
- creationTimestamp: "2020-10-05T13:41:20Z"
- name: default
- resourceVersion: "767873"
- uid: 8df8d751-7449-4b19-a4f9-e33a3d6ccbc0
- spec:
- ...
- ipipEnabled: false
- ...
- vxlanEnabled: false
- kind: FelixConfigurationList
- metadata:
- resourceVersion: "803999"
- ```
-
-- If you are running your cluster in a cloud such as AWS, then your cloud provider may limit the bandwidth between
-  nodes in your cluster. For example, most AWS nodes are limited to 5 Gbit/s per connection.
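-
-For the overlay settings discussed above, a sketch of a patch using the field names shown in the example output (only appropriate if you genuinely have no IP pools using IPIP or VXLAN):
-
-```bash
-kubectl patch felixconfiguration default --type merge --patch='{"spec":{"ipipEnabled":false,"vxlanEnabled":false}}'
-```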
diff --git a/calico_versioned_docs/version-3.25/operations/ebpf/use-cases-ebpf.mdx b/calico_versioned_docs/version-3.25/operations/ebpf/use-cases-ebpf.mdx
deleted file mode 100644
index db8385260d..0000000000
--- a/calico_versioned_docs/version-3.25/operations/ebpf/use-cases-ebpf.mdx
+++ /dev/null
@@ -1,95 +0,0 @@
----
-description: Learn when to use eBPF, and when not to.
----
-
-# eBPF use cases
-
-## Big picture
-
-Learn when to use eBPF (and when not to).
-
-## What is eBPF?
-
-eBPF is a feature available in Linux kernels that allows you to run a virtual machine inside the kernel. This virtual machine allows you to safely load programs into the kernel, to customize its operation. Why is this important?
-
-In the past, making changes to the kernel was difficult: there were APIs you could call to get data, but you couldn’t influence what was inside the kernel or execute code. Instead, you had to submit a patch to the Linux community and wait for it to be approved. With eBPF, you can load a program into the kernel and instruct the kernel to execute your program if, for example, a certain packet is seen or another event occurs.
-
-With eBPF, the kernel and its behavior become highly customizable, instead of being fixed. This can be extremely beneficial, when used under the right circumstances.
-
-## {{prodname}} and eBPF
-
-{{prodname}} offers an eBPF data plane as an alternative to our standard Linux dataplane (which is iptables based). While the standard data plane focuses on compatibility by working together with kube-proxy and your own iptables rules, the eBPF data plane focuses on performance, latency, and improving user experience with features that aren’t possible with the standard data plane.
-
-But {{prodname}} doesn’t only support standard Linux and eBPF; it currently supports a total of three data planes, including Windows HNS, and has plans to add support for even more data planes in the near future. {{prodname}} enables you, the user, to decide what works best for what you want to do.
-
-If you enable eBPF within {{prodname}} but have existing iptables flows, we won’t touch them. Because maybe you want to use connect-time load balancing, but leave iptables as is. With {{prodname}}, it’s not an all-or-nothing deal—we allow you to easily load and unload our eBPF data plane to suit your needs, which means you can quickly try it out before making a decision. {{prodname}} offers you the ability to leverage eBPF as needed, as an additional control to build your Kubernetes cluster security.
-
-## Use cases
-
-There are several use cases for eBPF, including traffic control, creating network policy, and connect-time load balancing.
-
-### Traffic control
-
-Without eBPF, packets use the standard Linux networking path on their way to a final destination. If a packet shows up at point A, and you know that the packet needs to go to point B, you can optimize the network path in the Linux kernel by sending it straight to point B. With eBPF, you can leverage additional context to make these changes in the kernel so that packets bypass complex routing and simply arrive at their final destination.
-
-This is especially relevant in a Kubernetes container environment, where you have numerous networks. (In addition to the host network stack, each container has its own mini network stack.) When traffic comes in, it is usually routed to a container stack and must travel a complex path as it makes its way there from the host stack. This routing can be bypassed using eBPF.
-
-### Creating network policy
-
-When creating network policy, there are two instances where eBPF can be used:
-
-- **eXpress Data Path (XDP)** – As a raw packet buffer enters the system, eBPF gives you an efficient way to examine that buffer and make quick decisions about what to do with it.
-
-- **Network policy** – eBPF allows you to efficiently examine a packet and apply network policy, both for pods and hosts.
-
-### Connect-time load balancing
-
-When load balancing service connections in Kubernetes, a port needs to talk to a service and therefore network address translation (NAT) must occur. A packet is sent to a virtual IP, and that virtual IP translates it to the destination IP of the pod backing the service; the pod then responds to the virtual IP and the return packet is translated back to the source.
-
-With eBPF, you can avoid this packet translation by using an eBPF program that you’ve loaded into the kernel and load balancing at the source of the connection. All NAT overhead from service connections is removed because destination network address translation (DNAT) does not need to take place on the packet processing path.
-
-## The price of performance
-
-So is eBPF more efficient than standard Linux iptables? The short answer: it depends.
-
-If you were to micro-benchmark how iptables works when applying network policies with a large number of IP addresses (i.e. ipsets), iptables in many cases is better than eBPF. But if you want to do something in the Linux kernel where you need to alter the packet flow in the kernel, eBPF would be the better choice. Standard Linux iptables is a complex system and certainly has its limitations, but at the same time it provides options to manipulate traffic; if you know how to program iptables rules, you can achieve a lot. eBPF allows you to load your own programs into the kernel to influence behavior that can be customized to your needs, so it is more flexible than iptables as it is not limited to one set of rules.
-
-Something else to consider is that, while eBPF allows you to run a program, add logic, redirect flows, and bypass processing—which is a definite win—it’s a virtual machine and as such must be translated to bytecode. By comparison, the Linux kernel’s iptables is already compiled to code.
-
-As you can see, comparing eBPF to iptables is not a straight apples-to-apples comparison. What we need to assess is performance, and the two key factors to look at here are latency (speed) and expense. If eBPF is very fast but takes up 80% of your resources, then it’s like a Lamborghini—an expensive, fast car. And if that works for you, great (maybe you really like expensive, fast cars). Just keep in mind that more CPU usage means more money spent with your cloud providers. So while a Lamborghini might be faster than a lot of other cars, it might not be the best use of money if you need to comply with speed limits on your daily commute.
-
-## When to use eBPF (and when not to)
-
-With eBPF, you get performance—but it comes at a cost. You need to find a balance between the two by figuring out the price of performance, and deciding if it’s acceptable to you from an eBPF perspective.
-
-Let’s look at some specific cases where it would make sense to use eBPF, and some where it would not.
-
-### When not to use eBPF
-
-### ✘ Packet-by-packet processing
-
-Using eBPF to perform CPU intensive or packet-by-packet processing, such as decryption and re-encryption for encrypted flows, would not be efficient because you would need to build a structure and do a lookup for every packet, which is expensive.
-
-### When to use eBPF
-
-### ✔ XDP
-
-eBPF provides an efficient way to examine raw packet buffers as they enter the system, allowing you to make quick decisions about what to do with them.
-
-### ✔ Connect-time load balancing
-
-With eBPF, you can load balance at the source using a program you’ve loaded into the kernel, instead of using a virtual IP. Since DNAT does not need to take place on the packet processing path, all NAT overhead from service connections is removed.
-
-### ✔ Building a service mesh control plane
-
-Service mesh relies on proxies like Envoy. A lot of thought has gone into designing this process over the years. The main reason for doing it this way is that, in many cases, it is not viable to do inline processing for application protocols like HTTP at the high speeds seen inside a cluster. Therefore, you should think of using eBPF to route traffic to a proxy like Envoy in an efficient way, rather than using it to replace the proxy itself. However, you do need to turn off connect-time load balancing (CTLB) so sidecars can see the service addresses. Given you are already taking a performance hit by the extra hop to the sidecar, not using CTLB performance optimization to avoid NAT overhead is likely not a big deal.
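-
-For reference, in {{prodname}}'s eBPF data plane, CTLB is controlled through Felix configuration. A minimal sketch of turning it off, assuming the `bpfConnectTimeLoadBalancingEnabled` FelixConfiguration parameter applies to your version:
-
-```bash
-kubectl patch felixconfiguration default --type merge --patch '{"spec":{"bpfConnectTimeLoadBalancingEnabled": false}}'
-```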
-
-## Summary
-
-Is eBPF a replacement for iptables? Not exactly. It’s hard to imagine everything working as efficiently with eBPF as it does with iptables. For now, the two co-exist and it’s up to the user to weigh the price-performance tradeoff and decide which feature to use when, given their specific needs.
-
-We believe the right solution is to leverage eBPF, along with existing mechanisms in the Linux kernel, to achieve your desired outcome. That’s why {{prodname}} offers support for multiple data planes, including standard Linux, Windows HNS, and Linux eBPF. Since we have established that both eBPF and iptables are useful, the only logical thing to do in our opinion is to support both. {{prodname}} gives you the choice so you can choose the best tool for the job.
-
-## Additional resources
-
-To learn more and see performance metrics from our test environment, see the blog, [Introducing the eBPF dataplane](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/).
diff --git a/calico_versioned_docs/version-3.25/operations/fips.mdx b/calico_versioned_docs/version-3.25/operations/fips.mdx
deleted file mode 100644
index 63d79718f3..0000000000
--- a/calico_versioned_docs/version-3.25/operations/fips.mdx
+++ /dev/null
@@ -1,60 +0,0 @@
----
-description: Run Calico using FIPS validated cryptography.
----
-
-# FIPS mode
-
-## Big picture
-
-Run {{prodname}} in [FIPS 140-2](https://csrc.nist.gov/publications/detail/fips/140/2/final) compliant mode.
-
-## Value
-
-When running in FIPS compliant mode, {{prodname}} uses FIPS-approved cryptographic algorithms and NIST-validated cryptographic modules.
-
-## Concepts
-
-The Federal Information Processing Standards (FIPS) are publicly announced standards developed by the National Institute of Standards and Technology for use in computer systems by government agencies and government contractors. {{prodname}} FIPS mode is enabled during installation by:
-
-- Switching the cryptographic modules for the golang-based applications to use the FIPS-140-2 validated [Tigera Cryptographic Module](https://csrc.nist.gov/projects/cryptographic-module-validation-program/certificate/4095)
-- Configuring TLS servers and other cryptographic functions to use FIPS 140-2 approved cryptographic algorithms
-
-## Before you begin
-
-**Required**
-
-- A Kubernetes distribution and cluster that run in FIPS mode
-- The hosts must run Linux x86_64 distributions
-- {{prodname}} contains programs that run directly on the host and dynamically link C libraries. For this reason, host systems must provide the following libraries:
- - ld-linux-x86-64.so.2
- - libpthread.so.0
- - libc.so.6
-
-**Unsupported**
-
-- The following features are disabled and are not allowed to be used:
- - Application Layer API
- - BGP password
- - WireGuard
-- Switching FIPS mode off and then on again is not supported because this can break hashes and other cryptographic settings.
-
-## How To
-
-To install {{prodname}} in FIPS mode, follow these steps.
-
-Follow [the installation steps](../getting-started/kubernetes/index.mdx) for your platform.
-
- - In the step for installing custom resources, edit `custom-resources.yaml` and enable FIPS mode in the installation spec.
-
- ```yaml
- apiVersion: operator.tigera.io/v1
- kind: Installation
- metadata:
- name: default
- spec:
- fipsMode: Enabled
- ```
-
- - For more information on configuration options available in this manifest, see [the installation reference](../reference/installation/api.mdx).
-
-After you apply the YAML, FIPS mode is fully operational.
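-
-For example, a sketch of applying the edited manifest and confirming the setting (the file name follows the standard install flow above):
-
-```bash
-kubectl create -f custom-resources.yaml
-# Confirm the operator has picked up the setting.
-kubectl get installation default -o jsonpath='{.spec.fipsMode}'
-```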
diff --git a/calico_versioned_docs/version-3.25/operations/image-options/alternate-registry.mdx b/calico_versioned_docs/version-3.25/operations/image-options/alternate-registry.mdx
deleted file mode 100644
index 0321850ebd..0000000000
--- a/calico_versioned_docs/version-3.25/operations/image-options/alternate-registry.mdx
+++ /dev/null
@@ -1,86 +0,0 @@
----
-description: Configure Calico to pull images from a public or private registry.
----
-
-# Configure use of your image registry
-
-import MaintenanceImageOptionsAlternateRegistry from '../../_includes/components/MaintenanceImageOptionsAlternateRegistry';
-
-## Big picture
-
-Configure {{prodname}} to pull images from a registry (public or private).
-
-## Value
-
-In many deployments, installing {{prodname}} in clusters from third-party private repos is not an option. {{prodname}} offers these public and private registry options, which can be used in any combination:
-
-- **Install from a registry** for use cases like air-gapped clusters, or clusters with bandwidth or security constraints
-- **Install from an image path in a registry** if you have pulled {{prodname}} images to a sub path in your registry
-- [Install images by registry digest](imageset.mdx)
-
-## Concepts
-
-A **container image registry** (often known as a **registry**), is a service where you can push, pull, and store container images. In Kubernetes, a registry is considered _private_ if it is not publicly available.
-
-A **private registry** requires an **image pull secret**. An **image pull secret** provides authentication for an image registry; this allows you to control access to certain images or give access to higher pull rate limits (like with DockerHub).
-
-An **image path** is a directory in a registry that contains images required to install {{prodname}}.
-
-## Before you begin
-
-**Required**
-
-- {{prodname}} is managed by the operator
-- Configure pull access to your registry
-- If you are using a private registry that requires user authentication, ensure that an image pull secret is configured for your registry in the tigera-operator namespace. Set the environment variable, `REGISTRY_PULL_SECRET` to the secret name. For help, see `imagePullSecrets` and `registry` fields, in [Installation resource reference](../../reference/installation/api.mdx).
-
-## How to
-
-The following examples show the path format for a public or private registry, `$REGISTRY/`. If you are using an image path, substitute the format: `$REGISTRY/$IMAGE_PATH/`.
-
-### Push {{prodname}} images to your registry
-
-To install images from your registry, you must first pull the images from Tigera's registry, retag them with your own registry, and then push the newly-tagged images to your own registry.
-
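-For example, a minimal sketch for a single image (the image name and tag are illustrative; repeat the pull, tag, and push for every {{prodname}} image in your release):
-
-```bash
-docker pull quay.io/calico/node:v3.25.0
-docker tag quay.io/calico/node:v3.25.0 $REGISTRY/calico/node:v3.25.0
-docker push $REGISTRY/calico/node:v3.25.0
-```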
-
-
-### Run the operator using images from your registry
-
-Before applying `tigera-operator.yaml`, modify registry references to use your custom registry:
-
-**For OpenShift**
-
-Download all manifests first, then modify the following:
-
-```bash
-sed -ie "s?quay.io?$REGISTRY?g" manifests/02-tigera-operator.yaml
-```
-
-**For all other platforms**
-
-```bash
-sed -ie "s?quay.io?$REGISTRY?g" tigera-operator.yaml
-```
-
-Next, if you are implementing user authentication to access a private registry, add the image pull secret for your `registry` to the secret `tigera-pull-secret`.
-
-```bash
-sed -ie "/serviceAccountName: tigera-operator/a \ imagePullSecrets:\n\ - name: $REGISTRY_PULL_SECRET" tigera-operator.yaml
-```
-
-### Configure the operator to use images
-
-Set the `spec.registry` field of your Installation resource to the name of your custom registry. For example:
-
-```yaml
-apiVersion: operator.tigera.io/v1
-kind: Installation
-metadata:
- name: default
-spec:
- variant: Calico
- imagePullSecrets:
- - name: tigera-pull-secret
- // highlight-next-line
- registry: myregistry.com
-```
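-
-If {{prodname}} is already installed, a sketch of the equivalent change applied directly to the existing Installation resource (the registry name is illustrative):
-
-```bash
-kubectl patch installation default --type merge -p '{"spec":{"registry":"myregistry.com"}}'
-```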
diff --git a/calico_versioned_docs/version-3.25/operations/image-options/imageset.mdx b/calico_versioned_docs/version-3.25/operations/image-options/imageset.mdx
deleted file mode 100644
index df60821f52..0000000000
--- a/calico_versioned_docs/version-3.25/operations/image-options/imageset.mdx
+++ /dev/null
@@ -1,244 +0,0 @@
----
-description: Specify the digests for operator to use to deploy images.
----
-
-# Install images by registry digest
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Deploy images by container registry digest for operator installations.
-
-## Value
-
-Some deployments have strict security requirements that require deploying images by immutable digest instead of tags.
-Once released, official {{prodname}} images and tags will not be modified. However using an immutable digest allows specific images to be reviewed
-and verified by security teams.
-
-## Concepts
-
-### Container registry
-
-A container registry provides access to container images referenced by tags or digest.
-
-### Image tag
-
-Versioned container images are typically referenced by a tag which is appended to an image reference. Example: `<registry>/<image>:<tag>`. Container image tags are typically not expected to be changed or updated, but this is not required or enforced by most image registries, meaning it is possible to push new code to the same image tag.
-
-### Image digest
-
-Container images, when added to a container registry, have a unique hash created that can be used to pull a specific version of an image that cannot be changed or updated.
-
-## Before you begin
-
-**Required**
-
-- {{prodname}} managed by the operator
-- Docker client is configured to pull images from the container registries where images are stored
-- Kubernetes permissions to apply an ImageSet manifest to your cluster
-
-## How to
-
-1. [Update the operator deployment with a digest](#update-the-operator-deployment-with-a-digest)
-2. [Create an ImageSet](#create-an-imageset)
-3. [Verify the correct ImageSet is being used](#verify-the-correct-imageset-is-being-used)
-
-**Other tasks**
-
-- [Create new ImageSet when upgrading or downgrading](#create-new-imageset-when-upgrading-or-downgrading)
-
-**Troubleshooting**
-
-- [Why does the Installation resource status not include my ImageSet?](#why-does-the-installation-resource-status-not-include-my-imageset)
-- [How can I tell if there is a problem with my ImageSet?](#how-can-i-tell-if-there-is-a-problem-with-my-imageset)
-
-### Update the operator deployment with a digest
-
-Before applying `tigera-operator.yaml`, modify the operator deployment to use the operator image digest.
-
-Use commands like the following to get the image digest (adjust the image in the commands if you are using a different operator image):
-
-```bash
-docker pull {{tigeraOperator.registry}}/{{tigeraOperator.image}}:{{tigeraOperator.version}}
-docker inspect {{tigeraOperator.registry}}/{{tigeraOperator.image}}:{{tigeraOperator.version}} -f '{{range .RepoDigests}}{{printf "%s\n" .}}{{end}}'
-```
-
-If multiple digests are returned, select the one matching the registry you are using.
-
-Update the tigera-operator deployment so that the operator image is referenced by digest rather than by tag (replace `<operator-digest>` with the digest you obtained above):
-
-```bash
-sed -ie "s|\(image: .*/operator\):.*|\1@<operator-digest>|" tigera-operator.yaml
-```
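-
-You can check that the substitution worked, for example:
-
-```bash
-grep 'image:' tigera-operator.yaml
-```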
-
-### Create an ImageSet
-
-Create an [ImageSet](../../reference/installation/api.mdx#operator.tigera.io/v1.ImageSet) manifest file named `imageset.yaml` like the following:
-
-```yaml
-apiVersion: operator.tigera.io/v1
-kind: ImageSet
-metadata:
- name: calico-{{releaseTitle}}
-spec:
- images:
- - image: 'calico/apiserver'
- digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
- - image: 'calico/cni'
- digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
- - image: 'calico/kube-controllers'
- digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
- - image: 'calico/node'
- digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
- - image: 'calico/typha'
- digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
- - image: 'calico/pod2daemon-flexvol'
- digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
- - image: 'calico/windows-upgrade'
- digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
- - image: 'tigera/operator'
- digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
- - image: 'tigera/key-cert-provisioner'
- digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
-```
-
-You can create an ImageSet manifest manually or by script.
-
-
-
-
-1. Copy the above example into a file called `imageset.yaml` and edit that file in the steps below.
-1. Set the name for your ImageSet to `calico-<version>` (Example: `calico-{{releaseTitle}}`).
- The version can be obtained by running:
- ```
- docker run {{tigeraOperator.registry}}/{{tigeraOperator.image}}:{{tigeraOperator.version}} --version
- ```
-1. Add the correct digest for each image. If you are using a private registry, ensure you pull the image from the private registry and use the digest associated with the private registry.
-
- 1. If using the default images, get a list of them by running:
-
- ```
- docker run {{tigeraOperator.registry}}/{{tigeraOperator.image}}:{{tigeraOperator.version}} --print-images=list
- ```
-
- :::note
-
- If you are not using the default image registries or paths, you must create your own list of images (and the above command will not apply).
-
- :::
- :::note
-
- The list will contain images for an Enterprise deployment but they do not need to be added to the ImageSet.
-
- :::
-
- 1. Get the needed digests by using the images returned from the above step in the following command:
- ```
-      docker pull <repo/image:tag> && docker inspect <repo/image:tag> -f '{{range .RepoDigests}}{{printf "%s\n" .}}{{end}}'
- ```
- 1. Use the digest from the image that matches the repo/image you will use.
-      If you are using a private registry or have specified an [imagePath](../../reference/installation/api.mdx#operator.tigera.io/v1.Installation)
-      you will still use the default `<owner>/<image>` in the `image` field. For example, if your node image is coming from
-      `example.com/registry/imagepath/node`, you will still use `calico/node` in the `image` field of the ImageSet.
- :::note
-
- For image `quay.io/tigera/operator@sha256:d111db2f94546415a30eff868cb946d47e183faa804bd2e9a758fd9a8a4eaff1` copy everything after `@` and add it as the digest for the `tigera/operator` image.
-
- :::
-
-
-
-
-Copy the following script into a file, make it executable, and run the script. The script creates an `imageset.yaml` file in the directory it was run.
-:::note
-
-This script will only work if using the default registries and image paths.
-
-:::
-
-```
-#!/bin/bash -e
-
-images=(calico/apiserver calico/cni calico/kube-controllers calico/node calico/typha calico/pod2daemon-flexvol calico/windows-upgrade tigera/key-cert-provisioner tigera/operator)
-
-OPERATOR_IMAGE={{tigeraOperator.registry}}/{{tigeraOperator.image}}:{{tigeraOperator.version}}
-echo "Pulling $OPERATOR_IMAGE"
-echo
-docker pull $OPERATOR_IMAGE -q >/dev/null
-versions=$(docker run $OPERATOR_IMAGE --version)
-# Extract the Calico version (assumes a "Calico: vX.Y.Z" line in the --version output).
-ver=$(echo -e "$versions" | grep 'Calico:' | awk '{print $2}')
-
-imagelist=($(docker run $OPERATOR_IMAGE --print-images=list))
-
-# Write the ImageSet header; the ImageSet must be named calico-$ver.
-cat > ./imageset.yaml <<EOF
-apiVersion: operator.tigera.io/v1
-kind: ImageSet
-metadata:
-  name: calico-$ver
-spec:
-  images:
-EOF
-
-# For each image reported by the operator, pull it, look up its digest
-# (taking the first repo digest, which assumes the default registry), and
-# append an entry to imageset.yaml.
-for el in "${imagelist[@]}"; do
-  for image in "${images[@]}"; do
-    if [[ $el == *"/$image:"* ]]; then
-      docker pull "$el" -q >/dev/null
-      digest=$(docker inspect "$el" -f '{{range .RepoDigests}}{{printf "%s\n" .}}{{end}}' | head -n 1)
-      digest=${digest##*@}
-      echo "    - image: \"$image\"" >> ./imageset.yaml
-      echo "      digest: \"$digest\"" >> ./imageset.yaml
-    fi
-  done
-done
-```
-
-
-
-
-Apply the created `imageset.yaml` to your cluster.
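-
-For example:
-
-```bash
-kubectl apply -f ./imageset.yaml
-```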
-
-### Verify the correct ImageSet is being used
-
-1. Check tigerastatus for components that are Degraded with `kubectl get tigerastatus`.
- - If any components show Degraded, [investigate further](#how-can-i-tell-if-there-is-a-problem-with-my-imageset).
-2. When tigerastatus for all components show Available True, the ImageSet has been applied.
- ```
- NAME AVAILABLE PROGRESSING DEGRADED SINCE
- calico True False False 54s
- ```
-3. Verify that the correct ImageSet is being used. In Installation status, check that the `imageset` field is set to the ImageSet you created.
- Check the field by running the following command:
- ```
- kubectl get installation default -o yaml | grep imageSet
- ```
- You should see output similar to:
- ```
- imageSet: calico-{{releaseTitle}}
- ```
-
-## Other tasks
-
-### Create new ImageSet when upgrading or downgrading
-
-Before upgrading to a new release or downgrading, you must create a new [ImageSet](../../reference/installation/api.mdx#operator.tigera.io/v1.ImageSet)
-with updated image references and names for the new release. This must be done prior
-to upgrading the cluster so when the new manifests are applied, the appropriate ImageSet is available.
-
-## Troubleshooting
-
-### Why does the Installation Resource status not include my ImageSet?
-
-The [status.imageset](../../reference/installation/api.mdx#operator.tigera.io/v1.InstallationStatus) field of
-the Installation Resource will not be updated until the `calico` component has fully been deployed. `calico` is
-fully deployed when `kubectl get tigerastatus calico` reports Available True with Progressing and Degraded as False.
-
-### How can I tell if there is a problem with my ImageSet?
-
-If you suspect an issue with your ImageSet, check tigerastatus with `kubectl get tigerastatus`. If any components are
-degraded, you can get additional information with `kubectl get tigerastatus -o yaml`. If the digest
-provided for an image is incorrect or unable to be pulled, the tigerastatus will not directly report that information,
-but you should see information that there is an issue rolling out a Deployment, Daemonset, or Job. If you suspect
-an issue with a resource rollout due to an issue with an image, you will need to `get` or `describe` a specific pod
-to see details about the problem.
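-
-For example, a sketch of drilling down from tigerastatus to a failing pod (the pod name is illustrative; operator-based installs run {{prodname}} in the `calico-system` namespace):
-
-```bash
-kubectl get tigerastatus -o yaml
-kubectl get pods -n calico-system
-kubectl describe pod -n calico-system calico-node-abcde
-```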
diff --git a/calico_versioned_docs/version-3.25/operations/image-options/index.mdx b/calico_versioned_docs/version-3.25/operations/image-options/index.mdx
deleted file mode 100644
index b65e8c2905..0000000000
--- a/calico_versioned_docs/version-3.25/operations/image-options/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Install Calico from a public or private registry, or by digest.
-hide_table_of_contents: true
----
-
-# Install using an alternate registry
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/operations/index.mdx b/calico_versioned_docs/version-3.25/operations/index.mdx
deleted file mode 100644
index a068ff8c55..0000000000
--- a/calico_versioned_docs/version-3.25/operations/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Post-installation tasks for managing Calico including upgrading and troubleshooting.
-hide_table_of_contents: true
----
-
-# Operations
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/operations/install-apiserver.mdx b/calico_versioned_docs/version-3.25/operations/install-apiserver.mdx
deleted file mode 100644
index b035c7598d..0000000000
--- a/calico_versioned_docs/version-3.25/operations/install-apiserver.mdx
+++ /dev/null
@@ -1,194 +0,0 @@
----
-description: Install the Calico API server on an existing Calico cluster
----
-
-# Enable kubectl to manage Calico APIs
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-[ **Feature status**: GA in Calico v3.20+ ]
-
-Install the Calico API server on an existing cluster to enable management of Calico APIs using kubectl.
-
-## Value
-
-The API server provides a REST API for Calico, and allows management of `projectcalico.org/v3` APIs using kubectl without the need for calicoctl.
-
-:::note
-
-Starting in Calico v3.20.0, new operator-based installations of Calico include the API server component by default, so the instructions
-in this document are not required.
-
-:::
-
-## Before you begin
-
-- Make sure you have a cluster with Calico installed using the Kubernetes API data store. If not, you can [migrate from etcd](datastore-migration.mdx).
-
-- Upgrade to Calico v3.20+ using the appropriate [upgrade instructions](upgrading/index.mdx).
-
-- For non-operator installations, you will need a machine with `openssl` installed.
-
-## Concepts
-
-### calicoctl vs kubectl
-
-In previous releases, calicoctl has been required to manage Calico API resources in the `projectcalico.org/v3` API group. The calicoctl CLI tool provides important validation and defaulting on these APIs. The Calico API server performs
-that defaulting and validation server-side, exposing the same API semantics without a dependency on calicoctl.
-
-calicoctl is still required for the following subcommands:
-
-- [calicoctl node](../reference/calicoctl/node/index.mdx)
-- [calicoctl ipam](../reference/calicoctl/ipam/index.mdx)
-- [calicoctl convert](../reference/calicoctl/convert.mdx)
-- [calicoctl version](../reference/calicoctl/version.mdx)
-
-## How to
-
-### Install the API server
-
-Select the method below based on your installation method.
-
-
-
-
-1. Create an instance of an `operator.tigera.io/APIServer` with the following contents.
-
- ```yaml
- apiVersion: operator.tigera.io/v1
- kind: APIServer
- metadata:
- name: default
- spec: {}
- ```
-
-1. Confirm it appears as `Available` with the following command.
-
- ```
- kubectl get tigerastatus apiserver
- ```
-
- You should see the following output:
-
- ```
- NAME AVAILABLE PROGRESSING DEGRADED SINCE
- apiserver True False False 1m10s
- ```
-
-
-
-
-1. Create the following manifest, which will install the API server as a deployment in the `calico-apiserver` namespace.
-
- ```
- kubectl create -f {{manifestsUrl}}/manifests/apiserver.yaml
- ```
-
- You will notice that the API server remains in a `ContainerCreating` state, as it is waiting for credentials to be provided for authenticating the main Kubernetes API server.
-
-1. Generate a private key and CA bundle using the following openssl command. This certificate will be used by the main API server to authenticate with the Calico API server.
-
- :::note
-
- Please note in the following command `-addext` argument requires openssl 1.1.1 or above. You can check your version of openssl using `openssl version`.
-
- :::
-
- ```
- openssl req -x509 -nodes -newkey rsa:4096 -keyout apiserver.key -out apiserver.crt -days 365 -subj "/" -addext "subjectAltName = DNS:calico-api.calico-apiserver.svc"
- ```
-
-1. Provide the key and certificate to the Calico API server as a Kubernetes secret.
-
- ```
- kubectl create secret -n calico-apiserver generic calico-apiserver-certs --from-file=apiserver.key --from-file=apiserver.crt
- ```
-
-1. Configure the main API server with the CA bundle.
-
- ```
- kubectl patch apiservice v3.projectcalico.org -p \
- "{\"spec\": {\"caBundle\": \"$(kubectl get secret -n calico-apiserver calico-apiserver-certs -o go-template='{{ index .data "apiserver.crt" }}')\"}}"
- ```
-
-
-
-
-After following the above steps, you should see the API server pod become ready, and Calico API resources become available. You can check whether the APIs are available with the following command:
-
-```
-kubectl api-resources | grep '\sprojectcalico.org'
-```
-
-You should see the following output:
-
-```
-bgpconfigurations bgpconfig,bgpconfigs projectcalico.org false BGPConfiguration
-bgppeers projectcalico.org false BGPPeer
-clusterinformations clusterinfo projectcalico.org false ClusterInformation
-felixconfigurations felixconfig,felixconfigs projectcalico.org false FelixConfiguration
-globalnetworkpolicies gnp,cgnp,calicoglobalnetworkpolicies projectcalico.org false GlobalNetworkPolicy
-globalnetworksets projectcalico.org false GlobalNetworkSet
-hostendpoints hep,heps projectcalico.org false HostEndpoint
-ippools projectcalico.org false IPPool
-kubecontrollersconfigurations projectcalico.org false KubeControllersConfiguration
-networkpolicies cnp,caliconetworkpolicy,caliconetworkpolicies projectcalico.org true NetworkPolicy
-networksets netsets projectcalico.org true NetworkSet
-profiles projectcalico.org false Profile
-```
-
-:::note
-
-kubectl may continue to prefer the crd.projectcalico.org API group due to the way it caches APIs locally. You can force kubectl to update
-by removing its cache directory for your cluster. By default, the cache is located in `$HOME/.kube/cache`.
-
-:::
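-
-For example (a sketch; the path assumes the default cache location):
-
-```bash
-rm -rf $HOME/.kube/cache
-```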
-
-### Use kubectl for projectcalico.org APIs
-
-Once the API server has been installed, you can use kubectl to interact with the Calico APIs. For example, you can view and edit IP pools.
-
-```
-kubectl get ippools
-```
-
-You should see output that looks like this:
-
-```
-NAME CREATED AT
-default-ipv4-ippool 2021-03-19T16:47:12Z
-```
-
-### Uninstall the Calico API server
-
-To uninstall the API server, use the following instructions depending on your install method.
-
-
-
-
-```
- kubectl delete apiserver default
-```
-
-
-
-
-```
- kubectl delete -f {{manifestsUrl}}/manifests/apiserver.yaml
-```
-
-
-
-
-Once removed, you will need to use calicoctl to manage projectcalico.org/v3 APIs.
-
-## Next steps
-
-**Recommended tutorials**
-
-- [Secure a simple application using the Kubernetes NetworkPolicy API](../network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx)
-- [Control ingress and egress traffic using the Kubernetes NetworkPolicy API](../network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx)
-- [Run a tutorial that shows blocked and allowed connections in real time](../network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx)
diff --git a/calico_versioned_docs/version-3.25/operations/monitor/index.mdx b/calico_versioned_docs/version-3.25/operations/monitor/index.mdx
deleted file mode 100644
index 486c353c54..0000000000
--- a/calico_versioned_docs/version-3.25/operations/monitor/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Tools for scraping useful metrics
-hide_table_of_contents: true
----
-
-# Monitor
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/operations/monitor/monitor-component-metrics.mdx b/calico_versioned_docs/version-3.25/operations/monitor/monitor-component-metrics.mdx
deleted file mode 100644
index c11b109b4d..0000000000
--- a/calico_versioned_docs/version-3.25/operations/monitor/monitor-component-metrics.mdx
+++ /dev/null
@@ -1,661 +0,0 @@
----
-description: Use open source Prometheus for monitoring and alerting on Calico components.
----
-
-# Monitor Calico component metrics
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Use Prometheus configured for {{prodname}} components to get valuable metrics about the health of {{prodname}}.
-
-## Value
-
-Using the open-source Prometheus monitoring and alerting toolkit, you can view time-series metrics from {{prodname}} components in the Prometheus or Grafana interfaces.
-
-## Concepts
-
-### About Prometheus
-
-The Prometheus monitoring tool scrapes metrics from instrumented jobs and displays time series data in a visualizer (such as Grafana). For {{prodname}}, the “jobs” that Prometheus can harvest metrics from are the Felix and Typha components.
-
-### About {{prodname}} Felix, Typha, and kube-controllers components
-
-**Felix** is a daemon that runs on every machine that implements network policy. Felix is the brains of {{prodname}}. Typha is an optional set of pods that extends Felix to scale traffic between {{prodname}} nodes and the datastore. The kube-controllers pod runs a set of controllers which are responsible for a variety of control plane functions, such as resource garbage collection and synchronization with the Kubernetes API.
-
-You can configure Felix, Typha, and/or kube-controllers to provide metrics to Prometheus.
-
-## Before you begin...
-
-In this tutorial we assume that you have completed all other introductory tutorials and possess a running Kubernetes cluster with {{prodname}}. You can either use `kubectl` or `calicoctl` to perform the following steps. Depending on which tool you would like to use, make sure you have the necessary prerequisites as shown below.
-
-
-
-
-If you wish to modify {{prodname}} configurations with `kubectl` binary you need to make sure you have the {{prodname}} API server in your cluster. The API server allows you to manage resources within the `projectcalico.org/v3` api group.
-
-:::note
-
-Operator based installs include the API server by default.
-
-:::
-
-For more information about the API server please use [this link](../install-apiserver.mdx).
-
-
-
-
-You can run `calicoctl` on any host with network access to the Calico datastore as either a binary or a container to manage Calico APIs in the `projectcalico.org/v3` API group.
-
-For more information about calicoctl please use [this link](../calicoctl/install.mdx).
-
-
-
-
-## How to
-
-This tutorial will go through the necessary steps to implement basic monitoring of {{prodname}} with Prometheus.
-
-1. Configure {{prodname}} to enable the metrics reporting.
-2. Create the namespace and service account that Prometheus will need.
-3. Deploy and configure Prometheus.
-4. View the metrics in the Prometheus dashboard and create a simple graph.
-
-### 1. Configure {{prodname}} to enable metrics reporting
-
-#### **Felix configuration**
-
-Felix prometheus metrics are **disabled** by default.
-:::note
-
-A comprehensive list of configuration values can be [found at this link](../../reference/felix/configuration.mdx).
-
-:::
-
-Use the following command to enable Felix metrics.
-
-
-
-
-```bash
-kubectl patch felixconfiguration default --type merge --patch '{"spec":{"prometheusMetricsEnabled": true}}'
-```
-
-You should see an output like below:
-
-```
-felixconfiguration.projectcalico.org/default patched
-```
-
-
-
-
-```bash
-calicoctl patch felixconfiguration default --patch '{"spec":{"prometheusMetricsEnabled": true}}'
-```
-
-You should see an output like below:
-
-```
-Successfully patched 1 'FelixConfiguration' resource
-```
-
-
-
-
-#### **Creating a service to expose Felix metrics**
-
-Prometheus uses Kubernetes services to dynamically discover endpoints. Here you will create a service named `felix-metrics-svc` which Prometheus will use to discover all the Felix metrics endpoints.
-
-:::note
-
-Felix by default uses port 9091 TCP to publish its metrics.
-
-:::
-
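-A minimal sketch of such a service for an operator-based installation (apply it with `kubectl apply -f`; the `k8s-app: calico-node` selector and the headless `clusterIP: None` setting are assumptions to adapt to your cluster):
-
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: felix-metrics-svc
-  namespace: calico-system
-spec:
-  clusterIP: None
-  selector:
-    k8s-app: calico-node
-  ports:
-    - port: 9091
-      targetPort: 9091
-```
-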
-
-
-
-```bash
-kubectl apply -f - <
-
-
-```bash
-kubectl apply -f - <
-
-
-#### **Typha Configuration**
-
-
-
-
-An Operator installation of {{prodname}} automatically deploys one or more Typha instances depending on the scale of your cluster. By default metrics for these instances are disabled.
-
-Use the following command to instruct `tigera-operator` to enable Typha metrics.
-
-```bash
-kubectl patch installation default --type=merge -p '{"spec": {"typhaMetricsPort":9093}}'
-```
-
-You should see a result similar to:
-
-```bash
-installation.operator.tigera.io/default patched
-```
-
-
-
-
-:::note
-
-Typha is optional; if you don’t have Typha in your cluster, you can skip the [Typha configuration](#typha-configuration) section.
-
-:::
-
-If you are uncertain whether you have `Typha` in your cluster execute the following code:
-
-```bash
-kubectl get pods -A | grep typha
-```
-
-If your result is similar to what is shown below you are using Typha in your cluster.
-
-:::note
-
-The name suffix of pods shown below was dynamically generated. Your typha instance might have a different suffix.
-
-:::
-
-```
-kube-system calico-typha-56fccfcdc4-z27xj 1/1 Running 0 28h
-kube-system calico-typha-horizontal-autoscaler-74f77cd87c-6hx27 1/1 Running 0 28h
-```
-
-You can enable Typha metrics for Prometheus in either of [two ways](../../reference/typha/configuration.mdx).
-
-
-
-
-#### **Creating a service to expose Typha metrics**
-
-:::note
-
-Typha uses **port 9091** TCP by default to publish its metrics. However, if {{prodname}} is installed using the [Amazon yaml file](https://github.com/aws/amazon-vpc-cni-k8s/blob/b001dc6a8fff52926ed9a93ee6c4104f02d365ab/config/v1.5/calico.yaml#L535-L536), this port will be 9093, as it's set manually via the **TYPHA_PROMETHEUSMETRICSPORT** environment variable.
-
-:::
-
-
-
-
-```bash
-kubectl apply -f - <
-
-
-```bash
-kubectl apply -f - <
-
-
-#### **kube-controllers configuration**
-
-Prometheus metrics are **enabled** by default on [TCP port 9094](../../reference/resources/kubecontrollersconfig.mdx) for `calico-kube-controllers`.
-
-
-
-
-The operator automatically creates a service that exposes these metrics.
-
-You can use the following command to verify it.
-
-```bash
-kubectl get svc -n calico-system
-```
-
-You should see a result similar to:
-
-```bash
-calico-kube-controllers-metrics ClusterIP 10.43.77.57 9094/TCP 39d
-```
-
-
-
-
-#### **Creating a service to expose kube-controllers metrics**
-
-Create a service to expose `calico-kube-controllers` metrics to Prometheus.
-
-```bash
-kubectl apply -f - <
-
-
-**Optionally**, you can modify the port by changing the `KubeControllersConfiguration` resource with the following command.
-
-:::note
-
-Setting this value to zero will disable metrics in the kube-controllers pod.
-
-:::
-
-
-
-
-```bash
-kubectl patch kubecontrollersconfiguration default --type=merge --patch '{"spec":{"prometheusMetricsPort": 9095}}'
-```
-
-
-
-
-```bash
-calicoctl patch kubecontrollersconfiguration default --patch '{"spec":{"prometheusMetricsPort": 9095}}'
-```
-
-
-
-
-### 2. Cluster preparation
-
-#### **Namespace creation**
-
-A `Namespace` isolates resources in your cluster. Here you will create a Namespace called `calico-monitoring` to hold your monitoring resources.
-:::note
-
-Kubernetes namespaces guide can be [found at this link](https://kubernetes.io/docs/tasks/administer-cluster/namespaces/).
-
-:::
-
-```bash
-kubectl create -f -<
-
-
-```bash
-kubectl apply -f - <
-
-
-```bash
-kubectl apply -f - <
-
-
-#### **Create Prometheus pod**
-
-Now that you have a `serviceaccount` with permissions to gather metrics and have a valid config file for your Prometheus, it's time to create the Prometheus pod.
-
-```bash
-kubectl apply -f - <
-
-
-```bash
-kubectl delete service felix-metrics-svc -n calico-system
-kubectl delete service typha-metrics-svc -n calico-system
-```
-
-
-
-
-```
-kubectl delete service felix-metrics-svc -n kube-system
-kubectl delete service typha-metrics-svc -n kube-system
-kubectl delete service kube-controllers-metrics-svc -n kube-system
-```
-
-
-
-
-Return {{prodname}} configurations to their default state.
-
-
-
-
-```bash
-kubectl patch felixConfiguration default --type merge --patch '{"spec":{"prometheusMetricsEnabled": false}}'
-kubectl patch installation default --type=json -p '[{"op": "remove", "path":"/spec/typhaMetricsPort"}]'
-```
-
-
-
-
-```bash
-calicoctl patch felixConfiguration default --patch '{"spec":{"prometheusMetricsEnabled": false}}'
-```
-
-
-
-
-Finally, remove the namespace and RBAC permissions.
-
-```bash
-kubectl delete namespace calico-monitoring
-kubectl delete ClusterRole calico-prometheus-user
-kubectl delete clusterrolebinding calico-prometheus-user
-```
-
-## Best practices
-
-If you enable {{prodname}} metrics to Prometheus, a best practice is to use network policy to limit access to the {{prodname}} metrics endpoints. For details, see [Secure {{prodname}} Prometheus endpoints](../../network-policy/comms/secure-metrics.mdx).
-
-If you are not using Prometheus metrics, we recommend disabling the Prometheus ports entirely for more security.
-
-## Next Steps
-
-[Visualizing metrics via Grafana.](monitor-component-visual.mdx)
diff --git a/calico_versioned_docs/version-3.25/operations/monitor/monitor-component-visual.mdx b/calico_versioned_docs/version-3.25/operations/monitor/monitor-component-visual.mdx
deleted file mode 100644
index 4484417523..0000000000
--- a/calico_versioned_docs/version-3.25/operations/monitor/monitor-component-visual.mdx
+++ /dev/null
@@ -1,199 +0,0 @@
----
-description: Use open source Grafana for visualizing Calico components.
----
-
-# Visualizing metrics via Grafana
-
-## Big picture
-
-Use Grafana dashboard to view {{prodname}} component metrics.
-
-## Value
-
-Using Grafana can be beneficial by providing a means to visualize metrics through graphs that can help you quickly identify unusual activity. The following image shows some of the graphs and metrics that are available for you to leverage to achieve this goal.
-
-![](/img/calico/grafana-dashboard.png)
-
-## Concepts
-
-### About Grafana
-
-Grafana is an open source visualization and analytics tool that allows you to query, visualize, alert on, and explore metrics from a variety of data source, including Calico component metrics stored in Prometheus.
-
-### About Prometheus
-
-Prometheus is an open source monitoring tool that scrapes metrics from instrumented components and stores them as time series data which can then be visualized using tools such as Grafana.
-
-## Before you begin...
-
-In this tutorial we assume you have
-
-- a running Kubernetes cluster with {{prodname}}, calicoctl and kubectl installed
-- completed all steps in the [monitor component metrics](monitor-component-metrics.mdx) guide to set up Prometheus to gather {{prodname}} component metrics.
-
-## How to
-
-This tutorial will go through the necessary steps to create {{prodname}} metrics dashboards with Grafana.
-
-### Preparing Prometheus
-
-Here you will create a service to make your prometheus visible to Grafana.
-
-```bash
-kubectl apply -f - < 27h v1.18.0
-ip-10-0-0-12 Ready 27h v1.18.0
-
-```
-
-### Verify calico-node pods are running on every node, and are in a healthy state
-
-```bash
-kubectl get pods -n calico-system -o wide
-```
-
-```
-NAME READY STATUS RESTARTS AGE IP NODE
-calico-node-77zgj 1/1 Running 0 27h 10.0.0.10 ip-10-0-0-10
-calico-node-nz8k2 1/1 Running 0 27h 10.0.0.11 ip-10-0-0-11
-calico-node-7trv7 1/1 Running 0 27h 10.0.0.12 ip-10-0-0-12
-```
-
-### Exec into pod for further troubleshooting
-
-```bash
-kubectl run multitool --image=praqma/network-multitool
-
-kubectl exec -it multitool -- bash
-```
-
-```
-bash-5.0 ping 8.8.8.8
-PING 8.8.8.8 (8.8.8.8) 56(84) bytes of data.
-64 bytes from 8.8.8.8: icmp_seq=1 ttl=97 time=6.61 ms
-64 bytes from 8.8.8.8: icmp_seq=2 ttl=97 time=6.64 ms
-```
-
-### Collect {{prodname}} diagnostic logs
-
-```bash
-sudo calicoctl node diags
-```
-
-```
-Collecting diagnostics
-Using temp dir: /tmp/calico194224816
-Dumping netstat
-Dumping routes (IPv4)
-Dumping routes (IPv6)
-Dumping interface info (IPv4)
-Dumping interface info (IPv6)
-Dumping iptables (IPv4)
-Dumping iptables (IPv6)
-
-Diags saved to /tmp/calico194224816/diags-20201127_010117.tar.gz
-```
-
-## Kubernetes
-
-### Verify all pods are running
-
-```bash
-kubectl get pods -A
-```
-
-```
-kube-system coredns-66bff467f8-dxbtl 1/1 Running 0 27h
-kube-system coredns-66bff467f8-n95vq 1/1 Running 0 27h
-kube-system etcd-ip-10-0-0-10 1/1 Running 0 27h
-kube-system kube-apiserver-ip-10-0-0-10 1/1 Running 0 27h
-```
-
-### Verify Kubernetes API server is running
-
-```bash
-kubectl cluster-info
-```
-
-```
-Kubernetes master is running at https://10.0.0.10:6443
-KubeDNS is running at https://10.0.0.10:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
-ubuntu@master:~$ kubectl get svc
-NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
-kubernetes ClusterIP 10.49.0.1 443/TCP 2d2h
-```
-
-### Verify Kubernetes kube-dns is working
-
-```bash
-kubectl get svc
-```
-
-```
-NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
-kubernetes ClusterIP 10.49.0.1 443/TCP 2d2h
-```
-
-```bash
-kubectl exec -it multitool -- bash
-```
-
-```
-bash-5.0 curl -I -k https://kubernetes
-HTTP/2 403
-cache-control: no-cache, private
-content-type: application/json
-x-content-type-options: nosniff
-content-length: 234
-```
-
-```bash
-bash-5.0 nslookup google.com
-```
-
-```
-Server: 10.49.0.10
-Address: 10.49.0.10#53
-Non-authoritative answer:
-Name: google.com
-Address: 172.217.14.238
-Name: google.com
-Address: 2607:f8b0:400a:804::200e
-```
-
-### Verify that kubelet is running on the node with the correct flags
-
-```bash
-systemctl status kubelet
-```
-
-If there is a problem, check the journal
-
-```bash
-journalctl -u kubelet | head
-```
-
-### Check the status of other system pods
-
-Look especially at the coredns pods; if they are not getting an IP address, something is wrong with the CNI.
-
-```bash
-kubectl get pod -n kube-system -o wide
-```
-
-But if other pods fail, it is likely a different issue. Perform normal Kubernetes troubleshooting. For example:
-
-```bash
-kubectl describe pod kube-scheduler-ip-10-0-1-20.eu-west-1.compute.internal -n kube-system | tail -15
-```
-
-## Calico components
-
-### View Calico CNI configuration on a node
-
-```bash
-cat /etc/cni/net.d/10-calico.conflist
-```
-
-### Verify calicoctl matches cluster
-
-The cluster version and type must match the calicoctl version.
-
-```bash
-calicoctl version
-```
-
-For syntax:
-
-```bash
-calicoctl version --help
-```
-
-### Check tigera operator status
-
-```bash
-kubectl get tigerastatus
-```
-
-```
-NAME AVAILABLE PROGRESSING DEGRADED SINCE
-calico True False False 27h
-```
-
-### Check if operator pod is running
-
-```bash
-kubectl get pod -n tigera-operator
-```
-
-### View calico nodes
-
-```bash
-kubectl get pod -n calico-system -o wide
-```
-
-### View {{prodname}} installation parameters
-
-```bash
-kubectl get installation -o yaml
-```
-
-```yaml
-apiVersion: v1
-items:
-- apiVersion: operator.tigera.io/v1
- kind: Installation
-  metadata:
-    name: default
- spec:
- calicoNetwork:
- bgp: Enabled
- hostPorts: Enabled
- ipPools:
- - blockSize: 26
- cidr: 10.48.0.0/16
- encapsulation: VXLANCrossSubnet
- natOutgoing: Enabled
- nodeSelector: all()
- multiInterfaceMode: None
- nodeAddressAutodetectionV4:
- firstFound: true
- cni:
- ipam:
- type: Calico
- type: Calico
-```
-
-### Run commands across multiple nodes
-
-```bash
-export THE_COMMAND_TO_RUN=date && for calinode in `kubectl get pod -o wide -n calico-system | grep calico-node | awk '{print $1}'`; do echo $calinode; echo "-----"; kubectl exec -n calico-system $calinode -- $THE_COMMAND_TO_RUN; printf "\n"; done
-```
-
-```bash
-calico-node-87lpx
------
-Defaulted container "calico-node" out of: calico-node, flexvol-driver (init), install-cni (init)
-Thu Apr 28 13:48:06 UTC 2022
-
-calico-node-x5fmm
------
-Defaulted container "calico-node" out of: calico-node, flexvol-driver (init), install-cni (init)
-Thu Apr 28 13:48:07 UTC 2022
-
-```
-
-### View pod info
-
-```bash
-kubectl describe pods <pod_name> -n <namespace>
-```
-
-```bash
-kubectl describe pods busybox -n default
-```
-
-```
-Events:
- Type Reason Age From Message
- ---- ------ ---- ---- -------
- Normal Scheduled 21s default-scheduler Successfully assigned default/busybox to ip-10-0-0-11
- Normal Pulling 20s kubelet, ip-10-0-0-11 Pulling image "busybox"
- Normal Pulled 19s kubelet, ip-10-0-0-11 Successfully pulled image "busybox"
- Normal Created 19s kubelet, ip-10-0-0-11 Created container busybox
- Normal Started 18s kubelet, ip-10-0-0-11 Started container busybox
-```
-
-### View logs of a pod
-
-```bash
-kubectl logs <pod_name> -n <namespace>
-```
-
-```bash
-kubectl logs busybox -n default
-```
-
-### View kubelet logs
-
-```bash
-journalctl -u kubelet
-```
-
-## Routing
-
-### Verify routing table on the node
-
-```bash
-ip route
-```
-
-```
-default via 10.0.0.1 dev eth0 proto dhcp src 10.0.0.10 metric 100
-10.0.0.0/24 dev eth0 proto kernel scope link src 10.0.0.10
-10.0.0.1 dev eth0 proto dhcp scope link src 10.0.0.10 metric 100
-10.48.66.128/26 via 10.0.0.12 dev eth0 proto 80 onlink
-10.48.231.0/26 via 10.0.0.11 dev eth0 proto 80 onlink
-172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown
-```
-
-### Verify BGP peer status
-
-```bash
-sudo calicoctl node status
-```
-
-```
-Calico process is running.
-
-IPv4 BGP status
-+--------------+-------------------+-------+------------+-------------+
-| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
-+--------------+-------------------+-------+------------+-------------+
-| 10.0.0.12 | node-to-node mesh | up | 2020-11-25 | Established |
-| 10.0.0.11 | node-to-node mesh | up | 2020-11-25 | Established |
-+--------------+-------------------+-------+------------+-------------+
-```
-
-### Verify overlay configuration
-
-```bash
-kubectl get ippools default-ipv4-ippool -o yaml
-```
-
-```yaml
-
----
-spec:
- ipipMode: Always
- vxlanMode: Never
-```
-
-### Verify bgp learned routes
-
-```bash
-ip r | grep bird
-```
-
-```
-192.168.66.128/26 via 10.0.0.12 dev tunl0 proto bird onlink
-192.168.180.192/26 via 10.0.0.10 dev tunl0 proto bird onlink
-blackhole 192.168.231.0/26 proto bird
-```
-
-### Verify BIRD routing table
-
-**Note**: The BIRD routing table gets pushed to node routing tables.
-
-```bash
-kubectl exec -it -n calico-system calico-node-8cfc8 -- /bin/bash
-```
-
-```
-[root@ip-10-0-0-11 /] birdcl
-BIRD v0.3.3+birdv1.6.8 ready.
-bird> show route
-0.0.0.0/0 via 10.0.0.1 on eth0 [kernel1 18:13:33] * (10)
-10.0.0.0/24 dev eth0 [direct1 18:13:32] * (240)
-10.0.0.1/32 dev eth0 [kernel1 18:13:33] * (10)
-10.48.231.2/32 dev calieb874a8ef0b [kernel1 18:13:41] * (10)
-10.48.231.1/32 dev caliaeaa173109d [kernel1 18:13:35] * (10)
-10.48.231.0/26 blackhole [static1 18:13:32] * (200)
-10.48.231.0/32 dev vxlan.calico [direct1 18:13:32] * (240)
-10.48.180.192/26 via 10.0.0.10 on eth0 [Mesh_10_0_0_10 18:13:34] * (100/0) [i]
- via 10.0.0.10 on eth0 [Mesh_10_0_0_12 18:13:41 from 10.0.0.12] (100/0) [i]
- via 10.0.0.10 on eth0 [kernel1 18:13:33] (10)
-10.48.66.128/26 via 10.0.0.12 on eth0 [Mesh_10_0_0_10 18:13:36 from 10.0.0.10] * (100/0) [i]
- via 10.0.0.12 on eth0 [Mesh_10_0_0_12 18:13:41] (100/0) [i]
- via 10.0.0.12 on eth0 [kernel1 18:13:36] (10)
-```
-
-### Capture traffic
-
-For example,
-
-```bash
-sudo tcpdump -i calicofac0017c3 icmp
-```
-
-## Network policy
-
-### Verify existing Kubernetes network policies
-
-```bash
-kubectl get networkpolicy --all-namespaces
-```
-
-```
-NAMESPACE NAME POD-SELECTOR AGE
-client allow-ui 20m
-client default-deny 4h51m
-stars allow-ui 20m
-stars backend-policy role=backend 20m
-stars default-deny 4h51m
-```
-
-### Verify existing {{prodname}} network policies
-
-```bash
-calicoctl get networkpolicy --all-namespaces -o wide
-```
-
-```
-NAMESPACE NAME ORDER SELECTOR
-calico-demo allow-busybox 50 app == 'porter'
-client knp.default.allow-ui 1000 projectcalico.org/orchestrator == 'k8s'
-client knp.default.default-deny 1000 projectcalico.org/orchestrator == 'k8s'
-stars knp.default.allow-ui 1000 projectcalico.org/orchestrator == 'k8s'
-stars knp.default.backend-policy 1000 projectcalico.org/orchestrator == 'k8s'
-stars knp.default.default-deny 1000 projectcalico.org/orchestrator == 'k8s'
-```
-
-### Verify existing {{prodname}} global network policies
-
-```bash
-calicoctl get globalnetworkpolicy -o wide
-```
-
-```
-NAME ORDER SELECTOR
-default-app-policy 100
-egress-lockdown 600
-default-node-policy 100 has(kubernetes.io/hostname)
-nodeport-policy 100 has(kubernetes.io/hostname)
-```
-
-### Check policy selectors and order
-
-For example,
-
-```bash
-calicoctl get np -n yaobank -o wide
-```
-
-If the selectors should match, check the endpoint IP and the node where it is running. For example,
-
-```bash
-kubectl get pod -l app=customer -n yaobank
-```
diff --git a/calico_versioned_docs/version-3.25/operations/troubleshoot/component-logs.mdx b/calico_versioned_docs/version-3.25/operations/troubleshoot/component-logs.mdx
deleted file mode 100644
index be96f90ab4..0000000000
--- a/calico_versioned_docs/version-3.25/operations/troubleshoot/component-logs.mdx
+++ /dev/null
@@ -1,121 +0,0 @@
----
-description: Where to find component logs.
----
-
-# Component logs
-
-## Big picture
-
-View and collect {{prodname}} logs.
-
-## Value
-
-It is useful to view logs to monitor component health and diagnose potential issues.
-
-## Concepts
-
-### {{nodecontainer}} logs
-
-The {{nodecontainer}} logs contain log output from the following subcomponents:
-
-- Per-node startup logic
-- BGP agent
-- Felix policy agent
-
-Components log either to disk within `/var/log/calico`, to stdout, or both.
-
-For components that log to disk, files are automatically rotated, and by default 10 files of 1MB each are kept. The current log file is called `current` and rotated files have @ followed by a timestamp detailing when the file was rotated in [tai64n](http://cr.yp.to/libtai/tai64.html#tai64n) format.
-
-## How to
-
-## View logs for a {{nodecontainer}} instance
-
-You can view logs for a node using the `kubectl logs` command. This will show logs for all subcomponents of the given node.
-
-For example:
-
-```
-kubectl logs -n calico-system calico-node-xxxx
-```
-
-## View logs from the CNI plugin
-
-CNI plugin logs are not available through kubectl and are instead logged both to the host machine's disk as well as stderr.
-
-By default, these logs can be found at `/var/log/calico/cni/` on the host machine.
-
-The container runtime may also display the CNI plugin logs within its own log output.
-
-## Configure BGP agent log level
-
-BGP log level is configured via the [BGPConfiguration](../../reference/resources/bgpconfig.mdx) API, and can be one of the following values:
-
-- `Debug`: enables "debug all" logging for BIRD. The most verbose logging level.
-- `Info`: enables logging for protocol state changes. This is the default log level.
-- `Warning`: disables BIRD logging, emits warning level configuration logs only.
-- `Error`: disables BIRD logging, emits error level configuration logs only.
-- `Fatal`: disables BIRD logging, emits fatal level configuration logs only.
-
-To modify the BGP log level:
-
-1. Get the current bgpconfig settings.
-
- ```bash
- kubectl get bgpconfig -o yaml > bgp.yaml
- ```
-
-1. Modify logSeverityScreen to the desired value.
-
- ```bash
- vim bgp.yaml
- ```
-
- :::tip
-
- For a global change set the name to "default".
- For a node-specific change set the name to the node name prefixed with "node.", e.g., "node.node-1".
-
- :::
-
-1. Replace the current bgpconfig settings.
-
- ```bash
- kubectl replace -f bgp.yaml
- ```
-
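-As an alternative to the get/edit/replace cycle above, a sketch of the same change as a one-line patch (assuming the `default` BGPConfiguration resource already exists):
-
-```bash
-kubectl patch bgpconfiguration default --type merge --patch '{"spec":{"logSeverityScreen": "Info"}}'
-```
-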
-## Configure Felix log level
-
-Felix log level is configured via the [FelixConfiguration](../../reference/resources/felixconfig.mdx) API, and can be one of the following values:
-
-- `Debug`: The most verbose logging level - for development and debugging.
-- `Info`: The default log level. Shows important state changes.
-- `Warning`: Shows warnings only.
-- `Error`: Shows errors only.
-- `Fatal`: Shows fatal errors only.
-
-To modify Felix's log level:
-
-1. Get the current felixconfig settings.
-
- ```bash
- kubectl get felixconfig -o yaml > felixconfig.yaml
- ```
-
-1. Modify logSeverityScreen to the desired value.
-
- ```bash
- vim felixconfig.yaml
- ```
-
- :::tip
-
- For a global change set the name to "default".
-   For a node-specific change set the name to the node name prefixed with "node.", e.g., "node.node-1".
-
- :::
-
-1. Replace the current felixconfig settings.
-
- ```bash
- kubectl replace -f felixconfig.yaml
- ```
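-
-Equivalently, a sketch of the same change as a one-line patch (assuming the `default` FelixConfiguration resource exists):
-
-```bash
-kubectl patch felixconfiguration default --type merge --patch '{"spec":{"logSeverityScreen": "Debug"}}'
-```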
diff --git a/calico_versioned_docs/version-3.25/operations/troubleshoot/index.mdx b/calico_versioned_docs/version-3.25/operations/troubleshoot/index.mdx
deleted file mode 100644
index fb1ae8c95e..0000000000
--- a/calico_versioned_docs/version-3.25/operations/troubleshoot/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Troubleshooting, logs, and diagnostics.
-hide_table_of_contents: true
----
-
-# Troubleshoot
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/operations/troubleshoot/troubleshooting.mdx b/calico_versioned_docs/version-3.25/operations/troubleshoot/troubleshooting.mdx
deleted file mode 100644
index bdc9c39295..0000000000
--- a/calico_versioned_docs/version-3.25/operations/troubleshoot/troubleshooting.mdx
+++ /dev/null
@@ -1,101 +0,0 @@
----
-description: View logs and diagnostics, common issues, and where to report issues in github.
----
-
-# Troubleshooting and diagnostics
-
-## Logs and diagnostics
-
-To collect diagnostics, use the `calicoctl` command line tool with superuser privileges. For example:
-
-```bash
-sudo calicoctl node diags
-```
-
-To view logs, use the following command:
-
-`kubectl logs -n calico-system <pod_name>`
-
-To view debug logs on some Calico components, set the `LogSeverityScreen` through the associated environment variable.
-
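-For example, one way to enable debug logging for Felix is to set its environment variable on the calico-node DaemonSet (a sketch only; on operator-managed clusters the operator may revert direct edits, in which case set `logSeverityScreen` in the FelixConfiguration resource instead):
-
-```bash
-# Set Felix's log level via its environment variable
-kubectl set env daemonset/calico-node -n calico-system FELIX_LOGSEVERITYSCREEN=Debug
-```
-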
-To report a problem, please [open an issue in GitHub](https://github.com/projectcalico/calico/issues).
-
-### Check BGP peer status
-
-If you have connectivity between containers on the same host, and between
-containers and the Internet, but not between containers on different hosts, it
-probably indicates a problem in your BGP configuration.
-
-Look at `calicoctl node status` on each host. It should include output like this:
-
-```
-Calico process is running.
-
-IPv4 BGP status
-+--------------+-------------------+-------+----------+-------------+
-| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
-+--------------+-------------------+-------+----------+-------------+
-| 172.17.8.102 | node-to-node mesh | up | 23:30:04 | Established |
-+--------------+-------------------+-------+----------+-------------+
-
-IPv6 BGP status
-No IPv6 peers found.
-```
-
-Alternatively, you can create a [`CalicoNodeStatus` resource](../../reference/resources/caliconodestatus.mdx) to get BGP session status for the node.
-
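-For example, a minimal sketch of such a resource (the name, node, and update period shown are illustrative):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: CalicoNodeStatus
-metadata:
-  name: my-node-status
-spec:
-  classes:
-    - Agent
-    - BGP
-    - Routes
-  node: my-node-1
-  updatePeriodSeconds: 10
-```
-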
-If you do not see this, please check the following.
-
-- Make sure there is IP connectivity between your hosts.
-
-- Make sure your network allows the requisite BGP traffic on TCP port 179.
-
-### Configure NetworkManager
-
-Configure [NetworkManager](https://help.ubuntu.com/community/NetworkManager) before
-attempting to use {{prodname}} networking.
-
-NetworkManager manipulates the routing table for interfaces in the default network
-namespace where {{prodname}} veth pairs are anchored for connections to containers.
-This can interfere with the {{prodname}} agent's ability to route correctly.
-
-Create the following configuration file at `/etc/NetworkManager/conf.d/calico.conf` to prevent
-NetworkManager from interfering with the interfaces:
-
-```conf
-[keyfile]
-unmanaged-devices=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico;interface-name:vxlan-v6.calico;interface-name:wireguard.cali;interface-name:wg-v6.cali
-```
-
-## Errors when running sudo calicoctl
-
-If you use `sudo` for commands, remember that your environment variables are not transferred to the `sudo` environment. You must run `sudo` with the `-E` flag to include your environment variables:
-
-```bash
-sudo -E calicoctl node diags
-```
-
-or you can set environment variables for `sudo` commands like this:
-
-```bash
-sudo ETCD_ENDPOINTS=http://172.25.0.1:2379 calicoctl node run
-```
-
-Also be aware that connection information can be specified as a config file rather than using environment variables. See [Installing calicoctl](../calicoctl/install.mdx)
-for details.
-
-## Error: {{nodecontainer}} is not ready: BIRD is not ready: BGP not established with 10.0.0.1
-
-In most cases, this "unready" status error in Kubernetes means that a particular peer is unreachable in the cluster. Check that BGP connectivity between the two peers is allowed in the environment.
-
-This error can also occur if inactive Node resources are configured for node-to-node mesh. To fix this, [decommission the stale nodes](../decommissioning-a-node.mdx).
-
-## Linux conntrack table is out of space
-
-A common problem on Linux systems is running out of space in the conntrack table, which can cause poor iptables performance. This can
-happen if you run a lot of workloads on a given host, or if your workloads create a lot of TCP connections or bidirectional UDP streams. To avoid this problem, we recommend increasing the conntrack table size using the following commands:
-
-```bash
-sysctl -w net.netfilter.nf_conntrack_max=1000000
-echo "net.netfilter.nf_conntrack_max=1000000" >> /etc/sysctl.conf
-```
diff --git a/calico_versioned_docs/version-3.25/operations/troubleshoot/vpp.mdx b/calico_versioned_docs/version-3.25/operations/troubleshoot/vpp.mdx
deleted file mode 100644
index cd643816fe..0000000000
--- a/calico_versioned_docs/version-3.25/operations/troubleshoot/vpp.mdx
+++ /dev/null
@@ -1,424 +0,0 @@
----
-description: Specific troubleshooting steps for the VPP dataplane.
----
-
-# VPP dataplane troubleshooting
-
-## Big picture
-
-This page describes the troubleshooting steps for the [VPP dataplane](../../getting-started/kubernetes/vpp/getting-started.mdx). If you did not configure the VPP dataplane, this page is not for you!
-
-If you're encountering issues with the VPP dataplane, feel free to reach out to us either on the [#vpp channel](https://calicousers.slack.com/archives/C017220EXU1) on the {{prodname}} slack, or by opening a new issue in [GitHub](https://github.com/projectcalico/vpp-dataplane/issues).
-
-## Installing calivppctl
-
-`calivppctl` is a helper bash script shipped alongside the VPP container images. It can be installed on your host with any of the following methods, and helps collect logs and debug a running cluster with the VPP dataplane installed.
-
-- With curl
-
-```bash
-curl https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/test/scripts/vppdev.sh \
- | tee /usr/bin/calivppctl
-chmod +x /usr/bin/calivppctl
-```
-
-- With docker (and a cluster with calico-vpp running)
-
-```bash
-vppcontainer=$(docker ps | grep vpp_calico-vpp | awk '{ print $1 }')
-docker cp ${vppcontainer}:/usr/bin/calivppctl /usr/bin/calivppctl
-```
-
-- With kubectl (and a cluster with calico-vpp running)
-
-```bash
-vpppod=$(kubectl -n calico-vpp-dataplane get pods -o wide | grep calico-vpp-node- | awk '{ print $1 }' | head -1)
-kubectl -n calico-vpp-dataplane exec -it ${vpppod} -c vpp -- cat /usr/bin/calivppctl | tee /usr/bin/calivppctl > /dev/null
-chmod +x /usr/bin/calivppctl
-```
-
-## Troubleshooting
-
-### Kubernetes Cluster
-
-First you need to make sure Kubernetes is up and running.
-
-- `service kubelet status` should give you a first hint.
-- Issues should be reported in the kubelet logs, which you can check with this command if you are using systemd: `journalctl -u kubelet -r -n200`
-
-:::note
-
-Kubernetes does not run with swap enabled.
-
-:::
-
-### Starting the calico-vpp-node DaemonSet
-
-Once the cluster is correctly started, the next issue can come from the DaemonSet configuration.
-The best place to start is by inspecting the pods: are they running correctly?
-Configuration issues (available hugepages, memory, and so on) are usually reported here.
-
-```bash
-kubectl -n calico-vpp-dataplane describe pod/calico-vpp-node-XXXXX
-```
-
-:::note
-
-If at this point you don't have enough hugepages, you'll have to restart kubelet
-after allocating them so that the change takes effect (for instance with `service kubelet restart`).
-
-:::
-
-### Having VPP up and running
-
-Once the pods no longer report any issues, they should have started. There are two
-containers for each node: `vpp`, which starts the VPP process and sets up connectivity,
-and the agent, which handles pod connectivity, service load balancing, BGP, policies, and so on.
-
-First, check that VPP is running correctly. Problems with the connectivity configuration or interface naming
-are reported here. Once VPP is running, you should be able to ping your other nodes through VPP.
-
-```bash
-# Print VPP's log: basic connectivity and NIC configuration
-calivppctl log -vpp myk8node1
-```
-
-Then you can check for any issues reported by the Agent (e.g. BGP listen issue
-if the port is already taken, or missing configuration pieces). If this doesn't
-show any errors, you should be able to `nslookup kubernetes.default` from pods.
-
-```bash
-# Print the logs for the {{prodname}} VPP dataplane agent, programming serviceIPs, BGP, ...
-calivppctl log -agent myk8node1
-```
-
-If all this doesn't help, you can always use the export command to generate an `export.tar.gz`
-bundle and ask for help on the [#vpp channel](https://calicousers.slack.com/archives/C017220EXU1):
-
-```bash
-calivppctl export
-```
-
-## Accessing the VPP CLI
-
-For further debugging, tracing packets, and inspecting VPP's internals, you can
-get a VPP shell using the following:
-
-```bash
-calivppctl vppctl myk8node1
-```
-
-### Listing interfaces and basics
-
-To list existing interfaces and basic counters, use:
-
-```
-vpp# show int
-vpp# show int addr
-```
-
-To get more insight into the main interface (e.g. if you're using DPDK), you can check
-for errors and drops with:
-
-```
-vpp# show hardware-interfaces
-```
-
-Other places to look for errors:
-
-```
-vpp# show log      # VPP startup log
-vpp# show err      # Prints out packet counters (not always actual errors, but includes drops)
-vpp# show buffers  # You should have non zero free buffers, otherwise traffic won't flow
-```
-
-## Tracing packets
-
-### Internal network layout
-
-For starters, here is a small schematic of what the network looks like:
-![k8-calico-vpp](/img/calico/vpp-tracing-net.svg)
-
-Container interfaces are named `tun[0-9]+`. You can find which one belongs to which container as follows.
-
-```
-# Connect to vppctl
-$ calivppctl vppctl NODENAME
-
-# List interfaces
-vpp# show interface
- Name Idx State MTU (L3/IP4/IP6/MPLS) Counter Count
-avf-0/d8/a/0 1 up 9000/0/0/0 tx packets 2
- tx bytes 216
-local0 0 down 0/0/0/0
-tap0 2 up 0/0/0/0 rx packets 9
-[...]
-tun3 5 up 0/0/0/0 rx packets 5
- rx bytes 431
- tx packets 5
- tx bytes 387
- ip4 5
-
-# Show the route for address 11.0.166.132
-vpp# show ip fib 11.0.166.132
-ipv4-VRF:0, fib_index:0, flow hash:[src dst sport dport symmetric ] epoch:0 flags:none locks:[adjacency:1, default-route:1, ]
-11.0.166.132/32 fib:0 index:19 locks:5
- cnat refs:1 entry-flags:uRPF-exempt,interpose, src-flags:added,contributing,active, cover:-1 interpose:
- [@0]: [4] cnat-client:[11.0.166.132] tr:0 sess:1
- path-list:[26] locks:3 flags:shared, uPRF-list:24 len:1 itfs:[5, ]
- path:[32] pl-index:26 ip4 weight=1 pref=0 attached-nexthop: oper-flags:resolved, cfg-flags:attached,
- 11.0.166.132 tun3 (p2p)
- [@0]: ipv4 via 0.0.0.0 tun3: mtu:9000 next:7
- [...]
-
-# This one is behind `tun3`
-# If you want more info about this interface (name in Linux, queues, descriptors, ...)
-vpp# show tun tun3
-Interface: tun3 (ifindex 5)
- name "eth0"
- host-ns "/proc/17675/ns/net"
- [...]
-```
-
-`tap0` is the interface providing connectivity to the host, using the original interface name on the Linux side (use `show tap tap0` and `show ip punt redirect`).
-
-### Capturing traffic inside the cluster
-
-Let's take the case of two pods talking to each other in your cluster (see the schematic above).
-You might want to inspect the traffic at three different locations:
-
-- as it exits the pod (in Linux inside the first pod)
-- as it goes through VPP
-- as it is received in the second pod (in Linux again)
-
-We cover all three cases: first inside VPP (depending on where your traffic is coming from: a pod or outside your host),
-then inside your pods (usually with tcpdump).
-
-### Traffic capture inside VPP
-
-#### Traffic from a pod
-
-The following snippet will allow you to capture all traffic coming from containers on a particular node, grep for a specific packet,
-and see what happened to it.
-
-```bash
-# Make sure that the trace buffer is clean in VPP
-calivppctl vppctl NODENAME clear trace
-# Add a trace from the virtio-input input-node
-calivppctl vppctl NODENAME trace add virtio-input 500
-# Generate some traffic
-calivppctl vppctl NODENAME show trace max 500 > somefile
-# Grep for your IPs
-cat somefile | grep '1.2.3.4 -> 5.6.7.8' -A40 -B40
-```
-
-The output looks quite dense at first, as it contains the whole path of a packet through VPP, from reception to transmission.
-
-```
-vpp# show trace
-Packet 1
-
-00:09:46:518858: virtio-input
-# This packet has been received on the interface number #2 (column Idx in `show int`)
-# and is 688 Bytes long
- virtio: hw_if_index 2 next-index 1 vring 0 len 688
- hdr: flags 0x00 gso_type 0x00 hdr_len 0 gso_size 0 csum_start 0 csum_offset 0 num_buffers 1
-00:09:46:518866: ip4-input
-# we read TCP header, addresses and ports
- TCP: 20.0.0.1 -> 11.0.166.133
- tos 0x00, ttl 64, length 688, checksum 0x1bc5 dscp CS0 ecn NON_ECN
- fragment id 0x56fd, flags DONT_FRAGMENT
- TCP: 6443 -> 34112
- seq. 0xa1f93599 ack 0x818eb1c1
- flags 0x18 PSH ACK, tcp header: 32 bytes
- window 502, checksum 0x00b7
-00:09:46:518870: ip4-lookup
- fib 0 dpo-idx 5 flow hash: 0x00000000
- TCP: 20.0.0.1 -> 11.0.166.133
- tos 0x00, ttl 64, length 688, checksum 0x1bc5 dscp CS0 ecn NON_ECN
- fragment id 0x56fd, flags DONT_FRAGMENT
- TCP: 6443 -> 34112
- seq. 0xa1f93599 ack 0x818eb1c1
- flags 0x18 PSH ACK, tcp header: 32 bytes
- window 502, checksum 0x00b7
-00:09:46:518873: ip4-cnat-tx
-# We need to do some NATing as it's Kubernetes
- found: session:[20.0.0.1;6443 -> 11.0.166.133;34112, TCP] => 11.96.0.1;443 -> 11.0.166.133;34112 lb:-1 age:4190
-00:09:46:518879: ip4-rewrite
-# We rewrite the IP packet
-# (MAC addresses are only rewritten when coming from / going to a PHY, as tun interfaces are L3-only)
- tx_sw_if_index 6 dpo-idx 7 : ipv4 via 0.0.0.0 tun4: mtu:9000 next:8 flow hash: 0x00000000
- 00000000: 450002b056fd40003f0625650b6000010b00a68501bb8540a1f93599818eb1c1
- 00000020: 801801f620c700000101080a3f906c98fbaaba031703030277413d39
-# Output happens on the interface `tun4`
-00:09:46:518880: tun4-output
- tun4
- 00000000: 450002b056fd40003f0625650b6000010b00a68501bb8540a1f93599818eb1c1
- 00000020: 801801f620c700000101080a3f906c98fbaaba031703030277413d39b97817c1
- 00000040: 41392fdbe0e9d4886849851476cdb8986362ee2f789bfefd8a5c106c898d1309
- 00000060: 4f8f8cb89159d99e986813a48d91334930eb5eb10ca4248c
-00:09:46:518881: tun4-tx
- buffer 0x24cf615: current data 0, length 688, buffer-pool 1, ref-count 1, totlen-nifb 0, trace handle 0x1000000
- ipv4 tcp hdr-sz 52 l2-hdr-offset 0 l3-hdr-offset 0 l4-hdr-offset 20 l4-hdr-sz 32
- 0x0b60: 40:00:3f:06:25:65 -> 45:00:02:b0:56:fd
-
-Packet 2
-[...]
-```
-
-#### Traffic from the phy
-
-If you want to capture traffic coming from the physical NIC, you should still use `trace add`, but with a different source node (`dpdk-input`, `af-packet-input`, `af_xdp-input`, or `avf-input`) instead of `virtio-input`.
-
-`show run` should give you a hint of the `X-input` node you want to trace from.
-
-```
-vpp# show run
-Thread 1 vpp_wk_0 (lcore 25)
-Time 1.9, 10 sec internal node vector rate 1.05 loops/sec 1074819.68
- vector rates in 7.5356e0, out 7.5356e0, drop 0.0000e0, punt 0.0000e0
- Name State Calls Vectors Suspends Clocks Vectors/Call
-avf-input polling 2233530 0 0 8.24e1 0.00
-ip4-cnat-snat active 1 1 0 5.35e3 1.00
-ip4-cnat-tx active 14 15 0 1.18e3 1.07
-[...]
-
-# Here we seem to want to use: trace add avf-input 200
-```
-
-As with traffic from a container, you can then use:
-
-```bash
-# Make sure that the trace buffer is clean in VPP
-calivppctl vppctl NODENAME clear trace
-# Add a trace from the avf-input input-node
-calivppctl vppctl NODENAME trace add avf-input 500
-# Generate some traffic
-calivppctl vppctl NODENAME show trace max 500 > somefile
-# Grep for your IPs
-cat somefile | grep '1.2.3.4 -> 5.6.7.8' -A40 -B40
-```
-
-#### With Wireshark
-
-As an alternative to the trace, you can capture packets and analyze them in Wireshark. You can do this with:
-
-```
-vpp# pcap dispatch trace on max 1000 file vppcapture buffer-trace dpdk-input 1000
-vpp# pcap dispatch trace off
-```
-
-This will generate a file named `/tmp/vppcapture` inside the VPP container.
-
-Then on your host run:
-
-```bash
-calivppctl sh vpp NODENAME
-root@server:~# mv /tmp/vppcapture /var/lib/vpp/
-root@server:~# exit
-# The file should now be at /var/lib/vpp/vppcapture on your host 'NODENAME'
-```
-
-You can then `scp NODENAME:/var/lib/vpp/vppcapture .` to your machine and open it with Wireshark.
-[More info about this is available here](https://haryachyy.wordpress.com/2019/09/29/learning-vpp-trace-with-wireshark/).
-
-### Traffic received in the pods
-
-To inspect traffic actually received by the pods (if `tcpdump` is installed in the pod), simply run `tcpdump -ni eth0` inside the pod. If `tcpdump` is not available in the pod, here are two options for capturing pod traffic anyway:
-
-#### Tcpdump is available on the host
-
-Provided that you have `tcpdump` installed on the host, you can use `nsenter` to attach to the pod's network namespace and use the host's `tcpdump` on the container's interface.
-
-This works on Docker as follows:
-
-```bash
-
-# Find the container ID you want to inspect
-$ docker ps
-CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
-4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds 3300-3310/tcp webapp
-
-# Get the container PID out of it
-$ docker inspect --format '{{ .State.Pid }}' 4c01db0b339c
-12345
-
-# Attach
-$ nsenter -t 12345 -n bash
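-
-# Run the host's tcpdump against the pod's interface (the interface inside the pod is typically eth0)
-$ tcpdump -ni eth0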
-
-```
-
-#### No tcpdump, but we have Python!
-
-Open an AF_PACKET socket in Python with the following code
-and run it attached to the pod's network namespace, as shown previously.
-
-```python
-#!/usr/bin/env python
-# Minimal sniffer: prints IPv4/UDP header fields for packets received on IFNAME
-from socket import *
-from struct import unpack
-
-IFNAME = "eth0"
-N_PKT = 50
-MTU = 1500
-
-# SOCK_DGRAM ("cooked" mode) strips the link-layer header, so data starts at the IP header
-sock = socket(AF_PACKET, SOCK_DGRAM, 0x0800)
-sock.bind((IFNAME, 0x0800))
-for _ in range(N_PKT):
-    data = sock.recvfrom(MTU, 0)[0]
-    # IPv4 header: source address at bytes 12-16, destination at 16-20
-    src_addr = inet_ntop(AF_INET, data[12:16])
-    dst_addr = inet_ntop(AF_INET, data[16:20])
-    # With a 20-byte IP header, the UDP header follows: ports, length, checksum
-    src_port, = unpack("!H", data[20:22])
-    dst_port, = unpack("!H", data[22:24])
-    data_len, = unpack("!H", data[24:26])
-    cksum, = unpack("!H", data[26:28])
-
-    print("%s:%d -> %s:%d len %d cs %d" % (src_addr, src_port, dst_addr, dst_port, data_len, cksum))
-```
-
-This requires privileges and is therefore usually easier to run from the host. From the host, you can use `echo "the python blob above" | nsenter -t <pid> -n python` (where `<pid>` is the container's PID found as above) to execute this code.
-
-### Traffic to the kubelet agent
-
-As the kubelet agent runs directly on the host without a network namespace, pods talking to it (e.g. coredns resolvers) go through a specific path. Packets destined for it are caught by VPP's punt mechanism and forwarded to the host through a tap interface, which has the same name as the original interface in Linux.
-
-To debug traffic within VPP, use the trace & check that traffic is correctly punted to the tap0 interface.
-
-On the host, you can use `tcpdump` normally to check the traffic.
-
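-A minimal sketch of the VPP-side check, using the CLI commands referenced earlier on this page:
-
-```
-# Attach to the VPP CLI on the node
-$ calivppctl vppctl NODENAME
-# Inspect the host tap interface and the punt rules
-vpp# show tap tap0
-vpp# show ip punt redirect
-```
-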
-## Crashes & coredumps
-
-To instruct VPP to leave a core dump in the event of a crash, you can pass the `CALICOVPP_CORE_PATTERN` environment variable to the vpp container:
-
-```yaml
-kind: DaemonSet
-apiVersion: apps/v1
-metadata:
-  name: calico-vpp-node
-
----
-- name: vpp
-  env:
-    - name: CALICOVPP_CORE_PATTERN
-      value: '/home/hostuser/vppcore.%e.%p'
-  volumeMounts:
-    - name: userhome
-      mountPath: /home/hostuser
-
----
-volumes:
-  - name: userhome
-    hostPath:
-      path: ${SOME_DIRECTORY}
-```
-
-This will generate a `vppcore.vpp_main.<pid>` file in `${DIR}` (the directory you mounted as `${SOME_DIRECTORY}` above) if vpp aborts unexpectedly. If you encounter this situation, please note the exact version of the vpp image that generated the corefile (using the image hash) to facilitate further troubleshooting.
-
-To explore it run:
-
-```bash
-docker run -it --entrypoint=bash -v $DIR/vppcore.vpp_main.12345:/root/vppcore calicovpp/vpp:VERSION
-# You should have a shell inside the vpp container
-apt update && apt install -y gdb
-gdb vpp ./vppcore
-```
diff --git a/calico_versioned_docs/version-3.25/operations/upgrading/index.mdx b/calico_versioned_docs/version-3.25/operations/upgrading/index.mdx
deleted file mode 100644
index 20bc2723b1..0000000000
--- a/calico_versioned_docs/version-3.25/operations/upgrading/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Upgrade to a newer version of Calico.
-hide_table_of_contents: true
----
-
-# Upgrade
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/operations/upgrading/kubernetes-upgrade.mdx b/calico_versioned_docs/version-3.25/operations/upgrading/kubernetes-upgrade.mdx
deleted file mode 100644
index 0f93b9224a..0000000000
--- a/calico_versioned_docs/version-3.25/operations/upgrading/kubernetes-upgrade.mdx
+++ /dev/null
@@ -1,267 +0,0 @@
----
-description: Upgrade to a newer version of Calico for Kubernetes.
----
-
-# Upgrade Calico on Kubernetes
-
-## About upgrading {{prodname}}
-
-This page describes how to upgrade to {{version}} from {{prodname}} v3.0 or later. The
-procedure varies by datastore type and install method.
-
-If you are using {{prodname}} in etcd mode on a Kubernetes cluster, we recommend upgrading to the Kubernetes API datastore [as discussed here](../datastore-migration.mdx).
-
-If you have installed {{prodname}} using the `calico.yaml` manifest, we recommend upgrading to the {{prodname}} operator, [as discussed here](../operator-migration.mdx).
-
-- [Upgrading an installation that was installed using Helm](#upgrading-an-installation-that-was-installed-using-helm)
-
-- [Upgrading an installation that uses the operator](#upgrading-an-installation-that-uses-the-operator)
-
-- [Upgrading an installation that uses manifests and the Kubernetes API datastore](#upgrading-an-installation-that-uses-manifests-and-the-kubernetes-api-datastore)
-
-- [Upgrading an installation that connects directly to an etcd datastore](#upgrading-an-installation-that-uses-an-etcd-datastore)
-
-:::note
-
-Do not use older versions of `calicoctl` after the upgrade.
-This may result in unexpected behavior and data loss.
-
-:::
-
-
-
-## Upgrading an installation that was installed using helm
-
-Prior to release v3.23, the Calico helm chart itself deployed the `tigera-operator` namespace and required that the helm release be
-installed in the `default` namespace. Newer releases properly defer creation of the `tigera-operator` namespace to the user and allow installation
-of the chart into the `tigera-operator` namespace.
-
-When upgrading from a version of Calico v3.22 or lower to a version of Calico v3.23 or greater, you must complete the following steps to migrate
-ownership of the helm resources to the new chart location.
-
-### Upgrade from Calico versions prior to v3.23.0
-
-1. Patch existing resources so that the new chart can assume ownership.
-
- ```
- kubectl patch installation default --type=merge -p '{"metadata": {"annotations": {"meta.helm.sh/release-namespace": "tigera-operator"}}}'
- kubectl patch apiserver default --type=merge -p '{"metadata": {"annotations": {"meta.helm.sh/release-namespace": "tigera-operator"}}}'
- kubectl patch podsecuritypolicy tigera-operator --type=merge -p '{"metadata": {"annotations": {"meta.helm.sh/release-namespace": "tigera-operator"}}}'
- kubectl patch -n tigera-operator deployment tigera-operator --type=merge -p '{"metadata": {"annotations": {"meta.helm.sh/release-namespace": "tigera-operator"}}}'
- kubectl patch -n tigera-operator serviceaccount tigera-operator --type=merge -p '{"metadata": {"annotations": {"meta.helm.sh/release-namespace": "tigera-operator"}}}'
- kubectl patch clusterrole tigera-operator --type=merge -p '{"metadata": {"annotations": {"meta.helm.sh/release-namespace": "tigera-operator"}}}'
- kubectl patch clusterrolebinding tigera-operator tigera-operator --type=merge -p '{"metadata": {"annotations": {"meta.helm.sh/release-namespace": "tigera-operator"}}}'
- ```
-
-1. Apply the {{version}} CRDs:
-
- ```bash
- kubectl apply --server-side --force-conflicts -f {{manifestsUrl}}/manifests/operator-crds.yaml
- ```
-
-1. Install the helm chart in the `tigera-operator` namespace.
-
- ```
- helm install {{prodnamedash}} projectcalico/tigera-operator --version {{releaseTitle}} --namespace tigera-operator
- ```
-
-1. Once the install has succeeded, you can delete any old releases in the `default` namespace.
-
- ```
- kubectl delete secret -n default -l name=calico,owner=helm --dry-run
- ```
-
-:::note
-
-The above command uses --dry-run to avoid making changes to your cluster. We recommend reviewing
-the output and then re-running the command without --dry-run to commit to the changes.
-
-:::
-
-### All other upgrades
-
-1. Apply the {{version}} CRDs:
-
- ```bash
- kubectl apply --server-side --force-conflicts -f {{manifestsUrl}}/manifests/operator-crds.yaml
- ```
-
-1. Run the helm upgrade:
-
- ```bash
- helm upgrade {{prodnamedash}} projectcalico/tigera-operator
- ```
-
-## Upgrading an installation that uses the operator
-
-1. Download the {{version}} operator manifest.
-
- ```bash
- curl {{manifestsUrl}}/manifests/tigera-operator.yaml -O
- ```
-
-1. Use the following command to initiate an upgrade.
-
- ```bash
- kubectl apply --server-side --force-conflicts -f tigera-operator.yaml
- ```
-
-## Upgrading an installation that uses manifests and the Kubernetes API datastore
-
-1. Download the {{version}} manifest that corresponds to your original installation method.
-
- **{{prodname}} for policy and networking**
-
- ```bash
- curl {{manifestsUrl}}/manifests/calico.yaml -o upgrade.yaml
- ```
-
- **{{prodname}} for policy and flannel for networking**
-
- ```bash
- curl {{manifestsUrl}}/manifests/canal.yaml -o upgrade.yaml
- ```
-
- **{{prodname}} for policy (advanced)**
-
- ```bash
- curl {{manifestsUrl}}/manifests/calico-policy-only.yaml -o upgrade.yaml
- ```
-
- :::note
-
- If you manually modified the manifest, you must manually apply the
- same changes to the downloaded manifest.
-
- :::
-
-1. Use the following command to initiate a rolling update.
-
- ```bash
- kubectl apply --server-side --force-conflicts -f upgrade.yaml
- ```
-
-1. Watch the status of the upgrade as follows.
-
- ```bash
- watch kubectl get pods -n kube-system
- ```
-
-   Verify that the status of all {{prodname}} pods is `Running`.
-
- ```bash
- {{noderunning}}-hvvg8 2/2 Running 0 3m
- {{noderunning}}-vm8kh 2/2 Running 0 3m
- {{noderunning}}-w92wk 2/2 Running 0 3m
- ```
-
-1. Remove any existing `calicoctl` instances, [install the new `calicoctl`](../calicoctl/install.mdx)
- and [configure it to connect to your datastore](../calicoctl/configure/overview.mdx).
-
-1. Use the following command to check the {{prodname}} version number.
-
- ```bash
- calicoctl version
- ```
-
- It should return a `Cluster Version` of `{{version}}.x`.
-
-1. If you have [enabled application layer policy](../../network-policy/istio/app-layer-policy.mdx),
- follow [the instructions below](#upgrading-if-you-have-application-layer-policy-enabled) to complete your upgrade. Skip this if you are not using Istio with {{prodname}}.
-
-1. If you were upgrading from a version of Calico prior to v3.14 and followed the pre-upgrade steps for host endpoints above, review traffic logs from the temporary policy,
- add any global network policies needed to allow traffic, and delete the temporary network policy **allow-all-upgrade**.
-
-1. Congratulations! You have upgraded to {{prodname}} {{version}}.
-
-## Upgrading an installation that uses an etcd datastore
-
-1. Download the {{version}} manifest that corresponds to your original installation method.
-
- **{{prodname}} for policy and networking**
-
- ```bash
- curl {{manifestsUrl}}/manifests/calico-etcd.yaml -o upgrade.yaml
- ```
-
- **{{prodname}} for policy and flannel for networking**
-
- ```bash
- curl {{manifestsUrl}}/manifests/canal-etcd.yaml -o upgrade.yaml
- ```
-
- :::note
-
- You must manually apply the changes you made to the manifest
- during installation to the downloaded {{version}} manifest. At a minimum,
- you must set the `etcd_endpoints` value.
-
- :::
-
-1. Use the following command to initiate a rolling update.
-
- ```bash
- kubectl apply --server-side --force-conflicts -f upgrade.yaml
- ```
-
-1. Watch the status of the upgrade as follows.
-
-   ```bash
- watch kubectl get pods -n kube-system
- ```
-
-   Verify that the status of all {{prodname}} pods is `Running`.
-
- ```
- calico-kube-controllers-6d4b9d6b5b-wlkfj 1/1 Running 0 3m
- {{noderunning}}-hvvg8 1/2 Running 0 3m
- {{noderunning}}-vm8kh 1/2 Running 0 3m
- {{noderunning}}-w92wk 1/2 Running 0 3m
- ```
-
- :::tip
-
- The {{noderunning}} pods will report `1/2` in the `READY` column, as shown.
-
- :::
-
-1. Remove any existing `calicoctl` instances, [install the new `calicoctl`](../calicoctl/install.mdx)
- and [configure it to connect to your datastore](../calicoctl/configure/overview.mdx).
-
-1. Use the following command to check the {{prodname}} version number.
-
- ```bash
- calicoctl version
- ```
-
- It should return a `Cluster Version` of `{{version}}`.
-
-1. If you have [enabled application layer policy](../../network-policy/istio/app-layer-policy.mdx),
- follow [the instructions below](#upgrading-if-you-have-application-layer-policy-enabled) to complete your upgrade. Skip this if you are not using Istio with {{prodname}}.
-
-1. If you were upgrading from a version of Calico prior to v3.14 and followed the pre-upgrade steps for host endpoints above, review traffic logs from the temporary policy,
- add any global network policies needed to allow traffic, and delete the temporary network policy **allow-all-upgrade**.
-
-1. Congratulations! You have upgraded to {{prodname}} {{version}}.
-
-## Upgrading if you have Application Layer Policy enabled
-
-Dikastes is versioned the same as the rest of {{prodname}}, but an upgraded `calico-node` will still be able to work with a downlevel Dikastes
-so that you will not lose data plane connectivity during the upgrade. Once `calico-node` is upgraded, you can begin redeploying your service pods
-with the updated version of Dikastes.
-
-If you have [enabled application layer policy](../../network-policy/istio/app-layer-policy.mdx),
-take the following steps to upgrade the Dikastes sidecars running in your application pods. Skip these steps if you are not using Istio with {{prodname}}.
-
-1. Update the Istio sidecar injector template to use the new version of Dikastes. Replace `<your Istio version>` below with
-   the full version string of your Istio install, for example `1.4.2`.
-
- ```bash
-   kubectl apply -f {{manifestsUrl}}/manifests/alp/istio-inject-configmap-<your Istio version>.yaml
- ```
-
-1. Once the new template is in place, newly created pods use the upgraded version of Dikastes. Perform a rolling update of each of your service deployments
- to get them on the new version of Dikastes.
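-
-   For example, one way to trigger such a rolling update (the deployment name is illustrative):
-
-   ```bash
-   kubectl rollout restart deployment <your-deployment>
-   ```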
-
-
diff --git a/calico_versioned_docs/version-3.25/operations/upgrading/openshift-upgrade.mdx b/calico_versioned_docs/version-3.25/operations/upgrading/openshift-upgrade.mdx
deleted file mode 100644
index ae7fe6ea10..0000000000
--- a/calico_versioned_docs/version-3.25/operations/upgrading/openshift-upgrade.mdx
+++ /dev/null
@@ -1,38 +0,0 @@
----
-description: Upgrade to a newer version of Calico for OpenShift.
----
-
-# Upgrade Calico on OpenShift 4
-
-## About upgrading {{prodname}}
-
-This page describes how to upgrade to {{version}} for OpenShift 4 from an existing {{prodname}} cluster.
-
-
-
-## Upgrading Calico on OpenShift 4
-
-Make a manifests directory.
-
-```bash
-mkdir manifests
-```
-
-
-
-Apply the updated manifests.
-
-```bash
-oc apply -f manifests/
-```
-
-You can now monitor the upgrade progress with the following command:
-
-```bash
-watch oc get tigerastatus
-```
-
-If you were upgrading from a version of Calico prior to v3.14 and followed the pre-upgrade steps for host endpoints above, review traffic logs from the temporary policy,
-add any global network policies needed to allow traffic, and delete the temporary network policy **allow-all-upgrade**.
-
-
diff --git a/calico_versioned_docs/version-3.25/operations/upgrading/openstack-upgrade.mdx b/calico_versioned_docs/version-3.25/operations/upgrading/openstack-upgrade.mdx
deleted file mode 100644
index 9d75f97bd8..0000000000
--- a/calico_versioned_docs/version-3.25/operations/upgrading/openstack-upgrade.mdx
+++ /dev/null
@@ -1,166 +0,0 @@
----
-description: Upgrade to a newer version of Calico for OpenStack.
----
-
-# Upgrade Calico on OpenStack
-
-## {{prodname}} package update
-
-This page describes how to upgrade to {{version}} from {{prodname}} v3.0 or later. The procedure
-varies by Linux distribution.
-
-- [Upgrading an OpenStack cluster based on CentOS](#upgrading-an-openstack-cluster-based-on-centos)
-
-- [Upgrading an OpenStack cluster based on Ubuntu](#upgrading-an-openstack-cluster-based-on-ubuntu)
-
-:::note
-
-Do not use older versions of `calicoctl` after the upgrade.
-This may result in unexpected behavior and data loss.
-
-:::
-
-## Upgrading an OpenStack cluster based on CentOS
-
-1. On all nodes, change the location of the {{prodname}} packages to point to the {{version}} repo:
-
- ```
- sudo sed -i 's/calico-X.X/calico-Y.Y/g' /etc/yum.repos.d/calico.repo
- ```
-
- Replace `X.X` in the above command with the version you're upgrading from (must be v3.0 or later).
- Replace `Y.Y` with the version of the release you're upgrading to. Example: if you are upgrading from v3.1
- to v3.5, replace `X.X` with `3.1` and replace `Y.Y` with `3.5`.
-
-1. On all compute nodes, update packages:
-
- ```
- sudo yum update
- ```
-
-   We recommend upgrading the whole distribution as shown here. If you prefer to upgrade only particular packages, those needed for a {{prodname}} compute node are the following.
-
- - `calico-common`
- - `calico-compute`
- - `calico-dhcp-agent`
- - `calico-felix`
- - `dnsmasq`
- - `networking-calico`
- - `openstack-neutron`
- - `openstack-nova-api`
- - `openstack-nova-compute`
-
-
-
-1. Use the following command on the compute nodes to confirm that Felix has upgraded to {{version}}.
-
- ```
- calico-felix --version
- ```
-
- It should return `{{version}}`.
-
-1. On all compute nodes, add the following line to the end of `/etc/calico/felix.cfg`:
-
- ```
- DatastoreType = etcdv3
- ```
-
- If you need to change the EtcdEndpoints address (e.g. because you've installed a new etcdv3 cluster
- rather than upgrading your existing etcdv2 cluster), you should update the EtcdEndpoints addresses
-   in `/etc/calico/felix.cfg` at this point.
-
-1. On all control nodes, update packages:
-
- ```
- sudo yum update
- ```
-
-   We recommend upgrading the whole distribution as shown here. If you prefer to upgrade only particular packages, those needed for a {{prodname}} control node are the following.
-
- - `calico-common`
- - `calico-control`
- - `networking-calico`
- - `openstack-neutron`
-
-
-
-1. On all control nodes, restart `neutron-server`:
-
- ```
- sudo systemctl restart neutron-server
- ```
-
-1. If you ran `calico-upgrade` earlier to migrate non-OpenStack data, on the control node run:
-
- ```
- calico-upgrade complete
- ```
-
-1. Remove any existing `calicoctl` instances and [install the new `calicoctl`](../calicoctl/install.mdx).
-
-1. Congratulations! You have upgraded to {{prodname}} {{version}}.
-
-## Upgrading an OpenStack cluster based on Ubuntu
-
-1. On all nodes, change the location of the {{prodname}} packages to point to the {{version}} repo:
-
- ```
- sudo bash -c 'cat > /etc/apt/sources.list.d/project-calico-calico-X_X-trusty.list' << EOF
- deb http://ppa.launchpad.net/project-calico/calico-X.X/ubuntu trusty main
- # deb-src http://ppa.launchpad.net/project-calico/calico-X.X/ubuntu trusty main
- EOF
- ```
-
- Replace `X_X` and `X.X` with the version you're upgrading to. Example: if you're upgrading to v3.5, replace `X_X` with
- `3_5` and replace `X.X` with `3.5`. Also replace `trusty` with the code name of your Ubuntu version.
-
-1. On all compute nodes, update packages:
-
- ```
- sudo apt-get update
- sudo apt-get install calico-compute calico-felix calico-common \
- python-etcd networking-calico calico-dhcp-agent
-
- ```
-
-1. Use the following command on the compute nodes to confirm that Felix has upgraded to {{version}}.
-
- ```
- calico-felix --version
- ```
-
- It should return `{{version}}`.
-
-1. On all compute nodes, add the following line to the end of `/etc/calico/felix.cfg`:
-
- ```
- DatastoreType = etcdv3
- ```
-
- If you need to change the EtcdEndpoints address (e.g. because you've installed a new etcdv3 cluster
- rather than upgrading your existing etcdv2 cluster), you should update the EtcdEndpoints addresses
-   in `/etc/calico/felix.cfg` at this point.
-
-1. On all control nodes, update packages:
-
- ```
- sudo apt-get update
- sudo apt-get install calico-control calico-common python-etcd networking-calico
- ```
-
-1. On all control nodes, restart `neutron-server`:
-
- ```
- sudo service neutron-server restart
- ```
-
-1. If you ran `calico-upgrade` earlier to migrate non-OpenStack data, on the control node run:
-
- ```
- calico-upgrade complete
- ```
-
-1. Remove any existing `calicoctl` instances and [install the new `calicoctl`](../calicoctl/install.mdx).
-
-1. Congratulations! You have upgraded to {{prodname}} {{version}}.
diff --git a/calico_versioned_docs/version-3.25/reference/api.mdx b/calico_versioned_docs/version-3.25/reference/api.mdx
deleted file mode 100644
index 43939af0ce..0000000000
--- a/calico_versioned_docs/version-3.25/reference/api.mdx
+++ /dev/null
@@ -1,12 +0,0 @@
----
-description: Learn about the Calico API and how to use it.
----
-
-# Calico API
-
-{{prodname}} provides and consumes a public API in Go that allows
-developers to work with {{prodname}} resources.
-
-To learn more about the {{prodname}} API and how to use it, see the Calico API project [README](https://github.com/projectcalico/api/blob/master/README.md) or
-the [github.com/projectcalico/api Go module page](https://pkg.go.dev/github.com/projectcalico/api).
diff --git a/calico_versioned_docs/version-3.25/reference/architecture/data-path.mdx b/calico_versioned_docs/version-3.25/reference/architecture/data-path.mdx
deleted file mode 100644
index c66f5b0f5b..0000000000
--- a/calico_versioned_docs/version-3.25/reference/architecture/data-path.mdx
+++ /dev/null
@@ -1,63 +0,0 @@
----
-description: Learn how packets flow between workloads in a datacenter, or between a workload and the internet.
----
-
-# The Calico data path: IP routing and iptables
-
-One of {{prodname}}’s key features is how packets flow between workloads in a
-data center, or between a workload and the Internet, without additional
-encapsulation.
-
-In the {{prodname}} approach, IP packets to or from a workload are routed and
-firewalled by the Linux routing table and iptables or eBPF infrastructure on the
-workload’s host. For a workload that is sending packets, {{prodname}} ensures
-that the host is always returned as the next hop MAC address regardless
-of whatever routing the workload itself might configure. For packets
-addressed to a workload, the last IP hop is that from the destination
-workload’s host to the workload itself.
-
-![Calico datapath](/img/calico/calico-datapath.png)
-
-Suppose that IPv4 addresses for the workloads are allocated from a
-datacenter-private subnet of 10.65/16, and that the hosts have IP
-addresses from 172.18.203/24. If you look at the routing table on a host:
-
-```bash
-route -n
-```
-
-You will see something like this:
-
-```
-Kernel IP routing table
-Destination Gateway Genmask Flags Metric Ref Use Iface
-0.0.0.0 172.18.203.1 0.0.0.0 UG 0 0 0 eth0
-10.65.0.0 0.0.0.0 255.255.0.0 U 0 0 0 ns-db03ab89-b4
-10.65.0.21 172.18.203.126 255.255.255.255 UGH 0 0 0 eth0
-10.65.0.22 172.18.203.129 255.255.255.255 UGH 0 0 0 eth0
-10.65.0.23 172.18.203.129 255.255.255.255 UGH 0 0 0 eth0
-10.65.0.24 0.0.0.0 255.255.255.255 UH 0 0 0 tapa429fb36-04
-172.18.203.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
-```
-
-There is one workload on this host with IP address 10.65.0.24, and
-accessible from the host via a TAP (or veth, etc.) interface named
-tapa429fb36-04. Hence there is a direct route for 10.65.0.24, through
-tapa429fb36-04. Other workloads, with the .21, .22 and .23 addresses,
-are hosted on two other hosts (172.18.203.126 and .129), so the routes
-for those workload addresses are via those hosts.
-
-The direct routes are set up by a {{prodname}} agent named Felix when it is
-asked to provision connectivity for a particular workload. A BGP client
-(such as BIRD) then notices those and distributes them – perhaps via a
-route reflector – to BGP clients running on other hosts, and hence the
-indirect routes appear also.
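-
-As a quick check on a host, routes that BIRD has programmed into the kernel are typically tagged with their originating protocol (a sketch; output varies by environment):
-
-```bash
-# Show only the routes learned via BGP (programmed by BIRD)
-ip route show proto bird
-```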
-
-## Is that all?
-
-As far as the static data path is concerned, yes. It’s just a
-combination of responding to workload ARP requests with the host MAC, IP
-routing and iptables or eBPF. There’s a great deal more to {{prodname}} in terms of
-how the required routing and security information is managed, and for
-handling dynamic things such as workload migration – but the basic data
-path really is that simple.
diff --git a/calico_versioned_docs/version-3.25/reference/architecture/design/index.mdx b/calico_versioned_docs/version-3.25/reference/architecture/design/index.mdx
deleted file mode 100644
index 717ac9f295..0000000000
--- a/calico_versioned_docs/version-3.25/reference/architecture/design/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Deep dive into using Calico over Ethernet and IP fabrics.
-hide_table_of_contents: true
----
-
-# Network design
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/reference/architecture/design/l2-interconnect-fabric.mdx b/calico_versioned_docs/version-3.25/reference/architecture/design/l2-interconnect-fabric.mdx
deleted file mode 100644
index 422372357d..0000000000
--- a/calico_versioned_docs/version-3.25/reference/architecture/design/l2-interconnect-fabric.mdx
+++ /dev/null
@@ -1,117 +0,0 @@
----
-description: Understand the interconnect fabric options in a Calico network.
----
-
-# Calico over Ethernet fabrics
-
-Any technology that is capable of transporting IP packets can be used as the interconnect fabric in a {{prodname}} network. This means that the standard tools used to transport IP, such as MPLS and Ethernet can be used in a {{prodname}} network.
-
-The focus of this article is on Ethernet as the interconnect network. Most at-scale cloud operators have converted to IP fabrics, and that infrastructure will work for {{prodname}} as well. However, the concerns that drove most of those operators to IP as the interconnection network in their pods are largely ameliorated by {{prodname}}, allowing Ethernet to be viably considered as a {{prodname}} interconnect, even in large-scale deployments.
-
-## Concerns over Ethernet at scale
-
-It has been acknowledged by the industry for years that, beyond a certain size, classical Ethernet networks are unsuitable for production deployment. Although there have been [multiple](https://en.wikipedia.org/wiki/Provider_Backbone_Bridge_Traffic_Engineering) [attempts](https://web.archive.org/web/20150923231827/https://www.cisco.com/web/about/ac123/ac147/archived_issues/ipj_14-3/143_trill.html) [to address](https://en.wikipedia.org/wiki/Virtual_Private_LAN_Service) these issues, the scale-out networking community has largely abandoned Ethernet for anything other than providing physical point-to-point links in the networking fabric. The principal reasons for Ethernet failures at large scale are:
-
-- Large numbers of _endpoints_ ([note 1](#note-1))
-
- Each switch in an Ethernet network must learn the path to all Ethernet endpoints that are connected to the Ethernet network. Learning this amount of state can become a substantial task when we are talking about hundreds of thousands of _endpoints_.
-
-- High rate of _churn_ or change in the network
-
- With that many endpoints, most of them being ephemeral (such as virtual machines or containers), there is a large amount of _churn_ in the network. That load of re-learning paths can be a substantial burden on the control plane processor of most Ethernet switches.
-
-- High volumes of broadcast traffic
-
- As each node on the Ethernet network must use Broadcast packets to locate peers, and many use broadcast for other purposes, the resultant packet replication to each and every endpoint can lead to _broadcast storms_ in large Ethernet networks, effectively consuming most, if not all resources in the network and the attached endpoints.
-
-- Spanning tree
-
- Spanning tree is the protocol used to keep an Ethernet network from forming loops. The protocol was designed in the era of smaller, simpler networks, and it has not aged well. As the number of links and interconnects in an Ethernet network goes up, many implementations of spanning tree become more _fragile_. Unfortunately, when spanning tree fails in an Ethernet network, the effect is a catastrophic loop or partition (or both) in the network, and, in most cases, difficult to troubleshoot or resolve.
-
-Although many of these issues are crippling at _VM scale_ (tens of thousands of endpoints that live for hours, days, weeks), they will be absolutely lethal at _container scale_ (hundreds of thousands of endpoints that live for seconds, minutes, days).
-
-If you weren't ready to turn off your Ethernet data center network before this, I bet you are now. Before you do, however, let's look at how {{prodname}} can mitigate these issues, even in very large deployments.
-
-## How does {{prodname}} tame the Ethernet daemons?
-
-First, let's look at how {{prodname}} uses an Ethernet interconnect fabric. It's important to remember that an Ethernet network _sees_ nothing on the other side of an attached IP router; the Ethernet network just _sees_ the router itself. This is why Ethernet switches can be used at Internet peering points, where large fractions of Internet traffic are exchanged. The switches only see the routers from the various ISPs, not those ISPs' customers' nodes. We leverage the same effect in {{prodname}}.
-
-Taking the issues outlined above, let's revisit them in a {{prodname}}
-context.
-
-- Large numbers of endpoints
-
-  In a {{prodname}} network, the Ethernet interconnect fabric only sees the routers/compute servers, not the
-  endpoints. In a standard cloud model, where there are tens of VMs per server (or hundreds of containers), this reduces the number of nodes that the Ethernet network sees (and has to learn) by one to two orders
-  of magnitude. Even in very large pods (say twenty thousand servers), the Ethernet network would still only see a few tens of thousands of endpoints, well within the scale of any competent data center
-  Ethernet top of rack (ToR) switch.
-
-- High rate of churn
-
-  In a classical Ethernet data center fabric, there is a _churn_ event each time an endpoint is created,
-  destroyed, or moved. In a large data center, with hundreds of thousands of endpoints, this _churn_ could run into tens of events per second, every second of the day, with peaks easily in the hundreds or thousands of events per second. In a {{prodname}} network, however, the _churn_ is very low. The only events that would lead to _churn_ in the fabric are the addition or removal of compute servers. Even at a rate of compute server change orders of magnitude more than what is normally experienced, there would only be on the order of two thousand events per **day**. Any switch that cannot handle that volume of change in the network should not be used
-  for any application.
-
-- High volume of broadcast traffic
-
- Because the first (and last) hop for any traffic in a {{prodname}} network is an IP hop, and IP hops terminate
- broadcast traffic, there is no endpoint broadcast network in the Ethernet fabric, period. In fact, the only broadcast traffic that should be seen in the Ethernet fabric is the ARPs of the compute servers locating each other. If the traffic pattern is fairly consistent, the steady-state ARP rate should be almost zero. Even in a pathological case, the ARP rate should be well within normal accepted boundaries.
-
-- Spanning tree
-
- Depending on the architecture chosen for the Ethernet fabric, it may even be possible to turn off spanning tree. However, even if it is left on, due to the reduction in node count, and reduction in churn, most competent spanning tree implementations should be able to handle the load without stress.
-
-With these considerations in mind, it should be evident that an Ethernet connection fabric in {{prodname}} is not only possible, it is practical and should be seriously considered as the interconnect fabric for a {{prodname}}
-network.
-
-As mentioned in the IP fabric post, an IP fabric is also quite feasible for {{prodname}}, but there are more considerations that must be taken into account. The Ethernet fabric option has fewer architectural considerations in its design.
-
-## A brief note about Ethernet topology
-
-As mentioned elsewhere in the {{prodname}} documentation, because {{prodname}} can use most of the standard IP tooling, some interesting options regarding fabric topology become possible.
-
-We assume that an Ethernet fabric for {{prodname}} would most likely be constructed as a _leaf/spine_ architecture. Other options are possible, but the _leaf/spine_ is the predominant architectural model in use in
-scale-out infrastructure today.
-
-Because {{prodname}} is an IP routed fabric, a {{prodname}} network can use [ECMP](https://en.wikipedia.org/wiki/Equal-cost_multi-path_routing) to distribute traffic across multiple links (instead of using Ethernet techniques such as MLAG). By leveraging ECMP load balancing on the {{prodname}} compute servers, it is possible to build the fabric out of multiple _independent_ leaf/spine planes using no technologies other than IP routing in the {{prodname}} nodes, and basic Ethernet switching in the interconnect fabric. These planes would operate completely independently and could be designed such that they would not share a fault domain. This would allow for the catastrophic failure of one (or more) plane(s) of Ethernet interconnect fabric without the loss of the pod (the failure would just decrease the amount of interconnect bandwidth in the pod). This is a gentler failure mode than the pod-wide IP or Ethernet failure that is possible with today's designs.
-
-You might find this [Facebook blog post](https://code.facebook.com/posts/360346274145943/introducing-data-center-fabric-the-next-generation-facebook-data-center-network/)
- on their fabric approach interesting. A graphic to visualize the idea is shown below.
-
-![Ethernet spine planes](/img/calico/l2-spine-planes.png)
-
-The endpoints are not shown in this diagram, and they would be unaware of anything in the fabric (as noted above).
-
-In this diagram, each ToR is segmented into four logical switches (possibly by using 'port VLANs') ([note 2](#note-2)), and each compute server has a connection to each of those logical switches. We will identify those logical switches by their color. Each ToR would then have a blue, green, orange, and red logical switch. Those 'colors' would be members of a given _plane_, so there would be a blue plane, a green plane, an orange plane, and a red plane. Each plane would have a dedicated spine switch, and each ToR in a given plane would be connected to its spine, and only its spine.
-
-Each plane would constitute an IP network, so the blue plane would be 2001:db8:1000::/36, the green would be 2001:db8:2000::/36, and the orange and red planes would be 2001:db8:3000::/36 and 2001:db8:4000::/36 respectively ([note 3](#note-3)).
-
-Each IP network (plane) requires its own BGP route reflectors. Those route reflectors need to be peered with each other within the plane, but the route reflectors in different planes do not need to be peered with one another. Therefore, a fabric of four planes would have four route reflector meshes. Each compute server, border router, _etc._ would need
-to be a route reflector client of at least one route reflector in each plane, and preferably two or more in each plane.
-
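-One way to express "a route reflector client in each plane" with {{prodname}}'s BGPPeer resource is sketched below; the names, addresses, and AS number are purely illustrative:
-
-```yaml
-# Hypothetical per-node peerings for one compute server: one route reflector per plane
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: compute-01-rr-blue
-spec:
-  node: compute-01
-  peerIP: 2001:db8:1000::100
-  asNumber: 64512
----
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: compute-01-rr-green
-spec:
-  node: compute-01
-  peerIP: 2001:db8:2000::100
-  asNumber: 64512
-```
-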
-The following diagram visualizes the route reflector environment.
-
-![route-reflector](/img/calico/l2-rr-spine-planes.png)
-
-These route reflectors could be dedicated hardware connected to the spine switches (or the spine switches themselves), or physical or virtual route reflectors connected to the necessary logical leaf switches (blue, green, orange, and red). That may be a route reflector running on a compute server and connected directly to the correct plane link, and not routed through the vRouter, to avoid the chicken and egg problem that would occur if the route reflector were "behind" the {{prodname}} network.
-
-Other physical and logical configurations and counts are, of course, possible, this is just an example.
-
-In the logical configuration, each compute server would have an address on each plane's subnet, and would announce its endpoints on each subnet. If ECMP is then turned on, the compute servers would distribute the load across all planes.
-
-If a plane were to fail (say due to a spanning tree failure), then only that one plane would fail. The remaining planes would stay running.
-
-### Footnotes
-
-### Note 1
-
-In this document (and in all {{prodname}} documents) we tend to use the term _endpoint_ to refer to a virtual machine, container, appliance, bare metal server, or any other entity that is connected to a {{prodname}} network. If we are referring to a specific type of endpoint, we will call that out (such as referring to the behavior of VMs as distinct from containers).
-
-### Note 2
-
-We are using logical switches in this example. Physical ToRs could also be used, or a mix of the two (say 2 logical switches hosted on each physical switch).
-
-### Note 3
-
-We use IPv6 here purely as an example. IPv4 would be configured similarly.
diff --git a/calico_versioned_docs/version-3.25/reference/architecture/design/l3-interconnect-fabric.mdx b/calico_versioned_docs/version-3.25/reference/architecture/design/l3-interconnect-fabric.mdx
deleted file mode 100644
index ee58dbba6a..0000000000
--- a/calico_versioned_docs/version-3.25/reference/architecture/design/l3-interconnect-fabric.mdx
+++ /dev/null
@@ -1,282 +0,0 @@
----
-description: Understand considerations for implementing interconnect fabrics with Calico.
----
-
-# Calico over IP fabrics
-
-{{prodname}} provides an end-to-end IP network that interconnects the endpoints ([note 1](#note-1)) in a scale-out or cloud environment. To do that, it needs an _interconnect fabric_ to provide the physical networking layer on which {{prodname}} operates ([note 2](#note-2)).
-
-Although {{prodname}} is designed to work with any underlying interconnect fabric that can support IP traffic, the fabric that has the least considerations attached to its implementation is an Ethernet fabric as
-discussed in [Calico over Ethernet fabrics](l2-interconnect-fabric.mdx).
-
-In most cases, the Ethernet fabric is the appropriate choice, but there are infrastructures where L3 (an IP fabric) has already been deployed, or will be deployed, and it makes sense for {{prodname}} to operate in those
-environments.
-
-However, because {{prodname}} is, itself, a routed infrastructure, there are more engineering, architecture, and operations considerations that have to be weighed when running {{prodname}} with an IP routed interconnection
-fabric. We will briefly outline those in the rest of this post. That said, {{prodname}} operates equally well with Ethernet or IP interconnect fabrics.
-
-## Background
-
-### Basic {{prodname}} architecture overview
-
-A description of the {{prodname}} architecture can be found in our [architectural overview](../overview.mdx). However, a brief discussion of the routing and data paths is useful for
-the discussion.
-
-In a {{prodname}} network, each compute server acts as a router for all of the endpoints that are hosted on that compute server. We call that function a vRouter. The data path is provided by the Linux kernel, the control
-plane by a BGP protocol server, and the management plane by {{prodname}}'s on-server agent, _Felix_.
-
-Each endpoint can only communicate through its local vRouter, and the first and last _hop_ in any {{prodname}} packet flow is an IP router hop through a vRouter. Each vRouter announces all of the endpoints it is attached to, to all the other vRouters and other routers on the infrastructure fabric, using BGP, usually with BGP route reflectors to
-increase scale. A discussion of why we use BGP can be found in [Why BGP?](https://www.tigera.io/blog/why-bgp/).
-
-Access control lists (ACLs) enforce security (and other) policy as directed by whatever cloud orchestrator is in use. There are other components in the {{prodname}} architecture, but they are irrelevant to the interconnect network fabric discussion.
-
-### Overview of current common IP scale-out fabric architectures
-
-There are two approaches to building an IP fabric for a scale-out infrastructure. However, both of them, to date, have assumed that the edge router in the infrastructure is the top of rack (TOR) switch. In the {{prodname}} model, that function is pushed to the compute server itself.
-
-The two approaches are:
-
-**Routing infrastructure is based on some form of IGP**
-
-Due to the limitations in scale of IGP networks, the {{prodname}} team does not believe that using an IGP to distribute endpoint reachability information will adequately scale in a {{prodname}} environment. However, it is possible to use a combination of IGP and BGP in the interconnect fabric, where an IGP communicates the path to the _next-hop_ router (in {{prodname}}, this is often the destination compute server) and BGP is used to distribute the actual next-hop for a given endpoint. This is a valid model, and, in fact, is the most common approach in a widely distributed IP network (say a carrier's backbone network). The design of these networks is somewhat complex, though, and will not be addressed further in this article ([note 3](#note-3)).
-
-**Routing infrastructure is based entirely on BGP**
-
-In this model, the IP network is "tight enough" or has a small enough diameter that BGP can be used to distribute endpoint routes, and the paths to the next-hops for those routes is known to all of the routers in the network (in a {{prodname}} network this includes the compute servers). This is the network model that this note will address.
-
-In this article, we will cover the second option because it is more common in the scale-out world.
-
-### BGP-only interconnect fabrics
-
-There are multiple methods to build a BGP-only interconnect fabric. We will focus on three models, each with two widely viable variations. There are other options, and we will briefly touch on why we didn't include some of them in [Other Options](#other-options).
-
-The first two models are:
-
-- A BGP fabric where each of the TOR switches (and its subsidiary compute servers) is a unique Autonomous System (AS)
- and they are interconnected via either an Ethernet switching plane provided by the spine switches in a [leaf/spine](http://bradhedlund.com/2012/10/24/video-a-basic-introduction-to-the-leafspine-data-center-networking-fabric-design/) architecture, or via a set of spine switches, each of which is also a unique AS. We'll refer to this as the _AS per rack_ model. This model is detailed in [IETF RFC 7938](https://datatracker.ietf.org/doc/html/rfc7938).
-
-- A BGP fabric where each of the compute servers is a unique AS, and the TOR switches make up a transit AS. We'll refer to this as the _AS per server_ model.
-
-Each of these models can either have an Ethernet or IP spine. In the case of an Ethernet spine, each spine switch provides an isolated Ethernet connection _plane_ as in the {{prodname}} Ethernet interconnect fabric model and each TOR switch is connected to each spine switch.
-
-In the IP spine case, each spine switch is a unique AS, and each TOR switch BGP peers with each spine switch. In both cases, the TOR switches use ECMP to load-balance traffic across all available spine switches.
-
-### BGP network design considerations
-
-Contrary to popular opinion, BGP is actually a fairly simple protocol. For example, the BGP configuration on a {{prodname}} compute server is approximately sixty lines long, not counting comments. The perceived complexity is due to the things that you can _do_ with BGP. Many uses of BGP involve complex policy rules, where the behavior of BGP can be modified to meet technical (or business, financial, political, etc.) requirements. A default {{prodname}} network does not venture into those areas ([note 4](#note-4)), and is therefore fairly straightforward.
-
-That said, there are a few design rules for BGP that need to be kept in mind when designing an IP fabric that will interconnect nodes in a {{prodname}} network. These BGP design requirements _can_ be worked around, if necessary, but doing so takes the designer out of the standard BGP _envelope_ and should only be done by an implementer who is _very_ comfortable with advanced BGP design.
-
-These considerations are:
-
-- AS continuity or _AS puddling_
-
- Any router in an AS _must_ be able to communicate with any other router in that same AS without transiting another AS.
-
-- Next hop behavior
-
- By default BGP routers do not change the _next hop_ of a route if it is peering with another router in its same AS. The inverse is also true, a BGP router will set itself as the _next hop_ of a route if it is peering with a router in another AS.
-
-- Route reflection
-
- All BGP routers in a given AS must _peer_ with all the other routers in that AS. This is referred to as a _complete BGP mesh_. This can become problematic as the number of routers in the AS scales up. The use of _route reflectors_ reduces the need for a complete BGP mesh. However, route reflectors also have scaling considerations.
-
-- Endpoints
-
- In a {{prodname}} network, each endpoint is a route. Hardware networking platforms are constrained by the number of routes they can learn. This is usually in the range of tens or hundreds of thousands of routes. Route aggregation can help, but that is usually dependent on the capabilities of the scheduler used by the orchestration software (_e.g._ OpenStack).
-
-A deeper discussion of these considerations can be found in the [IP fabric design considerations](#ip-fabric-design-considerations) section below.
-
-The designs discussed below address these considerations.
-
-### The AS Per Rack model
-
-This model is the closest to the model suggested by [IETF RFC 7938](https://datatracker.ietf.org/doc/html/rfc7938).
-
-As mentioned earlier, there are two versions of this model, one with a set of Ethernet planes interconnecting the ToR switches, and the other where the core planes are also routers. The following diagrams may be useful for the discussion.
-
-![](/img/calico/l3-fabric-diagrams-as-rack-l2-spine.png)
-
-The diagram above shows the **AS per rack model** where the ToR switches are physically meshed via a set of Ethernet switching planes.
-
-![](/img/calico/l3-fabric-diagrams-as-rack-l3-spine.png)
-
-The diagram above shows the **AS per rack model** where the ToR switches are physically meshed via a set of discrete BGP spine routers, each in their own AS.
-
-In this approach, every ToR-ToR or ToR-Spine (in the case of an AS per spine) link is an eBGP peering which means that there is no route-reflection possible (using standard BGP route reflectors) _north_ of the ToR switches.
-
-If the L2 spine option is used, the result is that each ToR must peer with every other ToR switch in the cluster (which could be hundreds of peers).
-
-If the AS per spine option is used, then each ToR only has to peer with each spine (there are usually somewhere between two and sixteen spine switches in a pod). However, the spine switches must peer with all ToR
-switches (again, that would be hundreds, but most spine switches have more control plane capacity than the average ToR, so this might be more scalable in many circumstances).
-
-Within the rack, the configuration is the same for both variants, and is somewhat different than the configuration north of the ToR.
-
-Every router within the rack, which, in the case of {{prodname}} is every compute server, shares the same AS as the ToR that they are connected to. That connection is in the form of an Ethernet switching layer. Each router in the rack must be directly connected to enable the AS to remain contiguous. The ToR's _router_ function is then connected to that Ethernet switching layer as well. The actual configuration of this is dependent on the ToR in use, but usually it means that the ports that are connected to the compute servers are treated as _subnet_ or _segment_ ports, and then the ToR's _router_ function has a single interface into that subnet.
-
-This configuration allows each compute server to connect to each other compute server in the rack without going through the ToR router, but it will, of course, go through the ToR switching function. The compute servers and the ToR router could all be directly meshed, or a route reflector could be used within the rack, either hosted on the ToR
-itself, or as a virtual function hosted on one or more compute servers within the rack.
-
-The ToR, as the eBGP router, redistributes all of the routes from other ToRs, as well as routes external to the data center, to the compute servers that are in its AS, and announces all of the routes from within
-the AS (rack) to the other ToRs and the larger world. This means that each compute server will see the ToR as the next hop for all external routes, and the individual compute servers are the next hop for all routes internal to the rack.
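-
-If the compute servers run {{prodname}}, the intra-rack peering described above can be expressed with {{prodname}}'s BGP resources. The following is a minimal sketch only; the AS number, ToR address, and rack label are hypothetical, each rack would use its own values, and the ToR side must be configured separately:
-
-```yaml
-# Disable the default full node-to-node mesh; peerings are defined explicitly instead.
-apiVersion: projectcalico.org/v3
-kind: BGPConfiguration
-metadata:
-  name: default
-spec:
-  nodeToNodeMeshEnabled: false
-  asNumber: 64512 # hypothetical AS shared by this rack's ToR and compute servers
----
-# Peer every node labeled rack == 'rack1' with that rack's ToR (an iBGP session, same AS).
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: rack1-tor
-spec:
-  peerIP: 10.0.1.1 # hypothetical ToR address
-  asNumber: 64512
-  nodeSelector: rack == 'rack1'
-```
-
-Because the global `asNumber` in `BGPConfiguration` is a single value, a deployment where each rack uses a different AS would typically set the AS number per node instead, as sketched in the next section.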
-
-### The AS per Compute Server model
-
-This model takes the concept of an AS per rack to its logical conclusion. In the earlier referenced [IETF RFC 7938](https://datatracker.ietf.org/doc/html/rfc7938), the assumption in the overall model is that the ToR is the first-tier aggregation and routing element. In {{prodname}}, the ToR, if it is an L3 router, is actually the second tier. Remember, in {{prodname}}, the compute server is always the first/last router for an endpoint, and is also the first/last point of aggregation.
-
-Therefore, if we follow the architecture of the RFC, the compute server, not the ToR, should be the AS boundary. The differences can be seen in the following two diagrams.
-
-![](/img/calico/l3-fabric-diagrams-as-server-l2-spine.png)
-
-The diagram above shows the _AS per compute server model_ where the ToR
-switches are physically meshed via a set of Ethernet switching planes.
-
-![](/img/calico/l3-fabric-diagrams-as-server-l3-spine.png)
-
-The diagram above shows the _AS per compute server model_ where the ToR switches are physically connected to a set of independent routing planes.
-
-As can be seen in these diagrams, there are still the same two variants as in the _AS per rack_ model, one where the spine switches provide a set of independent Ethernet planes to interconnect the ToR switches, and the other where that is done by a set of independent routers.
-
-The real difference in this model is that the compute servers, as well as the ToR switches, are all independent autonomous systems. Making this work at scale requires the use of four-byte AS numbers, as discussed in [RFC 4893](http://www.faqs.org/rfcs/rfc4893.html). Without
-four-byte AS numbering, the total number of ToRs and compute servers in a {{prodname}} fabric would be limited to the approximately five thousand available private AS numbers ([note 5](#note-5)). If four-byte AS numbers are used, there are approximately ninety-two million private AS numbers available. This should be sufficient for any given {{prodname}} fabric.
-
-The other difference between this model and the AS per rack model is that there are no route reflectors used, as all BGP peerings are eBGP. In this case, each compute server in a given rack peers with its ToR switch, which acts as an eBGP router. For two servers within the same rack to communicate, they are routed through the ToR. Therefore, each server has one peering to each ToR it is connected to, and each ToR has a peering with each compute server that it is connected to (normally, all the compute servers in the rack).
-
-The inter-ToR connectivity considerations are the same in scale and scope as in the AS per rack model.
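-
-With {{prodname}}, a per-server AS number can be assigned through the Node resource, so each compute server can form its own autonomous system. The sketch below is illustrative only; the node name, addresses, and the four-byte private AS number are hypothetical:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: Node
-metadata:
-  name: compute-server-01
-spec:
-  bgp:
-    asNumber: 4200000101 # hypothetical four-byte private AS for this server
-    ipv4Address: 10.0.1.10/24
-```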
-
-### The Downward Default model
-
-The final model is a bit different. Whereas, in the previous models, all of the routers in the infrastructure carry full routing tables and leave their AS paths intact, this model ([note 6](#note-6)) removes the AS numbers at
-each stage of the routing path. This prevents routes from other nodes in the network from being rejected as coming from the _local_ AS (which would otherwise happen, because the source and destination of the route share the same AS number).
-
-The following diagram shows the AS relationships in this model.
-
-![](/img/calico/l3-fabric-downward-default.png)
-
-In the diagram above, we are showing that all {{prodname}} nodes share the same AS number, as do all ToR switches. However, those ASs are different (_A1_ is not the same network as _A2_, even though they both share the
-same AS number _A_).
-
-Although the use of a single AS for all ToR switches, and another for all compute servers simplifies deployment (standardized configuration), the real benefit comes in the offloading of the routing tables in the ToR
-switches.
-
-In this model, each router announces all of its routes to its upstream peer (the {{prodname}} routers to their ToR, the ToRs to the spine switches). However, in return, the upstream router only announces a default route.
-In this case, a given {{prodname}} router only has routes for the endpoints that are locally hosted on it, as well as the default from the ToR. Because the ToR is the only path from that {{prodname}} node to the rest of the
-network, this matches reality. The same happens between the ToR switches and the spine. This means that the ToR only has to install the routes that are for endpoints that are hosted on its downstream {{prodname}} nodes.
-Even if we were to host 200 endpoints per {{prodname}} node, and stuff 80 {{prodname}} nodes in each rack, that would still limit the routing table on the ToR to a maximum of 16,000 entries (well within the capabilities of
-even the most modest of switches).
-
-Because the default route is originated by the spine, there is no chance for a downward-announced route to originate from the recipient's AS, which prevents the **AS puddling** problem.
-
-There is one (minor) drawback to this model: all traffic that is destined for an invalid destination (the destination IP does not exist) will be forwarded to the spine switches before it is dropped.
-
-It should also be noted that the spine switches do need to carry all of the {{prodname}} network routes, just as they do in the routed spines in the previous examples. In short, this model imposes no more load on the
-spines than they already would have, and substantially reduces the amount of routing table space used on the ToR switches. It also reduces the number of routes in the {{prodname}} nodes, but, as we have discussed
-before, that is not a concern in most deployments as the amount of memory consumed by a full routing table in {{prodname}} is a fraction of the total memory available on a modern compute server.
-
-## Recommendation
-
-The {{prodname}} team recommends the use of the [AS per rack](#the-as-per-rack-model) model if the resultant routing table size can be accommodated by the ToR and spine switches, remembering to account for projected growth.
-
-If there is concern about the route table size in the ToR switches, the {{prodname}} team recommends the [Downward Default](#the-downward-default-model) model.
-
-If there are concerns about both the spine and ToR switch route table capacity, or there is a desire to run a very simple L2 fabric to connect the {{prodname}} nodes, then the user should consider the Ethernet fabric as
-detailed in [Calico over Ethernet fabrics](l2-interconnect-fabric.mdx).
-
-If you are interested in the AS per compute server model, the {{prodname}} team would be very interested in discussing the deployment of that model.
-
-## Other options
-
-Given the way the physical and logical connectivity is laid out in this article and in the [Ethernet fabric](l2-interconnect-fabric.mdx) article, the next hop router for a given route is always directly connected to the router receiving that route. This removes the need for another protocol to distribute the next hop routes.
-
-However, in many (or most) WAN BGP networks, the routers within a given AS may not be directly adjacent. Therefore, a router may receive a route with a next hop address that it is not directly adjacent to. In those cases, an IGP, such as OSPF or IS-IS, is used by the routers within a given AS to determine the path to the BGP next hop route.
-
-There may be {{prodname}} architectures where there are similar models where the routers within a given AS are not directly adjacent. In those models, the use of an IGP in {{prodname}} may be warranted. The configuration
-of those protocols is, however, beyond the scope of this technical
-note.
-
-### IP fabric design considerations
-
-**AS puddling**
-
-The first consideration is that an AS must be kept contiguous. This means that any two nodes in a given AS must be able to communicate without traversing any other AS. If this rule is not observed, the effect is often referred to as _AS puddling_ and the network will _not_ function correctly.
-
-A corollary of that rule is that any two administrative regions that share the same AS number are in the same AS, even if that was not the desire of the designer. BGP has no way of identifying if an AS is local or foreign other than the AS number. Therefore, re-use of an AS number for two _networks_ that are not directly connected, but only connected
-through another _network_ or AS, will not work without a lot of policy changes to the BGP routers.
-
-Another corollary of that rule is that a BGP router will not propagate a route to a peer if the route has an AS in its path that is the same AS as the peer. This prevents loops from forming in the network. The effect of this is to prevent two routers in the same AS from transiting another router (whether in that AS or not).
-
-**Next hop behavior**
-
-Another consideration is based on the differences between iBGP and eBGP. BGP operates in two modes: if two routers are BGP peers but share the same AS number, they are considered to be in an _internal_ BGP (or iBGP) peering relationship. If they are members of different ASs, then they are in an _external_ (or eBGP) relationship.
-
-BGP's original design model was that all BGP routers within a given AS would know how to get to one another (via static routes, IGP ([note 7](#note-7)) routing protocols, or the like), and that routers in different ASs would
-not know how to reach one another unless they were directly connected.
-
-Based on that design point, routers in an iBGP peering relationship assume that they do not transit traffic for other iBGP routers in a given AS (i.e. A can communicate with C, and therefore will not need to route through B), and therefore, do not change the _next hop_ attribute in BGP ([note 8](#note-8)).
-
-A router with an eBGP peering, on the other hand, assumes that its eBGP peer will not know how to reach the next hop route, and will therefore substitute its own address in the next hop field. This is often referred
-to as _next hop self_.
-
-In the {{prodname}} [Ethernet fabric](l2-interconnect-fabric.mdx)
-model, all of the compute servers (the routers in a {{prodname}} network) are directly connected over one or more Ethernet network(s) and therefore are directly reachable. In this case, a router in the {{prodname}} network
-does not need to set _next hop self_ within the {{prodname}} fabric.
-
-The models we present in this article ensure that all routes that may traverse a non-{{prodname}} router are eBGP routes, and therefore _next hop self_ is automatically set correctly. If a deployment of {{prodname}} in
-an IP interconnect fabric does not satisfy that constraint, then _next hop self_ must be appropriately configured.
-
-**Route reflection**
-
-As mentioned above, BGP expects that all of the iBGP routers in a network can see (and speak) directly to one another; this is referred to as a _BGP full mesh_. In small networks this is not a problem, but it does become interesting as the number of routers increases. For example, if you have 99 BGP routers in an AS and wish to add one more, you would
-have to configure the peering to that new router on each of the 99 existing routers. Not only is this a problem at configuration time, it means that each router is maintaining 100 protocol adjacencies, which can become a drain on constrained resources in a router. While this might be _interesting_ at 100 routers, it becomes an impossible task
-with thousands or tens of thousands of routers (the potential size of a {{prodname}} network).
-
-Conveniently, large scale/Internet scale networks solved this problem almost 20 years ago by deploying BGP route reflection as described in [RFC 1966](http://www.faqs.org/rfcs/rfc1966.html). This is a technique supported by almost all BGP routers today. In a large network, a number of route reflectors ([note 9](#note-9)) are evenly distributed and each iBGP router is _peered_ with one or more route reflectors (usually 2 or 3). Each route reflector can handle tens or hundreds of route reflector clients (in {{prodname}}'s case, the compute servers), depending on the route reflector being used. Those route reflectors are, in turn, peered with each other. This means that there are an order of magnitude fewer route reflectors that need to be completely meshed, and each route reflector client is only configured to peer with 2 or 3 route reflectors. This is much easier to manage.
-
-Other route reflector architectures are possible, but those are beyond the scope of this document.
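-
-For reference, {{prodname}} can also host route reflectors on designated cluster nodes rather than using external appliances. The commands below are a sketch only; the node name, label, and cluster ID are hypothetical:
-
-```bash
-# Designate a node as an in-cluster route reflector
-calicoctl patch node rr-node-1 -p '{"spec": {"bgp": {"routeReflectorClusterID": "244.0.0.1"}}}'
-kubectl label node rr-node-1 route-reflector=true
-
-# Peer all nodes with any node carrying that label
-calicoctl apply -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: peer-with-route-reflectors
-spec:
-  nodeSelector: all()
-  peerSelector: route-reflector == 'true'
-EOF
-```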
-
-**Endpoints**
-
-The final consideration is the number of endpoints in a {{prodname}} network. In the [Ethernet fabric](l2-interconnect-fabric.mdx) case the number of endpoints is not constrained by the interconnect fabric, as the interconnect fabric does not _see_ the actual endpoints, it only _sees_ the actual vRouters, or compute servers. This is not the case in an IP fabric, however. IP networks forward by using the
-destination IP address in the packet, which, in {{prodname}}'s case, is the destination endpoint. That means that the IP fabric nodes (ToR switches and/or spine switches, for example) must know the routes to each endpoint in the network. They learn this by participating as route reflector clients in the BGP mesh, just as the {{prodname}} vRouter/compute server does.
-
-However, unlike a compute server which has a relatively unconstrained amount of memory, a physical switch is either memory constrained, or quite expensive. This means that the physical switch has a limit on how many _routes_ it can handle. The current industry standard for modern commodity switches is in the range of 128,000 routes. This means that,
-without other routing _tricks_, such as aggregation, a {{prodname}} installation that uses an IP fabric will be limited to the routing table size of its constituent network hardware, with a reasonable upper limit
-today of 128,000 endpoints.
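-
-One aggregation lever that {{prodname}} itself provides is the IP pool block size: addresses are allocated to nodes in blocks, and in many configurations it is the block routes, rather than per-endpoint /32s, that are exported to BGP peers. A hedged sketch, with a hypothetical pool CIDR and block size:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: rack-local-pool
-spec:
-  cidr: 192.168.0.0/16 # hypothetical pool
-  blockSize: 26 # each node claims /26 blocks; peers can learn block routes instead of /32s
-  natOutgoing: true
-  nodeSelector: all()
-```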
-
-### Footnotes
-
-### Note 1
-
-In {{prodname}}'s terminology, an endpoint is an IP address and interface. It could refer to a VM, a container, or even a process bound to an IP address running on a bare metal server.
-
-### Note 2
-
-This interconnect fabric provides the connectivity between the {{prodname}} (v)Router (in almost all cases, the compute servers) nodes, as well as any other elements in the fabric (_e.g._ bare metal servers, border routers, and appliances).
-
-### Note 3
-
-If there is interest in a discussion of this approach, please let us know. The {{prodname}} team could either arrange a discussion, or if there was enough interest, publish a follow-up tech note.
-
-### Note 4
-
-However those tools are available if a given {{prodname}} instance needs to utilize those policy constructs.
-
-### Note 5
-
-The two byte AS space reserves approximately the last five thousand AS numbers for private use. There is no technical reason why other AS numbers could not be used. However the re-use of global scope AS numbers within a private infrastructure is strongly discouraged. The chance for routing system failure or incorrect routing is substantial, and not restricted to the entity that is doing the reuse.
-
-### Note 6
-
-We first saw this design in a customer's lab, and thought it innovative enough to share (we asked them first, of course). Similar **AS Path Stripping** approaches are used in ISP networks, however.
-
-### Note 7
-
-An Interior Gateway Protocol is a local routing protocol that does not cross an AS boundary. The primary IGPs in use today are OSPF and IS-IS. While complex iBGP networks still use IGP routing protocols, a data center is normally a fairly simple network, even if it has many routers in it. Therefore, in the data center case, the use of an IGP can often be disposed of.
-
-### Note 8
-
-A Next hop is an attribute of a route announced by a routing protocol. In simple terms a route is defined by a _target_, or the destination that is to be reached, and a _next hop_, which is the next router in the path to reach that target. There are many other characteristics in a route, but those are well beyond the scope of this post.
-
-### Note 9
-
-A route reflector may be a physical router, a software appliance, or simply a BGP daemon. It only processes routing messages, and does not pass actual data plane traffic. However, some route reflectors are co-resident on regular routers that do pass data plane traffic. Although they may sit on one platform, the functions are distinct.
diff --git a/calico_versioned_docs/version-3.25/reference/architecture/index.mdx b/calico_versioned_docs/version-3.25/reference/architecture/index.mdx
deleted file mode 100644
index 7d3a0a9862..0000000000
--- a/calico_versioned_docs/version-3.25/reference/architecture/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Understand Calico components, network design, and the data path between workloads.
-hide_table_of_contents: true
----
-
-# Architecture
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/reference/architecture/overview.mdx b/calico_versioned_docs/version-3.25/reference/architecture/overview.mdx
deleted file mode 100644
index 1e92d34122..0000000000
--- a/calico_versioned_docs/version-3.25/reference/architecture/overview.mdx
+++ /dev/null
@@ -1,155 +0,0 @@
----
-description: Learn the basic Calico components.
----
-
-# Component architecture
-
-## {{prodname}} components
-
-The following diagram shows the required and optional {{prodname}} components for a Kubernetes, on-premises deployment with networking and network policy.
-
-![calico-components](/img/calico/architecture-calico.svg)
-
-**{{prodname}} components**
-
-- [Calico API server](#calico-api-server)
-- [Felix](#felix)
-- [BIRD](#bird)
-- [confd](#confd)
-- [Dikastes](#dikastes)
-- [CNI plugin](#cni-plugin)
-- [Datastore plugin](#datastore-plugin)
-- [IPAM plugin](#ipam-plugin)
-- [kube-controllers](#kube-controllers)
-- [Typha](#typha)
-- [calicoctl](#calicoctl)
-
-**Cloud orchestrator plugins**
-
-- [Plugins for cloud orchestrators](#plugins-for-cloud-orchestrators)
-
-## Calico API server
-
-**Main task**: Lets you manage {{prodname}} resources directly with `kubectl`.
-
-## Felix
-
-**Main task**: Programs routes and ACLs, and anything else required on the host to provide desired connectivity for the endpoints on that host. Runs on each machine that hosts endpoints. Runs as an agent daemon. [Felix resource](../resources/felixconfig.mdx).
-
-Depending on the specific orchestrator environment, Felix is responsible for:
-
-- **Interface management**
-
- Programs information about interfaces into the kernel so the kernel can correctly handle the traffic from that endpoint. In particular, it ensures that the host responds to ARP requests from each workload with the MAC of the host, and enables IP forwarding for interfaces that it manages. It also monitors interfaces to ensure that the programming is applied at the appropriate time.
-
-- **Route programming**
-
- Programs routes to the endpoints on its host into the Linux kernel FIB (Forwarding Information Base). This ensures that packets destined for those endpoints that arrive at the host are forwarded accordingly.
-
-- **ACL programming**
-
- Programs ACLs into the Linux kernel to ensure that only valid traffic can be sent between endpoints, and that endpoints cannot circumvent {{prodname}} security measures.
-
-- **State reporting**
-
- Provides network health data. In particular, it reports errors and problems when configuring its host. This data is written to the datastore so it is visible to other components and operators of the network.
-
-:::note
-
-`{{nodecontainer}}` can be run in _policy only mode_ where Felix runs without BIRD and confd. This provides policy management without route distribution between hosts, and is used for deployments like managed cloud providers. You enable this mode by setting the environment variable, `CALICO_NETWORKING_BACKEND=none` before starting the node.
-
-:::
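-
-Felix's behavior is tuned through the FelixConfiguration resource linked above. As a minimal, illustrative sketch (the field values are examples only, not recommendations):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: FelixConfiguration
-metadata:
-  name: default
-spec:
-  logSeverityScreen: Info
-  reportingInterval: 30s # how often Felix reports status to the datastore; 0s disables reporting
-```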
-
-## BIRD
-
-**Main task**: Gets routes from Felix and distributes them to BGP peers on the network for inter-host routing. Runs on each node that hosts a Felix agent. Open source, internet routing daemon. [BIRD](../configure-calico-node.mdx#content-main).
-
-The BGP client is responsible for:
-
-- **Route distribution**
-
- When Felix inserts routes into the Linux kernel FIB, the BGP client distributes them to other nodes in the deployment. This ensures efficient traffic routing for the deployment.
-
-- **BGP route reflector configuration**
-
- BGP route reflectors are often configured for large deployments rather than a standard BGP client. BGP route reflectors act as a central point for connecting BGP clients. (Standard BGP requires that every BGP client be connected to every other BGP client in a mesh topology, which is difficult to maintain.)
-
- For redundancy, you can seamlessly deploy multiple BGP route reflectors. BGP route reflectors are involved only in control of the network: no endpoint data passes through them. When the {{prodname}} BGP client advertises routes from its FIB to the route reflector, the route reflector advertises those routes out to the other nodes in the deployment.
-
-## confd
-
-**Main task**: Monitors {{prodname}} datastore for changes to BGP configuration and global defaults such as AS number, logging levels, and IPAM information. Open source, lightweight configuration management tool.
-
-Confd dynamically generates BIRD configuration files based on the updates to data in the datastore. When the configuration file changes, confd triggers BIRD to load the new files. [Configure confd](../configure-calico-node.mdx#content-main), and [confd project](https://github.com/kelseyhightower/confd).
-
-## Dikastes
-
-**Main task**: Enforces network policy for Istio service mesh. Runs on a cluster as a sidecar proxy to Istio Envoy.
-
-(Optional) {{prodname}} enforces network policy for workloads at both the Linux kernel (using iptables, L3-L4), and at L3-L7 using an Envoy sidecar proxy called Dikastes, with cryptographic authentication of requests. Using multiple enforcement points establishes the identity of the remote endpoint based on multiple criteria. The host Linux kernel enforcement protects your workloads even if the workload pod is compromised, and the Envoy proxy is bypassed.
-
-:::note
-
-Dikastes can be terminated by issuing an HTTP POST request to /terminate on the socket address specified using environment variables `DIKASTES_HTTP_BIND_ADDR` and `DIKASTES_HTTP_BIND_PORT`. This allows for graceful termination so that Kubernetes Jobs can complete successfully, and is analogous to Envoy's /quitquitquit. For example: `curl -XPOST http://127.0.0.1:7777/terminate`
-
-:::
-
-## CNI plugin
-
-**Main task**: Provides {{prodname}} networking for Kubernetes clusters.
-
-The {{prodname}} binary that presents this API to Kubernetes is called the CNI plugin, and must be installed on every node in the Kubernetes cluster. The {{prodname}} CNI plugin allows you to use {{prodname}} networking for any orchestrator that makes use of the CNI networking specification. Configured through the standard [CNI configuration mechanism](https://github.com/containernetworking/cni/blob/master/SPEC.md#network-configuration), and [{{prodname}} CNI plugin](../configure-cni-plugins.mdx).
-
-## Datastore plugin
-
-**Main task**: Increases scale by reducing each node’s impact on the datastore. It is one of the {{prodname}} [CNI plugins](../configure-cni-plugins.mdx).
-
-- **Kubernetes API datastore (kdd)**
-
- The advantages of using the Kubernetes API datastore (kdd) with {{prodname}} are:
-
- - Simpler to manage because it does not require an extra datastore
- - Use Kubernetes RBAC to control access to {{prodname}} resources
- - Use Kubernetes audit logging to generate audit logs of changes to {{prodname}} resources
-
-- **etcd**
-
- `etcd` is a consistent, highly-available distributed key-value store that provides data storage for the {{prodname}} network, and for communications between components. `etcd` is supported for protecting only non-cluster hosts (as of {{prodname}} v3.1). For completeness, `etcd` advantages are:
-
- - Lets you run {{prodname}} on non-Kubernetes platforms
- - Separation of concerns between Kubernetes and {{prodname}} resources, for example allowing you to scale the datastores independently
- - Lets you run a {{prodname}} cluster that contains more than just a single Kubernetes cluster, for example, bare metal servers with {{prodname}} host protection interworking with a Kubernetes cluster; or multiple Kubernetes clusters.
-
- [etcd admin guide](https://coreos.com/etcd/docs/latest/admin_guide.html#optimal-cluster-size)
-
-## IPAM plugin
-
-**Main task**: Uses {{prodname}}’s IP pool resource to control how IP addresses are allocated to pods within the cluster. It is the default plugin used by most {{prodname}} installations. It is one of the {{prodname}} [CNI plugins](../configure-cni-plugins.mdx).
-
-## kube-controllers
-
-**Main task**: Monitors the Kubernetes API and performs actions based on cluster state. [kube-controllers](../kube-controllers/configuration.mdx).
-
-The `tigera/kube-controllers` container includes the following controllers:
-
-- Policy controller
-- Namespace controller
-- Serviceaccount controller
-- Workloadendpoint controller
-- Node controller
-
-## Typha
-
-**Main task**: Increases scale by reducing each node’s impact on the datastore. Runs as a daemon between the datastore and instances of Felix. Installed by default, but not configured. [Typha description](https://github.com/projectcalico/calico/tree/master/typha), and [Typha component](../typha/index.mdx).
-
-Typha maintains a single datastore connection on behalf of all of its clients like Felix and confd. It caches the datastore state and deduplicates events so that they can be fanned out to many listeners. Because one Typha instance can support hundreds of Felix instances, it reduces the load on the datastore by a large factor. And because Typha can filter out updates that are not relevant to Felix, it also reduces Felix’s CPU usage. In a high-scale (100+ node) Kubernetes cluster, this is essential because the number of updates generated by the API server scales with the number of nodes.
-
-## calicoctl
-
-**Main task**: Command line interface to create, read, update, and delete {{prodname}} objects. `calicoctl` command line is available on any host with network access to the {{prodname}} datastore as either a binary or a container. Requires separate installation. [calicoctl](../calicoctl/index.mdx).
-
-## Plugins for cloud orchestrators
-
-**Main task**: Translates the orchestrator APIs for managing networks to the {{prodname}} data-model and datastore.
-
-For cloud providers, {{prodname}} has a separate plugin for each major cloud orchestration platform. This allows {{prodname}} to tightly bind to the orchestrator, so users can manage the {{prodname}} network using their orchestrator tools. When required, the orchestrator plugin provides feedback from the {{prodname}} network to the orchestrator. For example, providing information about Felix liveness, and marking specific endpoints as failed if network setup fails.
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/apply.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/apply.mdx
deleted file mode 100644
index ed64e56aa3..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/apply.mdx
+++ /dev/null
@@ -1,141 +0,0 @@
----
-description: Command to apply a policy.
----
-
-# calicoctl apply
-
-This section describes the `calicoctl apply` command.
-
-Read the [calicoctl command line interface user reference](overview.mdx)
-for a full list of calicoctl commands.
-
-:::note
-
-The available actions for a specific resource type may be
-limited based on the datastore used for {{prodname}} (etcdv3 / Kubernetes API).
-Please refer to the
-[Resources section](../resources/overview.mdx)
-for details about each resource type.
-
-:::
-
-## Displaying the help text for 'calicoctl apply' command
-
-Run `calicoctl apply --help` to display the following help menu for the
-command.
-
-```
-Usage:
- calicoctl apply --filename=<FILENAME> [--recursive] [--skip-empty] [--config=<CONFIG>] [--namespace=<NS>]
-
-Examples:
- # Apply a policy using the data in policy.yaml.
- calicoctl apply -f ./policy.yaml
-
- # Apply a policy based on the JSON passed into stdin.
- cat policy.json | calicoctl apply -f -
-
-Options:
- -h --help Show this screen.
- -f --filename=<FILENAME> Filename to use to apply the resource. If set to
- "-" loads from stdin. If filename is a directory, this command is
- invoked for each .json .yaml and .yml file within that directory,
- terminating after the first failure.
- -R --recursive Process the filename specified in -f or --filename recursively.
- --skip-empty Do not error if any files or directory specified using -f or --filename contain no
- data.
- -c --config=<CONFIG> Path to the file containing connection
- configuration in YAML or JSON format.
- [default: /etc/calico/calicoctl.cfg]
- -n --namespace=<NS> Namespace of the resource.
- Only applicable to NetworkPolicy, NetworkSet, and WorkloadEndpoint.
- Uses the default namespace if not specified.
- --context=<context> The name of the kubeconfig context to use.
-
-Description:
- The apply command is used to create or replace a set of resources by filename
- or stdin. JSON and YAML formats are accepted.
-
- Valid resource types are:
-
- * bgpConfiguration
- * bgpPeer
- * felixConfiguration
- * globalNetworkPolicy
- * hostEndpoint
- * ipPool
- * networkPolicy
- * networkSet
- * node
- * profile
- * workloadEndpoint
-
- When applying a resource:
- - if the resource does not already exist (as determined by its primary
- identifiers) then it is created
- - if the resource already exists then the specification for that resource is
- replaced in its entirety by the new resource specification.
-
- The output of the command indicates how many resources were successfully
- applied, and the error reason if an error occurred.
-
- The resources are applied in the order they are specified. In the event of a
- failure applying a specific resource it is possible to work out which
- resource failed based on the number of resources successfully applied
-
- When applying a resource to perform an update, the complete resource spec
- must be provided, it is not sufficient to supply only the fields that are
- being updated.
-```
-
-### Examples
-
-1. Apply a set of resources (of mixed type) using the data in resources.yaml.
-
- ```bash
- calicoctl apply -f ./resources.yaml
- ```
-
- Results indicate that 8 resources were successfully applied
-
- ```
- Successfully applied 8 resource(s)
- ```
-
-1. Apply two policy resources based on the JSON passed into stdin.
-
- ```bash
- cat policy.json | calicoctl apply -f -
- ```
-
- Results indicate success.
-
- ```
- Successfully applied 2 'policy' resource(s)
- ```
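-
-1. As a further illustration, a resource can be supplied inline with a shell heredoc instead of a file. The policy below is hypothetical:
-
-   ```bash
-   calicoctl apply -f - <<EOF
-   apiVersion: projectcalico.org/v3
-   kind: NetworkPolicy
-   metadata:
-     name: allow-tcp-6379
-     namespace: default
-   spec:
-     selector: role == 'database'
-     ingress:
-       - action: Allow
-         protocol: TCP
-         destination:
-           ports: [6379]
-   EOF
-   ```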
-
-### Options
-
-```
--f --filename=<FILENAME> Filename to use to apply the resource. If set to
- "-" loads from stdin.
--n --namespace=<NS> Namespace of the resource.
- Only applicable to NetworkPolicy and WorkloadEndpoint.
- Uses the default namespace if not specified.
-```
-
-### General options
-
-```
--c --config=<CONFIG> Path to the file containing connection
- configuration in YAML or JSON format.
- [default: /etc/calico/calicoctl.cfg]
-```
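-
-For reference, a minimal connection configuration file (the default `/etc/calico/calicoctl.cfg`) might look like the following when using the Kubernetes API datastore. The kubeconfig path is a placeholder:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: CalicoAPIConfig
-metadata:
-spec:
-  datastoreType: kubernetes
-  kubeconfig: /path/to/kubeconfig # placeholder; point at your cluster's kubeconfig
-```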
-
-## See also
-
-- [Installing calicoctl](../../operations/calicoctl/install.mdx)
-- [Resources](../resources/overview.mdx) for details on all valid resources, including file format
- and schema
-- [NetworkPolicy](../resources/networkpolicy.mdx) for details on the {{prodname}} selector-based policy model
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/convert.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/convert.mdx
deleted file mode 100644
index 914388ea44..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/convert.mdx
+++ /dev/null
@@ -1,96 +0,0 @@
----
-description: Command to convert contents of policy.yaml to v3 policy.
----
-
-# calicoctl convert
-
-This section describes the `calicoctl convert` command.
-
-Read the [calicoctl command line interface user reference](overview.mdx)
-for a full list of calicoctl commands.
-
-:::note
-
-The available actions for a specific resource type may be
-limited based on the datastore used for {{prodname}} (etcdv3 / Kubernetes API).
-Please refer to the
-[Resources section](../resources/overview.mdx)
-for details about each resource type.
-
-:::
-
-## Displaying the help text for 'calicoctl convert' command
-
-Run `calicoctl convert --help` to display the following help menu for the
-command.
-
-```
-Usage:
- calicoctl convert --filename=
- [--output=