Skip to content

Commit

Permalink
cilium e2e: deploy k8s without kube-proxy
Browse files (browse the repository at this point in the history)
  • Loading branch information
zhangzujian committed Mar 31, 2022
1 parent 902315e commit 3b4ac99
Show file tree
Hide file tree
Showing 5 changed files with 61 additions and 42 deletions.
7 changes: 2 additions & 5 deletions .github/workflows/build-x86-image.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1139,7 +1139,7 @@ jobs:
run: |
sudo pip3 install j2cli
sudo pip3 install "j2cli[yaml]"
sudo PATH=~/.local/bin:$PATH make kind-init-iptables
sudo PATH=~/.local/bin:$PATH make kind-init-cilium
- name: Download image
uses: actions/download-artifact@v2
Expand All @@ -1161,9 +1161,7 @@ jobs:
timeout_minutes: 10
max_attempts: 3
shell: bash
command: |
sudo PATH=~/.local/bin:$PATH make kind-install-cilium
docker exec kube-ovn-control-plane bash -c "ls -al /etc/cni/net.d/"
command: sudo PATH=~/.local/bin:$PATH make kind-install-cilium

- name: Set up Go 1.x
uses: actions/setup-go@v2
Expand All @@ -1178,7 +1176,6 @@ jobs:
sudo cp -r /root/.kube/ /home/runner/.kube/
sudo chmod -R 777 /home/runner/.kube/
docker exec kube-ovn-control-plane bash -c "ls -al /etc/cni/net.d/"
kubectl rollout status -n kube-system ds cilium
kubectl get pods -A
make e2e-ovn-ebpf
Expand Down
58 changes: 35 additions & 23 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,9 @@ GOLDFLAGS = "-w -s -extldflags '-z now' -X github.com/kubeovn/kube-ovn/versions.
MULTUS_IMAGE = ghcr.io/k8snetworkplumbingwg/multus-cni:stable
MULTUS_YAML = https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset.yml

CILIUM_VERSION = 1.10.9
CILIUM_IMAGE_REPO = quay.io/cilium/cilium

# ARCH could be amd64,arm64
ARCH = amd64

Expand Down Expand Up @@ -146,6 +149,13 @@ kind-init-dual: kind-clean
docker exec kube-ovn-worker sysctl -w net.ipv6.conf.all.disable_ipv6=0
docker exec kube-ovn-control-plane sysctl -w net.ipv6.conf.all.disable_ipv6=0

# Create a fresh kind cluster for the Cilium-chaining e2e job.
# kube-proxy is not deployed (kube_proxy_mode=none): service handling is
# expected to come from Cilium's kubeProxyReplacement, installed later by
# the kind-install-cilium target.
# NOTE(review): this is a scraped diff rendering — the leading hard TABs
# that make requires on recipe lines have been lost; restore them in the
# real Makefile.
.PHONY: kind-init-cilium
kind-init-cilium: kind-clean
kind delete cluster --name=kube-ovn
# Render yamls/kind.yaml from the j2 template with kube-proxy disabled,
# single-stack IPv4, no HA.
kube_proxy_mode=none ip_family=ipv4 ha=false single=false j2 yamls/kind.yaml.j2 -o yamls/kind.yaml
kind create cluster --config yamls/kind.yaml --name kube-ovn
# Dump node status for CI logs.
kubectl describe no

.PHONY: kind-install
kind-install:
kind load docker-image --name kube-ovn $(REGISTRY)/kube-ovn:$(RELEASE_TAG)
Expand Down Expand Up @@ -280,7 +290,9 @@ kind-install-underlay-logical-gateway-dual:

# Load the Multus CNI image into the kind cluster and install its daemonset.
# NOTE(review): this span is a scraped diff with +/- markers stripped — the
# unconditional `docker pull` line and the if-block after it are the removed
# and added sides of the same hunk; only the conditional form (pull only when
# the image is not already cached locally) belongs in the final Makefile.
.PHONY: kind-install-multus
kind-install-multus:
docker pull "$(MULTUS_IMAGE)"
if ! docker images --format "{{.Repository}}:{{.Tag}}" | grep -qw "^$(MULTUS_IMAGE)$$"; then \
docker pull "$(MULTUS_IMAGE)"; \
fi
kind load docker-image --name kube-ovn "$(MULTUS_IMAGE)"
kubectl apply -f "$(MULTUS_YAML)"
# Block until the Multus daemonset reports all pods ready.
kubectl -n kube-system rollout status ds kube-multus-ds
Expand All @@ -302,36 +314,36 @@ kind-install-ic:

# Install Cilium (generic-veth chaining mode) plus kube-ovn on the kind
# cluster created by kind-init-cilium.
# NOTE(review): this span is a scraped side-by-side diff with the +/- markers
# stripped, so OLD (removed) and NEW (added) recipe lines are interleaved
# below. Only one version belongs in the real Makefile; the "(old)"/"(new)"
# markers are this reviewer's best reading — confirm against the actual commit.
.PHONY: kind-install-cilium
kind-install-cilium:
# (old) previous recipe installed kube-ovn FIRST, then Cilium:
kind load docker-image --name kube-ovn $(REGISTRY)/kube-ovn:$(RELEASE_TAG)
ENABLE_SSL=true ENABLE_LB=false ENABLE_NP=false dist/images/install.sh
kubectl describe no
$(eval TAINTS = $(shell kubectl get no kube-ovn-control-plane -o jsonpath={.spec.taints}))
$(eval MASTER_TAINT = "node-role.kubernetes.io/master")
@if [[ "${TAINTS}" =~ .*"${MASTER_TAINT}".* ]]; then \
kubectl taint node kube-ovn-control-plane node-role.kubernetes.io/master:NoSchedule-; \
# (new) resolve the control-plane node address once; passed to helm as
# k8sServiceHost so Cilium reaches the API server directly (no kube-proxy).
$(eval KUBERNETES_SERVICE_HOST = $(shell kubectl get nodes kube-ovn-control-plane -o jsonpath='{.status.addresses[0].address}'))
# (new) pull the pinned Cilium image only when not already cached locally.
if ! docker images --format "{{.Repository}}:{{.Tag}}" | grep -qw "^$(CILIUM_IMAGE_REPO):v$(CILIUM_VERSION)$$"; then \
docker pull "$(CILIUM_IMAGE_REPO):v$(CILIUM_VERSION)"; \
fi
kind load docker-image --name kube-ovn "$(CILIUM_IMAGE_REPO):v$(CILIUM_VERSION)"
kubectl apply -f yamls/chaining.yaml
# (new) renumber the kube-ovn CNI conflist from 01- to 10- — presumably so a
# different config file wins the lexical ordering in /etc/cni/net.d; TODO confirm.
kind get nodes --name kube-ovn | while read node; do \
docker exec $$node mv /etc/cni/net.d/01-kube-ovn.conflist /etc/cni/net.d/10-kube-ovn.conflist; \
done
# (old) previous address lookup, superseded by KUBERNETES_SERVICE_HOST above.
$(eval CONTROLLERIP = $(shell kubectl get nodes kube-ovn-control-plane -ojsonpath='{.status.addresses[0].address}'))
helm repo add cilium https://helm.cilium.io/
# The two `helm install` invocations below are the OLD (hard-coded 1.10.5)
# and NEW ($(CILIUM_VERSION), kubeProxyReplacement=strict) variants fused
# together by the diff rendering.
helm install cilium cilium/cilium --version 1.10.5 \
helm install cilium cilium/cilium \
--version $(CILIUM_VERSION) \
--namespace=kube-system \
--set cni.chainingMode=generic-veth \
--set cni.customConf=true \
--set cni.configMap=cni-configuration \
--set k8sServiceHost=$(KUBERNETES_SERVICE_HOST) \
--set k8sServicePort=6443 \
--set tunnel=disabled \
--set enableIPv4Masquerade=false \
--set enableIdentityMark=false \
--set kubeProxyReplacement=strict \
--set k8sServiceHost=$(CONTROLLERIP) \
--set k8sServicePort=6443
# (old) manual kube-proxy removal + iptables scrub, no longer needed when the
# cluster is created without kube-proxy (kind-init-cilium):
kubectl -n kube-system delete ds kube-proxy
kubectl -n kube-system delete cm kube-proxy
kind get nodes --name kube-ovn | while read node; do \
docker exec $$node bash -c "iptables-save | grep -v KUBE | iptables-restore"; \
done
--set cni.chainingMode=generic-veth \
--set cni.customConf=true \
--set cni.configMap=cni-configuration
# (new) wait for the Cilium daemonset to be ready before installing kube-ovn.
kubectl -n kube-system rollout status ds cilium --timeout 300s

# (new) remove the master NoSchedule taint, if present, so workloads can
# schedule on the control-plane node.
$(eval TAINTS = $(shell kubectl get no kube-ovn-control-plane -o jsonpath={.spec.taints}))
$(eval MASTER_TAINT = "node-role.kubernetes.io/master")
@if [[ "${TAINTS}" =~ .*"${MASTER_TAINT}".* ]]; then \
kubectl taint node kube-ovn-control-plane node-role.kubernetes.io/master:NoSchedule-; \
fi

# (new) install kube-ovn LAST; WITHOUT_KUBE_PROXY=true makes install.sh's
# diagnose path skip its kube-proxy health check (see install.sh hunk below).
kind load docker-image --name kube-ovn $(REGISTRY)/kube-ovn:$(RELEASE_TAG)
ENABLE_SSL=true ENABLE_LB=false ENABLE_NP=false WITHOUT_KUBE_PROXY=true bash dist/images/install.sh
kubectl describe no

.PHONY: kind-reload
kind-reload:
Expand Down
21 changes: 14 additions & 7 deletions dist/images/install.sh
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ VLAN_NIC=${VLAN_NIC:-}
# Install-time knobs; each defaults here and may be overridden from the
# caller's environment.
HW_OFFLOAD=${HW_OFFLOAD:-false}
ENABLE_LB=${ENABLE_LB:-true}
ENABLE_NP=${ENABLE_NP:-true}
# When "true", the script's diagnose path skips its kube-proxy check —
# set by `make kind-install-cilium` for clusters deployed without kube-proxy.
WITHOUT_KUBE_PROXY=${WITHOUT_KUBE_PROXY:-false}
ENABLE_EXTERNAL_VPC=${ENABLE_EXTERNAL_VPC:-true}
CNI_CONFIG_PRIORITY=${CNI_CONFIG_PRIORITY:-01}
# The NIC backing the container network; may be a NIC name or a group of regexes.
Expand Down Expand Up @@ -2695,7 +2696,7 @@ echo "-------------------------------"
echo ""

echo "[Step 4/6] Delete pod that not in host network mode"
for ns in $(kubectl get ns --no-headers -o custom-columns=NAME:.metadata.name); do
for ns in $(kubectl get ns --no-headers -o custom-columns=NAME:.metadata.name); do
for pod in $(kubectl get pod --no-headers -n "$ns" --field-selector spec.restartPolicy=Always -o custom-columns=NAME:.metadata.name,HOST:spec.hostNetwork | awk '{if ($2!="true") print $1}'); do
kubectl delete pod "$pod" -n "$ns" --ignore-not-found
done
Expand All @@ -2714,6 +2715,11 @@ cat <<\EOF > /usr/local/bin/kubectl-ko
set -euo pipefail
KUBE_OVN_NS=kube-system
EOF
cat <<EOF >> /usr/local/bin/kubectl-ko
WITHOUT_KUBE_PROXY=${WITHOUT_KUBE_PROXY}
EOF
cat <<\EOF >> /usr/local/bin/kubectl-ko
OVN_NB_POD=
OVN_SB_POD=
KUBE_OVN_VERSION=
Expand Down Expand Up @@ -3001,7 +3007,10 @@ diagnose(){
kubectl ko nbctl list acl
kubectl ko sbctl show
checkKubeProxy
if [ "${WITHOUT_KUBE_PROXY}" = "false" ]; then
checkKubeProxy
fi
checkDeployment ovn-central
checkDeployment kube-ovn-controller
checkDaemonSet kube-ovn-cni
Expand Down Expand Up @@ -3116,8 +3125,9 @@ checkDeployment(){
}
checkKubeProxy(){
dsMode=`kubectl get ds -n kube-system | grep kube-proxy || true`
if [ -z "$dsMode" ]; then
if kubectl get ds -n kube-system --no-headers -o custom-columns=NAME:.metadata.name | grep -qw ^kube-proxy; then
checkDaemonSet kube-proxy
else
nodeIps=`kubectl get node -o wide | grep -v "INTERNAL-IP" | awk '{print $6}'`
for node in $nodeIps
do
Expand All @@ -3127,8 +3137,6 @@ checkKubeProxy(){
exit 1
fi
done
else
checkDaemonSet kube-proxy
fi
echo "kube-proxy ready"
}
Expand Down Expand Up @@ -3433,7 +3441,6 @@ case $subcommand in
showHelp
;;
esac
EOF

chmod +x /usr/local/bin/kubectl-ko
Expand Down
13 changes: 8 additions & 5 deletions dist/images/kubectl-ko
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
set -euo pipefail

KUBE_OVN_NS=kube-system
WITHOUT_KUBE_PROXY=false
OVN_NB_POD=
OVN_SB_POD=
KUBE_OVN_VERSION=
Expand Down Expand Up @@ -289,7 +290,10 @@ diagnose(){
kubectl ko nbctl list acl
kubectl ko sbctl show

checkKubeProxy
if [ "${WITHOUT_KUBE_PROXY}" = "false" ]; then
checkKubeProxy
fi

checkDeployment ovn-central
checkDeployment kube-ovn-controller
checkDaemonSet kube-ovn-cni
Expand Down Expand Up @@ -404,8 +408,9 @@ checkDeployment(){
}

checkKubeProxy(){
dsMode=`kubectl get ds -n kube-system | grep kube-proxy || true`
if [ -z "$dsMode" ]; then
if kubectl get ds -n kube-system --no-headers -o custom-columns=NAME:.metadata.name | grep -qw ^kube-proxy; then
checkDaemonSet kube-proxy
else
nodeIps=`kubectl get node -o wide | grep -v "INTERNAL-IP" | awk '{print $6}'`
for node in $nodeIps
do
Expand All @@ -415,8 +420,6 @@ checkKubeProxy(){
exit 1
fi
done
else
checkDaemonSet kube-proxy
fi
echo "kube-proxy ready"
}
Expand Down
4 changes: 2 additions & 2 deletions docs/IntegrateCiliumIntoKubeOVN.md
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ kubectl apply -f ./chaining.yaml

```bash
helm repo add cilium https://helm.cilium.io/
helm install cilium cilium/cilium --version 1.10.5 \
helm install cilium cilium/cilium --version 1.10.9 \
--namespace=kube-system \
--set cni.chainingMode=generic-veth \
--set cni.customConf=true \
Expand Down Expand Up @@ -176,7 +176,7 @@ root@cilium-small-x86-01:~# curl 10.110.121.109:9080
##### 3. Enabling Cilium replacement

```bash
helm upgrade cilium cilium/cilium --version 1.10.5 \
helm upgrade cilium cilium/cilium --version 1.10.9 \
--namespace kube-system \
--set cni.chainingMode=generic-veth \
--set cni.customConf=true \
Expand Down

0 comments on commit 3b4ac99

Please sign in to comment.