diff --git a/karmada-HA-workload-example1/finish.md b/karmada-HA-workload-example1/finish.md
new file mode 100644
index 0000000..1aa0b5b
--- /dev/null
+++ b/karmada-HA-workload-example1/finish.md
@@ -0,0 +1,3 @@
+**Summary**
+
+In this scenario, we learned how to deploy workloads across multiple clusters by using a PropagationPolicy to duplicate the nginx deployment.
diff --git a/karmada-HA-workload-example1/foreground.sh b/karmada-HA-workload-example1/foreground.sh
new file mode 100644
index 0000000..0ef70bc
--- /dev/null
+++ b/karmada-HA-workload-example1/foreground.sh
@@ -0,0 +1,147 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# variable definitions
+kind_version=v0.17.0
+host_cluster_ip=172.30.1.2   # host node where Karmada is located
+member_cluster_ip=172.30.2.2 # node where the kind member clusters run
+local_ip=127.0.0.1
+KUBECONFIG_PATH=${KUBECONFIG_PATH:-"${HOME}/.kube"}
+
+function installKind() {
+  cat << EOF > installKind.sh
+wget https://github.com/kubernetes-sigs/kind/releases/download/${kind_version}/kind-linux-amd64
+chmod +x kind-linux-amd64
+sudo mv kind-linux-amd64 /usr/local/bin/kind
+EOF
+}
+
+function createCluster() {
+  cat << EOF > createCluster.sh
+kind create cluster --name=member1 --config=cluster1.yaml
+mv $HOME/.kube/config ~/config-member1
+kind create cluster --name=member2 --config=cluster2.yaml
+mv $HOME/.kube/config ~/config-member2
+KUBECONFIG=~/config-member1:~/config-member2 kubectl config view --merge --flatten >> ${KUBECONFIG_PATH}/config
+# rewrite the API server address and copy the member kubeconfigs back to the host node
+sed -i "s/${local_ip}/${member_cluster_ip}/g" config-member1
+scp config-member1 root@${host_cluster_ip}:$HOME/.kube/config-member1
+sed -i "s/${local_ip}/${member_cluster_ip}/g" config-member2
+scp config-member2 root@${host_cluster_ip}:$HOME/.kube/config-member2
+EOF
+}
+
+function cluster1Config() {
+  touch cluster1.yaml
+  cat << EOF > cluster1.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+networking:
+  apiServerAddress: "${member_cluster_ip}"
+  apiServerPort: 6443
+EOF
+}
+
+function cluster2Config() {
+  cat << EOF > cluster2.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+networking:
+  apiServerAddress: "${member_cluster_ip}"
+  apiServerPort: 6444
+EOF
+}
+
+function nginxDeployment() {
+  cat << EOF > nginxDeployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx
+  labels:
+    app: nginx
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - image: nginx
+        name: nginx
+EOF
+}
+
+function propagationPolicy() {
+  cat << EOF > propagationPolicy.yaml
+apiVersion: policy.karmada.io/v1alpha1
+kind: PropagationPolicy
+metadata:
+  name: nginx-propagation
+spec:
+  resourceSelectors:
+    - apiVersion: apps/v1
+      kind: Deployment
+      name: nginx
+  placement:
+    clusterAffinity:
+      clusterNames:
+        - kind-member1
+        - kind-member2
+    replicaScheduling:
+      replicaSchedulingType: Duplicated
+EOF
+}
+
+
+function copyConfigFilesToNode() {
+  scp installKind.sh root@${member_cluster_ip}:~
+  scp createCluster.sh root@${member_cluster_ip}:~
+  scp cluster1.yaml root@${member_cluster_ip}:~
+  scp cluster2.yaml root@${member_cluster_ip}:~
+}
+
+kubectl delete node node01
+kubectl taint node controlplane node-role.kubernetes.io/control-plane:NoSchedule-
+
+# generate the kind install script, cluster creation script, and cluster configs
+installKind
+createCluster
+cluster1Config
+cluster2Config
+copyConfigFilesToNode
+
+# generate nginx manifests
+mkdir nginx
+cd nginx
+nginxDeployment
+propagationPolicy
+
+# create the member clusters on the node01 machine
+ssh root@${member_cluster_ip} "bash ~/installKind.sh" &
+sleep 10
+ssh root@${member_cluster_ip} "bash ~/createCluster.sh" &
+sleep 90
+
+# install karmadactl
+curl -s https://raw.githubusercontent.com/karmada-io/karmada/master/hack/install-cli.sh | sudo bash
+
+# init karmada
+karmadactl init
+
+# join member clusters
+MEMBER_CLUSTER_NAME=kind-member1
+karmadactl --kubeconfig /etc/karmada/karmada-apiserver.config join ${MEMBER_CLUSTER_NAME} --cluster-kubeconfig=$HOME/.kube/config-member1 --cluster-context=kind-member1
+MEMBER_CLUSTER_NAME=kind-member2
+karmadactl --kubeconfig /etc/karmada/karmada-apiserver.config join ${MEMBER_CLUSTER_NAME} --cluster-kubeconfig=$HOME/.kube/config-member2 --cluster-context=kind-member2
+
+# clear the screen
+clear
diff --git a/karmada-HA-workload-example1/image/success.png b/karmada-HA-workload-example1/image/success.png
new file mode 100644
index 0000000..7edb1f7
Binary files /dev/null and b/karmada-HA-workload-example1/image/success.png differ
diff --git a/karmada-HA-workload-example1/index.json b/karmada-HA-workload-example1/index.json
new file mode 100644
index 0000000..2f303c0
--- /dev/null
+++ b/karmada-HA-workload-example1/index.json
@@ -0,0 +1,37 @@
+{
+  "title": "HA workload deployment 1",
+  "description": "Propagate a workload through Karmada's Duplicated mode",
+  "details": {
+    "intro": {
+      "text": "intro.md",
+      "foreground": "foreground.sh"
+    },
+    "steps": [
+      {
+        "title": "Check that the member clusters have joined",
+        "text": "step1/text.md",
+        "verify": "step1/verify.sh"
+      },
+      {
+        "title": "Create deployment",
+        "text": "step2/text.md",
+        "verify": "step2/verify.sh"
+      },
+      {
+        "title": "Create PropagationPolicy and propagate the deployment",
+        "text": "step3/text.md",
+        "verify": "step3/verify.sh"
+      },
+      {
+        "title": "Check the status and quantity distribution of pods and deployments",
+        "text": "step4/text.md"
+      }
+    ],
+    "finish": {
+      "text": "finish.md"
+    }
+  },
+  "backend": {
+    "imageid": "kubernetes-kubeadm-2nodes"
+  }
+}
diff --git a/karmada-HA-workload-example1/intro.md b/karmada-HA-workload-example1/intro.md
new file mode 100644
index 0000000..5721939
--- /dev/null
+++ b/karmada-HA-workload-example1/intro.md
@@ -0,0 +1,7 @@
+# What is Karmada?
+
+Karmada (Kubernetes Armada) is a Kubernetes management system that enables you to run your cloud-native applications across multiple Kubernetes clusters and clouds, with no changes to your applications. By speaking Kubernetes-native APIs and providing advanced scheduling capabilities, Karmada enables truly open, multi-cloud Kubernetes.
+
+Karmada aims to provide turnkey automation for multi-cluster application management in multi-cloud and hybrid cloud scenarios, with key features such as centralized multi-cloud management, high availability, failure recovery, and traffic scheduling.
+
+In this scenario, we'll learn how to deploy workloads across multiple clusters using a PropagationPolicy to duplicate the nginx deployment.
diff --git a/karmada-HA-workload-example1/step1/text.md b/karmada-HA-workload-example1/step1/text.md
new file mode 100644
index 0000000..4b79b23
--- /dev/null
+++ b/karmada-HA-workload-example1/step1/text.md
@@ -0,0 +1,14 @@
+### Background
+
+1. The kubeconfig files for the host cluster, member1 cluster, and member2 cluster are located in the $HOME/.kube directory. They are named config, config-member1, and config-member2, respectively.
+
+   ```shell
+   $HOME/.kube/config
+   $HOME/.kube/config-member1
+   $HOME/.kube/config-member2
+   ```
+2. Check whether the member clusters have joined:
+
+   RUN `kubectl --kubeconfig /etc/karmada/karmada-apiserver.config get cluster`{{exec}}
+
+**Note**: Initializing the testing environment may take a few minutes.
diff --git a/karmada-HA-workload-example1/step1/verify.sh b/karmada-HA-workload-example1/step1/verify.sh
new file mode 100644
index 0000000..2f53c33
--- /dev/null
+++ b/karmada-HA-workload-example1/step1/verify.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+kubectl --kubeconfig /etc/karmada/karmada-apiserver.config get cluster
diff --git a/karmada-HA-workload-example1/step2/text.md b/karmada-HA-workload-example1/step2/text.md
new file mode 100644
index 0000000..84050b0
--- /dev/null
+++ b/karmada-HA-workload-example1/step2/text.md
@@ -0,0 +1,5 @@
+### Create deployment
+
+Create a deployment named nginx:
+
+RUN `kubectl --kubeconfig /etc/karmada/karmada-apiserver.config apply -f ~/nginx/nginxDeployment.yaml`{{exec}}
diff --git a/karmada-HA-workload-example1/step2/verify.sh b/karmada-HA-workload-example1/step2/verify.sh
new file mode 100644
index 0000000..ca91f78
--- /dev/null
+++ b/karmada-HA-workload-example1/step2/verify.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+kubectl --kubeconfig /etc/karmada/karmada-apiserver.config get deployment nginx
diff --git a/karmada-HA-workload-example1/step3/text.md b/karmada-HA-workload-example1/step3/text.md
new file mode 100644
index 0000000..d8aad94
--- /dev/null
+++ b/karmada-HA-workload-example1/step3/text.md
@@ -0,0 +1,3 @@
+### Create a PropagationPolicy and deploy nginx to specific clusters
+
+RUN `kubectl --kubeconfig /etc/karmada/karmada-apiserver.config create -f ~/nginx/propagationPolicy.yaml`{{exec}}
diff --git a/karmada-HA-workload-example1/step3/verify.sh b/karmada-HA-workload-example1/step3/verify.sh
new file mode 100644
index 0000000..077dcc6
--- /dev/null
+++ b/karmada-HA-workload-example1/step3/verify.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+kubectl --kubeconfig /etc/karmada/karmada-apiserver.config get PropagationPolicy nginx-propagation
diff --git a/karmada-HA-workload-example1/step4/text.md b/karmada-HA-workload-example1/step4/text.md
new file mode 100644
index 0000000..b5fa453
--- /dev/null
+++ b/karmada-HA-workload-example1/step4/text.md
@@ -0,0 +1,5 @@
+### Check the status and quantity distribution of pods and deployments
+
+RUN `karmadactl --kubeconfig /etc/karmada/karmada-apiserver.config get deployment`{{exec}}
+
+RUN `karmadactl --kubeconfig /etc/karmada/karmada-apiserver.config get pods`{{exec}}
diff --git a/karmada-HA-workload-example2/finish.md b/karmada-HA-workload-example2/finish.md
new file mode 100644
index 0000000..57f8b95
--- /dev/null
+++ b/karmada-HA-workload-example2/finish.md
@@ -0,0 +1,3 @@
+**Summary**
+
+In this scenario, we learned how to deploy workloads across multiple clusters by using a PropagationPolicy with a static weight configuration to divide the nginx deployment between clusters.
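For step 4 of either scenario, the placement reported by `karmadactl get` can also be cross-checked directly against each member cluster. A minimal sketch, assuming the `config-member1` and `config-member2` kubeconfig files that `foreground.sh` copies into `$HOME/.kube` on the host node:

```bash
# Query each member cluster directly; in the Duplicated scenario both clusters
# should report the full nginx deployment (1/1 ready).
kubectl --kubeconfig $HOME/.kube/config-member1 get deployment nginx
kubectl --kubeconfig $HOME/.kube/config-member2 get deployment nginx
```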
diff --git a/karmada-HA-workload-example2/foreground.sh b/karmada-HA-workload-example2/foreground.sh
new file mode 100644
index 0000000..b8cbf6a
--- /dev/null
+++ b/karmada-HA-workload-example2/foreground.sh
@@ -0,0 +1,156 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# variable definitions
+kind_version=v0.17.0
+host_cluster_ip=172.30.1.2   # host node where Karmada is located
+member_cluster_ip=172.30.2.2 # node where the kind member clusters run
+local_ip=127.0.0.1
+KUBECONFIG_PATH=${KUBECONFIG_PATH:-"${HOME}/.kube"}
+
+function installKind() {
+  cat << EOF > installKind.sh
+wget https://github.com/kubernetes-sigs/kind/releases/download/${kind_version}/kind-linux-amd64
+chmod +x kind-linux-amd64
+sudo mv kind-linux-amd64 /usr/local/bin/kind
+EOF
+}
+
+function createCluster() {
+  cat << EOF > createCluster.sh
+kind create cluster --name=member1 --config=cluster1.yaml
+mv $HOME/.kube/config ~/config-member1
+kind create cluster --name=member2 --config=cluster2.yaml
+mv $HOME/.kube/config ~/config-member2
+KUBECONFIG=~/config-member1:~/config-member2 kubectl config view --merge --flatten >> ${KUBECONFIG_PATH}/config
+# rewrite the API server address and copy the member kubeconfigs back to the host node
+sed -i "s/${local_ip}/${member_cluster_ip}/g" config-member1
+scp config-member1 root@${host_cluster_ip}:$HOME/.kube/config-member1
+sed -i "s/${local_ip}/${member_cluster_ip}/g" config-member2
+scp config-member2 root@${host_cluster_ip}:$HOME/.kube/config-member2
+EOF
+}
+
+function cluster1Config() {
+  cat << EOF > cluster1.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+networking:
+  apiServerAddress: "${member_cluster_ip}"
+  apiServerPort: 6443
+EOF
+}
+
+function cluster2Config() {
+  cat << EOF > cluster2.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+networking:
+  apiServerAddress: "${member_cluster_ip}"
+  apiServerPort: 6444
+EOF
+}
+
+function nginxDeployment() {
+  cat << EOF > nginxDeployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx
+  labels:
+    app: nginx
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - image: nginx
+        name: nginx
+EOF
+}
+
+function propagationPolicy() {
+  cat << EOF > propagationPolicy.yaml
+apiVersion: policy.karmada.io/v1alpha1
+kind: PropagationPolicy
+metadata:
+  name: nginx-propagation
+spec:
+  resourceSelectors:
+    - apiVersion: apps/v1
+      kind: Deployment
+      name: nginx
+  placement:
+    clusterAffinity:
+      clusterNames:
+        - kind-member1
+        - kind-member2
+    replicaScheduling:
+      replicaDivisionPreference: Weighted
+      replicaSchedulingType: Divided
+      weightPreference:
+        staticWeightList:
+          - targetCluster:
+              clusterNames:
+                - kind-member1
+            weight: 2
+          - targetCluster:
+              clusterNames:
+                - kind-member2
+            weight: 1
+EOF
+}
+
+
+function copyConfigFilesToNode() {
+  scp installKind.sh root@${member_cluster_ip}:~
+  scp createCluster.sh root@${member_cluster_ip}:~
+  scp cluster1.yaml root@${member_cluster_ip}:~
+  scp cluster2.yaml root@${member_cluster_ip}:~
+}
+
+kubectl delete node node01
+kubectl taint node controlplane node-role.kubernetes.io/control-plane:NoSchedule-
+
+# generate the kind install script, cluster creation script, and cluster configs
+installKind
+createCluster
+cluster1Config
+cluster2Config
+copyConfigFilesToNode
+
+# generate nginx manifests
+mkdir nginx
+cd nginx
+nginxDeployment
+propagationPolicy
+
+# create the member clusters on the node01 machine
+ssh root@${member_cluster_ip} "bash ~/installKind.sh" &
+sleep 10
+ssh root@${member_cluster_ip} "bash ~/createCluster.sh" &
+sleep 90
+
+# install karmadactl
+curl -s https://raw.githubusercontent.com/karmada-io/karmada/master/hack/install-cli.sh | sudo bash
+
+# init karmada
+karmadactl init
+
+# join member clusters
+MEMBER_CLUSTER_NAME=kind-member1
+karmadactl --kubeconfig /etc/karmada/karmada-apiserver.config join ${MEMBER_CLUSTER_NAME} --cluster-kubeconfig=$HOME/.kube/config-member1 --cluster-context=kind-member1
+MEMBER_CLUSTER_NAME=kind-member2
+karmadactl --kubeconfig /etc/karmada/karmada-apiserver.config join ${MEMBER_CLUSTER_NAME} --cluster-kubeconfig=$HOME/.kube/config-member2 --cluster-context=kind-member2
+
+# clear the screen
+clear
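With `replicas: 3` in the deployment above and static weights of 2:1, the Divided scheduling splits the replicas proportionally: two land on kind-member1 and one on kind-member2. A back-of-the-envelope sketch of that arithmetic (the Karmada scheduler computes this internally; its exact remainder handling may differ from this simplified floor-based version):

```bash
# Illustrative only: approximate the 2:1 static-weight split for 3 replicas.
replicas=3
weight_member1=2
weight_member2=1
total_weight=$((weight_member1 + weight_member2))
member1_replicas=$((replicas * weight_member1 / total_weight))   # 3 * 2 / 3 = 2
member2_replicas=$((replicas - member1_replicas))                # remainder  = 1
echo "kind-member1: ${member1_replicas}, kind-member2: ${member2_replicas}"
```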
diff --git a/karmada-HA-workload-example2/image/success.png b/karmada-HA-workload-example2/image/success.png
new file mode 100644
index 0000000..7edb1f7
Binary files /dev/null and b/karmada-HA-workload-example2/image/success.png differ
diff --git a/karmada-HA-workload-example2/index.json b/karmada-HA-workload-example2/index.json
new file mode 100644
index 0000000..2664aae
--- /dev/null
+++ b/karmada-HA-workload-example2/index.json
@@ -0,0 +1,37 @@
+{
+  "title": "HA workload deployment 2",
+  "description": "Propagate a workload through Karmada's Divided mode",
+  "details": {
+    "intro": {
+      "text": "intro.md",
+      "foreground": "foreground.sh"
+    },
+    "steps": [
+      {
+        "title": "Check that the member clusters have joined",
+        "text": "step1/text.md",
+        "verify": "step1/verify.sh"
+      },
+      {
+        "title": "Create deployment",
+        "text": "step2/text.md",
+        "verify": "step2/verify.sh"
+      },
+      {
+        "title": "Create PropagationPolicy and propagate the deployment",
+        "text": "step3/text.md",
+        "verify": "step3/verify.sh"
+      },
+      {
+        "title": "Check the status and quantity distribution of pods and deployments",
+        "text": "step4/text.md"
+      }
+    ],
+    "finish": {
+      "text": "finish.md"
+    }
+  },
+  "backend": {
+    "imageid": "kubernetes-kubeadm-2nodes"
+  }
+}
diff --git a/karmada-HA-workload-example2/intro.md b/karmada-HA-workload-example2/intro.md
new file mode 100644
index 0000000..23a29eb
--- /dev/null
+++ b/karmada-HA-workload-example2/intro.md
@@ -0,0 +1,7 @@
+# What is Karmada?
+
+Karmada (Kubernetes Armada) is a Kubernetes management system that enables you to run your cloud-native applications across multiple Kubernetes clusters and clouds, with no changes to your applications. By speaking Kubernetes-native APIs and providing advanced scheduling capabilities, Karmada enables truly open, multi-cloud Kubernetes.
+
+Karmada aims to provide turnkey automation for multi-cluster application management in multi-cloud and hybrid cloud scenarios, with key features such as centralized multi-cloud management, high availability, failure recovery, and traffic scheduling.
+
+In this scenario, we'll learn how to deploy workloads across multiple clusters using a PropagationPolicy with a static weight configuration to divide the nginx deployment.
diff --git a/karmada-HA-workload-example2/step1/text.md b/karmada-HA-workload-example2/step1/text.md
new file mode 100644
index 0000000..4b79b23
--- /dev/null
+++ b/karmada-HA-workload-example2/step1/text.md
@@ -0,0 +1,14 @@
+### Background
+
+1. The kubeconfig files for the host cluster, member1 cluster, and member2 cluster are located in the $HOME/.kube directory. They are named config, config-member1, and config-member2, respectively.
+
+   ```shell
+   $HOME/.kube/config
+   $HOME/.kube/config-member1
+   $HOME/.kube/config-member2
+   ```
+2. Check whether the member clusters have joined:
+
+   RUN `kubectl --kubeconfig /etc/karmada/karmada-apiserver.config get cluster`{{exec}}
+
+**Note**: Initializing the testing environment may take a few minutes.
diff --git a/karmada-HA-workload-example2/step1/verify.sh b/karmada-HA-workload-example2/step1/verify.sh
new file mode 100644
index 0000000..fc88cfc
--- /dev/null
+++ b/karmada-HA-workload-example2/step1/verify.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+kubectl --kubeconfig /etc/karmada/karmada-apiserver.config get cluster
+
diff --git a/karmada-HA-workload-example2/step2/text.md b/karmada-HA-workload-example2/step2/text.md
new file mode 100644
index 0000000..84050b0
--- /dev/null
+++ b/karmada-HA-workload-example2/step2/text.md
@@ -0,0 +1,5 @@
+### Create deployment
+
+Create a deployment named nginx:
+
+RUN `kubectl --kubeconfig /etc/karmada/karmada-apiserver.config apply -f ~/nginx/nginxDeployment.yaml`{{exec}}
diff --git a/karmada-HA-workload-example2/step2/verify.sh b/karmada-HA-workload-example2/step2/verify.sh
new file mode 100644
index 0000000..ca91f78
--- /dev/null
+++ b/karmada-HA-workload-example2/step2/verify.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+kubectl --kubeconfig /etc/karmada/karmada-apiserver.config get deployment nginx
diff --git a/karmada-HA-workload-example2/step3/text.md b/karmada-HA-workload-example2/step3/text.md
new file mode 100644
index 0000000..d8aad94
--- /dev/null
+++ b/karmada-HA-workload-example2/step3/text.md
@@ -0,0 +1,3 @@
+### Create a PropagationPolicy and deploy nginx to specific clusters
+
+RUN `kubectl --kubeconfig /etc/karmada/karmada-apiserver.config create -f ~/nginx/propagationPolicy.yaml`{{exec}}
diff --git a/karmada-HA-workload-example2/step3/verify.sh b/karmada-HA-workload-example2/step3/verify.sh
new file mode 100644
index 0000000..077dcc6
--- /dev/null
+++ b/karmada-HA-workload-example2/step3/verify.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+kubectl --kubeconfig /etc/karmada/karmada-apiserver.config get PropagationPolicy nginx-propagation
diff --git a/karmada-HA-workload-example2/step4/text.md b/karmada-HA-workload-example2/step4/text.md
new file mode 100644
index 0000000..b5fa453
--- /dev/null
+++ b/karmada-HA-workload-example2/step4/text.md
@@ -0,0 +1,5 @@
+### Check the status and quantity distribution of pods and deployments
+
+RUN `karmadactl --kubeconfig /etc/karmada/karmada-apiserver.config get deployment`{{exec}}
+
+RUN `karmadactl --kubeconfig /etc/karmada/karmada-apiserver.config get pods`{{exec}}
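Once the policy in the Divided scenario is applied, the 2:1 split can be spot-checked by counting the nginx pods on each member cluster. A rough sketch, again assuming the kubeconfig files staged by `foreground.sh`:

```bash
# Expect 2 nginx pods on kind-member1 and 1 on kind-member2 (static weights 2:1).
kubectl --kubeconfig $HOME/.kube/config-member1 get pods -l app=nginx --no-headers | wc -l
kubectl --kubeconfig $HOME/.kube/config-member2 get pods -l app=nginx --no-headers | wc -l
```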