ansible-playbook playbooks/setup-kubernetes
scp -r masterIP:~/.kube/config localIP:
Ex: scp -r master:~/.kube/config ~/.kube/config_mycluster
Note: Set eth1 to the internal IP (the default route is on eth0 - 10.0.2.15) for each node when using the Vagrantfile (fixes an issue where `kubectl exec` into a pod fails)
# Route all default traffic through the host-only/internal network gateway.
# run: "always" re-applies this on every `vagrant up`/`vagrant reload`,
# not just on first provision.
node.vm.provision "shell",
run: "always",
inline: "route add default gw 192.168.11.1"
# node.vm.provision "shell", path: "bootstrap_kmaster.sh"
# delete default gw on eth0
# `route -n` columns: $2 = Gateway, $8 = Iface; for every non-zero gateway
# on eth0 this emits and eval's a `route del default gw <gw>` command,
# removing the NAT (10.0.2.x) default route so eth1 is used instead.
node.vm.provision "shell",
run: "always",
inline: "eval `route -n | awk '{ if ($8 ==\"eth0\" && $2 != \"0.0.0.0\") print \"route del default gw \" $2; }'`"
# systemd drop-in for the kubelet unit (e.g. /etc/systemd/system/kubelet.service.d/):
# --fail-swap-on=false lets kubelet start even though swap is enabled;
# --node-ip forces kubelet to advertise the internal (eth1) address instead
# of the Vagrant NAT address (10.0.2.15).
# NOTE(review): the variable is spelled "kubernate_node_ip" — presumably meant
# "kubernetes_node_ip"; it must match the Ansible variable name, so confirm
# against the playbook's vars before renaming.
[Service]
Environment="KUBELET_EXTRA_ARGS=--fail-swap-on=false --node-ip={{ kubernate_node_ip }}"
ansible-playbook playbooks/setup-kubernetes
# ServiceAccount used to log in to the Kubernetes Dashboard.
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
# Grant the admin-user ServiceAccount full cluster access by binding it
# to the built-in cluster-admin ClusterRole.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
---
# Long-lived ServiceAccount token Secret for admin-user (required since
# Kubernetes 1.24, where tokens are no longer auto-created for SAs).
# The `---` separator above is required: without it this manifest is parsed
# as part of the preceding document, producing duplicate apiVersion/kind/
# metadata keys and a parse (or silent last-wins) failure.
apiVersion: v1
kind: Secret
metadata:
name: admin-user
namespace: kubernetes-dashboard
annotations:
# Links this token Secret to the admin-user ServiceAccount.
kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
data:
# You can include additional key value pairs as you do with Opaque Secrets
# (values under `data:` must be base64-encoded; "abcd" is valid base64).
extra: abcd
kubectl describe secret admin-user -n kubernetes-dashboard