Skip to content
Permalink
@@ -1,28 +1,29 @@
# All Go sources excluding vendored dependencies.
# ':=' expands the $(shell ...) once at parse time; with '=' the find/cat
# commands would re-run on every reference to the variable.
GOFILES_NOVENDOR:=$(shell find . -type f -name '*.go' -not -path "./vendor/*")
GO_VERSION=1.12

# Image registry and the set of image roles built by the loops below.
REGISTRY=index.alauda.cn/alaudak8s
ROLES=node controller cni db
DEV_TAG=dev
RELEASE_TAG:=$(shell cat VERSION)

# 'release' added: it is a command, not a file, and was missing from .PHONY.
.PHONY: build-dev-images build-go build-bin test lint up down halt suspend resume release

# Build the binaries, then build and push a dev-tagged image for every role
# in $(ROLES). The loop replaces the previously duplicated per-role
# docker build/push commands.
build-dev-images: build-bin
	@for role in ${ROLES} ; do \
		docker build -t ${REGISTRY}/kube-ovn-$$role:${DEV_TAG} -f dist/images/Dockerfile.$$role dist/images/; \
		docker push ${REGISTRY}/kube-ovn-$$role:${DEV_TAG}; \
	done

# Cross-compile static Linux binaries into dist/images/ for the container
# images. CGO is disabled so the binaries have no libc dependency; -w -s
# strips debug info to shrink them. $(CURDIR) (a make built-in, already used
# by build-bin) replaces $(PWD), which is only an inherited environment
# variable and may be unset.
build-go:
	CGO_ENABLED=0 GOOS=linux go build -o $(CURDIR)/dist/images/kube-ovn -ldflags "-w -s" -v ./cmd/cni
	CGO_ENABLED=0 GOOS=linux go build -o $(CURDIR)/dist/images/kube-ovn-controller -ldflags "-w -s" -v ./cmd/controller
	CGO_ENABLED=0 GOOS=linux go build -o $(CURDIR)/dist/images/kube-ovn-daemon -ldflags "-w -s" -v ./cmd/daemon

# Build and push release images for every role, tagged with the version read
# from the VERSION file ($(RELEASE_TAG)). The loop replaces the previously
# duplicated per-role docker build/push commands that shelled out to
# `cat VERSION` for each tag.
release: build-go
	@for role in ${ROLES} ; do \
		docker build -t ${REGISTRY}/kube-ovn-$$role:${RELEASE_TAG} -f dist/images/Dockerfile.$$role dist/images/; \
		docker push ${REGISTRY}/kube-ovn-$$role:${RELEASE_TAG}; \
	done

lint:
@gofmt -d ${GOFILES_NOVENDOR}
@@ -33,7 +34,7 @@ test:
GOOS=linux go test -cover -v ./...

build-bin: lint
docker run -e GOOS=linux -e GOCACHE=/tmp \
docker run --rm -e GOOS=linux -e GOCACHE=/tmp \
-u $(shell id -u):$(shell id -g) \
-v $(CURDIR):/go/src/github.com/alauda/kube-ovn:ro \
-v $(CURDIR)/dist:/go/src/github.com/alauda/kube-ovn/dist/ \
@@ -55,4 +56,4 @@ resume:
cd vagrant && vagrant resume

# Suspend the local vagrant test cluster (duplicate recipe line removed).
suspend:
	cd vagrant && vagrant suspend
@@ -0,0 +1,35 @@
# Image for the OVN central databases (ovn-nb, ovn-sb) and ovn-northd;
# started via start-db.sh below.
FROM centos:7

# Keep Python tooling from writing .pyc files into the image layers.
ENV PYTHONDONTWRITEBYTECODE yes

# Runtime dependencies: python bindings, crypto, NUMA and pcap libraries,
# basic network debugging tools (iproute/strace/socat/nc — nc is used by
# start-db.sh to wait for the cluster bootstrap node), and libreswan for the
# openvswitch-ipsec package installed below.
RUN yum install -y \
PyYAML bind-utils \
openssl \
numactl-libs \
firewalld-filesystem \
libpcap \
hostname \
iproute strace socat nc \
unbound unbound-devel python-openvswitch libreswan && \
yum clean all

# Version of the alauda/ovs fork to install; SUBVERSION is the rpm release.
ENV OVS_VERSION=2.11.1
ENV OVS_SUBVERSION=1

# Install OVS and the OVN daemons from prebuilt rpms published on the
# alauda/ovs GitHub releases page. Each rpm is installed individually, in
# dependency order.
RUN rpm -ivh https://github.com/alauda/ovs/releases/download/v${OVS_VERSION}-${OVS_SUBVERSION}/openvswitch-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh https://github.com/alauda/ovs/releases/download/v${OVS_VERSION}-${OVS_SUBVERSION}/openvswitch-ipsec-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh https://github.com/alauda/ovs/releases/download/v${OVS_VERSION}-${OVS_SUBVERSION}/openvswitch-devel-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh https://github.com/alauda/ovs/releases/download/v${OVS_VERSION}-${OVS_SUBVERSION}/ovn-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh https://github.com/alauda/ovs/releases/download/v${OVS_VERSION}-${OVS_SUBVERSION}/ovn-common-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh https://github.com/alauda/ovs/releases/download/v${OVS_VERSION}-${OVS_SUBVERSION}/ovn-vtep-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh https://github.com/alauda/ovs/releases/download/v${OVS_VERSION}-${OVS_SUBVERSION}/ovn-central-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \
rpm -ivh https://github.com/alauda/ovs/releases/download/v${OVS_VERSION}-${OVS_SUBVERSION}/ovn-host-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm

# Runtime directory expected by the OVS/OVN control utilities.
RUN mkdir -p /var/run/openvswitch

# Probe scripts used by the kubernetes readiness/liveness checks.
COPY ovn-healthcheck.sh /root/ovn-healthcheck.sh
COPY ovn-is-leader.sh /root/ovn-is-leader.sh

COPY start-db.sh /root/start-db.sh

# Default command: start the (optionally clustered) NB/SB databases and northd.
CMD ["/bin/bash", "/root/start-db.sh"]
@@ -29,8 +29,7 @@ RUN mkdir -p /var/run/openvswitch && \
mkdir -p /etc/cni/net.d && \
mkdir -p /opt/cni/bin

# Probe and startup scripts for the node image.
# NOTE(review): both ovn-healthcheck.sh and ovs-healthcheck.sh are copied —
# confirm both are still referenced by the probes in yamls/ovn.yaml.
COPY ovn-healthcheck.sh /root/ovn-healthcheck.sh
COPY ovs-healthcheck.sh /root/ovs-healthcheck.sh
COPY start-ovs.sh /root/start-ovs.sh
COPY start-ovn.sh /root/start-ovn.sh

# Default command runs start-ovs.sh; start-ovn.sh is passed as args where needed.
CMD ["/bin/bash", "/root/start-ovs.sh"]
@@ -2,4 +2,5 @@
set -euo pipefail

ovn-nbctl show
ovn-sbctl show
# wait 5 seconds
ovn-sbctl -t 5 show
@@ -0,0 +1,9 @@
#!/bin/bash
# Readiness probe for ovn-central: exits 0 only on the node that currently
# holds the ovn-northd leadership lock, so the kubernetes endpoint stores
# just the leader's address.
set -euo pipefail

# Fail fast if the local NB database does not respond.
ovn-nbctl show
# -t 5: give the SB database at most 5 seconds to answer.
ovn-sbctl -t 5 show

# For data consistency, only store leader address in endpoint.
# Join the log onto one line so the negative lookahead can see any later
# "lock lost"; `tr` replaces the original unquoted `echo $(cat ...)`, which
# word-split and glob-expanded the log content. Under `set -euo pipefail`,
# grep's non-zero exit (no match -> not the leader) fails the probe.
tr '\n' ' ' < /var/log/openvswitch/ovn-northd.log | grep -oP "lock acquired(?!.*lock lost).*$"
@@ -0,0 +1,4 @@
#!/bin/bash
# Health probe for the OVS daemons: `ovs-vsctl show` exits non-zero when
# ovsdb-server is not responding, which fails the kubernetes probe.
set -euo pipefail

ovs-vsctl show
@@ -0,0 +1,66 @@
#!/bin/bash
# Start the OVN NB/SB databases (standalone or clustered) and ovn-northd.
# Usage: start-db.sh [nb_addr] [nb_port] [sb_addr] [sb_port]
set -euo pipefail

# Listen addresses/ports for the NB and SB databases.
# Fixed: the original defaulted all four values from positional parameter $1.
DB_NB_ADDR=${1:-0.0.0.0}
DB_NB_PORT=${2:-6641}
DB_SB_ADDR=${3:-0.0.0.0}
DB_SB_PORT=${4:-6642}

# Build an OVN connection string "tcp:ip1:PORT,tcp:ip2:PORT,..." from the
# comma-separated $NODE_IPS list; $1 is the port used for every member.
function gen_conn_str {
    local port="$1"
    local members conn member
    members=$(echo -n "${NODE_IPS}" | tr -d '[:space:]' | tr ',' ' ')
    conn=""
    for member in $members; do
        conn="${conn}tcp:${member}:${port},"
    done
    echo "${conn%,}"
}

# Print the first address from the comma-separated $NODE_IPS list
# (the node that bootstraps the database cluster).
function get_first_node_ip {
    echo -n "${NODE_IPS}" | tr -d '[:space:]' | cut -d ',' -f 1
}

# Stop ovn-northd (and the databases it manages) cleanly when the container
# exits, so the raft member leaves gracefully instead of being killed.
function quit {
/usr/share/openvswitch/scripts/ovn-ctl stop_northd
exit 0
}
# Run the cleanup on any exit path (including TERM forwarded by the runtime).
trap quit EXIT

# No NODE_IPS -> single-node (standalone) mode. ${NODE_IPS:-} keeps `set -u`
# from aborting when the environment variable is not defined at all.
if [ -z "${NODE_IPS:-}" ]; then
    /usr/share/openvswitch/scripts/ovn-ctl restart_northd
else
    /usr/share/openvswitch/scripts/ovn-ctl stop_northd

    first_node_ip=$(get_first_node_ip)
    if [ "$first_node_ip" == "${POD_IP}" ]; then
        # This pod is the first node: bootstrap the cluster.
        # Start ovn-northd, ovn-nb and ovn-sb
        /usr/share/openvswitch/scripts/ovn-ctl \
            --db-nb-create-insecure-remote=yes \
            --db-sb-create-insecure-remote=yes \
            --db-nb-cluster-local-addr=${POD_IP} \
            --db-sb-cluster-local-addr=${POD_IP} \
            --ovn-northd-nb-db=$(gen_conn_str 6641) \
            --ovn-northd-sb-db=$(gen_conn_str 6642) \
            start_northd
    else
        # Followers wait until the bootstrap node's NB db port answers
        # before joining the cluster via --db-*-cluster-remote-addr.
        while ! nc -z "${first_node_ip}" "${DB_NB_PORT}" </dev/null;
        do
            echo "sleep 5 seconds, waiting for ovn-nb ${first_node_ip}:${DB_NB_PORT} ready "
            sleep 5;
        done
        # Start ovn-northd, ovn-nb and ovn-sb
        /usr/share/openvswitch/scripts/ovn-ctl \
            --db-nb-create-insecure-remote=yes \
            --db-sb-create-insecure-remote=yes \
            --db-nb-cluster-local-addr=${POD_IP} \
            --db-sb-cluster-local-addr=${POD_IP} \
            --db-nb-cluster-remote-addr=$first_node_ip \
            --db-sb-cluster-remote-addr=$first_node_ip \
            --ovn-northd-nb-db=$(gen_conn_str 6641) \
            --ovn-northd-sb-db=$(gen_conn_str 6642) \
            start_northd
    fi
fi

# ovn-nb and ovn-sb listen on tcp ports for ovn-controller to connect
ovn-nbctl set-connection ptcp:${DB_NB_PORT}:${DB_NB_ADDR}
ovn-sbctl set-connection ptcp:${DB_SB_PORT}:${DB_SB_ADDR}

# Keep the container in the foreground and surface the northd log.
tail -f /var/log/openvswitch/ovn-northd.log

This file was deleted.

0 dist/images/start-ovs.sh 100644 → 100755
No changes.
@@ -0,0 +1,20 @@
# High available for ovn db

OVS supports a clustered database. If you want to use a highly available database in Kube-OVN,
modify the ovn-central deployment in yamls/ovn.yaml.

Change `replicas` to 3, and add the `NODE_IPS` environment variable.
```yaml
replicas: 3
containers:
- name: ovn-central
image: "index.alauda.cn/alaudak8s/kube-ovn-db:dev1"
imagePullPolicy: Always
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_IPS
value: 192.168.55.10, 192.168.55.11, 192.168.55.12
```
@@ -19,6 +19,8 @@ Kube-OVN includes two parts:

That's all! You can now create some pods and test connectivity.

For a highly available OVN DB, see [high availability](high-available.md)

## More Configuration

### Controller Configuration
@@ -14,18 +14,18 @@ Vagrant.configure("2") do |config|
end

config.vm.define "k8s-master" do |master|
MASTER_NODE_IP = "192.168.55.10"
master_node_ip = "192.168.55.10"

master.vm.box = IMAGE_NAME
master.vm.network "private_network", ip: MASTER_NODE_IP
master.vm.network "private_network", ip: master_node_ip
master.vm.hostname = "k8s-master"
master.vm.provision "ansible" do |ansible|
ansible.playbook = "kubernetes-setup/master-playbook.yml"
ansible.verbose = ANSIBLE_VERBOSE
ansible.extra_vars = {
kubernetes_version: "v1.14.0",
master_ip: MASTER_NODE_IP,
node_ip: MASTER_NODE_IP,
master_ip: master_node_ip,
node_ip: master_node_ip,
pod_network_cidr: "10.16.0.0/16",
cluster_service_ip_range: "10.96.0.0/16",
image_repository: "registry.cn-hangzhou.aliyuncs.com/google_containers",
@@ -35,16 +35,16 @@ Vagrant.configure("2") do |config|

(1..NODES).each do |i|
config.vm.define "node-#{i}" do |node|
SLAVE_NODE_IP = "192.168.55.#{i + 10}"
slave_node_ip = "192.168.55.#{i + 10}"

node.vm.box = IMAGE_NAME
node.vm.network "private_network", ip: SLAVE_NODE_IP
node.vm.network "private_network", ip: slave_node_ip
node.vm.hostname = "node-#{i}"
node.vm.provision "ansible" do |ansible|
ansible.playbook = "kubernetes-setup/node-playbook.yml"
ansible.verbose = ANSIBLE_VERBOSE
ansible.extra_vars = {
node_ip: SLAVE_NODE_IP,
node_ip: slave_node_ip,
}
end
end
@@ -50,14 +50,6 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: KUBE_NAMESPACE
valueFrom:
fieldRef:
@@ -119,10 +111,6 @@ spec:
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: KUBE_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- mountPath: /run/openvswitch
name: host-run-ovs
@@ -159,15 +159,24 @@ spec:
tolerations:
- operator: Exists
effect: NoSchedule
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: ovn-central
topologyKey: kubernetes.io/hostname
serviceAccountName: ovn
hostNetwork: true
containers:
- name: ovn-central
image: "index.alauda.cn/alaudak8s/kube-ovn-node:v0.3.0"
image: "index.alauda.cn/alaudak8s/kube-ovn-db:v0.3.0"
imagePullPolicy: Always
args:
- "/bin/bash"
- "/root/start-ovn.sh"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
resources:
requests:
cpu: 200m
@@ -191,15 +200,15 @@ spec:
exec:
command:
- sh
- /root/ovn-healthcheck.sh
periodSeconds: 5
- /root/ovn-is-leader.sh
periodSeconds: 3
livenessProbe:
exec:
command:
- sh
- /root/ovn-healthcheck.sh
initialDelaySeconds: 10
periodSeconds: 5
initialDelaySeconds: 30
periodSeconds: 7
failureThreshold: 5
nodeSelector:
beta.kubernetes.io/os: "linux"
@@ -276,14 +285,14 @@ spec:
readinessProbe:
exec:
command:
- ovs-vsctl
- show
- sh
- /root/ovs-healthcheck.sh
periodSeconds: 5
livenessProbe:
exec:
command:
- ovs-vsctl
- show
- sh
- /root/ovs-healthcheck.sh
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 5

0 comments on commit b8f8514

Please sign in to comment.
You can’t perform that action at this time.