Merge pull request #75 from scality/eve_ci
Simple CI with 1 node for metal-k8s
Zempashi committed Jun 11, 2018
2 parents b5a6471 + b0602e9 commit 8e1903f
Showing 8 changed files with 222 additions and 0 deletions.
89 changes: 89 additions & 0 deletions eve/main.yml
@@ -0,0 +1,89 @@
version: "0.2"

branches:
  default:
    stage: "pre-merge"

stages:
  pre-merge:
    worker: &pod
      type: kube_pod
      path: eve/workers/zenko.yaml
    steps:
      - TriggerStages:
          name: trigger all the tests
          stage_names:
            - single-node

  single-node:
    worker: &openstack
      type: openstack
      image: CentOS 7 (PVHVM)
      flavor: general1-4
      #path: eve/workers/centos7
    steps:
      - Git: &git_pull
          name: git pull
          repourl: "%(prop:git_reference)s"
          mode: full
          method: clobber
          retryFetch: true
          haltOnFailure: true
      - ShellCommand:
          name: 'create loopback blockdevice and configure VM'
          haltOnFailure: true
          command: |-
            sudo ./eve/single_node/setup/single_node.sh
      - ShellCommand:
          name: 'Create virtual env'
          haltOnFailure: true
          command: |-
            make shell
      - ShellCommand:
          name: 'Run the install with ansible'
          haltOnFailure: true
          command: >-
            ./.shell-env/metal-k8s/bin/ansible-playbook
            -i eve/single_node/
            metal-k8s.yml --skip elasticsearch
          env:
            ANSIBLE_FORCE_COLOR: 'true'
      - ShellCommand:
          name: 'Run sample kubectl command'
          haltOnFailure: true
          command: |-
            export PATH=${PATH}:/usr/local/bin
            kubectl get nodes
          env: &env_kubeconfig
            KUBECONFIG: inventories/single-node-ci/artifacts/admin.conf
      - ShellCommand:
          name: 'Test storage'
          haltOnFailure: true
          command: |-
            export PATH=${PATH}:/usr/local/bin
            make test-simple-storage
          workdir: build/tests
          env: *env_kubeconfig
      - ShellCommand:
          name: 'Verify that some pv are in released state'
          command: |-
            export PATH=${PATH}:/usr/local/bin
            kubectl get pv
            kubectl get pv -o jsonpath={.items[*].status.phase} | grep 'Released' > /dev/null
          env: *env_kubeconfig
      - ShellCommand:
          name: 'Reclaim storage'
          haltOnFailure: true
          command: >-
            ./.shell-env/metal-k8s/bin/ansible-playbook
            -i eve/single_node/
            reclaim-storage.yml
          env:
            ANSIBLE_FORCE_COLOR: 'true'
      - ShellCommand:
          name: 'Verify that no pv are in released state'
          command: |-
            export PATH=${PATH}:/usr/local/bin
            kubectl get pv
            ! kubectl get pv -o jsonpath={.items[*].status.phase}| grep 'Released' > /dev/null
          env: *env_kubeconfig
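
Taken together, the single-node stage is just a handful of commands. A minimal sketch of reproducing it by hand from the repository root on a CentOS 7 host (every command is copied from the steps above; the loopback and firewall preparation needs root):

    sudo ./eve/single_node/setup/single_node.sh   # loopback block device + firewall prep
    make shell                                    # create the virtual env under .shell-env/
    ./.shell-env/metal-k8s/bin/ansible-playbook -i eve/single_node/ metal-k8s.yml --skip elasticsearch
    export PATH=${PATH}:/usr/local/bin
    export KUBECONFIG=inventories/single-node-ci/artifacts/admin.conf
    kubectl get nodes                             # the single node should show up here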
24 changes: 24 additions & 0 deletions eve/single_node/group_vars/k8s-cluster/single_node.yml
@@ -0,0 +1,24 @@

metal_k8s_lvm:
  vgs:
    kubevg:
      drives: ['/dev/loop0']

metal_k8s_storage_class:
  storage_classes:
    local-lvm:
      is_default: true
      lvm_conf:
        default_fstype: 'ext4'
        default_fs_force: False
        default_fs_opts: '-m 0'
        default_mount_opts: 'defaults'
  vgs:
    kubevg:
      host_path: '/mnt/kubevg'
      storage_class: 'local-lvm'
      volumes:
        lv01:
          size: 11G
        lv02:
          size: 8G
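
With the 20G loop device created by the setup script, this carves two logical volumes (lv01: 11G, lv02: 8G) out of the kubevg volume group and assigns them to the default local-lvm storage class, exposed under /mnt/kubevg. A quick way to inspect the result on the node once the playbook has run (standard LVM and coreutils commands, not part of this commit):

    sudo vgs kubevg     # volume group backed by /dev/loop0
    sudo lvs kubevg     # should list lv01 and lv02
    ls /mnt/kubevg      # host_path where the volumes are exposed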
14 changes: 14 additions & 0 deletions eve/single_node/hosts
@@ -0,0 +1,14 @@
ci-metal-k8s ansible_connection=local ansible_become=True

[etcd]
ci-metal-k8s

[kube-master]
ci-metal-k8s

[kube-node]
ci-metal-k8s

[k8s-cluster:children]
kube-master
kube-node
10 changes: 10 additions & 0 deletions eve/single_node/setup/single_node.sh
@@ -0,0 +1,10 @@
#!/usr/bin/env bash

set -euo pipefail

truncate -s 20G /var/lib/kube_lvm
losetup /dev/loop0 /var/lib/kube_lvm
systemctl disable --now iptables
systemctl disable --now ip6tables
iptables -F
iptables -X
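
The script backs the future kubevg volume group with a 20G sparse file attached to /dev/loop0, then disables the iptables/ip6tables services and flushes any remaining rules. A quick sanity check after running it (standard util-linux commands, not part of this commit):

    losetup -a          # /dev/loop0 should show /var/lib/kube_lvm as its backing file
    lsblk /dev/loop0    # ~20G block device, ready to serve as an LVM physical volume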
17 changes: 17 additions & 0 deletions eve/workers/zenko.yaml
@@ -0,0 +1,17 @@
apiVersion: v1
kind: Pod
metadata:
  name: "zenko-test-pod"
spec:
  containers:
    - name: worker
      image: zenko/zenko-releng:0.0.7
      resources:
        requests:
          cpu: 500m
          memory: 1Gi
        limits:
          cpu: "1"
          memory: 2Gi
      command: ["/bin/sh"]
      args: ["-c", "buildbot-worker create-worker . ${BUILDMASTER}:${BUILDMASTER_PORT} ${WORKERNAME} ${WORKERPASS} && buildbot-worker start --nodaemon"]
16 changes: 16 additions & 0 deletions tests/Makefile
@@ -0,0 +1,16 @@

DEBUG = 1
V = @

-include config.mk

TESTS_FILE = $(wildcard test-*.sh)
TESTS = $(foreach test,$(TESTS_FILE),$(firstword $(subst ., ,$(test))))

test: $(TESTS)


$(TESTS):
	sh $@.sh

# vim: set noexpandtab shiftwidth=8 softtabstop=0
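
The wildcard/foreach pair above turns every test-*.sh script in this directory into a make target named after the script minus its extension, so new tests are picked up without editing the Makefile. Typical invocations, matching the 'Test storage' CI step:

    cd tests
    make test                  # run every test-*.sh script
    make test-simple-storage   # run one test, as the CI step does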
32 changes: 32 additions & 0 deletions tests/storage/test_pv.yml
@@ -0,0 +1,32 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: testclaim
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 2Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: test-pv
spec:
  restartPolicy: Never
  containers:
    - name: test-pv
      image: busybox
      command: ['/bin/sh']
      args:
        - "-c"
        - "mount | grep /var/test_pv && touch /var/test_pv/foo"
      volumeMounts:
        - name: test-volume
          mountPath: /var/test_pv
  volumes:
    - name: test-volume
      persistentVolumeClaim:
        claimName: testclaim
20 changes: 20 additions & 0 deletions tests/test-simple-storage.sh
@@ -0,0 +1,20 @@
#!/usr/bin/env bash

set -euo pipefail

echo "Listing all pv before test (some should be available):"
kubectl get pv
echo "Run simple pod accessing: storage"
kubectl apply -f storage/test_pv.yml
until kubectl get pods test-pv -o jsonpath={.status.phase} | grep -E '(Failed|Succeeded)'; do
echo "Wait for pod to exit"
sleep 1
done
RESULT=$(kubectl get pods test-pv -o jsonpath={.status.phase})
echo "Pod exited; listing all pv"
kubectl get pv
echo "Cleanup..."
kubectl delete pod test-pv
kubectl delete pvc testclaim
echo test is ${RESULT}
exit $([ "${RESULT}" = "Succeeded" ])
