# Forked from contiv/auth_proxy — Makefile
# Default goal: running `make` with no arguments is equivalent to `make build`.
all: build
# build produces a minimalist, release-ready image inside a build container.
# Set BUILD_VERSION=<tag> to control the image tag (BUILD_VERSION=foo builds
# 'auth_proxy:foo'); when unset it defaults to "devbuild". Code-quality
# checks run first via the `checks` prerequisite.
build: checks
	@bash ./scripts/build.sh
# release builds a Contiv release package from the pre-built image named by
# BUILD_VERSION. Any previous release output is removed first.
release:
	rm -rf release/
	@bash ./scripts/release.sh
# cluster recreates the demo cluster used for installing Contiv — a
# docker/centos cluster by default. Set CONTIV_NODE_OS=rhel7 for a RHEL
# cluster, or VAGRANT_USE_KUBEADM=1 for a k8s kubeadm-based install.
# Depends on cluster-destroy so a stale cluster is torn down first.
cluster: cluster-destroy
	cd cluster && vagrant up
# cluster-destroy forcibly tears down the vagrant demo cluster VMs.
cluster-destroy:
	cd cluster && vagrant destroy -f
# release-test-kubeadm creates a release, then tests installing it on a
# fresh kubeadm vagrant cluster (centos by default).
# TODO: the vagrant part could be optimized by taking VM snapshots instead
# of creating a new set of VMs for each case.
# NOTE: recursive invocations use $(MAKE) — not literal `make` — so that
# -n/-j and the jobserver are propagated to the sub-make.
release-test-kubeadm: release
	VAGRANT_USE_KUBEADM=1 $(MAKE) cluster
	VAGRANT_USE_KUBEADM=1 $(MAKE) install-test-kubeadm
# release-test-swarm creates a release, then tests installing it on a
# swarm vagrant cluster (centos by default).
# Uses $(MAKE) rather than literal `make` so -n/-j propagate correctly.
release-test-swarm: release
	CLUSTER_CONFIG='cluster_defs_ansible.json' $(MAKE) cluster
	CLUSTER_CONFIG='cluster_defs_ansible.json' $(MAKE) install-test-swarm
# release-test-kubelegacy creates a release, then tests the legacy k8s
# ansible install (centos by default).
# Uses $(MAKE) rather than literal `make` so -n/-j propagate correctly.
release-test-kubelegacy: release
	$(MAKE) cluster
	$(MAKE) install-test-kube-legacy
# The install-test-* targets exercise the installation on an already
# provisioned cluster — intended for bare-metal and other non-vagrant
# setups. The kubeadm-based flow requires k8s preinstalled; every other
# flow requires docker on the master node.
install-test-kubeadm:
	@bash ./installtests/kubeadm_test.sh
# install-test-kube-legacy runs the legacy kubernetes installation test.
install-test-kube-legacy:
	@bash ./installtests/kube_legacy_test.sh
# install-test-swarm runs the docker swarm installation test.
install-test-swarm:
	@bash ./installtests/swarm_test.sh
# checks runs gofmt, go vet, and the rest of the code-quality tooling.
checks:
	@bash ./scripts/checks.sh
# ci is the entry point for a Github PR-triggered CI run: build the
# container image, then run every available test suite.
ci: build test
# generate-certificate produces a local key/cert pair for running the
# proxy. It is a no-op when both files already exist; if either one is
# missing, both are regenerated.
generate-certificate:
	@bash ./scripts/generate-certificate.sh
# godep rebuilds Godeps/Godeps.json; only needed after adding a new
# external dependency. Installs the godep tool first when it is missing.
# `command -v` is the POSIX-portable presence check (`which` is not
# standardized and its output/exit status varies between systems).
godep:
	@command -v godep >/dev/null 2>&1 || go get -u github.com/tools/godep
	godep save ./...
# systemtests runs the system test suite.
systemtests:
	@bash ./scripts/systemtests.sh
# unit-tests runs all of the unit tests.
unit-tests:
	@bash ./scripts/unittests.sh
# test runs every test suite: system tests and unit tests.
test: systemtests unit-tests
# run brings up an auth proxy on its own (no netmaster) via docker-compose.
# Handy for testing proxy-only changes without a full e2e setup. Run
# generate-certificate and build beforehand, or place certs in the
# appropriate folder — see docker-compose.yaml for details.
run:
	docker-compose up -d
# Every target in this file is a command, not a file — declare them all
# phony so a stray file with a matching name can never shadow a target.
# (Adds the previously missing install-test-* targets.)
.PHONY: all build checks ci generate-certificate godep systemtests test \
        unit-tests release cluster cluster-destroy release-test-swarm \
        release-test-kubeadm release-test-kubelegacy install-test-kubeadm \
        install-test-kube-legacy install-test-swarm run