Skip to content
Permalink
Browse files

ci: centralize most of the logic in the Makefile

There's not much of a point in having a makefile if our CI pipeline
doesn't exercise it, or if developers don't use it to its full extent.

To that end, most of our CI scripts got converted into thin wrappers
around calls to `make`. Additionally, the Makefile learned a lot of
skills that allow it to also serve more of the needs of the developers,
as most common tasks now become `make $something`. It's all been
thoroughly documented, too ^_^
  • Loading branch information...
juliogreff committed Aug 12, 2019
1 parent 03b23af commit aa3f74d07888cfcf6060603b320c31d3946dcf7b
@@ -0,0 +1,3 @@
*
!build
!test/e2e/testdata
@@ -4,7 +4,7 @@ pkg/chart/testdata/chartmuseum
pkg/chart/testdata/index.yaml
docs/generated

# Binaries generated when building Shipper
# Artifacts generated when building Shipper
build/

# IntelliJ IDE auto generated files
@@ -16,18 +16,18 @@ before_install:
- microk8s.enable registry
install: true
script:
- "./ci/test.sh && ./ci/build.sh && ./ci/e2e.sh && ./ci/package.sh"
- "./ci/test.sh && ./ci/build.sh && ./ci/e2e.sh"
after_success:
- "./ci/build-release-files.sh"
- "./ci/release.sh"
deploy:
provider: releases
api_key:
secure: sn8/ZmHs3IJIbmtUGTA3hvoynmTF1tZfoEaDJ1FY2abw9FutwU6XFHN4HUY2HGjBTXEd2jYYL7hlRoQsIUXILB7ausW7S73sVR5PtnRowzFkcYiCc0A2ZzhmsLWqrAY2761frqpRjhNVp/HEdKhszVTLuWA7hYWrojVyM0jlhpjZi98BYqy47ppVPOfZ8MdLRU9SS04EJpDvKLOnm1pHi1GO/8QiTVx4dStYpCZ9Y2wPQqK3Y3wVvRExFI/V0m51RzeTb/I63HU++6yWV28/llPfruFAEe6TA/th1xrUdh6fFwfnLHezIxmN5LNz6c0vdq3KKeKDeOWmSYnzwriq/LhFqCHVoqgbeOm6He7Yyz45vSV68nIHDbbPx/Q6caN1nPIE2/u7wrRlfJgzZIfLKNXjoUkCdxN1D0YmfXxyPotdh0qEdCyYDPPfuqZTO6Kj8cvRXaU3q6hFxVPaBjCdZitgLGFHOzibhZ2wxSQJtuPgqYsqZqjwmdvPaNGWoAwNVWEXmovQVLuCZryurEl9Ejf9GCFsgoMB+c5SiNto7QqUWed/iinUOoLlcKllYb+mUB28xTFVBS9svmmU5dBFGbaCSy7FiGxOhSxxOryfffkU98PCLYgRXC02PiilWP1fG9/oIQ0yL2um04To0gqpHTkfPaCsUfSfjAXtksChmFE=
file_glob: true
file:
- "release-files/*.tar.gz"
- "release-files/sha256sums.txt"
- "release-files/kubernetes-deployment.yaml"
- "build/*.tar.gz"
- "build/*.yaml"
- "build/sha256sums.txt"
skip_cleanup: true
on:
repo: bookingcom/shipper
@@ -1,5 +1,5 @@
FROM alpine:3.8
LABEL authors="Parham Doustdar <parham.doustdar@booking.com>, Alexey Surikov <alexey.surikov@booking.com>, Igor Sutton <igor.sutton@booking.com>, Ben Tyler <benjamin.tyler@booking.com>"
RUN apk add ca-certificates
ADD build/shipper /bin/shipper
ADD build/shipper.linux-amd64 /bin/shipper
ENTRYPOINT ["shipper", "-v", "4", "-logtostderr"]
@@ -1,5 +1,5 @@
FROM alpine:3.8
LABEL authors="Parham Doustdar <parham.doustdar@booking.com>, Alexey Surikov <alexey.surikov@booking.com>, Igor Sutton <igor.sutton@booking.com>, Ben Tyler <benjamin.tyler@booking.com>"
RUN apk add ca-certificates
ADD build/shipper-state-metrics /bin/shipper-state-metrics
ADD build/shipper-state-metrics.linux-amd64 /bin/shipper-state-metrics
ENTRYPOINT ["shipper-state-metrics", "-v", "2"]
201 Makefile
@@ -1,54 +1,205 @@
SHIPPER_IMAGE ?= bookingcom/shipper:latest
METRICS_IMAGE ?= bookingcom/shipper-state-metrics:latest
HELM_IMAGE ?= bookingcom/shipper-helm:latest
# Defines defaults for building, tagging and pushing our Docker images. You can
# override any of these with environment variables. Most notably, when working
# on shipper, you'll probably want to override DOCKER_REGISTRY to point to a
# private registry available to you.
DOCKER_REGISTRY ?= docker.io
IMAGE_TAG ?= latest
SHIPPER_IMAGE ?= $(DOCKER_REGISTRY)/bookingcom/shipper:$(IMAGE_TAG)
SHIPPER_STATE_METRICS_IMAGE ?= $(DOCKER_REGISTRY)/bookingcom/shipper-state-metrics:$(IMAGE_TAG)
HELM_IMAGE ?= $(DOCKER_REGISTRY)/bookingcom/shipper-helm:$(IMAGE_TAG)

# Defines the namespace where you want shipper to run.
SHIPPER_NAMESPACE ?= shipper-system
KUBECTL ?= kubectl -n $(SHIPPER_NAMESPACE)

PKG = pkg/**/* vendor/**/*
# Defines the path to a shipper clusters definition to be used by shipperctl in
# `make setup`. See ci/clusters.yaml for an example, if you need to override
# this in your development environment.
SHIPPER_CLUSTERS_YAML ?= ci/clusters.yaml

# Defines the default application cluster the end-to-end tests will use. To
# find out which clusters you have available to you, `kubectl get clusters`. If
# that errors out, or returns nothing, you probably need to run `make setup`
# first. This value needs to be present in the `applicationClusters` section in
# $(SHIPPER_CLUSTERS_YAML).
SHIPPER_CLUSTER ?= microk8s

# Defines optional flags to pass to `build/e2e.test` when running end-to-end
# tests. Useful flags are "-inspectfailed" (keep namespaces used for tests that
# failed) and "--test.v" (outputs information about every test, not only failed
# ones).
E2E_FLAGS ?=

# When set, deployments generated by `make build-yaml` will refer to the
# image's digest instead of a tag. Very useful in development, but not so much
# when building releases.
USE_IMAGE_NAME_WITH_SHA256 ?= 1

# All the files (*.go and otherwise) that, when changed, trigger rebuilds of
# binaries in the next run of `make`.
PKG := pkg/**/* vendor/**/*

# The binaries we want to build from `cmd/`.
BINARIES := shipper shipperctl shipper-state-metrics

# The operating systems we support. This gets used by `go build` as the `GOOS`
# environment variable.
OS := linux windows darwin

# The operating system where we're currently running. This is just a shorthand
# for a few targets.
GOOS := $(shell go env GOOS)

# Setup go environment variables that we want for every `go build` and `go
# test`, to ensure our environment is consistent for all developers.
export GOFLAGS := -mod=vendor
export GO111MODULE := on
export CGO_ENABLED := 0
export GOARCH := amd64
export GOOS := linux

build/%: cmd/%/*.go $(PKG)
go build -o $@ cmd/$*/*.go

build/e2e.test: $(PKG) test/e2e/*
go test -c ./test/e2e/ -o build/e2e.test
# *** Common targets ***
# These are the targets you are most likely to use directly, either when
# working on shipper, or via CI scripts.

KUBECTL ?= kubectl -n $(SHIPPER_NAMESPACE)
.PHONY: setup install install-shipper install-shipper-state-metrics install-helm e2e restart logs lint test vendor clean

build: build/shipper build/shipperctl build/shipper-state-metrics build/e2e.test
# Set up shipper clusters with `shipperctl`. This is probably the first thing
# you should do when starting to work on shipper, as most of everything else
# depends on having a management cluster talking to an application cluster.
setup: $(SHIPPER_CLUSTERS_YAML) build/shipperctl.$(GOOS)-amd64
./build/shipperctl.$(GOOS)-amd64 admin clusters apply \
-f $(SHIPPER_CLUSTERS_YAML) \
--shipper-system-namespace $(SHIPPER_NAMESPACE)

.PHONY: shipper shipper-state-metrics restart logs helm lint test vendor
# Install shipper in kubernetes, by applying all the required service and
# deployment yamls.
install: install-shipper install-shipper-state-metrics
install-shipper: build/shipper.image.$(IMAGE_TAG) build/shipper.service.$(IMAGE_TAG).yaml build/shipper.deployment.$(IMAGE_TAG).yaml
$(KUBECTL) apply -f build/shipper.service.$(IMAGE_TAG).yaml
$(KUBECTL) apply -f build/shipper.deployment.$(IMAGE_TAG).yaml

shipper: build/shipper Dockerfile.shipper
docker build -f Dockerfile.shipper -t $(SHIPPER_IMAGE) --build-arg HTTP_PROXY=$(HTTP_PROXY) --build-arg HTTPS_PROXY=$(HTTPS_PROXY) .
docker push $(SHIPPER_IMAGE)
install-shipper-state-metrics: build/shipper-state-metrics.image.$(IMAGE_TAG) build/shipper-state-metrics.deployment.$(IMAGE_TAG).yaml
$(KUBECTL) apply -f build/shipper-state-metrics.deployment.$(IMAGE_TAG).yaml

shipper-state-metrics: build/shipper-state-metrics Dockerfile.shipper-state-metrics
docker build -f Dockerfile.shipper-state-metrics -t $(METRICS_IMAGE) --build-arg HTTP_PROXY=$(HTTP_PROXY) --build-arg HTTPS_PROXY=$(HTTPS_PROXY) .
docker push $(METRICS_IMAGE)
# Install a helm chart repository server with test charts. This is useful
# mostly for end-to-end tests.
install-helm: build/helm.image.$(IMAGE_TAG)
$(KUBECTL) apply -f ci/helm.service.yaml
sed s=\<IMAGE\>=$(HELM_IMAGE)= ci/helm.deployment.yaml | $(KUBECTL) apply -f -

helm:
docker build -f Dockerfile.helm -t $(HELM_IMAGE) --build-arg HTTP_PROXY=$(HTTP_PROXY) --build-arg HTTPS_PROXY=$(HTTPS_PROXY) .
docker push $(HELM_IMAGE)
# Run all end-to-end tests. It does all the work necessary to get the current
# version of shipper on your working directory running in kubernetes, so just
# running `make -j e2e` should get you up and running immediately. Do remember
# to set up your clusters with `make setup` though.
e2e: install install-helm build/e2e.test
./build/e2e.test --e2e --kubeconfig ~/.kube/config \
--testcharts http://$(shell $(KUBECTL) get service helm -o jsonpath='{.spec.clusterIP}'):8879 \
--progresstimeout=2m --appcluster $(SHIPPER_CLUSTER) \
$(E2E_FLAGS)

# Delete all pods in $(SHIPPER_NAMESPACE), to force kubernetes to spawn new
# ones with the latest image (assuming that imagePullPolicy is set to Always).
restart:
# Delete all Pods in namespace, to force the ReplicaSet to spawn new ones
# with the new latest image (assuming that imagePullPolicy is set to Always).
$(KUBECTL) delete pods --all

# Tail logs from shipper's pods.
logs:
$(KUBECTL) get po -o jsonpath='{.items[*].metadata.name}' | xargs $(KUBECTL) logs --follow
$(KUBECTL) logs -l app=shipper -f

# Run all linters. It's useful to run this one before pushing commits ;)
lint:
golangci-lint run -v --config .golangci.yml ./pkg/... ./cmd/... ./test/...

# Run all unit tests. It's useful to run this one before pushing commits ;)
test:
go test -v ./pkg/... ./cmd/...

# Tidy up and vendor dependencies. Run this every time you add or remove
# dependencies, otherwise the CI pipeline will fail to download them, and
# shipper won't build.
vendor:
go mod tidy -v
go mod vendor -v
go mod verify

# Remove all build artifacts from the filesystem, and all objects installed in
# kubernetes.
.NOTPARALLEL: clean
clean:
rm -rf build/

# *** build/ targets ***
.PHONY: build-bin build-yaml build-images build-all
SHA = $(if $(shell which sha256sum),sha256sum,shasum -a 256)
build-bin: $(foreach bin,$(BINARIES),build/$(bin).$(GOOS)-amd64)
build-yaml: build/shipper.deployment.$(IMAGE_TAG).yaml build/shipper-state-metrics.deployment.$(IMAGE_TAG).yaml build/shipper.service.$(IMAGE_TAG).yaml
build-images: build/shipper.image.$(IMAGE_TAG) build/shipper-state-metrics.image.$(IMAGE_TAG)
build-all: $(foreach os,$(OS),build/shipperctl.$(os)-amd64.tar.gz) build/sha256sums.txt build-yaml build-images

build:
mkdir -p build

build/shipper-state-metrics.%-amd64: cmd/shipper-state-metrics/*.go $(PKG)
GOOS=$* GOARCH=amd64 go build -o build/shipper-state-metrics.$*-amd64 cmd/shipper-state-metrics/*.go

build/shipper.%-amd64: cmd/shipper/*.go $(PKG)
GOOS=$* GOARCH=amd64 go build -o build/shipper.$*-amd64 cmd/shipper/*.go

build/shipperctl.%-amd64: cmd/shipperctl/*.go $(PKG)
GOOS=$* GOARCH=amd64 go build -o build/shipperctl.$*-amd64 cmd/shipperctl/*.go

build/e2e.test: $(PKG) test/e2e/*
go test -c ./test/e2e/ -o build/e2e.test

build/%.service.$(IMAGE_TAG).yaml: kubernetes/%.service.yaml build
cp $< $@

IMAGE_NAME_WITH_SHA256 = $(shell cat build/$*.image.$(IMAGE_TAG))
IMAGE_NAME_TO_USE = $(if $(USE_IMAGE_NAME_WITH_SHA256),$(IMAGE_NAME_WITH_SHA256),$(IMAGE_NAME_WITH_TAG))
build/%.deployment.$(IMAGE_TAG).yaml: kubernetes/%.deployment.yaml build/%.image.$(IMAGE_TAG) build
sed s=\<IMAGE\>=$(IMAGE_NAME_TO_USE)= $< > $@

build/sha256sums.txt: $(foreach os,$(OS),build/shipperctl.$(os)-amd64.tar.gz)
$(SHA) build/*.tar.gz > $@

build/%.tar.gz: build/%
tar -zcvf $@ -C build $*


# *** Docker Image Targets ***

# Take the "%" from a target (that is now in $*) and use that as part of a
# variable name, that then gets evaluated, kind of like a poor man's hash. So
# if you have "shipper" in $*, IMAGE_NAME_WITH_TAG will read from
# $(SHIPPER_IMAGE).
IMAGE_NAME_WITH_TAG = $($(subst -,_,$(shell echo $* | tr '[:lower:]' '[:upper:]'))_IMAGE)

# The shipper, shipper-state-metrics and helm targets here are phony and
# supposed to be used directly, as a shorthand. They call their close cousins
# in `build/%.image.$(IMAGE_TAG)`, that are *not* phony, as they output the
# fully qualified name to an image that's immutable to a file. This serves two
# purposes:
#
# - there's no need to manually delete pods from kubernetes to get new images
# running, as we can use the digest in deployments so `make install` always
# deploys the most recent image.
# - when the file with the image name is up to date, it prevents `docker
# build` from being called at all, as it just tells us that all layers have
# already been cached and it didn't generate a new image.

.PHONY: shipper shipper-state-metrics helm
shipper: build/shipper.image.$(IMAGE_TAG)
shipper-state-metrics: build/shipper-state-metrics.image.$(IMAGE_TAG)
helm: build/helm.image.$(IMAGE_TAG)

# These two targets actually get built by the more general
# `build/%.image.$(IMAGE_TAG)` just below, but we need to add the binaries as
# dependencies. We can't add them directly to the general rule because the helm
# image does not have any binaries that it depends on.
build/shipper.image.$(IMAGE_TAG): build/shipper.linux-amd64
build/shipper-state-metrics.image.$(IMAGE_TAG): build/shipper-state-metrics.linux-amd64

build/%.image.$(IMAGE_TAG): Dockerfile.%
docker build -f Dockerfile.$* -t $(IMAGE_NAME_WITH_TAG) --build-arg HTTP_PROXY=$(HTTP_PROXY) --build-arg HTTPS_PROXY=$(HTTPS_PROXY) .
docker push $(IMAGE_NAME_WITH_TAG)
docker inspect --format='{{index .RepoDigests 0}}' $(IMAGE_NAME_WITH_TAG) > $@

This file was deleted.

@@ -1,3 +1,3 @@
#!/bin/bash -ex

make build
make build-bin

0 comments on commit aa3f74d

Please sign in to comment.
You can’t perform that action at this time.