This repository has been archived by the owner on Nov 3, 2023. It is now read-only.

Commit f9f7fe9 (2 parents: 2848b46 + edb8d42)
Merge pull request #63 from dhiltgen/native_containerd_ci

Switch CI to use native containerd instead of kind

dhiltgen committed Dec 9, 2020
Showing 11 changed files with 58 additions and 53 deletions.
26 changes: 21 additions & 5 deletions .github/workflows/pull_request.yaml
@@ -28,7 +28,7 @@ jobs:
flags: unit-tests
name: codecov-unit-test

test-integration-kind-containerd:
test-integration-containerd:
runs-on: ubuntu-latest
steps:
- name: Set up Go ${{ env.GO_VERSION }}
@@ -37,16 +37,31 @@
go-version: ^${{ env.GO_VERSION }}
- uses: actions/checkout@v2
name: Check out code into the Go module directory
- name: Setup kind cluster
run: kind create cluster --config ./integration/kind-ci.yaml
- name: Setup containerd cluster
run: |
set -x
sudo apt-get install -y kubeadm kubelet
sudo swapoff -a
# Ensure dockerd isn't running
sudo systemctl stop docker.socket
sudo systemctl stop docker
sudo rm -f /var/run/docker.sock
sudo kubeadm init --cri-socket /run/containerd/containerd.sock
mkdir -p $HOME/.kube/
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $USER $HOME/.kube/config
kubectl taint nodes --all node-role.kubernetes.io/master-
kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
kubectl wait --for=condition=ready --timeout=30s node --all
kubectl get nodes -o wide
- name: Run integration tests
run: make integration EXTRA_GO_TEST_FLAGS=-v
- name: Gather integration coverage results
uses: codecov/codecov-action@v1
with:
file: cover-int.out
flags: integration-tests
name: codecov-integration-test-kind
name: codecov-integration-test-containerd

test-integration-dockerd:
runs-on: ubuntu-latest
@@ -69,7 +84,8 @@ jobs:
kubectl taint nodes --all node-role.kubernetes.io/master-
kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
kubectl wait --for=condition=ready --timeout=30s node --all
kubectl get nodes
kubectl get nodes -o wide
docker version
- name: Run integration tests
run: make integration EXTRA_GO_TEST_FLAGS=-v
7 changes: 5 additions & 2 deletions Makefile
@@ -3,7 +3,8 @@

VERSION?=$(shell git describe --tags --first-parent --abbrev=7 --long --dirty --always)
TEST_KUBECONFIG?=$(HOME)/.kube/config
TEST_TIMEOUT=300s
TEST_TIMEOUT=600s
TEST_PARALLELISM=3

# Verify Go in PATH
ifeq (, $(shell which go))
@@ -71,7 +72,9 @@ test:
integration:
@echo "Running integration tests with $(TEST_KUBECONFIG)"
@kubectl config get-contexts
TEST_KUBECONFIG=$(TEST_KUBECONFIG) go test -timeout $(TEST_TIMEOUT) $(GO_FLAGS) $(EXTRA_GO_TEST_FLAGS) \
TEST_KUBECONFIG=$(TEST_KUBECONFIG) go test -timeout $(TEST_TIMEOUT) $(GO_FLAGS) \
-parallel $(TEST_PARALLELISM) \
$(EXTRA_GO_TEST_FLAGS) \
$(GO_COVER_FLAGS) -coverprofile=./cover-int.out \
./integration/...

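For context on the new knobs: Go's test runner only parallelizes tests that call t.Parallel(), and the new `-parallel $(TEST_PARALLELISM)` flag caps how many of those run at once within a package. A minimal sketch (not from this repository) of that interplay:

```go
package demo_test

import (
	"testing"
	"time"
)

// Each test opts into parallel execution. With `go test -parallel 3`
// (TEST_PARALLELISM in the Makefile above), at most three parallel
// tests per package run concurrently; the longer TEST_TIMEOUT gives
// the slower, serialized remainder room to finish.
func TestAlpha(t *testing.T) {
	t.Parallel()
	time.Sleep(100 * time.Millisecond)
}

func TestBeta(t *testing.T) {
	t.Parallel()
	time.Sleep(100 * time.Millisecond)
}

func TestGamma(t *testing.T) {
	t.Parallel()
	time.Sleep(100 * time.Millisecond)
}
```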
8 changes: 4 additions & 4 deletions integration/common/basesuites.go
@@ -26,7 +26,7 @@ type BaseSuite struct {
Namespace string
}

func (s *BaseSuite) SetupTest() {
func (s *BaseSuite) SetupSuite() {
var err error
if !s.SkipSetupCreate {
logrus.Infof("%s: Setting up builder", s.Name)
@@ -44,7 +44,7 @@ func (s *BaseSuite) SetupTest() {
require.NoError(s.T(), err, "%s: kube client failed", s.Name)
}

func (s *BaseSuite) TearDownTest() {
func (s *BaseSuite) TearDownSuite() {
logrus.Infof("%s: Removing builder", s.Name)
err := RunBuildkit("rm", []string{
s.Name,
@@ -62,7 +62,7 @@ func (s *BaseSuite) TestSimpleBuild() {
dir, cleanup, err := NewSimpleBuildContext()
defer cleanup()
require.NoError(s.T(), err, "Failed to set up temporary build context")
args := []string{}
args := []string{"--progress=plain"}
if s.Name != "buildkit" { // TODO wire up the default name variable
args = append(
args,
@@ -102,7 +102,7 @@ func (s *BaseSuite) TestLocalOutputTarBuild() {
dir, cleanup, err := NewSimpleBuildContext()
defer cleanup()
require.NoError(s.T(), err, "Failed to set up temporary build context")
args := []string{}
args := []string{"--progress=plain"}
if s.Name != "buildkit" { // TODO wire up the default name variable
args = append(
args,
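The hooks above moved from testify's per-test lifecycle to its per-suite lifecycle, so a builder is now created and torn down once per suite rather than around every test. A minimal illustrative sketch of the two hook levels (not the project's actual suite):

```go
package demo_test

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

type lifecycleSuite struct {
	suite.Suite
}

// Runs once, before any test in the suite — the hook the builder
// setup was moved to in this commit.
func (s *lifecycleSuite) SetupSuite() {
	s.T().Log("create builder once for the whole suite")
}

// Runs once, after every test in the suite has finished.
func (s *lifecycleSuite) TearDownSuite() {
	s.T().Log("remove builder once")
}

// A SetupTest/TearDownTest pair, by contrast, would run around *each*
// test method — the more expensive behavior the commit moves away from.

func (s *lifecycleSuite) TestOne() { s.True(true) }
func (s *lifecycleSuite) TestTwo() { s.True(true) }

func TestLifecycleSuite(t *testing.T) {
	suite.Run(t, &lifecycleSuite{})
}
```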
4 changes: 2 additions & 2 deletions integration/suites/configmap_test.go
@@ -30,7 +30,7 @@ type configMapSuite struct {
configMapClient v1.ConfigMapInterface
}

func (s *configMapSuite) SetupTest() {
func (s *configMapSuite) SetupSuite() {
var err error
s.ClientSet, s.Namespace, err = common.GetKubeClientset()
require.NoError(s.T(), err, "%s: kube client failed", s.Name)
@@ -200,7 +200,7 @@ func (s *configMapSuite) TestPreExistingWithCustomCreate() {

func TestConfigMapSuite(t *testing.T) {
common.Skipper(t)
//t.Parallel() // TODO - tests fail if run in parallel, may be actual race bug
t.Parallel()
suite.Run(t, &configMapSuite{
Name: "configmaptest",
})
2 changes: 1 addition & 1 deletion integration/suites/default_test.go
@@ -14,7 +14,7 @@ type DefaultSuite struct{ common.BaseSuite }

func TestDefaultSuite(t *testing.T) {
common.Skipper(t)
//t.Parallel() // TODO - tests fail if run in parallel, may be actual race bug
t.Parallel()
suite.Run(t, &DefaultSuite{
BaseSuite: common.BaseSuite{
Name: "buildkit", // TODO pull this from the actual default name
2 changes: 1 addition & 1 deletion integration/suites/randomlb_test.go
@@ -16,7 +16,7 @@ type RandomLBSuite struct{ common.BaseSuite }

func TestRandomLBSuite(t *testing.T) {
common.Skipper(t)
//t.Parallel() // TODO - tests fail if run in parallel, may be actual race bug
t.Parallel()
suite.Run(t, &RandomLBSuite{
BaseSuite: common.BaseSuite{
Name: "randomlb",
29 changes: 15 additions & 14 deletions pkg/build/build.go
@@ -581,14 +581,9 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, ku
multiDriver := len(m[k]) > 1
for i, dp := range m[k] {
d := drivers[dp.driverIndex].Driver
driverName := drivers[dp.driverIndex].Name
opt.Platforms = dp.platforms

// TODO - this is a little messy - some refactoring of this routine would be worthwhile
var chosenNodeName string
for name := range clients[d.Factory().Name()] {
chosenNodeName = name // TODO - this doesn't actually seem to be working!!!
break
}
// TODO - this is also messy and won't work for multi-driver scenarios (not that it's possible yet...)
if auth == nil {
auth = d.GetAuthWrapper(registrySecretName)
@@ -598,9 +593,9 @@
// Set up loader based on first found type (only 1 supported)
for _, entry := range opt.Exports {
if entry.Type == "docker" {
return newDockerLoader(ctx, d, kubeClientConfig, chosenNodeName, mw)
return newDockerLoader(ctx, d, kubeClientConfig, driverName, mw)
} else if entry.Type == "oci" {
return newContainerdLoader(ctx, d, kubeClientConfig, chosenNodeName, mw)
return newContainerdLoader(ctx, d, kubeClientConfig, driverName, mw)
}
}
// TODO - Push scenario? (or is this a "not reached" scenario now?)
@@ -899,18 +894,24 @@ func notSupported(d driver.Driver, f driver.Feature) error {

type dockerLoadCallback func(name string) (io.WriteCloser, func(), error)

func newDockerLoader(ctx context.Context, d driver.Driver, kubeClientConfig clientcmd.ClientConfig, chosenNodeName string, mw *progress.MultiWriter) (io.WriteCloser, func(), error) {
func newDockerLoader(ctx context.Context, d driver.Driver, kubeClientConfig clientcmd.ClientConfig, builderName string, mw *progress.MultiWriter) (io.WriteCloser, func(), error) {
nodeNames := []string{}
// TODO this isn't quite right - we need a better "list only pods from one instance" func
builders, err := d.List(ctx)
if err != nil {
return nil, nil, err
}
for _, builder := range builders {
if builder.Name != builderName {
continue
}
for _, node := range builder.Nodes {
nodeNames = append(nodeNames, node.Name)
}
}
if len(nodeNames) == 0 {
return nil, nil, fmt.Errorf("no builders found for %s", builderName)
}
// TODO revamp this flow to return a list of pods

readers := make([]*io.PipeReader, len(nodeNames))
@@ -1019,7 +1020,7 @@ func (w *waitingWriter) Close() error {
return err
}

func newContainerdLoader(ctx context.Context, d driver.Driver, kubeClientConfig clientcmd.ClientConfig, chosenNodeName string, mw *progress.MultiWriter) (io.WriteCloser, func(), error) {
func newContainerdLoader(ctx context.Context, d driver.Driver, kubeClientConfig clientcmd.ClientConfig, builderName string, mw *progress.MultiWriter) (io.WriteCloser, func(), error) {
nodeNames := []string{}
// TODO this isn't quite right - we need a better "list only pods from one instance" func
// TODO revamp this flow to return a list of pods
@@ -1029,12 +1030,12 @@ func newContainerdLoader(ctx context.Context, d driver.Driver, kubeClientConfig
return nil, nil, err
}
for _, builder := range builders {
if builder.Name != builderName {
continue
}
for _, node := range builder.Nodes {
// For containerd, we never have to load on the node where it was built
// TODO - this doesn't actually work yet, but when we switch the output.Type to "oci" we need to load it everywhere anyway
if node.Name == chosenNodeName {
continue
}
// TODO - we may want to filter the source node, but when we switch the output.Type to "oci" we need to load it everywhere anyway
nodeNames = append(nodeNames, node.Name)
}
}
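The loader changes above drop the unreliable "chosen node" and instead filter the driver's builder list by builder name, failing fast when nothing matches. A minimal sketch of that selection logic using stand-in types (the real code works with the driver package's builder and node structs):

```go
package main

import "fmt"

// Stand-ins for the driver's listing types, for illustration only.
type Node struct{ Name string }
type Builder struct {
	Name  string
	Nodes []Node
}

// selectNodes keeps only the pods belonging to the named builder,
// mirroring the filtering newDockerLoader/newContainerdLoader now do.
func selectNodes(builders []Builder, builderName string) ([]string, error) {
	nodeNames := []string{}
	for _, b := range builders {
		if b.Name != builderName {
			continue
		}
		for _, n := range b.Nodes {
			nodeNames = append(nodeNames, n.Name)
		}
	}
	if len(nodeNames) == 0 {
		return nil, fmt.Errorf("no builders found for %s", builderName)
	}
	return nodeNames, nil
}

func main() {
	builders := []Builder{
		{Name: "buildkit", Nodes: []Node{{Name: "buildkit-abc"}, {Name: "buildkit-def"}}},
		{Name: "other", Nodes: []Node{{Name: "other-xyz"}}},
	}
	names, err := selectNodes(builders, "buildkit")
	fmt.Println(names, err) // [buildkit-abc buildkit-def] <nil>
}
```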
2 changes: 1 addition & 1 deletion pkg/cmd/build.go
@@ -232,7 +232,7 @@ func runBuild(streams genericclioptions.IOStreams, in buildOptions) error {
func buildTargets(ctx context.Context, kubeClientConfig clientcmd.ClientConfig, streams genericclioptions.IOStreams, opts map[string]build.Options, progressMode, contextPathHash, registrySecretName, instance string) error {
driverName := instance
if driverName == "" {
driverName = "buildx_buildkit_kubernetes"
driverName = "buildkit"
}
d, err := driver.GetDriver(ctx, driverName, nil, kubeClientConfig, []string{} /* TODO what BuildkitFlags are these? */, "" /* unused config file */, map[string]string{} /* DriverOpts unused */, contextPathHash)
if err != nil {
5 changes: 3 additions & 2 deletions pkg/driver/kubernetes/driver.go
@@ -51,6 +51,7 @@ const (
// TODO - consider adding other default values here to aid users in fine-tuning by editing the configmap post deployment
DefaultConfigFileTemplate = `# Default buildkitd configuration. Use --config <path/to/file> to override during create
debug = false
root = "/var/lib/buildkit/{{ .Name }}"
[worker.containerd]
namespace = "{{ .ContainerdNamespace }}"
`
@@ -103,7 +104,7 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
}
}
return sub.Wrap(
fmt.Sprintf("waiting for %d pods to be ready", d.minReplicas),
fmt.Sprintf("waiting for %d pods to be ready for %s", d.minReplicas, d.deployment.Name),
func() error {
if err := d.wait(ctx, sub); err != nil {
return err
@@ -137,7 +138,7 @@ func (d *Driver) wait(ctx context.Context, sub progress.SubLogger) error {
for try := 0; try < 100; try++ {
if err == nil {
if depl.Status.ReadyReplicas >= int32(d.minReplicas) {
sub.Log(1, []byte(fmt.Sprintf("All %d replicas online\n", d.minReplicas)))
sub.Log(1, []byte(fmt.Sprintf("All %d replicas for %s online\n", d.minReplicas, d.deployment.Name)))
return nil
}
deploymentUID = string(depl.ObjectMeta.GetUID())
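The added `root` line gives each builder its own buildkitd state directory, keyed by the builder name. A minimal sketch of how the template could render (the data struct is a stand-in whose field names are taken from the template's placeholders; the example values are assumptions):

```go
package main

import (
	"os"
	"text/template"
)

const configTemplate = `# Default buildkitd configuration. Use --config <path/to/file> to override during create
debug = false
root = "/var/lib/buildkit/{{ .Name }}"
[worker.containerd]
namespace = "{{ .ContainerdNamespace }}"
`

// Illustrative stand-in for the data the driver feeds the template.
type templateData struct {
	Name                string
	ContainerdNamespace string
}

func main() {
	tmpl := template.Must(template.New("buildkitd").Parse(configTemplate))
	// A builder named "buildkit" ends up with root = "/var/lib/buildkit/buildkit",
	// lining up with the per-builder hostPath added in manifest.go below.
	// The namespace value here is just an example.
	_ = tmpl.Execute(os.Stdout, templateData{
		Name:                "buildkit",
		ContainerdNamespace: "k8s.io",
	})
}
```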
17 changes: 2 additions & 15 deletions pkg/driver/kubernetes/factory.go
@@ -8,7 +8,6 @@ import (
"fmt"
"io/ioutil"
"strconv"
"strings"
"text/template"

"github.com/vmware-tanzu/buildkit-cli-for-kubectl/pkg/driver"
@@ -243,21 +242,9 @@ func (f *factory) AllowsInstances() bool {
}

// buildxNameToDeploymentName converts buildx name to Kubernetes Deployment name.
//
// eg. "buildx_buildkit_loving_mendeleev0" -> "loving-mendeleev0"
func buildxNameToDeploymentName(bx string) string {
// TODO: commands.util.go should not pass "buildx_buildkit_" prefix to drivers
s := bx
if strings.HasPrefix(s, "buildx_buildkit_") {
s = strings.TrimPrefix(s, "buildx_buildkit_")
}
s = strings.ReplaceAll(s, "_", "-")
if s == "kubernetes" {
// Having the default name of the deployment for buildkit called "kubernetes" is confusing, use something better
return "buildkit"
}
if s == "" {
if bx == "" {
return "buildkit"
}
return s
return bx
}
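After this trim, the helper is a straight pass-through with only the empty string mapped to the default deployment name. A small illustrative copy of the resulting behavior:

```go
package main

import "fmt"

// Simplified copy of the trimmed-down helper: no more
// "buildx_buildkit_" prefix stripping or underscore rewriting.
func buildxNameToDeploymentName(bx string) string {
	if bx == "" {
		return "buildkit"
	}
	return bx
}

func main() {
	fmt.Println(buildxNameToDeploymentName(""))          // buildkit
	fmt.Println(buildxNameToDeploymentName("mybuilder")) // mybuilder
}
```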
9 changes: 3 additions & 6 deletions pkg/driver/kubernetes/manifest/manifest.go
@@ -160,7 +160,7 @@ func toContainerdWorker(d *appsv1.Deployment, opt *DeploymentOpt) error {
},
corev1.VolumeMount{
Name: "var-lib-buildkit",
MountPath: "/var/lib/buildkit",
MountPath: "/var/lib/buildkit/" + opt.Name,
MountPropagation: &mountPropagationBidirectional,
},
corev1.VolumeMount{
@@ -204,7 +204,7 @@ func toContainerdWorker(d *appsv1.Deployment, opt *DeploymentOpt) error {
Name: "var-lib-buildkit",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/var/lib/buildkit",
Path: "/var/lib/buildkit/" + opt.Name,
Type: &hostPathDirectoryOrCreate,
}},
},
@@ -246,10 +246,7 @@ func toContainerdWorker(d *appsv1.Deployment, opt *DeploymentOpt) error {
},
)

// If this is a multi-user cluster and other users are running
// containerd based builders in other namespaces, we'll fail to start due to
// shared mounts for the buildkit cache
// buildkitd: could not lock /var/lib/buildkit/buildkitd.lock, another instance running?
// Spread our builders out on a multi-node cluster
d.Spec.Template.Spec.Affinity = &corev1.Affinity{
PodAntiAffinity: &corev1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
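The reworded comment reflects the intent of the existing required anti-affinity: spread builder pods across nodes rather than just dodging the shared /var/lib/buildkit lock. An illustrative construction of such a rule with client-go types (the label selector values here are assumptions, not the manifest's actual labels):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Require that no two pods carrying the same "app" label land on
	// the same node, keyed on the node-hostname topology domain.
	affinity := &corev1.Affinity{
		PodAntiAffinity: &corev1.PodAntiAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
				{
					LabelSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{"app": "buildkit"},
					},
					TopologyKey: "kubernetes.io/hostname",
				},
			},
		},
	}
	fmt.Printf("%+v\n", affinity)
}
```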
