
Fix ConfigMap glitches and doc registry caching
This fixes a few corner cases in how we handle ConfigMaps for the
buildkitd toml config file.  It adds integration coverage for those
scenarios.  It also adds a brief example showing how to configure
a local registry for caching purposes to speed up incremental
builds on a multi-node cluster.
Daniel Hiltgen committed Dec 1, 2020
1 parent efe365a commit 9438b36
Showing 10 changed files with 362 additions and 20 deletions.
5 changes: 5 additions & 0 deletions CONTRIBUTING.md
@@ -35,6 +35,11 @@ Assuming you have a valid kube configuration pointed at a cluster, you can run t
make integration
```

To run a single test suite while working on a specific area of the tests or main code, use something like this:
```
make integration EXTRA_GO_TEST_FLAGS="-run TestConfigMapSuite -v"
```

To check your code for **lint/style consistency**, run
```
make lint
23 changes: 23 additions & 0 deletions README.md
@@ -98,6 +98,29 @@ kubectl create secret docker-registry mysecret --docker-server='<registry hostna
kubectl build --push --registry-secret mysecret -t <registry hostname>/<namespace>/<repo>:<tag> -f Dockerfile ./
```

### Registry-based Caching

BuildKit caches prior build results to make incremental builds efficient. This
works well on a single node, but if you want to build on a multi-node cluster,
you can take advantage of BuildKit's ability to persist its cache in a
registry. This can significantly improve incremental build times regardless of
which node in the cluster your build lands on. For best performance, this
registry should be "local" to the cluster. The following examples demonstrate
this pattern:

* [./examples/local-registry.yaml](./examples/local-registry.yaml) A Kubernetes Deployment + Service that runs a local (unauthenticated) registry
* [./examples/local-registry-buildkitd.toml](./examples/local-registry-buildkitd.toml) A buildkitd TOML configuration for the above registry that enables **"insecure" access**

To set up from the root of this tree:
```
kubectl apply -f ./examples/local-registry.yaml
kubectl buildkit create --config ./examples/local-registry-buildkitd.toml
```
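
The registry Deployment may take a moment to start. Before running builds that
use the registry cache, you may want to wait for it to become ready; a standard
kubectl command (not part of these examples) for that is:
```
kubectl rollout status deployment/registry
```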

You can then build using the registry cache with something like:
```
kubectl build -t myimage --cache-to=type=registry,ref=registry:5000/cache --cache-from=type=registry,ref=registry:5000/cache .
```
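
BuildKit's registry cache exporter defaults to caching only the layers of the
final image; it also supports a `mode=max` option that exports intermediate
layers as well. Assuming `kubectl build` passes these cache options through to
BuildKit unchanged, a variation of the command above would be:
```
kubectl build -t myimage --cache-to=type=registry,ref=registry:5000/cache,mode=max --cache-from=type=registry,ref=registry:5000/cache .
```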

## Contributing

11 changes: 11 additions & 0 deletions examples/local-registry-buildkitd.toml
@@ -0,0 +1,11 @@
# Example buildkitd.toml configuration for a local insecure registry
# Initialize buildkit with:
#
# kubectl buildkit create --config ./local-registry-buildkitd.toml
debug = false
[worker.containerd]
namespace = "k8s.io"
[registry."registry:5000"]
http = true
insecure = true

44 changes: 44 additions & 0 deletions examples/local-registry.yaml
@@ -0,0 +1,44 @@
# Example for running a local registry in your cluster to use for caching purposes
#
# Note: this registry will not be visible to the underlying container runtime, so you
# won't be able to run images in the cluster from it. You can, however, use it as a
# cache on a multi-node cluster to speed up builds, since every builder has access
# to the same cached content.

# TODO explore a variant of this for Host networking, binding to localhost on port 5000
# and see if that's viable for a local dev registry pattern

apiVersion: apps/v1
kind: Deployment
metadata:
  name: registry
  labels:
    app: registry
spec:
  replicas: 1
  selector:
    matchLabels:
      app: registry
  template:
    metadata:
      labels:
        app: registry
    spec:
      containers:
      - name: registry
        image: docker.io/registry
        ports:
        - containerPort: 5000

---
apiVersion: v1
kind: Service
metadata:
  name: registry
spec:
  type: ClusterIP
  selector:
    app: registry
  ports:
  - protocol: TCP
    port: 5000
40 changes: 29 additions & 11 deletions integration/common/basesuites.go
@@ -3,31 +3,45 @@
package common

import (
"context"
"fmt"
"path"
"strings"

"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)

type BaseSuite struct {
suite.Suite
Name string
CreateFlags []string
Name string
CreateFlags []string
SkipSetupCreate bool

ClientSet *kubernetes.Clientset
Namespace string
}

func (s *BaseSuite) SetupTest() {
logrus.Infof("%s: Setting up builder", s.Name)
args := append(
[]string{
s.Name,
},
s.CreateFlags...,
)
err := RunBuildkit("create", args)
require.NoError(s.T(), err, "%s: builder create failed", s.Name)
var err error
if !s.SkipSetupCreate {
logrus.Infof("%s: Setting up builder", s.Name)
args := append(
[]string{
s.Name,
},
s.CreateFlags...,
)
err := RunBuildkit("create", args)
require.NoError(s.T(), err, "%s: builder create failed", s.Name)
}

s.ClientSet, s.Namespace, err = GetKubeClientset()
require.NoError(s.T(), err, "%s: kube client failed", s.Name)
}

func (s *BaseSuite) TearDownTest() {
@@ -36,6 +50,10 @@ func (s *BaseSuite) TearDownTest() {
s.Name,
})
require.NoError(s.T(), err, "%s: builder rm failed", s.Name)
configMapClient := s.ClientSet.CoreV1().ConfigMaps(s.Namespace)
_, err = configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{})
require.Error(s.T(), err, "config map wasn't cleaned up")
require.Contains(s.T(), err.Error(), "not found")
}

func (s *BaseSuite) TestSimpleBuild() {
24 changes: 24 additions & 0 deletions integration/common/kubeclient.go
@@ -0,0 +1,24 @@
// Copyright (C) 2020 VMware, Inc.
// SPDX-License-Identifier: Apache-2.0
package common

import (
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/kubernetes"
)

// GetKubeClientset retrieves the clientset and namespace
func GetKubeClientset() (*kubernetes.Clientset, string, error) {
configFlags := genericclioptions.NewConfigFlags(true)
clientConfig := configFlags.ToRawKubeConfigLoader()
ns, _, err := clientConfig.Namespace()
if err != nil {
return nil, "", err
}
restClientConfig, err := clientConfig.ClientConfig()
if err != nil {
return nil, "", err
}
clientset, err := kubernetes.NewForConfig(restClientConfig)
return clientset, ns, err
}
207 changes: 207 additions & 0 deletions integration/suites/configmap_test.go
@@ -0,0 +1,207 @@
// Copyright (C) 2020 VMware, Inc.
// SPDX-License-Identifier: Apache-2.0
package suites

import (
"context"
"path/filepath"
"testing"

"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/vmware-tanzu/buildkit-cli-for-kubectl/integration/common"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

type configMapSuite struct {
suite.Suite
Name string
CreateFlags []string

ClientSet *kubernetes.Clientset
Namespace string

configMapClient v1.ConfigMapInterface
}

func (s *configMapSuite) SetupTest() {
var err error
s.ClientSet, s.Namespace, err = common.GetKubeClientset()
require.NoError(s.T(), err, "%s: kube client failed", s.Name)
s.configMapClient = s.ClientSet.CoreV1().ConfigMaps(s.Namespace)
}

func (s *configMapSuite) getConfigMap() *corev1.ConfigMap {
payload := `# pre-existing configuration
# nothing to see here...
`
return &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
APIVersion: appsv1.SchemeGroupVersion.String(),
Kind: "ConfigMap",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: s.Namespace,
Name: s.Name,
},
BinaryData: map[string][]byte{
"buildkitd.toml": []byte(payload),
},
}
}

func (s *configMapSuite) TestDefaultCreate() {
logrus.Infof("%s: Creating builder with default config", s.Name)
args := append(
[]string{
s.Name,
},
s.CreateFlags...,
)
err := common.RunBuildkit("create", args)
require.NoError(s.T(), err, "%s: builder create failed", s.Name)
cfg, err := s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{})
require.NoError(s.T(), err, "%s: fetch configmap failed", s.Name)
data, ok := cfg.BinaryData["buildkitd.toml"]
require.True(s.T(), ok, "missing buildkitd.toml: %#v", cfg.BinaryData)
// Spot check an expected string
require.Contains(s.T(), string(data), "Default buildkitd configuration.")

// Tear down the builder
logrus.Infof("%s: Removing builder", s.Name)
err = common.RunBuildkit("rm", []string{
s.Name,
})
require.NoError(s.T(), err, "%s: builder rm failed", s.Name)
_, err = s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{})
require.Error(s.T(), err, "config map wasn't cleaned up")
require.Contains(s.T(), err.Error(), "not found")
}

// Pre-create a config and make sure it does not get overridden by the default creation flow
func (s *configMapSuite) TestPreExistingConfigDefaultCreate() {
logrus.Infof("%s: Creating pre-existing config", s.Name)
_, err := s.configMapClient.Create(context.Background(), s.getConfigMap(), metav1.CreateOptions{})
require.NoError(s.T(), err, "%s: pre-existing configmap create failed", s.Name)

logrus.Infof("%s: Creating builder with default config", s.Name)
args := append(
[]string{
s.Name,
},
s.CreateFlags...,
)
err = common.RunBuildkit("create", args)
require.NoError(s.T(), err, "%s: builder create failed", s.Name)
cfg, err := s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{})
require.NoError(s.T(), err, "%s: fetch configmap failed", s.Name)
data, ok := cfg.BinaryData["buildkitd.toml"]
require.True(s.T(), ok, "missing buildkitd.toml: %#v", cfg.BinaryData)
// Spot check an expected string doesn't exist
require.NotContains(s.T(), string(data), "Default buildkitd configuration.")

// Tear down the builder
logrus.Infof("%s: Removing builder", s.Name)
err = common.RunBuildkit("rm", []string{
s.Name,
})
require.NoError(s.T(), err, "%s: builder rm failed", s.Name)
_, err = s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{})
// TODO if we preserve pre-existing configmaps this will need to be refined.
require.Error(s.T(), err, "config map wasn't cleaned up")
require.Contains(s.T(), err.Error(), "not found")
}

func (s *configMapSuite) TestCustomCreate() {
logrus.Infof("%s: Creating builder with custom config", s.Name)
dir, cleanup, err := common.NewBuildContext(map[string]string{
"buildkitd.toml": `# Custom config file
# nothing to see here 2
`})
require.NoError(s.T(), err, "%s: config file creation", s.Name)

defer cleanup()

args := append(
[]string{
"--config", filepath.Join(dir, "buildkitd.toml"),
s.Name,
},
s.CreateFlags...,
)
err = common.RunBuildkit("create", args)
require.NoError(s.T(), err, "%s: builder create failed", s.Name)
cfg, err := s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{})
require.NoError(s.T(), err, "%s: fetch configmap failed", s.Name)
data, ok := cfg.BinaryData["buildkitd.toml"]
require.True(s.T(), ok, "missing buildkitd.toml: %#v", cfg.BinaryData)
// Spot check an expected string
require.NotContains(s.T(), string(data), "Default buildkitd configuration.", string(data))
require.Contains(s.T(), string(data), "Custom config file", string(data))

// Tear down the builder
logrus.Infof("%s: Removing builder", s.Name)
err = common.RunBuildkit("rm", []string{
s.Name,
})
require.NoError(s.T(), err, "%s: builder rm failed", s.Name)
_, err = s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{})
require.Error(s.T(), err, "config map wasn't cleaned up")
require.Contains(s.T(), err.Error(), "not found")
}
func (s *configMapSuite) TestPreExistingWithCustomCreate() {
logrus.Infof("%s: Creating pre-existing config", s.Name)
_, err := s.configMapClient.Create(context.Background(), s.getConfigMap(), metav1.CreateOptions{})
require.NoError(s.T(), err, "%s: pre-existing configmap create failed", s.Name)

logrus.Infof("%s: Creating builder with custom config", s.Name)
dir, cleanup, err := common.NewBuildContext(map[string]string{
"buildkitd.toml": `# Custom config file
# nothing to see here 2
`})
require.NoError(s.T(), err, "%s: config file create failed", s.Name)

defer cleanup()

args := append(
[]string{
"--config", filepath.Join(dir, "buildkitd.toml"),
s.Name,
},
s.CreateFlags...,
)
err = common.RunBuildkit("create", args)
require.NoError(s.T(), err, "%s: builder create failed", s.Name)
cfg, err := s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{})
require.NoError(s.T(), err, "%s: fetch configmap failed", s.Name)
data, ok := cfg.BinaryData["buildkitd.toml"]
require.True(s.T(), ok, "missing buildkitd.toml: %#v", cfg.BinaryData)
// Spot check expected strings
require.NotContains(s.T(), string(data), "Default buildkitd configuration.", string(data))
require.NotContains(s.T(), string(data), "pre-existing configuration", string(data))
require.Contains(s.T(), string(data), "Custom config file", string(data))

// Tear down the builder
logrus.Infof("%s: Removing builder", s.Name)
err = common.RunBuildkit("rm", []string{
s.Name,
})
require.NoError(s.T(), err, "%s: builder rm failed", s.Name)
_, err = s.configMapClient.Get(context.Background(), s.Name, metav1.GetOptions{})
require.Error(s.T(), err, "config map wasn't cleaned up")
require.Contains(s.T(), err.Error(), "not found")
}

func TestConfigMapSuite(t *testing.T) {
common.Skipper(t)
//t.Parallel() // TODO - tests fail if run in parallel, may be actual race bug
suite.Run(t, &configMapSuite{
Name: "configmaptest",
})
}
