Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactor KinD usage in test harness and allow to add (local) containers #1217

Merged
merged 1 commit into from
Dec 23, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions pkg/apis/kudo/v1beta1/test_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,8 @@ type TestSuite struct {
// If set, each node defined in the kind configuration will have a docker named volume mounted into it to persist
// pulled container images across test runs.
KINDNodeCache bool `json:"kindNodeCache"`
// Containers to load to each KIND node prior to running the tests.
KINDContainers []string `json:"kindContainers"`
// Whether or not to start the KUDO controller for the tests.
StartKUDO bool `json:"startKUDO"`
// If set, do not delete the resources after running the tests (implies SkipClusterDelete).
Expand Down
52 changes: 21 additions & 31 deletions pkg/test/harness.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/manager"
kindConfig "sigs.k8s.io/kind/pkg/apis/config/v1alpha3"
kind "sigs.k8s.io/kind/pkg/cluster"

kudo "github.com/kudobuilder/kudo/pkg/apis/kudo/v1beta1"
"github.com/kudobuilder/kudo/pkg/controller/instance"
Expand All @@ -44,7 +43,7 @@ type Harness struct {
client client.Client
dclient discovery.DiscoveryInterface
env *envtest.Environment
kind *kind.Provider
kind *kind
kubeConfigPath string
clientLock sync.Mutex
configLock sync.Mutex
Expand Down Expand Up @@ -105,18 +104,18 @@ func (h *Harness) GetTimeout() int {
// RunKIND starts a KIND cluster.
func (h *Harness) RunKIND() (*rest.Config, error) {
if h.kind == nil {
h.kind = kind.NewProvider()
var err error

contexts, err := h.kind.List()
h.kubeConfigPath, err = ioutil.TempDir("", "kudo")
if err != nil {
return nil, err
}

for _, context := range contexts {
// There is already a cluster with this context, let's re-use it.
if context == h.TestSuite.KINDContext {
return clientcmd.BuildConfigFromFlags("", h.explicitPath())
}
kind := newKind(h.TestSuite.KINDContext, h.explicitPath())
h.kind = &kind

if h.kind.IsRunning() {
return clientcmd.BuildConfigFromFlags("", h.explicitPath())
}

kindCfg := &kindConfig.Cluster{}
Expand All @@ -129,40 +128,33 @@ func (h *Harness) RunKIND() (*rest.Config, error) {
}
}

if err := h.addNodeCaches(kindCfg); err != nil {
dockerClient, err := h.DockerClient()
if err != nil {
return nil, err
}

h.kubeConfigPath, err = ioutil.TempDir("", "kudo")
if err != nil {
// Determine the correct API version to use with the user's Docker client.
dockerClient.NegotiateAPIVersion(context.TODO())

h.addNodeCaches(dockerClient, kindCfg)

if err := h.kind.Run(kindCfg); err != nil {
return nil, err
}

if err := h.kind.Create(
h.TestSuite.KINDContext,
kind.CreateWithV1Alpha3Config(kindCfg),
kind.CreateWithKubeconfigPath(h.explicitPath()),
); err != nil {
if err := h.kind.AddContainers(dockerClient, h.TestSuite.KINDContainers); err != nil {
return nil, err
}
}

return clientcmd.BuildConfigFromFlags("", h.explicitPath())
}

func (h *Harness) addNodeCaches(kindCfg *kindConfig.Cluster) error {
func (h *Harness) addNodeCaches(dockerClient testutils.DockerClient, kindCfg *kindConfig.Cluster) {
if !h.TestSuite.KINDNodeCache {
return nil
}

dockerClient, err := h.DockerClient()
if err != nil {
return err
return
}

// Determine the correct API version to use with the user's Docker client.
dockerClient.NegotiateAPIVersion(context.TODO())

// add a default node if there are none specified.
if len(kindCfg.Nodes) == 0 {
kindCfg.Nodes = append(kindCfg.Nodes, kindConfig.Node{})
Expand All @@ -188,8 +180,6 @@ func (h *Harness) addNodeCaches(kindCfg *kindConfig.Cluster) error {
HostPath: volume.Mountpoint,
})
}

return nil
}

// RunTestEnv starts a Kubernetes API server and etcd server for use in the
Expand Down Expand Up @@ -451,7 +441,7 @@ func (h *Harness) Stop() {

h.T.Log("collecting cluster logs to", logDir)

if err := h.kind.CollectLogs(h.TestSuite.KINDContext, logDir); err != nil {
if err := h.kind.CollectLogs(logDir); err != nil {
h.T.Log("error collecting kind cluster logs", err)
}
}
Expand All @@ -477,7 +467,7 @@ func (h *Harness) Stop() {

if h.kind != nil {
h.T.Log("tearing down kind cluster")
if err := h.kind.Delete(h.TestSuite.KINDContext, h.explicitPath()); err != nil {
if err := h.kind.Stop(); err != nil {
h.T.Log("error tearing down kind cluster", err)
}

Expand Down
33 changes: 22 additions & 11 deletions pkg/test/harness_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package test
import (
"context"
"fmt"
"io"
"testing"

dockertypes "github.com/docker/docker/api/types"
Expand All @@ -19,7 +20,19 @@ func TestGetTimeout(t *testing.T) {
assert.Equal(t, 45, h.GetTimeout())
}

type dockerMock struct{}
type dockerMock struct {
ImageWriter *io.PipeWriter
imageReader *io.PipeReader
}

func newDockerMock() *dockerMock {
reader, writer := io.Pipe()

return &dockerMock{
ImageWriter: writer,
imageReader: reader,
}
}

func (d *dockerMock) VolumeCreate(ctx context.Context, body volumetypes.VolumeCreateBody) (dockertypes.Volume, error) {
return dockertypes.Volume{
Expand All @@ -29,22 +42,22 @@ func (d *dockerMock) VolumeCreate(ctx context.Context, body volumetypes.VolumeCr

func (d *dockerMock) NegotiateAPIVersion(ctx context.Context) {}

func (d *dockerMock) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) {
return d.imageReader, nil
}

func TestAddNodeCaches(t *testing.T) {
h := Harness{
T: t,
docker: &dockerMock{},
docker: newDockerMock(),
}

kindCfg := &kindConfig.Cluster{}
if err := h.addNodeCaches(kindCfg); err != nil {
t.Fatal(err)
}
h.addNodeCaches(h.docker, kindCfg)
assert.Nil(t, kindCfg.Nodes)

h.TestSuite.KINDNodeCache = true
if err := h.addNodeCaches(kindCfg); err != nil {
t.Fatal(err)
}
h.addNodeCaches(h.docker, kindCfg)
assert.NotNil(t, kindCfg.Nodes)
assert.Equal(t, 1, len(kindCfg.Nodes))
assert.NotNil(t, kindCfg.Nodes[0].ExtraMounts)
Expand All @@ -59,9 +72,7 @@ func TestAddNodeCaches(t *testing.T) {
},
}

if err := h.addNodeCaches(kindCfg); err != nil {
t.Fatal(err)
}
h.addNodeCaches(h.docker, kindCfg)
assert.NotNil(t, kindCfg.Nodes)
assert.Equal(t, 2, len(kindCfg.Nodes))
assert.NotNil(t, kindCfg.Nodes[0].ExtraMounts)
Expand Down
102 changes: 102 additions & 0 deletions pkg/test/kind.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
package test

import (
	"context"
	"errors"

	"sigs.k8s.io/kind/pkg/apis/config/v1alpha3"
	"sigs.k8s.io/kind/pkg/cluster"
	"sigs.k8s.io/kind/pkg/cluster/nodes"
	"sigs.k8s.io/kind/pkg/cluster/nodeutils"

	testutils "github.com/kudobuilder/kudo/pkg/test/utils"
)

// kind provides a thin abstraction layer for a KIND cluster.
type kind struct {
	// Provider is the KIND cluster provider used to create, list, and
	// delete clusters and to collect their logs.
	Provider *cluster.Provider
	// context is the KIND cluster name (context) this wrapper operates on.
	context string
	// explicitPath is the path of the kubeconfig file written for the cluster.
	explicitPath string
}

// newKind returns a kind wrapper bound to the given KIND context name and
// kubeconfig path, backed by a freshly constructed cluster provider.
func newKind(kindContext string, explicitPath string) kind {
	return kind{
		Provider:     cluster.NewProvider(),
		context:      kindContext,
		explicitPath: explicitPath,
	}
}

// Run creates the KIND cluster for this wrapper's context from the given
// configuration and writes its kubeconfig to the configured explicit path.
func (k *kind) Run(config *v1alpha3.Cluster) error {
	withConfig := cluster.CreateWithV1Alpha3Config(config)
	withKubeconfig := cluster.CreateWithKubeconfigPath(k.explicitPath)

	return k.Provider.Create(k.context, withConfig, withKubeconfig)
}

// IsRunning reports whether a KIND cluster already exists for this wrapper's
// context.
//
// NOTE(review): listing failures are escalated via panic because the boolean
// signature cannot carry an error — consider surfacing the error to callers.
func (k *kind) IsRunning() bool {
	existing, err := k.Provider.List()
	if err != nil {
		panic(err)
	}

	for i := range existing {
		if existing[i] == k.context {
			return true
		}
	}

	return false
}

// AddContainers loads the named Docker container images into every node of
// the KIND cluster. The cluster must already be running.
//
// It returns an error if the cluster is not running, if the cluster nodes
// cannot be listed, or if saving or loading any image fails.
func (k *kind) AddContainers(docker testutils.DockerClient, containers []string) error {
	// Return a regular error instead of panicking: this precondition failure
	// is recoverable by the caller and the function already returns an error.
	if !k.IsRunning() {
		return errors.New("KIND cluster isn't running")
	}

	nodes, err := k.Provider.ListNodes(k.context)
	if err != nil {
		return err
	}

	// Each image archive is loaded into each node individually.
	for _, node := range nodes {
		for _, container := range containers {
			if err := loadContainer(docker, node, container); err != nil {
				return err
			}
		}
	}

	return nil
}

// CollectLogs exports the KIND cluster's logs into the given directory.
func (k *kind) CollectLogs(logDir string) error {
	return k.Provider.CollectLogs(k.context, logDir)
}

// Stop tears down the KIND cluster and cleans up its kubeconfig entry.
func (k *kind) Stop() error {
	return k.Provider.Delete(k.context, k.explicitPath)
}

// loadContainer exports a single Docker image through the Docker daemon and
// streams the resulting archive into the given KIND node.
func loadContainer(docker testutils.DockerClient, node nodes.Node, container string) error {
	archive, err := docker.ImageSave(context.TODO(), []string{container})
	if err != nil {
		return err
	}
	defer archive.Close()

	return nodeutils.LoadImageArchive(node, archive)
}
Loading