Skip to content

Commit

Permalink
buildcontrol: pass in a cluster rather than injecting a default cluster (#5645)
Browse files Browse the repository at this point in the history
  • Loading branch information
nicks committed Mar 31, 2022
1 parent dc45b0e commit 00745ff
Show file tree
Hide file tree
Showing 12 changed files with 124 additions and 134 deletions.
14 changes: 7 additions & 7 deletions internal/cli/wire_gen.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion internal/controllers/core/kubernetesapply/disco.go
Expand Up @@ -162,7 +162,7 @@ func (r *Reconciler) toWatchRefs(ka *v1alpha1.KubernetesApply) ([]v1alpha1.Kuber
for _, e := range entities {
ns := k8s.Namespace(e.Meta().GetNamespace())
if ns == "" {
ns = r.cfgNS
ns = k8s.Namespace(r.k8sClient.ConnectionConfig().Namespace)
}
if ns == "" {
ns = k8s.DefaultNamespace
Expand Down
38 changes: 17 additions & 21 deletions internal/controllers/core/kubernetesapply/reconciler.go
Expand Up @@ -48,15 +48,13 @@ type deleteSpec struct {
}

type Reconciler struct {
st store.RStore
dkc build.DockerKubeConnection
kubeContext k8s.KubeContext
k8sClient k8s.Client
cfgNS k8s.Namespace
ctrlClient ctrlclient.Client
indexer *indexer.Indexer
execer localexec.Execer
requeuer *indexer.Requeuer
st store.RStore
dkc build.DockerKubeConnection
k8sClient k8s.Client
ctrlClient ctrlclient.Client
indexer *indexer.Indexer
execer localexec.Execer
requeuer *indexer.Requeuer

mu sync.Mutex

Expand All @@ -81,18 +79,16 @@ func (r *Reconciler) CreateBuilder(mgr ctrl.Manager) (*builder.Builder, error) {
return b, nil
}

func NewReconciler(ctrlClient ctrlclient.Client, k8sClient k8s.Client, scheme *runtime.Scheme, dkc build.DockerKubeConnection, kubeContext k8s.KubeContext, st store.RStore, cfgNS k8s.Namespace, execer localexec.Execer) *Reconciler {
func NewReconciler(ctrlClient ctrlclient.Client, k8sClient k8s.Client, scheme *runtime.Scheme, dkc build.DockerKubeConnection, st store.RStore, execer localexec.Execer) *Reconciler {
return &Reconciler{
ctrlClient: ctrlClient,
k8sClient: k8sClient,
indexer: indexer.NewIndexer(scheme, indexKubernetesApply),
execer: execer,
dkc: dkc,
kubeContext: kubeContext,
st: st,
results: make(map[types.NamespacedName]*Result),
cfgNS: cfgNS,
requeuer: indexer.NewRequeuer(),
ctrlClient: ctrlClient,
k8sClient: k8sClient,
indexer: indexer.NewIndexer(scheme, indexKubernetesApply),
execer: execer,
dkc: dkc,
st: st,
results: make(map[types.NamespacedName]*Result),
requeuer: indexer.NewRequeuer(),
}
}

Expand Down Expand Up @@ -474,7 +470,7 @@ func (r *Reconciler) createEntitiesToDeploy(ctx context.Context,
// When working with a local k8s cluster, we set the pull policy to Never,
// to ensure that k8s fails hard if the image is missing from docker.
policy := v1.PullIfNotPresent
if r.dkc.WillBuildToKubeContext(r.kubeContext) {
if r.dkc.WillBuildToKubeContext(k8s.KubeContext(r.k8sClient.ConnectionConfig().Context)) {
policy = v1.PullNever
}

Expand Down
3 changes: 1 addition & 2 deletions internal/controllers/core/kubernetesapply/reconciler_test.go
Expand Up @@ -664,7 +664,6 @@ func newFixture(t *testing.T) *fixture {
kClient := k8s.NewFakeK8sClient(t)
cfb := fake.NewControllerFixtureBuilder(t)
dockerClient := docker.NewFakeClient()
kubeContext := k8s.KubeContext("kind-kind")

// Make the fake ImageExists always return true, which is the behavior we want
// when testing the reconciler
Expand All @@ -673,7 +672,7 @@ func newFixture(t *testing.T) *fixture {
execer := localexec.NewFakeExecer(t)

db := build.NewDockerBuilder(dockerClient, dockerfile.Labels{})
r := NewReconciler(cfb.Client, kClient, v1alpha1.NewScheme(), db, kubeContext, cfb.Store, "default", execer)
r := NewReconciler(cfb.Client, kClient, v1alpha1.NewScheme(), db, cfb.Store, execer)

return &fixture{
ControllerFixture: cfb.Build(r),
Expand Down
26 changes: 1 addition & 25 deletions internal/engine/build_and_deployer_test.go
Expand Up @@ -80,30 +80,6 @@ func TestGKEDeploy(t *testing.T) {
}
}

// TestDockerForMacDeploy checks the local Docker Desktop deploy path:
// exactly one docker build, no registry push, and deployed YAML that
// references the locally built, tagged image.
func TestDockerForMacDeploy(t *testing.T) {
	f := newBDFixture(t, clusterid.ProductDockerDesktop, container.RuntimeDocker)

	manifest := NewSanchoDockerBuildManifest(f)
	if _, err := f.BuildAndDeploy(buildcontrol.BuildTargets(manifest), store.BuildStateSet{}); err != nil {
		t.Fatal(err)
	}

	if got := f.docker.BuildCount; got != 1 {
		t.Errorf("Expected 1 docker build, actual: %d", got)
	}
	if got := f.docker.PushCount; got != 0 {
		t.Errorf("Expected no push to docker, actual: %d", got)
	}

	const expectedYaml = "image: gcr.io/some-project-162817/sancho:tilt-11cd0b38bc3ceb95"
	if !strings.Contains(f.k8s.Yaml, expectedYaml) {
		t.Errorf("Expected yaml to contain %q. Actual:\n%s", expectedYaml, f.k8s.Yaml)
	}
}

func TestYamlManifestDeploy(t *testing.T) {
f := newBDFixture(t, clusterid.ProductGKE, container.RuntimeDocker)

Expand Down Expand Up @@ -510,7 +486,7 @@ type fakeKINDLoader struct {
loadCount int
}

func (kl *fakeKINDLoader) LoadToKIND(ctx context.Context, ref reference.NamedTagged) error {
// LoadToKIND implements KINDLoader for tests: it counts how many times a
// load was requested and reports success without invoking the kind CLI.
func (kl *fakeKINDLoader) LoadToKIND(ctx context.Context, cluster *v1alpha1.Cluster, ref reference.NamedTagged) error {
	kl.loadCount++
	return nil
}
82 changes: 39 additions & 43 deletions internal/engine/buildcontrol/image_build_and_deployer.go
Expand Up @@ -30,19 +30,18 @@ import (
var _ BuildAndDeployer = &ImageBuildAndDeployer{}

type KINDLoader interface {
LoadToKIND(ctx context.Context, ref reference.NamedTagged) error
LoadToKIND(ctx context.Context, cluster *v1alpha1.Cluster, ref reference.NamedTagged) error
}

type cmdKINDLoader struct {
env clusterid.Product
clusterName k8s.ClusterName
}

func (kl *cmdKINDLoader) LoadToKIND(ctx context.Context, ref reference.NamedTagged) error {
func (kl *cmdKINDLoader) LoadToKIND(ctx context.Context, cluster *v1alpha1.Cluster, ref reference.NamedTagged) error {
// In Kind5, --name specifies the name of the cluster in the kubeconfig.
// In Kind6, the -name parameter is prefixed with 'kind-' before being written to/read from the kubeconfig
kindName := string(kl.clusterName)
if kl.env == clusterid.ProductKIND {
k8sConn := k8sConnStatus(cluster)
kindName := k8sConn.Cluster
if k8sConn.Product == string(clusterid.ProductKIND) {
kindName = strings.TrimPrefix(kindName, "kind-")
}

Expand All @@ -54,49 +53,37 @@ func (kl *cmdKINDLoader) LoadToKIND(ctx context.Context, ref reference.NamedTagg
return cmd.Run()
}

func NewKINDLoader(env clusterid.Product, clusterName k8s.ClusterName) KINDLoader {
return &cmdKINDLoader{
env: env,
clusterName: clusterName,
}
// NewKINDLoader returns the production KINDLoader, backed by cmdKINDLoader,
// which loads images by running the kind CLI against the target cluster.
func NewKINDLoader() KINDLoader {
	return &cmdKINDLoader{}
}

type ImageBuildAndDeployer struct {
db *build.DockerBuilder
ib *ImageBuilder
k8sClient k8s.Client
env clusterid.Product
kubeContext k8s.KubeContext
analytics *analytics.TiltAnalytics
clock build.Clock
kl KINDLoader
ctrlClient ctrlclient.Client
r *kubernetesapply.Reconciler
db *build.DockerBuilder
ib *ImageBuilder
analytics *analytics.TiltAnalytics
clock build.Clock
kl KINDLoader
ctrlClient ctrlclient.Client
r *kubernetesapply.Reconciler
}

func NewImageBuildAndDeployer(
db *build.DockerBuilder,
customBuilder *build.CustomBuilder,
k8sClient k8s.Client,
env clusterid.Product,
kubeContext k8s.KubeContext,
analytics *analytics.TiltAnalytics,
c build.Clock,
kl KINDLoader,
ctrlClient ctrlclient.Client,
r *kubernetesapply.Reconciler,
) *ImageBuildAndDeployer {
return &ImageBuildAndDeployer{
db: db,
ib: NewImageBuilder(db, customBuilder),
k8sClient: k8sClient,
env: env,
kubeContext: kubeContext,
analytics: analytics,
clock: c,
kl: kl,
ctrlClient: ctrlClient,
r: r,
db: db,
ib: NewImageBuilder(db, customBuilder),
analytics: analytics,
clock: c,
kl: kl,
ctrlClient: ctrlClient,
r: r,
}
}

Expand Down Expand Up @@ -192,7 +179,7 @@ func (ibd *ImageBuildAndDeployer) BuildAndDeploy(ctx context.Context, st store.R
return store.ImageBuildResult{}, err
}

pushStage := ibd.push(ctx, refs.LocalRef, ps, iTarget, kTarget)
pushStage := ibd.push(ctx, refs.LocalRef, ps, iTarget, cluster, kTarget)
if pushStage != nil {
stages = append(stages, *pushStage)
}
Expand Down Expand Up @@ -238,7 +225,7 @@ func (ibd *ImageBuildAndDeployer) BuildAndDeploy(ctx context.Context, st store.R
return newResults, nil
}

func (ibd *ImageBuildAndDeployer) push(ctx context.Context, ref reference.NamedTagged, ps *build.PipelineState, iTarget model.ImageTarget, kTarget model.K8sTarget) *v1alpha1.DockerImageStageStatus {
func (ibd *ImageBuildAndDeployer) push(ctx context.Context, ref reference.NamedTagged, ps *build.PipelineState, iTarget model.ImageTarget, cluster *v1alpha1.Cluster, kTarget model.K8sTarget) *v1alpha1.DockerImageStageStatus {
ps.StartPipelineStep(ctx, "Pushing %s", container.FamiliarString(ref))
defer ps.EndPipelineStep(ctx)

Expand All @@ -256,16 +243,16 @@ func (ibd *ImageBuildAndDeployer) push(ctx context.Context, ref reference.NamedT
} else if !IsImageDeployedToK8s(iTarget, kTarget) {
ps.Printf(ctx, "Skipping push: base image does not need deploy")
return nil
} else if ibd.db.WillBuildToKubeContext(ibd.kubeContext) {
} else if ibd.db.WillBuildToKubeContext(k8s.KubeContext(k8sConnStatus(cluster).Context)) {
ps.Printf(ctx, "Skipping push: building on cluster's container runtime")
return nil
}

startTime := apis.NowMicro()
var err error
if ibd.shouldUseKINDLoad(ctx, iTarget) {
if ibd.shouldUseKINDLoad(ctx, iTarget, cluster) {
ps.Printf(ctx, "Loading image to KIND")
err := ibd.kl.LoadToKIND(ps.AttachLogger(ctx), ref)
err := ibd.kl.LoadToKIND(ps.AttachLogger(ctx), cluster, ref)
endTime := apis.NowMicro()
stage := &v1alpha1.DockerImageStageStatus{
Name: "kind load",
Expand Down Expand Up @@ -293,8 +280,8 @@ func (ibd *ImageBuildAndDeployer) push(ctx context.Context, ref reference.NamedT
return stage
}

func (ibd *ImageBuildAndDeployer) shouldUseKINDLoad(ctx context.Context, iTarg model.ImageTarget) bool {
isKIND := ibd.env == clusterid.ProductKIND
func (ibd *ImageBuildAndDeployer) shouldUseKINDLoad(ctx context.Context, iTarg model.ImageTarget, cluster *v1alpha1.Cluster) bool {
isKIND := k8sConnStatus(cluster).Product == string(clusterid.ProductKIND)
if !isKIND {
return false
}
Expand All @@ -306,8 +293,8 @@ func (ibd *ImageBuildAndDeployer) shouldUseKINDLoad(ctx context.Context, iTarg m
return false
}

registry := ibd.k8sClient.LocalRegistry(ctx)
if !registry.Empty() {
hasRegistry := cluster.Status.Registry != nil && cluster.Status.Registry.Host != ""
if hasRegistry {
return false
}

Expand Down Expand Up @@ -344,3 +331,12 @@ func (ibd *ImageBuildAndDeployer) delete(ctx context.Context, k8sTarget model.K8
kTargetNN := types.NamespacedName{Name: k8sTarget.ID().Name.String()}
return ibd.r.ForceDelete(ctx, kTargetNN, k8sTarget.KubernetesApplySpec, "force update")
}

// k8sConnStatus extracts the Kubernetes connection status from the given
// Cluster. When the cluster or any intermediate field is nil, it returns an
// empty (zero-value) status instead, so callers never need to nil-check the
// result before reading fields like Context, Cluster, or Product.
func k8sConnStatus(cluster *v1alpha1.Cluster) *v1alpha1.KubernetesClusterConnectionStatus {
	if cluster == nil || cluster.Status.Connection == nil || cluster.Status.Connection.Kubernetes == nil {
		return &v1alpha1.KubernetesClusterConnectionStatus{}
	}
	return cluster.Status.Connection.Kubernetes
}

0 comments on commit 00745ff

Please sign in to comment.