Commit

buildcontrol: put the Cluster in BuildState (#5642)
nicks committed Mar 30, 2022
1 parent e3212d4 commit 596dbcb
Showing 6 changed files with 28 additions and 18 deletions.
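
In short: before this change, each BuildAndDeployer fetched the Cluster from the API server on its own and ignored any fetch error. Now the build controller looks up the default Cluster once when it assembles the BuildStateSet, stores it on each target's BuildState, and the deployers read it back through a new nil-safe ClusterOrEmpty() accessor.
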
2 changes: 0 additions & 2 deletions internal/engine/buildcontrol/build_control.go
@@ -230,8 +230,6 @@ func HoldTargetsWithBuildingComponents(state store.EngineState, mts []*store.Man
}

func targetsByCluster(mts []*store.ManifestTarget) map[string][]*store.ManifestTarget {
-// TODO(nick): In the future, K8s objects may reference the cluster
-// they're deploying to.
clusters := make(map[string][]*store.ManifestTarget)
for _, mt := range mts {
if mt.Manifest.IsK8s() {
7 changes: 2 additions & 5 deletions internal/engine/buildcontrol/docker_compose_build_and_deployer.go
@@ -145,10 +145,6 @@ func (bd *DockerComposeBuildAndDeployer) BuildAndDeploy(ctx context.Context, st
ps.EndPipelineStep(ctx)
}

-var cluster v1alpha1.Cluster
-// If the cluster fetch fails, that's OK.
-_ = bd.ctrlClient.Get(ctx, ktypes.NamespacedName{Name: v1alpha1.ClusterNameDocker}, &cluster)
-
imageMapSet := make(map[ktypes.NamespacedName]*v1alpha1.ImageMap, len(plan.dockerComposeTarget.Spec.ImageMaps))
for _, iTarget := range iTargets {
if iTarget.IsLiveUpdateOnly {
@@ -175,11 +171,12 @@ func (bd *DockerComposeBuildAndDeployer) BuildAndDeploy(ctx context.Context, st
cmdimage.MaybeUpdateStatus(ctx, bd.ctrlClient, iTarget, cmdimage.ToBuildingStatus(iTarget, startTime))

expectedRef := iTarget.Refs.ConfigurationRef
+cluster := currentState[target.ID()].ClusterOrEmpty()

// NOTE(maia): we assume that this func takes one DC target and up to one image target
// corresponding to that service. If this func ever supports specs for more than one
// service at once, we'll have to match up image build results to DC target by ref.
-refs, stages, err := bd.ib.Build(ctx, iTarget, &cluster, imageMapSet, ps)
+refs, stages, err := bd.ib.Build(ctx, iTarget, cluster, imageMapSet, ps)
if err != nil {
dockerimage.MaybeUpdateStatus(ctx, bd.ctrlClient, iTarget, dockerimage.ToCompletedFailStatus(iTarget, startTime, stages, err))
cmdimage.MaybeUpdateStatus(ctx, bd.ctrlClient, iTarget, cmdimage.ToCompletedFailStatus(iTarget, startTime, err))
7 changes: 2 additions & 5 deletions internal/engine/buildcontrol/image_build_and_deployer.go
@@ -154,10 +154,6 @@ func (ibd *ImageBuildAndDeployer) BuildAndDeploy(ctx context.Context, st store.R
ps.EndPipelineStep(ctx)
}

-var cluster v1alpha1.Cluster
-// If the cluster fetch fails, that's OK.
-_ = ibd.ctrlClient.Get(ctx, types.NamespacedName{Name: "default"}, &cluster)
-
imageMapSet := make(map[types.NamespacedName]*v1alpha1.ImageMap, len(kTarget.ImageMaps))
for _, iTarget := range iTargets {
if iTarget.IsLiveUpdateOnly {
@@ -187,8 +183,9 @@ func (ibd *ImageBuildAndDeployer) BuildAndDeploy(ctx context.Context, st store.R
startTime := apis.NowMicro()
dockerimage.MaybeUpdateStatus(ctx, ibd.ctrlClient, iTarget, dockerimage.ToBuildingStatus(iTarget, startTime))
cmdimage.MaybeUpdateStatus(ctx, ibd.ctrlClient, iTarget, cmdimage.ToBuildingStatus(iTarget, startTime))
+cluster := stateSet[target.ID()].ClusterOrEmpty()

-refs, stages, err := ibd.ib.Build(ctx, iTarget, &cluster, imageMapSet, ps)
+refs, stages, err := ibd.ib.Build(ctx, iTarget, cluster, imageMapSet, ps)
if err != nil {
dockerimage.MaybeUpdateStatus(ctx, ibd.ctrlClient, iTarget, dockerimage.ToCompletedFailStatus(iTarget, startTime, stages, err))
cmdimage.MaybeUpdateStatus(ctx, ibd.ctrlClient, iTarget, cmdimage.ToCompletedFailStatus(iTarget, startTime, err))
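
The two deployer hunks above are symmetric: DockerComposeBuildAndDeployer used to fetch the cluster named by v1alpha1.ClusterNameDocker and ImageBuildAndDeployer used to fetch the cluster named "default", both via ctrlClient.Get with the error deliberately ignored. After this change, both simply read the cluster that the controller already placed on the per-target BuildState, via ClusterOrEmpty(), and pass it to ib.Build.
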
6 changes: 3 additions & 3 deletions internal/engine/buildcontrol/image_build_and_deployer_test.go
@@ -989,17 +989,17 @@ func TestTwoManifestsWithSameTwoImages(t *testing.T) {
func TestPlatformFromCluster(t *testing.T) {
f := newIBDFixture(t, clusterid.ProductGKE)

-f.upsert(&v1alpha1.Cluster{
+cluster := &v1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "default"},
Status: v1alpha1.ClusterStatus{
Arch: "amd64",
},
-})
+}

m := NewSanchoDockerBuildManifest(f)
iTargetID1 := m.ImageTargets[0].ID()
stateSet := store.BuildStateSet{
-iTargetID1: store.BuildState{FullBuildTriggered: true},
+iTargetID1: store.BuildState{FullBuildTriggered: true, Cluster: cluster},
}
_, err := f.BuildAndDeploy(BuildTargets(m), stateSet)
require.NoError(t, err)
14 changes: 11 additions & 3 deletions internal/engine/buildcontroller.go
@@ -79,9 +79,14 @@ func (c *BuildController) needsBuild(ctx context.Context, st store.RStore) (buil

buildReason := mt.NextBuildReason()
targets := buildcontrol.BuildTargets(manifest)
-buildStateSet := buildStateSet(ctx, manifest, state.KubernetesResources[manifest.Name.String()],
+buildStateSet := buildStateSet(ctx,
+manifest,
+state.KubernetesResources[manifest.Name.String()],
state.DockerComposeServices[manifest.Name.String()],
-targets, ms, buildReason)
+state.Clusters[v1alpha1.ClusterNameDefault],
+targets,
+ms,
+buildReason)

return buildEntry{
name: manifest.Name,
@@ -204,6 +209,7 @@ func SpanIDForBuildLog(buildCount int) logstore.SpanID {
func buildStateSet(ctx context.Context, manifest model.Manifest,
kresource *k8sconv.KubernetesResource,
dcs *v1alpha1.DockerComposeService,
+cluster *v1alpha1.Cluster,
specs []model.TargetSpec,
ms *store.ManifestState, reason model.BuildReason) store.BuildStateSet {
result := store.BuildStateSet{}
@@ -222,7 +228,9 @@ func buildStateSet(ctx context.Context, manifest model.Manifest,
depsChanged = append(depsChanged, dep)
}

-result[id] = store.NewBuildState(status.LastResult, filesChanged, depsChanged)
+state := store.NewBuildState(status.LastResult, filesChanged, depsChanged)
+state.Cluster = cluster
+result[id] = state
}

isFullBuildTrigger := reason.HasTrigger() && !buildcontrol.IsLiveUpdateEligibleTrigger(manifest, reason)
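
Here the controller resolves state.Clusters[v1alpha1.ClusterNameDefault] once per build entry and copies it onto every target's BuildState. If no cluster exists yet, the map lookup yields nil, which the ClusterOrEmpty() accessor added below turns into an empty Cluster.
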
10 changes: 10 additions & 0 deletions internal/store/build_result.go
@@ -196,6 +196,9 @@ type BuildState struct {
// This field indicates case 1 || case 2 -- i.e. that we should skip
// live_update, and force an image build (even if there are no changed files)
FullBuildTriggered bool
+
+// The default cluster.
+Cluster *v1alpha1.Cluster
}

func NewBuildState(result BuildResult, files []string, pendingDeps []model.TargetID) BuildState {
@@ -214,6 +217,13 @@ func NewBuildState(result BuildResult, files []string, pendingDeps []model.Targe
}
}

+func (b BuildState) ClusterOrEmpty() *v1alpha1.Cluster {
+if b.Cluster == nil {
+return &v1alpha1.Cluster{}
+}
+return b.Cluster
+}
+
func (b BuildState) WithFullBuildTriggered(isImageBuildTrigger bool) BuildState {
b.FullBuildTriggered = isImageBuildTrigger
return b
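
For readers outside the Tilt codebase, here is a minimal, self-contained sketch of the pattern this last file introduces. Cluster and BuildState below are simplified stand-ins for v1alpha1.Cluster and store.BuildState, not the real types; only the shape of the accessor mirrors the ClusterOrEmpty() added above.

package main

import "fmt"

// Simplified stand-in for v1alpha1.Cluster.
type Cluster struct {
	Name string
	Arch string
}

// Simplified stand-in for store.BuildState.
type BuildState struct {
	FullBuildTriggered bool

	// The default cluster, populated when the BuildStateSet is assembled.
	Cluster *Cluster
}

// ClusterOrEmpty mirrors the accessor added in build_result.go: callers get a
// usable zero-value Cluster instead of having to nil-check the pointer.
func (b BuildState) ClusterOrEmpty() *Cluster {
	if b.Cluster == nil {
		return &Cluster{}
	}
	return b.Cluster
}

func main() {
	// A state assembled after the controller found the cluster...
	withCluster := BuildState{Cluster: &Cluster{Name: "default", Arch: "amd64"}}
	// ...and one assembled before any cluster exists.
	withoutCluster := BuildState{}

	fmt.Println(withCluster.ClusterOrEmpty().Arch)    // amd64
	fmt.Println(withoutCluster.ClusterOrEmpty().Arch) // empty string, no nil panic
}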
