-
Notifications
You must be signed in to change notification settings - Fork 290
/
docker_compose_build_and_deployer.go
157 lines (132 loc) · 4.88 KB
/
docker_compose_build_and_deployer.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
package engine
import (
"context"
"fmt"
"github.com/docker/distribution/reference"
"github.com/opentracing/opentracing-go"
"github.com/windmilleng/tilt/internal/build"
"github.com/windmilleng/tilt/internal/container"
"github.com/windmilleng/tilt/internal/docker"
"github.com/windmilleng/tilt/internal/dockercompose"
"github.com/windmilleng/tilt/internal/logger"
"github.com/windmilleng/tilt/internal/model"
"github.com/windmilleng/tilt/internal/store"
)
// DockerComposeBuildAndDeployer builds the image (if any) for a single
// Docker Compose service and deploys it by shelling out to `docker-compose up`.
type DockerComposeBuildAndDeployer struct {
	// dcc runs docker-compose commands (`up`, container-ID lookup).
	dcc dockercompose.DockerComposeClient
	// dc is the Docker client, used here to tag built images (see tagWithExpected).
	dc docker.Client
	// icb performs the actual image (and cache) builds.
	icb *imageAndCacheBuilder
	// clock drives pipeline timing; presumably injected for testability — TODO confirm.
	clock build.Clock
}

// Compile-time assertion that DockerComposeBuildAndDeployer implements BuildAndDeployer.
var _ BuildAndDeployer = &DockerComposeBuildAndDeployer{}
// NewDockerComposeBuildAndDeployer wires up a DockerComposeBuildAndDeployer
// from its collaborators: a docker-compose client, a Docker client, an image
// builder, and a clock for pipeline timing.
func NewDockerComposeBuildAndDeployer(dcc dockercompose.DockerComposeClient, dc docker.Client,
	icb *imageAndCacheBuilder, c build.Clock) *DockerComposeBuildAndDeployer {
	bd := &DockerComposeBuildAndDeployer{
		dcc:   dcc,
		dc:    dc,
		icb:   icb,
		clock: c,
	}
	return bd
}
// extract partitions specs into the target kinds this builder understands:
// ImageTargets and DockerComposeTargets. If any spec is of another kind,
// it returns (nil, nil) so the caller redirects the build elsewhere.
func (bd *DockerComposeBuildAndDeployer) extract(specs []model.TargetSpec) ([]model.ImageTarget, []model.DockerComposeTarget) {
	var images []model.ImageTarget
	var services []model.DockerComposeTarget
	for _, spec := range specs {
		if img, ok := spec.(model.ImageTarget); ok {
			images = append(images, img)
			continue
		}
		if svc, ok := spec.(model.DockerComposeTarget); ok {
			services = append(services, svc)
			continue
		}
		// Unrecognized target kind: discard everything so the caller
		// falls through to a different BuildAndDeployer.
		return nil, nil
	}
	return images, services
}
// BuildAndDeploy builds the images (if any) for exactly one Docker Compose
// service and then brings that service up via `docker-compose up`.
//
// specs must contain exactly one DockerComposeTarget (plus any number of
// ImageTargets); otherwise the build is silently redirected to the next
// builder. On success it returns a BuildResultSet containing an image result
// per built ImageTarget (annotated with the running container ID when the
// image is deployed to this service) and a container result for the DC target.
func (bd *DockerComposeBuildAndDeployer) BuildAndDeploy(ctx context.Context, st store.RStore, specs []model.TargetSpec, currentState store.BuildStateSet) (store.BuildResultSet, error) {
	iTargets, dcTargets := bd.extract(specs)
	if len(dcTargets) != 1 {
		return store.BuildResultSet{}, SilentRedirectToNextBuilderf(
			"DockerComposeBuildAndDeployer requires exactly one dcTarget (got %d)", len(dcTargets))
	}
	dcTarget := dcTargets[0]

	span, ctx := opentracing.StartSpanFromContext(ctx, "DockerComposeBuildAndDeployer-BuildAndDeploy")
	// Consistency fix: use the already-bound dcTarget rather than re-indexing
	// dcTargets[0] (they are the same value; indexing twice invites drift).
	span.SetTag("target", dcTarget.Name)
	defer span.Finish()

	q, err := NewImageTargetQueue(iTargets, currentState)
	if err != nil {
		return store.BuildResultSet{}, err
	}

	numStages := q.CountDirty()
	haveImage := len(iTargets) > 0

	ps := build.NewPipelineState(ctx, numStages, bd.clock)
	// Deliberately closes over err so the pipeline sees the final error value,
	// including the later reassignments below.
	defer func() { ps.End(ctx, err) }()

	iTargetMap := model.ImageTargetsByID(iTargets)
	err = q.RunBuilds(func(target model.TargetSpec, state store.BuildState, depResults []store.BuildResult) (store.BuildResult, error) {
		iTarget, ok := target.(model.ImageTarget)
		if !ok {
			return store.BuildResult{}, fmt.Errorf("Not an image target: %T", target)
		}

		// Rewrite base-image references to point at the images built for
		// this target's dependencies.
		iTarget, err := injectImageDependencies(iTarget, iTargetMap, depResults)
		if err != nil {
			return store.BuildResult{}, err
		}

		expectedRef := iTarget.ConfigurationRef

		// NOTE(maia): we assume that this func takes one DC target and up to one image target
		// corresponding to that service. If this func ever supports specs for more than one
		// service at once, we'll have to match up image build results to DC target by ref.
		ref, err := bd.icb.Build(ctx, iTarget, currentState[iTarget.ID()], ps)
		if err != nil {
			return store.BuildResult{}, err
		}

		// Tag the built image with the ref docker-compose expects to find.
		ref, err = bd.tagWithExpected(ctx, ref, expectedRef)
		if err != nil {
			return store.BuildResult{}, err
		}

		return store.NewImageBuildResult(iTarget.ID(), ref), nil
	})
	if err != nil {
		return store.BuildResultSet{}, err
	}

	// Both streams go to the info-level logger; compose output is informational.
	stdout := logger.Get(ctx).Writer(logger.InfoLvl)
	stderr := logger.Get(ctx).Writer(logger.InfoLvl)

	// If we built no image, let docker-compose build the service itself
	// (the !haveImage flag).
	err = bd.dcc.Up(ctx, dcTarget.ConfigPath, dcTarget.Name, !haveImage, stdout, stderr)
	if err != nil {
		return store.BuildResultSet{}, err
	}

	// NOTE(dmiller): right now we only need this the first time. In the future
	// it might be worth it to move this somewhere else
	cid, err := bd.dcc.ContainerID(ctx, dcTarget.ConfigPath, dcTarget.Name)
	if err != nil {
		return store.BuildResultSet{}, err
	}

	// Annotate each image result that was deployed to this service with the
	// container ID so later live-update passes can find the container.
	results := q.results
	for _, iTarget := range iTargets {
		if isImageDeployedToDC(iTarget, dcTarget) {
			result := results[iTarget.ID()]
			result.ContainerID = cid
			results[iTarget.ID()] = result
		}
	}

	results[dcTarget.ID()] = store.NewContainerBuildResult(dcTarget.ID(), cid)
	return results, nil
}
// tagWithExpected tags the image `ref` as the ref that docker-compose expects
// to find for this service, and returns the ref it was tagged as.
//
// If the expected ref already carries a tag, the image is tagged exactly as
// that; if it is a bare name, it is tagged `:latest`, which is what Docker
// Compose looks up by default.
func (bd *DockerComposeBuildAndDeployer) tagWithExpected(ctx context.Context, ref reference.NamedTagged,
	expected container.RefSelector) (reference.NamedTagged, error) {
	tagAs, parseErr := container.ParseNamedTagged(expected.String())
	if parseErr != nil {
		// expected ref is just a name, so tag it as `latest` b/c that's what Docker Compose wants
		withLatest, err := reference.WithTag(ref, docker.TagLatest)
		if err != nil {
			return nil, err
		}
		tagAs = withLatest
	}

	err := bd.dc.ImageTag(ctx, ref.String(), tagAs.String())
	return tagAs, err
}