Skip to content

Commit da55b2a

Browse files
committed
feat(stapel-to-buildah): support user stages and mounts
* Working beforeInstall, install, beforeSetup, setup stages building.
* Run each instruction from werf.yaml in a separate shell session for now.
* Fixed mountpoints cleaning in the 'from' stage.
* Added usage of mounts for user stages.

Signed-off-by: Timofey Kirillov <timofey.kirillov@flant.com>
1 parent 9e242e9 commit da55b2a

File tree

5 files changed

+74
-31
lines changed

5 files changed

+74
-31
lines changed

pkg/build/builder/ansible.go

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -135,8 +135,7 @@ func (b *Ansible) stage(ctx context.Context, cr container_backend.ContainerBacke
135135

136136
return nil
137137
} else {
138-
// TODO(stapel-to-buildah)
139-
panic("not implemented")
138+
return fmt.Errorf("ansible builder is not supported when using buildah backend, please use shell builder instead")
140139
}
141140
}
142141

pkg/build/builder/shell.go

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -87,12 +87,11 @@ func (b *Shell) stage(cr container_backend.ContainerBackend, stageBuilder stage_
8787
}
8888

8989
container.AddServiceRunCommands(containerTmpScriptFilePath)
90-
91-
return nil
9290
} else {
93-
// TODO(stapel-to-buildah)
94-
panic("not implemented")
91+
stageBuilder.StapelStageBuilder().AddUserCommands(b.stageCommands(userStageName)...)
9592
}
93+
94+
return nil
9695
}
9796

9897
func (b *Shell) stageChecksum(ctx context.Context, userStageName string) string {

pkg/build/stage/base.go

Lines changed: 17 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -253,13 +253,13 @@ func (s *BaseStage) PrepareImage(ctx context.Context, c Conveyor, cr container_b
253253

254254
serviceMounts := s.getServiceMounts(prevBuiltImage)
255255
s.addServiceMountsLabels(serviceMounts, c, cr, stageImage)
256-
if err := s.addServiceMountsVolumes(serviceMounts, stageImage); err != nil {
256+
if err := s.addServiceMountsVolumes(serviceMounts, c, cr, stageImage); err != nil {
257257
return fmt.Errorf("error adding mounts volumes: %s", err)
258258
}
259259

260260
customMounts := s.getCustomMounts(prevBuiltImage)
261261
s.addCustomMountLabels(customMounts, c, cr, stageImage)
262-
if err := s.addCustomMountVolumes(customMounts, stageImage); err != nil {
262+
if err := s.addCustomMountVolumes(customMounts, c, cr, stageImage); err != nil {
263263
return fmt.Errorf("error adding mounts volumes: %s", err)
264264
}
265265

@@ -313,7 +313,7 @@ func (s *BaseStage) getServiceMountsFromConfig() map[string][]string {
313313
return mountpointsByType
314314
}
315315

316-
func (s *BaseStage) addServiceMountsVolumes(mountpointsByType map[string][]string, stageImage *StageImage) error {
316+
func (s *BaseStage) addServiceMountsVolumes(mountpointsByType map[string][]string, c Conveyor, cr container_backend.ContainerBackend, stageImage *StageImage) error {
317317
for mountType, mountpoints := range mountpointsByType {
318318
for _, mountpoint := range mountpoints {
319319
absoluteMountpoint := path.Join("/", mountpoint)
@@ -333,7 +333,12 @@ func (s *BaseStage) addServiceMountsVolumes(mountpointsByType map[string][]strin
333333
return fmt.Errorf("error creating tmp path %s for mount: %s", absoluteFrom, err)
334334
}
335335

336-
stageImage.Builder.LegacyStapelStageBuilder().Container().RunOptions().AddVolume(fmt.Sprintf("%s:%s", absoluteFrom, absoluteMountpoint))
336+
volume := fmt.Sprintf("%s:%s", absoluteFrom, absoluteMountpoint)
337+
if c.UseLegacyStapelBuilder(cr) {
338+
stageImage.Builder.LegacyStapelStageBuilder().Container().RunOptions().AddVolume(volume)
339+
} else {
340+
stageImage.Builder.StapelStageBuilder().AddBuildVolumes(volume)
341+
}
337342
}
338343
}
339344

@@ -406,7 +411,7 @@ func (s *BaseStage) getCustomMountsFromConfig() map[string][]string {
406411
return mountpointsByFrom
407412
}
408413

409-
func (s *BaseStage) addCustomMountVolumes(mountpointsByFrom map[string][]string, stageImage *StageImage) error {
414+
func (s *BaseStage) addCustomMountVolumes(mountpointsByFrom map[string][]string, c Conveyor, cr container_backend.ContainerBackend, stageImage *StageImage) error {
410415
for from, mountpoints := range mountpointsByFrom {
411416
absoluteFrom := util.ExpandPath(from)
412417

@@ -424,7 +429,13 @@ func (s *BaseStage) addCustomMountVolumes(mountpointsByFrom map[string][]string,
424429

425430
for _, mountpoint := range mountpoints {
426431
absoluteMountpoint := path.Join("/", mountpoint)
427-
stageImage.Builder.LegacyStapelStageBuilder().Container().RunOptions().AddVolume(fmt.Sprintf("%s:%s", absoluteFrom, absoluteMountpoint))
432+
433+
volume := fmt.Sprintf("%s:%s", absoluteFrom, absoluteMountpoint)
434+
if c.UseLegacyStapelBuilder(cr) {
435+
stageImage.Builder.LegacyStapelStageBuilder().Container().RunOptions().AddVolume(volume)
436+
} else {
437+
stageImage.Builder.StapelStageBuilder().AddBuildVolumes(volume)
438+
}
428439
}
429440
}
430441

pkg/build/stage/from.go

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import (
88
"path/filepath"
99
"strings"
1010

11+
"github.com/werf/logboek"
1112
"github.com/werf/werf/pkg/config"
1213
"github.com/werf/werf/pkg/container_backend"
1314
imagePkg "github.com/werf/werf/pkg/image"
@@ -99,7 +100,9 @@ func (s *FromStage) PrepareImage(ctx context.Context, c Conveyor, cr container_b
99100
} else {
100101
stageImage.Builder.StapelStageBuilder().AddPrepareContainerActions(container_backend.PrepareContainerActionWith(func(containerRoot string) error {
101102
for _, mountpoint := range mountpoints {
102-
if err := os.RemoveAll(mountpoint); err != nil {
103+
logboek.Context(ctx).Info().LogF("Removing mountpoint %q in the container dir: %q\n", mountpoint, filepath.Join(containerRoot, mountpoint))
104+
105+
if err := os.RemoveAll(filepath.Join(containerRoot, mountpoint)); err != nil {
103106
return fmt.Errorf("unable to remove %q: %s", mountpoint, err)
104107
}
105108
}

pkg/container_backend/buildah_backend.go

Lines changed: 49 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ import (
66
"strings"
77

88
"github.com/google/uuid"
9+
"github.com/opencontainers/runtime-spec/specs-go"
910

1011
"github.com/werf/logboek"
1112
"github.com/werf/werf/pkg/buildah"
@@ -28,30 +29,32 @@ func (runtime *BuildahBackend) HasStapelBuildSupport() bool {
2829
return true
2930
}
3031

31-
// FIXME(stapel-to-buildah): proper deep implementation
32-
func (runtime *BuildahBackend) BuildStapelStage(ctx context.Context, baseImage string, opts BuildStapelStageOpts) (string, error) {
33-
/*
34-
1. Create new temporary build container using 'from' and remain uniq container name.
35-
2. Mount container root to host and run all prepare-container-actions, then unmount.
36-
3. Run user instructions in container, mount volumes when build.
37-
4. Set specified labels into container.
38-
5. Save container name as builtID (ideally there is no need to commit an image here, because buildah allows to commit and push directly container, which would happen later).
39-
*/
32+
func (runtime *BuildahBackend) getBuildahCommonOpts(ctx context.Context, suppressLog bool) (opts buildah.CommonOpts) {
33+
if !suppressLog {
34+
opts.LogWriter = logboek.Context(ctx).OutStream()
35+
}
4036

41-
containerID := uuid.New().String()
37+
return
38+
}
4239

43-
_, err := runtime.buildah.FromCommand(ctx, containerID, baseImage, buildah.FromCommandOpts{})
40+
func (runtime *BuildahBackend) BuildStapelStage(ctx context.Context, baseImage string, opts BuildStapelStageOpts) (string, error) {
41+
containerID := fmt.Sprintf("werf-stage-build-%s", uuid.New().String())
42+
43+
_, err := runtime.buildah.FromCommand(ctx, containerID, baseImage, buildah.FromCommandOpts(runtime.getBuildahCommonOpts(ctx, true)))
4444
if err != nil {
4545
return "", fmt.Errorf("unable to create container using base image %q: %s", baseImage, err)
4646
}
4747

48+
// TODO(stapel-to-buildah): cleanup orphan build containers in werf-host-cleanup procedure
49+
// defer runtime.buildah.Rm(ctx, containerID, buildah.RmOpts{CommonOpts: runtime.getBuildahCommonOpts(ctx, true)})
50+
4851
if len(opts.PrepareContainerActions) > 0 {
4952
err := func() error {
50-
containerRoot, err := runtime.buildah.Mount(ctx, containerID, buildah.MountOpts{})
53+
containerRoot, err := runtime.buildah.Mount(ctx, containerID, buildah.MountOpts(runtime.getBuildahCommonOpts(ctx, true)))
5154
if err != nil {
5255
return fmt.Errorf("unable to mount container %q root dir: %s", containerID, err)
5356
}
54-
defer runtime.buildah.Umount(ctx, containerRoot, buildah.UmountOpts{})
57+
defer runtime.buildah.Umount(ctx, containerRoot, buildah.UmountOpts(runtime.getBuildahCommonOpts(ctx, true)))
5558

5659
for _, action := range opts.PrepareContainerActions {
5760
if err := action.PrepareContainer(containerRoot); err != nil {
@@ -67,16 +70,44 @@ func (runtime *BuildahBackend) BuildStapelStage(ctx context.Context, baseImage s
6770
}
6871

6972
for _, cmd := range opts.UserCommands {
70-
if err := runtime.buildah.RunCommand(ctx, containerID, strings.Fields(cmd), buildah.RunCommandOpts{}); err != nil {
73+
var mounts []specs.Mount
74+
for _, volume := range opts.BuildVolumes {
75+
volumeParts := strings.SplitN(volume, ":", 2)
76+
if len(volumeParts) != 2 {
77+
panic(fmt.Sprintf("invalid volume %q: expected SOURCE:DESTINATION format", volume))
78+
}
79+
80+
mounts = append(mounts, specs.Mount{
81+
Type: "bind",
82+
Source: volumeParts[0],
83+
Destination: volumeParts[1],
84+
})
85+
}
86+
87+
// TODO(stapel-to-buildah): Consider support for shell script instead of separate run commands to allow shared
88+
// usage of shell variables and functions between multiple commands.
89+
// Maybe there is no need of such function, instead provide options to select shell in the werf.yaml.
90+
// Is it important to provide compatibility between docker-server-based werf.yaml and buildah-based?
91+
if err := runtime.buildah.RunCommand(ctx, containerID, []string{"sh", "-c", cmd}, buildah.RunCommandOpts{
92+
CommonOpts: runtime.getBuildahCommonOpts(ctx, false),
93+
Mounts: mounts,
94+
}); err != nil {
7195
return "", fmt.Errorf("unable to run %q: %s", cmd, err)
7296
}
7397
}
7498

75-
// TODO(stapel-to-buildah): use buildah.Change to set labels
76-
fmt.Printf("[DEBUG] Setting labels %v for build container %q\n", opts.Labels, containerID)
99+
logboek.Context(ctx).Debug().LogF("Setting labels %v for build container %q\n", opts.Labels, containerID)
100+
if err := runtime.buildah.Config(ctx, containerID, buildah.ConfigOpts{
101+
CommonOpts: runtime.getBuildahCommonOpts(ctx, true),
102+
Labels: opts.Labels,
103+
}); err != nil {
104+
return "", fmt.Errorf("unable to set container %q config: %s", containerID, err)
105+
}
77106

78-
fmt.Printf("[DEBUG] Committing container %q\n", containerID)
79-
imgID, err := runtime.buildah.Commit(ctx, containerID, buildah.CommitOpts{})
107+
// TODO(stapel-to-buildah): Save container name as builtID. There is no need to commit an image here,
108+
// because buildah allows to commit and push directly container, which would happen later.
109+
logboek.Context(ctx).Debug().LogF("committing container %q\n", containerID)
110+
imgID, err := runtime.buildah.Commit(ctx, containerID, buildah.CommitOpts{CommonOpts: runtime.getBuildahCommonOpts(ctx, true)})
80111
if err != nil {
81112
return "", fmt.Errorf("unable to commit container %q: %s", containerID, err)
82113
}

0 commit comments

Comments (0)