This repository has been archived by the owner on Oct 2, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 691
/
image.bzl
522 lines (472 loc) · 17 KB
/
image.bzl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rule for building a Container image.
In addition to the base container_image rule, we expose its constituents
(attr, outputs, implementation) directly so that others may expose a
more specialized build leveraging the same implementation. The
expectation in such cases is that users will write something like:
load(
"@io_bazel_rules_docker//container:container.bzl",
_container="container",
)
def _impl(ctx):
...
return _container.image.implementation(ctx, ... kwarg overrides ...)
_foo_image = rule(
attrs = _container.image.attrs + {
# My attributes, or overrides of _container.image.attrs defaults.
...
},
executable = True,
outputs = _container.image.outputs,
implementation = _impl,
)
"""
load(
"//skylib:filetype.bzl",
container_filetype = "container",
deb_filetype = "deb",
tar_filetype = "tar",
)
load(
"@bazel_tools//tools/build_defs/hash:hash.bzl",
_hash_tools = "tools",
_sha256 = "sha256",
)
load(
"//skylib:zip.bzl",
_gzip = "gzip",
)
load(
"//skylib:label.bzl",
_string_to_label = "string_to_label",
)
load(
"//container:layers.bzl",
_assemble_image = "assemble",
_get_layers = "get_from_target",
_incr_load = "incremental_load",
_layer_tools = "tools",
)
load(
"//skylib:path.bzl",
"dirname",
"strip_prefix",
_canonicalize_path = "canonicalize",
_join_path = "join",
)
load(
"//skylib:serialize.bzl",
_serialize_dict = "dict_to_associative_list",
)
def magic_path(ctx, f):
    """Compute the path at which file `f` is placed inside the layer.

    NOTE: the logic here is knowingly quirky and is exposed so the
    foo_image rules can stay bug-for-bug compatible with it.
    See also: https://github.com/bazelbuild/rules_docker/issues/106
    See also: https://groups.google.com/forum/#!topic/bazel-discuss/1lX3aiTZX3Y
    """
    if not ctx.attr.data_path:
        # Without a data_path, files are added flat, by basename only.
        return f.basename
    # With a data_path, files keep their structure relative to that
    # (canonicalized) prefix, resolved against the output's directory.
    prefix = _join_path(
        dirname(ctx.outputs.out.short_path),
        _canonicalize_path(ctx.attr.data_path))
    return strip_prefix(f.short_path, prefix)
def _build_layer(ctx, files=None, file_map=None, empty_files=None,
                 directory=None, symlinks=None, debs=None, tars=None):
    """Build the current layer for appending it to the base layer.

    Args:
      ctx: The bazel rule context.
      files: File list, overrides ctx.files.files
      file_map: Dict[str, File], in-image path -> file to place there
      empty_files: str list, in-image paths of empty files to create
      directory: str, overrides ctx.attr.directory
      symlinks: str Dict, overrides ctx.attr.symlinks
      debs: File list, .deb packages to expand into the layer
      tars: File list, tarballs to expand into the layer

    Returns:
      (layer, digest): the layer tarball File and its sha256 file.
    """
    layer = ctx.outputs.layer
    build_layer = ctx.executable.build_layer
    args = [
        "--output=" + layer.path,
        "--directory=" + directory,
        "--mode=" + ctx.attr.mode,
    ]
    args += ["--file=%s=%s" % (f.path, magic_path(ctx, f)) for f in files]
    args += ["--file=%s=%s" % (f.path, path) for (path, f) in file_map.items()]
    args += ["--empty_file=%s" % f for f in empty_files or []]
    args += ["--tar=" + f.path for f in tars]
    args += ["--deb=" + f.path for f in debs]
    for k in symlinks:
        if ':' in k:
            # ':' is the --link source/target separator below, so it
            # cannot appear in the link source itself.
            fail("The source of a symlink cannot contain ':', got: %s" % k)
        args += ["--link=%s:%s" % (k, symlinks[k])]
    # Pass arguments via a flagfile to avoid command-line length limits.
    arg_file = ctx.new_file(ctx.label.name + ".layer.args")
    ctx.file_action(arg_file, "\n".join(args))
    ctx.action(
        executable = build_layer,
        arguments = ["--flagfile=" + arg_file.path],
        inputs = files + file_map.values() + tars + debs + [arg_file],
        outputs = [layer],
        use_default_shell_env = True,
        mnemonic = "ImageLayer",
    )
    return layer, _sha256(ctx, layer)
def _zip_layer(ctx, layer):
    """Gzip the layer tarball; return (zipped_layer, blob_sum digest)."""
    compressed = _gzip(ctx, layer)
    return compressed, _sha256(ctx, compressed)
def _get_base_config(ctx):
    """Return the base image's config file, or None without a base."""
    if not ctx.files.base:
        # No base image: the config is built from scratch.
        return None
    # The base is the first layer in container_parts if provided.
    parts = _get_layers(ctx, ctx.attr.base, ctx.files.base)
    return parts.get("config")
def _image_config(ctx, layer_name, entrypoint=None, cmd=None, env=None):
    """Create the configuration for a new container image.

    Args:
      ctx: The bazel rule context.
      layer_name: File whose content is the diff_id of the appended layer.
      entrypoint: str List, the image entrypoint, or None.
      cmd: str List, the image command, or None.
      env: str Dict, environment variables to set, or None.

    Returns:
      (config, digest): the v2.2 config File and its sha256 file.
    """
    config = ctx.new_file(ctx.label.name + ".config")
    label_file_dict = _string_to_label(
        ctx.files.label_files, ctx.attr.label_file_strings)
    labels = dict()
    for l in ctx.attr.labels:
        fname = ctx.attr.labels[l]
        if fname[0] == "@":
            # A leading '@' means the label's value is read from a file;
            # rewrite it to the resolved file's path.
            labels[l] = "@" + label_file_dict[fname[1:]].path
        else:
            labels[l] = fname
    # Guard with `or []` so a direct call without overrides does not
    # crash iterating None.
    args = [
        "--output=%s" % config.path,
    ] + [
        "--entrypoint=%s" % x for x in (entrypoint or [])
    ] + [
        "--command=%s" % x for x in (cmd or [])
    ] + [
        "--ports=%s" % x for x in ctx.attr.ports
    ] + [
        "--volumes=%s" % x for x in ctx.attr.volumes
    ]
    _labels = _serialize_dict(labels)
    if _labels:
        args += ["--labels=%s" % x for x in _labels.split(',')]
    _env = _serialize_dict(env or {})
    if _env:
        args += ["--env=%s" % x for x in _env.split(',')]
    if ctx.attr.user:
        args += ["--user=" + ctx.attr.user]
    if ctx.attr.workdir:
        args += ["--workdir=" + ctx.attr.workdir]
    inputs = [layer_name]
    # The '@' prefix tells create_image_config to read the diff_id from
    # the named file rather than taking the value literally.
    args += ["--layer=@" + layer_name.path]
    if ctx.attr.label_files:
        inputs += ctx.files.label_files
    base = _get_base_config(ctx)
    if base:
        args += ["--base=%s" % base.path]
        inputs += [base]
    if ctx.attr.stamp:
        # Workspace status files used for {KEY} substitution in args.
        stamp_inputs = [ctx.info_file, ctx.version_file]
        args += ["--stamp-info-file=%s" % f.path for f in stamp_inputs]
        inputs += stamp_inputs
    ctx.action(
        executable = ctx.executable.create_image_config,
        arguments = args,
        inputs = inputs,
        outputs = [config],
        use_default_shell_env = True,
        mnemonic = "ImageConfig",
    )
    return config, _sha256(ctx, config)
def _repository_name(ctx):
    """Compute the repository name for the current rule."""
    if not ctx.attr.legacy_repository_naming:
        # Newer Docker clients support multi-level names, which are a
        # part of the v2 registry specification.
        return _join_path(ctx.attr.repository, ctx.label.package)
    # Legacy behavior, off by default: collapse the package path into a
    # single path segment.
    return _join_path(ctx.attr.repository, ctx.label.package.replace("/", "_"))
def _impl(ctx, files=None, file_map=None, empty_files=None, directory=None,
          entrypoint=None, cmd=None, symlinks=None, output=None, env=None,
          debs=None, tars=None):
    """Implementation for the container_image rule.

    Args:
      ctx: The bazel rule context
      files: File list, overrides ctx.files.files
      file_map: Dict[str, File], defaults to {}
      empty_files: str list, overrides ctx.attr.empty_files
      directory: str, overrides ctx.attr.directory
      entrypoint: str List, overrides ctx.attr.entrypoint
      cmd: str List, overrides ctx.attr.cmd
      symlinks: str Dict, overrides ctx.attr.symlinks
      output: File to use as output for script to load docker image
      env: str Dict, overrides ctx.attr.env
      debs: File list, overrides ctx.files.debs
      tars: File list, overrides ctx.files.tars

    Returns:
      struct carrying the runfiles, the layer output, and the
      container_parts dict that chained image rules must preserve.
    """
    # Resolve each override, falling back to this rule's attributes.
    file_map = file_map or {}
    files = files or ctx.files.files
    empty_files = empty_files or ctx.attr.empty_files
    directory = directory or ctx.attr.directory
    entrypoint = entrypoint or ctx.attr.entrypoint
    cmd = cmd or ctx.attr.cmd
    symlinks = symlinks or ctx.attr.symlinks
    output = output or ctx.outputs.executable
    env = env or ctx.attr.env
    debs = debs or ctx.files.debs
    tars = tars or ctx.files.tars

    # Generate the unzipped filesystem layer, and its sha256 (aka diff_id).
    unzipped_layer, diff_id = _build_layer(ctx, files=files, file_map=file_map,
                                           empty_files=empty_files,
                                           directory=directory, symlinks=symlinks,
                                           debs=debs, tars=tars)

    # Generate the zipped filesystem layer, and its sha256 (aka blob sum).
    zipped_layer, blob_sum = _zip_layer(ctx, unzipped_layer)

    # Generate the new config using the attributes specified and the diff_id.
    config_file, config_digest = _image_config(
        ctx, diff_id, entrypoint=entrypoint, cmd=cmd, env=env)

    # Construct a temporary name based on the build target.
    tag_name = _repository_name(ctx) + ":" + ctx.label.name

    # Get the layers and shas from our base.
    # These are ordered as they'd appear in the v2.2 config,
    # so they grow at the end (this image's layer is appended last).
    parent_parts = _get_layers(ctx, ctx.attr.base, ctx.files.base)
    zipped_layers = parent_parts.get("zipped_layer", []) + [zipped_layer]
    shas = parent_parts.get("blobsum", []) + [blob_sum]
    unzipped_layers = parent_parts.get("unzipped_layer", []) + [unzipped_layer]
    diff_ids = parent_parts.get("diff_id", []) + [diff_id]

    # These are the constituent parts of the Container image, which each
    # rule in the chain must preserve.
    container_parts = {
        # The path to the v2.2 configuration file.
        "config": config_file,
        "config_digest": config_digest,
        # A list of paths to the layer .tar.gz files.
        "zipped_layer": zipped_layers,
        # A list of paths to the layer digests.
        "blobsum": shas,
        # A list of paths to the layer .tar files.
        "unzipped_layer": unzipped_layers,
        # A list of paths to the layer diff_ids.
        "diff_id": diff_ids,
        # At the root of the chain, we support deriving from a tarball
        # base image.
        "legacy": parent_parts.get("legacy"),
    }

    # We support incrementally loading or assembling this single image
    # with a temporary name given by its build rule.
    images = {
        tag_name: container_parts
    }
    _incr_load(ctx, images, output,
               run=not ctx.attr.legacy_run_behavior,
               run_flags=ctx.attr.docker_run_flags)
    _assemble_image(ctx, images, ctx.outputs.out)

    # Runfiles needed by the incremental-load script at run time.
    runfiles = ctx.runfiles(
        files = unzipped_layers + diff_ids + [config_file, config_digest] +
            ([container_parts["legacy"]] if container_parts["legacy"] else []))
    return struct(runfiles = runfiles,
                  files = depset([ctx.outputs.layer]),
                  container_parts = container_parts)
# Attributes shared by container_image_ and by rules that specialize it
# (see the module docstring). Merged with the hash and layer tool attrs
# so the implicit tool dependencies resolve.
_attrs = dict({
    "base": attr.label(allow_files = container_filetype),
    "data_path": attr.string(),
    "directory": attr.string(default = "/"),
    "tars": attr.label_list(allow_files = tar_filetype),
    "debs": attr.label_list(allow_files = deb_filetype),
    "files": attr.label_list(allow_files = True),
    "legacy_repository_naming": attr.bool(default = False),
    # TODO(mattmoor): Default this to False.
    "legacy_run_behavior": attr.bool(default = True),
    # Run the container using host networking, so that the service is
    # available to the developer without having to poke around with
    # docker inspect.
    "docker_run_flags": attr.string(
        default = "-i --rm --network=host",
    ),
    "mode": attr.string(default = "0555"),  # 0555 == a+rx
    "symlinks": attr.string_dict(),
    "entrypoint": attr.string_list(),
    "cmd": attr.string_list(),
    "user": attr.string(),
    "env": attr.string_dict(),
    "labels": attr.string_dict(),
    "ports": attr.string_list(),  # Skylark doesn't support int_list...
    "volumes": attr.string_list(),
    "workdir": attr.string(),
    "repository": attr.string(default = "bazel"),
    "stamp": attr.bool(default = False),
    # Implicit/Undocumented dependencies.
    # label_files/label_file_strings are filled in by the container_image
    # macro below from '@'-prefixed label values; reserved for its use.
    "label_files": attr.label_list(
        allow_files = True,
    ),
    "label_file_strings": attr.string_list(),
    "empty_files": attr.string_list(),
    # Tool that assembles the layer tarball from files/tars/debs/symlinks.
    "build_layer": attr.label(
        default = Label("//container:build_tar"),
        cfg = "host",
        executable = True,
        allow_files = True,
    ),
    # Tool that emits the v2.2 image configuration file.
    "create_image_config": attr.label(
        default = Label("//container:create_image_config"),
        cfg = "host",
        executable = True,
        allow_files = True,
    ),
}.items() + _hash_tools.items() + _layer_tools.items())

# Declared outputs: the loadable image tarball and this rule's own layer.
_outputs = {
    "out": "%{name}.tar",
    "layer": "%{name}-layer.tar",
}
# Expose the constituents (attrs, outputs, implementation) so that others
# may build a more specialized rule on the same implementation; see the
# module docstring for the expected usage.
image = struct(
    attrs = _attrs,
    outputs = _outputs,
    implementation = _impl,
)

# The underlying rule invoked by the container_image macro below.
container_image_ = rule(
    attrs = _attrs,
    executable = True,
    outputs = _outputs,
    implementation = _impl,
)
def _validate_command(name, argument):
    """Canonicalize an ENTRYPOINT/CMD value into list form.

    This mirrors the two forms Dockerfile accepts:

      ENTRYPOINT "/foo"          (shell form)
    results in:
      "Entrypoint": ["/bin/sh", "-c", "\"/foo\""]

      ENTRYPOINT ["/foo", "a"]   (exec form)
    results in:
      "Entrypoint": ["/foo", "a"]

    NOTE: prefacing a command with 'exec' just ends up with the former.

    Args:
      name: str, the attribute name used in the failure message.
      argument: the raw attribute value: str, list, or falsy.

    Returns:
      The canonical list form, or None for a falsy non-str/list value.
    """
    arg_type = type(argument)
    if arg_type == type(""):
        # Shell form: wrap in an `sh -c` invocation.
        return ["/bin/sh", "-c", argument]
    if arg_type == type([]):
        # Exec form: already canonical.
        return argument
    if argument:
        fail("The %s attribute must be a string or list, if specified." % name)
    return None
# Produces a new container image tarball compatible with 'docker load', which
# is a single additional layer atop 'base'. The goal is to have relatively
# complete support for building container images, from the Dockerfile spec.
#
# For more information see the 'Config' section of the image specification:
# https://github.com/opencontainers/image-spec/blob/v0.2.0/serialization.md
#
# Only 'name' is required. All other fields have sane defaults.
#
# container_image(
# name="...",
# visibility="...",
#
# # The base layers on top of which to overlay this layer,
# # equivalent to FROM.
# base="//another/build:rule",
#
# # The base directory of the files, defaulted to
# # the package of the input.
# # The directory structure of the files relative to that path
# # will be preserved.
# # A leading '/' means the workspace root; otherwise this path is
# # relative to the current package by default.
# data_path="...",
#
# # The directory in which to expand the specified files,
# # defaulting to '/'.
# # Only makes sense accompanying one of files/tars/debs.
# directory="...",
#
# # The set of archives to expand, or packages to install
# # within the chroot of this layer
# files=[...],
# tars=[...],
# debs=[...],
#
# # The set of symlinks to create within a given layer.
# symlinks = {
# "/path/to/link": "/path/to/target",
# ...
# },
#
# # https://docs.docker.com/engine/reference/builder/#entrypoint
# entrypoint="...", or
# entrypoint=[...], -- exec form
#
# # https://docs.docker.com/engine/reference/builder/#cmd
# cmd="...", or
# cmd=[...], -- exec form
#
# # https://docs.docker.com/engine/reference/builder/#expose
# ports=[...],
#
# # https://docs.docker.com/engine/reference/builder/#user
# # NOTE: the normal directive affects subsequent RUN, CMD,
# # and ENTRYPOINT
# user="...",
#
# # https://docs.docker.com/engine/reference/builder/#volume
# volumes=[...],
#
# # https://docs.docker.com/engine/reference/builder/#workdir
# # NOTE: the normal directive affects subsequent RUN, CMD,
# # ENTRYPOINT, ADD, and COPY, but this attribute only affects
# # the entry point.
# workdir="...",
#
# # https://docs.docker.com/engine/reference/builder/#env
# env = {
# "var1": "val1",
# "var2": "val2",
# ...
# "varN": "valN",
# },
# )
def container_image(**kwargs):
    """Package a docker image.

    This rule generates a sequence of genrules the last of which is named
    'name', so the dependency graph works out properly. The output of this
    rule is a tarball compatible with 'docker save/load' with the structure:

      {layer-name}:
        layer.tar
        VERSION
        json
      {image-config-sha256}.json
      ...
      manifest.json
      repositories
      top  # an implementation detail of our rules, not consumed by Docker.

    This rule appends a single new layer to the tarball of this form
    provided via the 'base' parameter.

    The images produced by this rule are always named 'bazel/tmp:latest'
    when loaded (an internal detail). The expectation is that the images
    produced by these rules will be uploaded using the 'docker_push' rule
    below.

    Args:
      **kwargs: See above.
    """
    if "cmd" in kwargs:
        kwargs["cmd"] = _validate_command("cmd", kwargs["cmd"])
    # These two attributes are computed below from 'labels'; callers may
    # not set them directly.
    for reserved in ["label_files", "label_file_strings"]:
        if reserved in kwargs:
            fail("reserved for internal use by container_image macro",
                 attr=reserved)
    if "labels" in kwargs:
        # '@'-prefixed label values name files; collect the unique, sorted
        # set of them (dict-comprehension trick: Skylark has no set type).
        referenced = {v[1:]: None
                      for v in kwargs["labels"].values()
                      if v[0] == "@"}
        label_files = sorted(referenced.keys())
        kwargs["label_files"] = label_files
        kwargs["label_file_strings"] = label_files
    if "entrypoint" in kwargs:
        kwargs["entrypoint"] = _validate_command("entrypoint",
                                                 kwargs["entrypoint"])
    container_image_(**kwargs)