import asyncio
from datetime import datetime
import hashlib
import traceback
import sys
import os
import json
from pathlib import Path
from typing import List, Optional, Tuple, Dict, NamedTuple, Iterable, Set, Any, Callable
from unittest.mock import MagicMock
import aiofiles
import click
import yaml
from doozerlib import rhcos
import openshift as oc
from doozerlib.rpm_utils import parse_nvr
from doozerlib.brew import KojiWrapperMetaReturn
from doozerlib.rhcos import RHCOSBuildInspector, RhcosMissingContainerException
from doozerlib.cli import cli, pass_runtime
from doozerlib.image import ImageMetadata, BrewBuildImageInspector, ArchiveImageInspector
from doozerlib.assembly_inspector import AssemblyInspector
from doozerlib.runtime import Runtime
from doozerlib.util import red_print, go_suffix_for_arch, brew_arch_for_go_arch, isolate_nightly_name_components, \
convert_remote_git_to_https, go_arch_for_brew_arch
from doozerlib.assembly import AssemblyTypes, assembly_basis, AssemblyIssue, AssemblyIssueCode
from doozerlib import exectools
from doozerlib.model import Model
from doozerlib.exceptions import DoozerFatalError
from doozerlib.util import find_manifest_list_sha
@cli.command("release:gen-payload", short_help="Mirror release images to quay and release-controller")
@click.option("--is-name", metavar="NAME", required=False,
help="ImageStream .metadata.name value. For example '4.2-art-latest'")
@click.option("--is-namespace", metavar="NAMESPACE", required=False,
help="ImageStream .metadata.namespace value. For example 'ocp'")
@click.option("--organization", metavar="ORGANIZATION", required=False, default="openshift-release-dev",
help="Quay ORGANIZATION to mirror into.\ndefault=openshift-release-dev")
@click.option("--repository", metavar="REPO", required=False, default="ocp-v4.0-art-dev",
help="Quay REPOSITORY in ORGANIZATION to mirror into.\ndefault=ocp-v4.0-art-dev")
@click.option("--release-repository", metavar="REPO", required=False, default="ocp-release-nightly",
help="Quay REPOSITORY in ORGANIZATION to push release payloads (used for multi-arch)\n"
"default=ocp-release-nightly")
@click.option("--output-dir", metavar="DIR", required=False, default=".",
help="Directory into which the mirroring/imagestream artifacts should be written")
@click.option("--skip-gc-tagging", default=False, is_flag=True,
help="By default, for a named assembly, images will be tagged to prevent garbage collection")
@click.option("--exclude-arch", metavar="ARCH", required=False, multiple=True,
help="Architecture (brew nomenclature) to exclude from payload generation")
@click.option("--emergency-ignore-issues", default=False, is_flag=True,
help="If you must get this command to permit an assembly despite issues. Do not use without approval.")
@click.option("--apply", default=False, is_flag=True,
help="Perform mirroring and imagestream updates.")
@click.option("--apply-multi-arch", default=False, is_flag=True,
help="Also create a release payload for multi-arch/heterogeneous clusters.")
@click.option("--moist-run", default=False, is_flag=True,
help="Mirror and determine tags but do not actually update imagestreams.")
@pass_runtime
def release_gen_payload(runtime: Runtime, is_name: str, is_namespace: str, organization: str,
repository: str, release_repository: str, output_dir: str, exclude_arch: Tuple[str, ...],
skip_gc_tagging: bool, emergency_ignore_issues: bool,
apply: bool, apply_multi_arch: bool, moist_run: bool):
"""
Computes a set of imagestream tags which can be assembled into an OpenShift release for this
assembly. The tags may not be valid unless --apply or --moist-run triggers mirroring.
Applying the change will cause the OSBS images to be mirrored into the OpenShift release
repositories on quay.
Applying will also directly update the imagestreams relevant to assembly (e.g. updating
4.9-art-latest for 4.9's stream assembly).
You may provide the namespace and base name for the image streams, or defaults will be used.
The ORGANIZATION and REPOSITORY options are combined into ORGANIZATION/REPOSITORY when preparing for
mirroring.
Generate files for mirroring from registry-proxy (OSBS storage) to our quay registry:
\b
$ doozer --group=openshift-4.12 release:gen-payload \\
--is-name=4.12-art-latest
Note that if you use -i to include specific images, you should also include openshift-enterprise-pod
to supply the 'pod' tag. The 'pod' image is used automatically as a payload stand-in for images that
do not build on all arches.
## Validation ##
Additionally we want to check that the following conditions are true for each imagestream being
updated:
* For all architectures built, RHCOS builds must have matching versions of any unshipped RPM they
include (per-entry os metadata - the set of RPMs may differ between arches, but versions should
not).
* Any RPMs present in images (including RHCOS) from unshipped RPM builds included in one of our
candidate tags must exactly version-match the latest RPM builds in those candidate tags (ONLY; we
never flag what we don't directly ship.)
These checks (and likely more in the future) should run and any failures should
be listed in brief via a "release.openshift.io/inconsistency" annotation on the
relevant image istag (these are publicly visible; ref. https://bit.ly/37cseC1)
and in more detail in state.yaml. The release-controller, per ART-2195, will
read and propagate/expose this annotation in its display of the release image.
"""
runtime.initialize(mode="both", clone_distgits=False, clone_source=False, prevent_cloning=True)
pipeline = GenPayloadCli(
runtime,
is_name or assembly_imagestream_base_name(runtime),
is_namespace or default_imagestream_namespace_base_name(),
organization, repository, release_repository,
output_dir,
exclude_arch,
skip_gc_tagging, emergency_ignore_issues,
apply, apply_multi_arch, moist_run
)
    asyncio.get_event_loop().run_until_complete(pipeline.run())


def default_imagestream_base_name(version: str) -> str:
    return f"{version}-art-latest"


def assembly_imagestream_base_name(runtime: Runtime) -> str:
    version = runtime.get_minor_version()
    if runtime.assembly == 'stream' and runtime.assembly_type is AssemblyTypes.STREAM:
        return default_imagestream_base_name(version)
    else:
        return f"{version}-art-assembly-{runtime.assembly}"


def default_imagestream_namespace_base_name() -> str:
    return "ocp"
def payload_imagestream_namespace_and_name(base_namespace: str, base_imagestream_name: str,
brew_arch: str, private: bool) -> Tuple[str, str]:
"""
:return: Returns the imagestream name and namespace to which images
for the specified CPU arch and privacy mode should be synced.
"""
arch_suffix = go_suffix_for_arch(brew_arch)
priv_suffix = "-priv" if private else ""
namespace = f"{base_namespace}{arch_suffix}{priv_suffix}"
name = f"{base_imagestream_name}{arch_suffix}{priv_suffix}"
return namespace, name
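

# A hedged example of the suffixing behavior (assuming go_suffix_for_arch("s390x") returns
# "-s390x" and go_suffix_for_arch("x86_64") returns ""):
#
#   payload_imagestream_namespace_and_name("ocp", "4.12-art-latest", "s390x", private=True)
#     -> ("ocp-s390x-priv", "4.12-art-latest-s390x-priv")
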
async def modify_and_replace_api_object(api_obj: oc.APIObject, modifier_func: Callable[[oc.APIObject], Any],
backup_file_path: Path, dry_run: bool):
"""
Receives an APIObject, archives the current state of that object, runs a modifying method on it,
archives the new state of the object, and then tries to replace the object on the
cluster API server.
:param api_obj: The openshift client APIObject to work with.
:param modifier_func: A function that will accept the api_obj as its first parameter and make
any desired change to that object.
:param backup_file_path: A Path object that can be used to archive pre & post modification
states of the object before triggering the update.
:param dry_run: Write archive files but do not actually update the imagestream.
"""
filepath = backup_file_path.joinpath(
f"replacing-{api_obj.kind()}.{api_obj.namespace()}.{api_obj.name()}.before-modify.json")
async with aiofiles.open(filepath, mode='w+') as backup_file:
await backup_file.write(api_obj.as_json(indent=4))
modifier_func(api_obj)
api_obj_model = api_obj.model
# Before replacing api objects on the server, make sure to remove aspects that can
# confuse subsequent CLI interactions with the object.
if api_obj_model.metadata.annotations["kubectl.kubernetes.io/last-applied-configuration"]:
api_obj_model.metadata.annotations.pop("kubectl.kubernetes.io/last-applied-configuration")
# If server-side metadata is being passed in, remove it before we try to replace the object.
if api_obj_model.metadata:
for md in ["creationTimestamp", "generation", "uid"]:
api_obj_model.metadata.pop(md)
api_obj_model.pop("status")
filepath = backup_file_path.joinpath(
f"replacing-{api_obj.kind()}.{api_obj.namespace()}.{api_obj.name()}.after-modify.json")
async with aiofiles.open(filepath, mode="w+") as backup_file:
await backup_file.write(api_obj.as_json(indent=4))
if not dry_run:
api_obj.replace()
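

# A minimal usage sketch, not executed anywhere in this module (assumptions: an active oc
# context; the imagestream name and the closure below are hypothetical). The real call site
# later in this file passes a closure that rewrites .spec.tags:
#
#   def add_label(obj: oc.APIObject):
#       obj.model.metadata["labels"] = {"art-updated": "true"}
#
#   await modify_and_replace_api_object(
#       oc.selector("imagestream/example").object(), add_label, Path("."), dry_run=True)
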
class PayloadEntry(NamedTuple):

    # Assembly issues relevant to this specific payload entry are appended here
    issues: List[AssemblyIssue]
# The final quay.io destination for the single-arch pullspec
dest_pullspec: str
# The final quay.io destination for the manifest list the single arch image
# might belong to. Most images built in brew will have been part of a
# manifest list, but not all release components (e.g. RHCOS)
# will be. We reuse manifest lists where possible for heterogeneous
# release payloads to save time vs building them ourselves.
dest_manifest_list_pullspec: str = None
# If the entry is for an image in this doozer group, these values will be set.
    # The image metadata associated with the payload
image_meta: Optional[ImageMetadata] = None
# An inspector associated with the overall brew build (manifest list) found for the release
build_inspector: Optional[BrewBuildImageInspector] = None
# The brew build archive (arch specific image) that should be tagged into the payload
archive_inspector: Optional[ArchiveImageInspector] = None
# If the entry is for RHCOS, this value will be set
rhcos_build: Optional[RHCOSBuildInspector] = None


def exchange_pullspec_tag_for_shasum(tag_pullspec: str, digest: str) -> str:
"""
Create pullspec with the repo from the (tag-specifying) pullspec plus the given digest.
"""
# extract repo e.g. quay.io/openshift-release-dev/ocp-v4.0-art-dev:sha256-b056..84b-ml
# -> quay.io/openshift-release-dev/ocp-v4.0-art-dev
output_pullspec: str = tag_pullspec.rsplit(":", 1)[0]
# return a sha-based pullspec
return output_pullspec + "@" + digest
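

# A hedged example (tag and digest values hypothetical):
#
#   exchange_pullspec_tag_for_shasum(
#       "quay.io/openshift-release-dev/ocp-v4.0-art-dev:4.12-art-assembly", "sha256:abcd1234")
#     -> "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:abcd1234"
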
class GenPayloadCli:
"""
An object to encapsulate the CLI inputs, methods, and state for the release:gen-payload command.
"""
def __init__(self,
# leave these all optional to make testing easier
runtime: Runtime = None, is_name: str = None, is_namespace: str = None, organization: str = None,
repository: str = None, release_repository: str = None, output_dir: str = None,
exclude_arch: Tuple[str] = None, skip_gc_tagging: bool = False, emergency_ignore_issues: bool = False,
apply: bool = False, apply_multi_arch: bool = False, moist_run: bool = False):
self.runtime = runtime
self.logger = runtime.logger if runtime else MagicMock() # in tests, blackhole logs by default
# release-controller IS to update (modified per arch, privacy)
self.base_imagestream = (is_namespace, is_name)
# where in the registry to publish/reference component images and release images
self.component_repo = (organization, repository)
self.release_repo = (organization, release_repository)
if output_dir: # where to output yaml report and backed up IS
self.output_path = Path(output_dir).absolute()
self.output_path.mkdir(parents=True, exist_ok=True)
self.exclude_arch = exclude_arch
self.skip_gc_tagging = skip_gc_tagging # do not re-tag custom builds in brew
self.emergency_ignore_issues = emergency_ignore_issues # permit any inconsistency
self.apply = apply # actually update the IS
self.apply_multi_arch = apply_multi_arch # update the "multi" IS as well
self.moist_run = moist_run # mirror the images but do not update the IS
# store generated payload entries: {arch -> dict of payload entries}
self.payload_entries_for_arch: Dict[str, Dict[str, PayloadEntry]] = {}
# for gathering issues that are found while evaluating the payload:
self.assembly_issues: List[AssemblyIssue] = list()
# private releases (only nightlies) can reference private component builds
self.privacy_modes: List[bool] = [False]
# do we proceed with this payload after weighing issues against permits?
self.payload_permitted = False
async def run(self):
"""
Main entry point once instantiated with CLI inputs.
"""
self.validate_parameters()
rt = self.runtime
self.logger.info(f"Collecting latest information associated with the assembly: {rt.assembly}")
assembly_inspector = AssemblyInspector(rt, rt.build_retrying_koji_client())
self.payload_entries_for_arch = self.generate_payload_entries(assembly_inspector)
assembly_report: Dict = await self.generate_assembly_report(assembly_inspector)
self.logger.info('\n%s', yaml.dump(assembly_report, default_flow_style=False, indent=2))
with self.output_path.joinpath("assembly-report.yaml").open(mode="w") as report_file:
yaml.dump(assembly_report, stream=report_file, default_flow_style=False, indent=2)
self.assess_assembly_viability()
await self.sync_payloads() # even when not permitted, produce what we _would have_ synced
if self.payload_permitted:
exit(0)
red_print("DO NOT PROCEED WITH THIS ASSEMBLY PAYLOAD -- not all detected issues are permitted.",
file=sys.stderr)
exit(1)
def validate_parameters(self):
"""
Sanity check the assembly requested and adjust state accordingly.
"""
rt = self.runtime
if rt.assembly not in {None, "stream", "test"} and rt.assembly not in rt.releases_config.releases:
raise DoozerFatalError(f"Assembly '{rt.assembly}' is not explicitly defined.")
if rt.assembly and rt.assembly != "stream" and "art-latest" in self.base_imagestream[1]:
raise ValueError('"art-latest" imagestreams should only be used for the "stream" assembly')
if rt.assembly_type is AssemblyTypes.STREAM:
# Only nightlies have the concept of private and public payloads
self.privacy_modes = [False, True]
# check that we can produce a full multi nightly if requested
if self.apply_multi_arch and (rt.images or rt.exclude or self.exclude_arch):
raise DoozerFatalError(
"Cannot create a multi nightly without including the full set of images. "
"Either include all images/arches or omit --apply-multi-arch")
async def generate_assembly_report(self, assembly_inspector: AssemblyInspector) -> Dict:
"""
Generate a status report of the search for inconsistencies across all payloads generated.
"""
rt = self.runtime
report = dict(
non_release_images=[image_meta.distgit_key for image_meta in rt.get_non_release_image_metas()],
release_images=[image_meta.distgit_key for image_meta in rt.get_for_release_image_metas()],
missing_image_builds=[
dgk for (dgk, ii) in assembly_inspector.get_group_release_images().items()
if ii is None
], # A list of metas where the assembly did not find a build
)
report["viable"], report["assembly_issues"] = await self.generate_assembly_issues_report(assembly_inspector)
self.payload_permitted = report["viable"]
return report
    async def generate_assembly_issues_report(self, assembly_inspector: AssemblyInspector) -> Tuple[bool, Dict[str, Dict]]:
"""
Populate self.assembly_issues and payload entries with inconsistencies found.
"""
rt = self.runtime
self.logger.info("Checking assembly content for inconsistencies.")
self.detect_mismatched_siblings(assembly_inspector)
assembly_build_ids: Set[int] = self.collect_assembly_build_ids(assembly_inspector)
with rt.shared_build_status_detector() as bsd:
# Use the list of builds associated with the group/assembly to warm up BSD caches
bsd.populate_archive_lists(assembly_build_ids)
bsd.find_shipped_builds(assembly_build_ids)
# check that RPMs belonging to this assembly/group are consistent with the assembly definition.
for rpm_meta in rt.rpm_metas():
self.assembly_issues.extend(assembly_inspector.check_group_rpm_package_consistency(rpm_meta))
if rt.assembly_type is AssemblyTypes.STREAM:
self.detect_non_latest_rpms(assembly_inspector)
# check that images for this assembly/group are consistent with the assembly definition.
self.detect_inconsistent_images(assembly_inspector)
# update issues found for payload images and check RPM consistency
self.detect_extend_payload_entry_issues(assembly_inspector)
# If the assembly claims to have reference nightlies, assert that our payload matches them exactly.
self.assembly_issues.extend(await PayloadGenerator.check_nightlies_consistency(assembly_inspector))
return self.summarize_issue_permits(assembly_inspector)
def detect_mismatched_siblings(self, assembly_inspector: AssemblyInspector):
"""
Mismatched siblings are built from the same repo but at a different commit
"""
self.logger.debug("Checking for mismatched sibling sources...")
group_images: List = assembly_inspector.get_group_release_images().values()
for mismatched, sibling in PayloadGenerator.find_mismatched_siblings(group_images):
self.assembly_issues.append(AssemblyIssue(
f"{mismatched.get_nvr()} was built from a different upstream "
f"source commit ({mismatched.get_source_git_commit()[:7]}) "
f"than one of its siblings {sibling.get_nvr()} "
f"from {sibling.get_source_git_commit()[:7]}",
component=mismatched.get_image_meta().distgit_key,
code=AssemblyIssueCode.MISMATCHED_SIBLINGS
))
@staticmethod
def generate_id_tags_list(assembly_inspector: AssemblyInspector) -> List[Tuple[int, str]]:
"""
Construct a list of builds and desired tags (we use "hotfix" tags for this)
"""
return [ # (build_id, desired non-GC tag)
(bbii.get_brew_build_id(), bbii.get_image_meta().hotfix_brew_tag())
for bbii in assembly_inspector.get_group_release_images().values()
if bbii
]
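
    # Illustrative shape of the returned list (build IDs and tag names hypothetical):
    #   [(2077103, "rhaos-4.12-rhel-8-hotfix"), (2077104, "rhaos-4.12-rhel-8-hotfix"), ...]
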
def collect_assembly_build_ids(self, assembly_inspector: AssemblyInspector) -> Set[int]:
"""
Collect a list of brew builds (images and RPMs) included in the assembly.
        To prevent garbage collection for custom assemblies (which don't have errata tool releases
        that would normally prevent it), we must tag these builds explicitly. We want to ensure these builds
persist so that later we can build custom releases based on previous custom releases. If we
lose images and builds for custom releases in brew due to GC, we will not be able to
construct derivative release payloads.
We do not, however, want to preserve every build destined for a nightly (which is why we do
not tag for "stream" assemblies).
"""
self.logger.debug("Finding all builds in the assembly...")
id_tags: List[Tuple[int, str]] = self.generate_id_tags_list(assembly_inspector)
# For each RHEL version targeted by any of our RPMs, find RPM
# builds with respect to the group/assembly. (Even single RPMs
# can build for multiple versions of RHEL.)
rhel_version_seen: Set[int] = set() # set of rhel versions we have processed
for rpm_meta in self.runtime.rpm_metas():
for el_ver in rpm_meta.determine_rhel_targets():
if el_ver not in rhel_version_seen:
# not processed yet, query the assembly for this rhel version now.
rhel_version_seen.add(el_ver)
hotfix_tag = self.runtime.get_default_hotfix_brew_tag(el_target=el_ver)
id_tags.extend([
(rpm_build_dict["id"], hotfix_tag)
for rpm_build_dict in assembly_inspector.get_group_rpm_build_dicts(el_ver=el_ver).values()
if rpm_build_dict
])
# Tag builds for custom assemblies unless we have been told not to from the command line.
if self.runtime.assembly_type != AssemblyTypes.STREAM and not self.skip_gc_tagging:
self.tag_missing_gc_tags(id_tags)
# we should now in good conscience be able to put these in a payload
return set(build_id for build_id, _ in id_tags)
    def tag_missing_gc_tags(self, id_tags: List[Tuple[int, str]]):
"""
Ensure that each build is tagged in brew with its GC-preventing tag
"""
self.logger.debug("ensuring assembly contents are tagged to avoid brew GC...")
# first, find the tags that each build currently has.
# construct a list of builds and brew listTags tasks (which run as a multicall at loop/context exit)
id_tag_tasks: List[Tuple[int, str, KojiWrapperMetaReturn]] = list()
# [(build_id, non-GC tag, multicall task to list tags)]
with self.runtime.pooled_koji_client_session() as pooled_kcs:
with pooled_kcs.multicall(strict=True) as m:
for build_id, desired_tag in id_tags:
id_tag_tasks.append((build_id, desired_tag, m.listTags(build=build_id)))
# Tasks should now contain tag list information for all builds associated with this assembly.
# Now see if the hotfix tag we want is already present, and if not, add it.
# Note: shared_koji_client_session authenticates by default (needed for tagging)
with self.runtime.shared_koji_client_session() as koji_client:
with koji_client.multicall() as m:
for build_id, desired_tag, list_tag_task in id_tag_tasks:
current_tags = [tag_entry["name"] for tag_entry in list_tag_task.result]
if desired_tag not in current_tags:
# The hotfix tag is missing, so apply it.
self.logger.info(
'Adding tag %s to build: %s to prevent garbage collection.', desired_tag, build_id)
m.tagBuild(desired_tag, build_id)
def detect_non_latest_rpms(self, assembly_inspector: AssemblyInspector):
"""
If this is a stream assembly, images which are not using the latest rpm builds should not reach
the release controller. Other assemblies may be deliberately constructed from non-latest.
"""
self.logger.debug("detecting images with group RPMs installed that are not the latest builds...")
for dgk, build_inspector in assembly_inspector.get_group_release_images().items():
if build_inspector:
for installed_nvr, newest_nvr in build_inspector.find_non_latest_rpms():
# This could indicate an issue with scan-sources or that an image is no longer successfully building
# It could also mean that images are pinning content, which may be expected, so allow permits.
self.assembly_issues.append(AssemblyIssue(
f"Found outdated RPM ({installed_nvr}) installed in {build_inspector.get_nvr()} "
f"when {newest_nvr} was available",
component=dgk, code=AssemblyIssueCode.OUTDATED_RPMS_IN_STREAM_BUILD
))
def detect_inconsistent_images(self, assembly_inspector: AssemblyInspector):
"""
Create issues for image builds selected by this assembly/group
that are inconsistent with the assembly definition.
"""
self.logger.debug("detecting images inconsistent with the assembly definition ...")
for _, bbii in assembly_inspector.get_group_release_images().items():
if bbii:
self.assembly_issues.extend(assembly_inspector.check_group_image_consistency(bbii))
def full_component_repo(self) -> str:
"""
Full pullspec for the component repo
"""
org, repo = self.component_repo
return f"quay.io/{org}/{repo}"
def generate_payload_entries(self, assembly_inspector: AssemblyInspector) -> Dict[str, Dict[str, PayloadEntry]]:
"""
Generate single-arch PayloadEntries for the assembly payload.
Payload generation may uncover assembly issues, which are added to the assembly_issues list.
Returns a dict of dicts, keyed by architecture, then by payload component name.
"""
entries_for_arch: Dict[str, Dict[str, PayloadEntry]] = dict() # arch => img tag => PayloadEntry
for arch in self.runtime.arches:
if arch in self.exclude_arch:
self.logger.info(f"Excluding payload files architecture: {arch}")
continue
# No adjustment for private or public; the assembly's canonical payload content is the same.
entries: Dict[str, PayloadEntry] # Key of this dict is release payload tag name
issues: List[AssemblyIssue]
entries, issues = PayloadGenerator.find_payload_entries(assembly_inspector, arch,
self.full_component_repo())
entries_for_arch[arch] = entries
self.assembly_issues.extend(issues)
return entries_for_arch
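
    # Illustrative shape of the returned mapping (tag names hypothetical):
    #   {"x86_64": {"cli": PayloadEntry(...), "pod": PayloadEntry(...)}, "s390x": {...}}
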
def detect_extend_payload_entry_issues(self, assembly_inspector: AssemblyInspector):
"""
Associate assembly issues with related payload entries if any.
Also look for additional issues related to RHCOS container(s) across
the single-arch payloads.
"""
primary_container_name = rhcos.get_primary_container_name(self.runtime)
cross_payload_requirements = self.runtime.group_config.rhcos.require_consistency
if not cross_payload_requirements:
self.runtime.logger.debug("No cross-payload consistency requirements defined in group.yml")
# Structure to record rhcos builds we use so that they can be analyzed for inconsistencies
targeted_rhcos_builds: Dict[bool, List[RHCOSBuildInspector]] = \
{False: [], True: []} # privacy mode: list of BuildInspector
for arch, entries in self.payload_entries_for_arch.items():
for tag, payload_entry in entries.items():
if payload_entry.image_meta:
# Record the issues previously found for this image in corresponding payload_entry
payload_entry.issues.extend(
ai for ai in self.assembly_issues
if ai.component == payload_entry.image_meta.distgit_key
)
elif payload_entry.rhcos_build:
if tag != primary_container_name:
continue # RHCOS is one build, only analyze once (for primary container)
self.detect_rhcos_issues(payload_entry, assembly_inspector)
# Record the build to enable later consistency checks between all RHCOS builds.
# There are presently no private RHCOS builds, so add only to private_mode=False.
targeted_rhcos_builds[False].append(payload_entry.rhcos_build)
if cross_payload_requirements:
self.assembly_issues.extend(
PayloadGenerator.find_rhcos_payload_rpm_inconsistencies(
payload_entry.rhcos_build,
assembly_inspector.get_group_release_images(),
cross_payload_requirements,
)
)
else:
raise DoozerFatalError(f"Unsupported PayloadEntry: {payload_entry}")
self.detect_rhcos_inconsistent_rpms(targeted_rhcos_builds) # across all arches
def detect_rhcos_issues(self, payload_entry, assembly_inspector: AssemblyInspector):
"""
Associate relevant assembly issues with an RHCOS PayloadEntry.
Use assembly inspector to detect assembly issues like RPMs installed in
the PayloadEntry that are not what the assembly specifies.
"""
# record issues found in assembly list and per payload entry
self.assembly_issues.extend(assembly_inspector.check_rhcos_issues(payload_entry.rhcos_build))
payload_entry.issues.extend(ai for ai in self.assembly_issues if ai.component == "rhcos")
if self.runtime.assembly_type is AssemblyTypes.STREAM:
# For stream alone, we want to enforce that the very latest RPMs are installed.
for installed_nvr, newest_nvr in payload_entry.rhcos_build.find_non_latest_rpms():
self.assembly_issues.append(AssemblyIssue(
f"Found outdated RPM ({installed_nvr}) "
f"installed in {payload_entry.rhcos_build} "
f"when {newest_nvr} is available",
component="rhcos", code=AssemblyIssueCode.OUTDATED_RPMS_IN_STREAM_BUILD
))
def detect_rhcos_inconsistent_rpms(self, targeted_rhcos_builds: Dict[bool, List[RHCOSBuildInspector]]):
"""
Generate assembly issue(s) if RHCOS builds do not contain consistent RPMs across arches.
"""
for privacy_mode in self.privacy_modes: # only for relevant modes
rhcos_builds = targeted_rhcos_builds[privacy_mode]
rhcos_inconsistencies: Dict[str, List[str]] = \
PayloadGenerator.find_rhcos_build_rpm_inconsistencies(rhcos_builds)
if rhcos_inconsistencies:
self.assembly_issues.append(AssemblyIssue(
f"Found RHCOS inconsistencies in builds {rhcos_builds} "
f"(private={privacy_mode}): {rhcos_inconsistencies}",
component="rhcos", code=AssemblyIssueCode.INCONSISTENT_RHCOS_RPMS
))
    def summarize_issue_permits(self, assembly_inspector: AssemblyInspector) -> Tuple[bool, Dict[str, Dict]]:
"""
Check whether the issues found are permitted,
and collect issues per component into a serializable report
"""
payload_permitted = True
assembly_issues_report: Dict[str, List[Dict]] = dict()
for ai in self.assembly_issues:
permitted = assembly_inspector.does_permit(ai)
payload_permitted &= permitted # If anything not permitted, payload not permitted
assembly_issues_report.setdefault(ai.component, []).append(dict(
code=ai.code.name,
msg=ai.msg,
permitted=permitted
))
return payload_permitted, assembly_issues_report
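
    # Illustrative shape of the issues report (message text hypothetical):
    #   {"rhcos": [{"code": "OUTDATED_RPMS_IN_STREAM_BUILD", "msg": "...", "permitted": False}]}
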
def assess_assembly_viability(self):
"""
Adjust assembly viability if needed per emergency_ignore_issues.
"""
if not self.payload_permitted:
if self.emergency_ignore_issues:
self.logger.warning("Permitting issues only because --emergency-ignore-issues was specified")
self.payload_permitted = True
else:
self.logger.warning("Assembly is not permitted. Will not apply changes to imagestreams.")
self.apply = False
self.apply_multi_arch = False
async def sync_payloads(self):
"""
Use the payload entries that have been generated across the arches to mirror the images
out to quay and create the imagestream definitions that will update the release-controller.
Also re-organize the entries into a hierarchy suitable for creating the heterogeneous
"multi" payloads, and create those.
"""
# When we are building a heterogeneous / multiarch payload, we need to
# keep track of images that are going into each single-arch
# imagestream. Maps [is_private] -> [tag_name] -> [arch] -> PayloadEntry
multi_specs: Dict[bool, Dict[str, Dict[str, PayloadEntry]]] = {
True: dict(),
False: dict()
}
# Ensure that all payload images have been mirrored before updating
# the imagestream. Otherwise, the imagestream will fail to import the
# image.
tasks = []
for arch, payload_entries in self.payload_entries_for_arch.items():
tasks.append(self.mirror_payload_content(arch, payload_entries))
await asyncio.gather(*tasks)
# Update the imagestreams being monitored by the release controller.
tasks = []
for arch, payload_entries in self.payload_entries_for_arch.items():
for private_mode in self.privacy_modes:
self.logger.info(f"Building payload files for architecture: {arch}; private: {private_mode}")
tasks.append(self.generate_specific_payload_imagestreams(arch, private_mode, payload_entries, multi_specs))
await asyncio.gather(*tasks)
if self.apply_multi_arch:
if self.runtime.group_config.multi_arch.enabled:
await self.sync_heterogeneous_payloads(multi_specs)
else:
self.logger.info("--apply-multi-arch is enabled but the group config / assembly does "
"not have group.multi_arch.enabled==true")
@exectools.limit_concurrency(500)
async def mirror_payload_content(self, arch: str, payload_entries: Dict[str, PayloadEntry]):
"""
Ensure an arch's payload entries are synced out for the public to access.
"""
# Prevents writing the same destination twice (not supported by oc if in the same mirroring file):
mirror_src_for_dest: Dict[str, str] = dict()
for payload_entry in payload_entries.values():
if not payload_entry.archive_inspector:
continue # Nothing to mirror (e.g. RHCOS)
mirror_src_for_dest[payload_entry.dest_pullspec] = payload_entry.archive_inspector.get_archive_pullspec()
if payload_entry.dest_manifest_list_pullspec:
# For heterogeneous release payloads, if a component builds for all arches
# (without using -alt images), we can use the manifest list for the images directly from OSBS.
# This saves a significant amount of time compared to building the manifest list again.
mirror_src_for_dest[payload_entry.dest_manifest_list_pullspec] = \
payload_entry.build_inspector.get_build_pullspec()
# Save the default SRC=DEST input to a file for syncing by 'oc image mirror'. Why is
# there no '-priv'? The true images for the assembly are what we are syncing -
# it is what we update in the imagestreams that defines whether the image will be
# part of a public vs private release.
src_dest_path = self.output_path.joinpath(f"src_dest.{arch}")
async with aiofiles.open(src_dest_path, mode="w+", encoding="utf-8") as out_file:
for dest_pullspec, src_pullspec in mirror_src_for_dest.items():
await out_file.write(f"{src_pullspec}={dest_pullspec}\n")
if self.apply or self.apply_multi_arch:
self.logger.info(f"Mirroring images from {str(src_dest_path)}")
try:
await asyncio.wait_for(exectools.cmd_assert_async(
f"oc image mirror --keep-manifest-list --filename={str(src_dest_path)}", retries=3), timeout=1800)
except asyncio.TimeoutError:
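                # (Assumption: the timeout is tolerated deliberately so a stuck mirroring
                # run does not abort payload generation; missing images surface later as
                # imagestream import failures.)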
pass
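
    # The src_dest.<arch> file written above holds one SRC=DEST mapping per line for
    # `oc image mirror`; a hypothetical entry (hostname and tags illustrative only):
    #
    #   registry-proxy.engineering.redhat.com/rh-osbs/openshift-ose-cli:v4.12.0=quay.io/openshift-release-dev/ocp-v4.0-art-dev:sha256-abc123
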
async def generate_specific_payload_imagestreams(self, arch: str, private_mode: bool,
payload_entries: Dict[str, PayloadEntry],
# Map [is_private] -> [tag_name] -> [arch] -> PayloadEntry
multi_specs: Dict[bool, Dict[str, Dict[str, PayloadEntry]]]):
"""
For the specific arch, -priv, and payload entries, generate the imagestreams.
Populate multi_specs with the single-arch images that need composing into the multi-arch imagestream.
"""
# Generate arch/privacy-specific imagestream tags for the payload
istags: List[Dict] = []
# Typically for a stream update, we want to prune unused imagestream
# tags. But setting this flag indicates we don't expect to have all the
# tags for a payload, so tags will only be updated, and none removed.
incomplete_payload_update: bool = False
if self.runtime.images or self.runtime.exclude:
# If images are being explicitly included or excluded, assume we will not be
# performing a full replacement of the imagestream content.
incomplete_payload_update = True
for payload_tag_name, payload_entry in payload_entries.items():
multi_specs[private_mode].setdefault(payload_tag_name, dict())
if private_mode is False and payload_entry.build_inspector \
and payload_entry.build_inspector.is_under_embargo():
# No embargoed images should go to the public release controller, so we will not have
# a complete set of payload tags for the public imagestream.
incomplete_payload_update = True
else:
istags.append(PayloadGenerator.build_payload_istag(payload_tag_name, payload_entry))
multi_specs[private_mode][payload_tag_name][arch] = payload_entry
imagestream_namespace, imagestream_name = payload_imagestream_namespace_and_name(
*self.base_imagestream, arch, private_mode)
await self.write_imagestream_artifact_file(
imagestream_namespace, imagestream_name, istags, incomplete_payload_update)
if self.apply:
await self.apply_arch_imagestream(
imagestream_namespace, imagestream_name, istags, incomplete_payload_update)
async def write_imagestream_artifact_file(self, imagestream_namespace: str, imagestream_name: str,
istags: List[Dict], incomplete_payload_update):
"""
Write the yaml file for the imagestream.
"""
filename = f"updated-tags-for.{imagestream_namespace}.{imagestream_name}" \
f"{'-partial' if incomplete_payload_update else ''}.yaml"
async with aiofiles.open(self.output_path.joinpath(filename), mode="w+", encoding="utf-8") as out_file:
istream_spec = PayloadGenerator.build_payload_imagestream(
self.runtime,
imagestream_name, imagestream_namespace,
istags, self.assembly_issues
)
await out_file.write(yaml.safe_dump(istream_spec, indent=2, default_flow_style=False))
async def apply_arch_imagestream(self, imagestream_namespace: str, imagestream_name: str,
istags: List[Dict], incomplete_payload_update: bool):
"""
Orchestrate the update and tag removal for one arch imagestream in the OCP cluster.
"""
with oc.project(imagestream_namespace):
istream_apiobj = self.ensure_imagestream_apiobj(imagestream_name)
pruning_tags, adding_tags = await self.apply_imagestream_update(
istream_apiobj, istags, incomplete_payload_update)
if pruning_tags:
self.logger.warning('The following tag names are no longer part of the release '
'and will be pruned in %s:%s: %s',
imagestream_namespace, imagestream_name, pruning_tags)
if not self.moist_run:
for old_tag in pruning_tags:
# Even though we have replaced the .spec on the imagestream, the old tag will still
# be reflected in .status. The release controller considers this a legit declaration,
# so we must remove it explicitly using `oc delete istag`
try:
oc.selector(f"istag/{imagestream_name}:{old_tag}").delete()
except Exception:
# This is not a fatal error, but failure to delete may leave issues being
# displayed on the release controller page.
self.logger.error('Unable to delete %s tag fully from %s imagestream in %s:\n%s',
old_tag, imagestream_name, imagestream_namespace, traceback.format_exc())
if adding_tags:
self.logger.warning('The following tag names are net new to %s:%s: %s',
imagestream_namespace, imagestream_name, adding_tags)
@staticmethod
def ensure_imagestream_apiobj(imagestream_name):
"""
Create the imagestream if it does not exist, and return the api object.
"""
istream_apiobj = oc.selector(f"imagestream/{imagestream_name}").object(ignore_not_found=True)
if istream_apiobj:
return istream_apiobj
# The imagestream has not been bootstrapped; create it.
oc.create({
"apiVersion": "image.openshift.io/v1",
"kind": "ImageStream",
"metadata": {
"name": imagestream_name,
}
})
return oc.selector(f"imagestream/{imagestream_name}").object()
async def apply_imagestream_update(self, istream_apiobj, istags: List[Dict],
incomplete_payload_update: bool) -> Tuple[Set[str], Set[str]]:
"""
Apply changes for one imagestream object to the OCP cluster.
"""
# gather diffs between old and new, indicating removal or addition
pruning_tags: Set[str] = set()
adding_tags: Set[str] = set()
# create a closure to be applied below against the api object
def update_single_arch_istags(apiobj: oc.APIObject):
nonlocal pruning_tags
nonlocal adding_tags
new_annotations = dict()
if apiobj.model.metadata.annotations is not oc.Missing:
# We must preserve annotations as they contain release controller configuration information
new_annotations = apiobj.model.metadata.annotations._primitive()
# Remove old inconsistency information if it exists
new_annotations.pop("release.openshift.io/inconsistency", None)
new_annotations.update(PayloadGenerator.build_imagestream_annotations(self.runtime, self.assembly_issues))
apiobj.model.metadata["annotations"] = new_annotations
incoming_tag_names = set([istag["name"] for istag in istags])
existing_tag_names = set([istag["name"] for istag in apiobj.model.spec.tags])
adding_tags = incoming_tag_names - existing_tag_names
new_istags = list(istags) # copy, don't update/embed list parameter
if incomplete_payload_update:
# If our incoming `istags` don't necessarily include everything in the release,
# we need to preserve existing tag values that were not updated.
for istag in apiobj.model.spec.tags:
if istag.name not in incoming_tag_names:
new_istags.append(istag)
else:
# Else, we believe the assembled tags are canonical. Declare
# old tags to prune.
pruning_tags = existing_tag_names - incoming_tag_names
apiobj.model.spec.tags = new_istags
await modify_and_replace_api_object(istream_apiobj, update_single_arch_istags, self.output_path, self.moist_run)
return pruning_tags, adding_tags
async def sync_heterogeneous_payloads(self, multi_specs: Dict[bool, Dict[str, Dict[str, PayloadEntry]]]):
"""
We now generate the artifacts to create heterogeneous release payloads (suitable for
clusters with multiple arches present). A heterogeneous or 'multi' release payload is a
manifest list (i.e. it consists of N release payload manifests, one for each arch).
The release payload images referenced in the multi-release payload manifest list are
themselves somewhat standard release payloads (i.e. they are based on CVO images for their
arch) BUT, each component image they reference is a manifest list. For example, the `cli`
        image in the s390x release payload will point to a manifest list composed of cli image
manifests for each architecture.
So the top-level release payload pullspec is a manifest list, referencing release payload
images for each arch, and these arch specific payload images reference manifest list based
components pullspecs.
The outcome is that this method creates/updates:
1. A component imagestream containing a tag per image component, similar to what we do for
single-arch assemblies, but written to a file rather than applied to the cluster.
2. For each arch, a payload image based on its CVO, built with references to the (multi-arch)
components in this imagestream and stored in
* quay.io/openshift-release-dev/ocp-release-nightly for nightlies or
* quay.io/openshift-release-dev/ocp-release for standard/custom releases.
3. A multi payload image, which is a manifest list built from the arch-specific release
payload images and stored in the same quay.io repos as above.
4. A _release_ imagestream per assembly to record tag(s) for multi-arch payload image(s)
created in step 3, e.g. `4.11-art-latest-multi` for `stream` assembly or
`4.11-art-assembly-4.11.4-multi` for other assemblies.
"""
for private_mode in self.privacy_modes:
if private_mode:
# The CI image registry does not support manifest lists. Thus, we need to publish
# our nightly release payloads to quay.io. As of this writing, we don't have a
# private quay repository into which we could push embargoed release heterogeneous
# release payloads.
red_print("PRIVATE MODE MULTI PAYLOADS ARE CURRENTLY DISABLED. "
"WE NEED A PRIVATE QUAY REPO FOR PRIVATE MULTI RELEASE PAYLOADS")
continue
imagestream_namespace, imagestream_name = payload_imagestream_namespace_and_name(
*self.base_imagestream, "multi", private_mode)
multi_release_istag: str # The tag to record the multi release payload image
multi_release_manifest_list_tag: str # The quay.io tag to preserve the multi payload
multi_release_istag, multi_release_manifest_list_tag = self.get_multi_release_names(private_mode)
tasks = []
for tag_name, arch_to_payload_entry in multi_specs[private_mode].items():
tasks.append(self.build_multi_istag(tag_name, arch_to_payload_entry, imagestream_namespace))
multi_istags: List[Dict] = await asyncio.gather(*tasks)
# now multi_istags contains istags which all point to component manifest lists. We must
            # run oc adm release new on this set of tags -- once for each arch -- to create the arch
# specific release payloads.
multi_release_is = PayloadGenerator.build_payload_imagestream(
self.runtime,
imagestream_name, imagestream_namespace, multi_istags,
assembly_wide_inconsistencies=self.assembly_issues)
# We will then stitch those arch specific payload images together into a release payload
# manifest list.
multi_release_dest: str = f"quay.io/{'/'.join(self.release_repo)}:{multi_release_manifest_list_tag}"
final_multi_pullspec: str = await self.create_multi_release_image(
imagestream_name, multi_release_is, multi_release_dest, multi_release_istag,
multi_specs, private_mode)
self.logger.info(f"The final pull_spec for the multi release payload is: {final_multi_pullspec}")
with oc.project(imagestream_namespace):
await self.apply_multi_imagestream_update(final_multi_pullspec, imagestream_name, multi_release_istag)
def get_multi_release_names(self, private_mode: bool) -> Tuple[str, str]:
"""
Determine a unique name for the multi release (recorded in the imagestream for the assembly) and a
quay.io tag to prevent garbage collection of the image -- as of 2022-09-09, both tags are
the same.
"""
multi_ts = datetime.now().strftime("%Y-%m-%d-%H%M%S")
if self.runtime.assembly_type is AssemblyTypes.STREAM:
# We are publicizing a nightly. Unlike single-arch payloads, the release controller does
# not react to 4.x-art-latest updates and create a timestamp-based name. We create the
# nightly name in doozer.
multi_release_istag = f"{self.runtime.get_minor_version()}.0-0.nightly" \
f"{go_suffix_for_arch('multi', private_mode)}-{multi_ts}"
# Tag the release image with same name as release displayed in the release controller
multi_release_manifest_list_tag = multi_release_istag
else:
# Tag the release image in quay with anything unique; we just don't want it garbage
# collected. It will not show up in the release controller. The only purpose of this
# image is to provide inputs to the promotion job, which looks at the imagestream
# and not for this tag.
multi_release_manifest_list_tag = f"{self.runtime.get_minor_version()}.0-0.art-assembly-" \
f"{self.runtime.assembly}{go_suffix_for_arch('multi', private_mode)}-" \
f"{multi_ts}"
# This will be the singular tag we create in an imagestream on app.ci. The actual name
# does not matter, because it will not be visible in the release controller and will not
# be the ultimate name used to promote the release. It must be unique, however, because
# Cincinnati chokes if multiple images exist in the repo with the same release name.
multi_release_istag = multi_release_manifest_list_tag
return multi_release_istag, multi_release_manifest_list_tag
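
    # Hedged examples (timestamps hypothetical; assumes go_suffix_for_arch("multi", False)
    # yields "-multi"):
    #   stream assembly:  "4.12.0-0.nightly-multi-2022-09-09-123456"
    #   other assemblies: "4.12.0-0.art-assembly-4.12.5-multi-2022-09-09-123456"
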
async def build_multi_istag(self, tag_name: str, arch_to_payload_entry: Dict[str, PayloadEntry],
imagestream_namespace: str) -> Dict:
"""
Build a single imagestream tag for a component in a multi-arch payload.
"""
# There are two flows:
# 1. The images for ALL arches were part of the same brew built manifest list. In this case,
# we want to reuse the manifest list (it was already mirrored during the mirroring step).
# 2. At least one arch for this component does not have the same manifest list as the
# other images. This will always be true for RHCOS, but also applies
# to -alt images. In this case, we must stitch a manifest list together ourselves.
entries: List[PayloadEntry] = list(arch_to_payload_entry.values())