-
Notifications
You must be signed in to change notification settings - Fork 2.5k
/
OpenshiftProcessor.java
644 lines (571 loc) · 32.5 KB
/
OpenshiftProcessor.java
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
package io.quarkus.container.image.openshift.deployment;
import static io.quarkus.container.image.openshift.deployment.OpenshiftUtils.getDeployStrategy;
import static io.quarkus.container.image.openshift.deployment.OpenshiftUtils.getNamespace;
import static io.quarkus.container.image.openshift.deployment.OpenshiftUtils.mergeConfig;
import static io.quarkus.container.util.PathsUtil.findMainSourcesRoot;
import static io.quarkus.deployment.pkg.steps.JarResultBuildStep.DEFAULT_FAST_JAR_DIRECTORY_NAME;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.jboss.logging.Logger;
import io.dekorate.utils.Packaging;
import io.dekorate.utils.Serialization;
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.api.model.KubernetesList;
import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.client.Config;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;
import io.fabric8.kubernetes.client.KubernetesClientException;
import io.fabric8.kubernetes.client.dsl.LogWatch;
import io.fabric8.kubernetes.client.dsl.NamespaceableResource;
import io.fabric8.kubernetes.client.dsl.base.PatchContext;
import io.fabric8.kubernetes.client.dsl.base.PatchType;
import io.fabric8.kubernetes.client.http.HttpClient;
import io.fabric8.openshift.api.model.Build;
import io.fabric8.openshift.api.model.BuildConfig;
import io.fabric8.openshift.api.model.ImageStream;
import io.fabric8.openshift.client.OpenShiftClient;
import io.quarkus.container.image.deployment.ContainerImageConfig;
import io.quarkus.container.image.deployment.util.ImageUtil;
import io.quarkus.container.spi.AvailableContainerImageExtensionBuildItem;
import io.quarkus.container.spi.BaseImageInfoBuildItem;
import io.quarkus.container.spi.ContainerImageBuildRequestBuildItem;
import io.quarkus.container.spi.ContainerImageBuilderBuildItem;
import io.quarkus.container.spi.ContainerImageInfoBuildItem;
import io.quarkus.container.spi.ContainerImagePushRequestBuildItem;
import io.quarkus.deployment.IsNormalNotRemoteDev;
import io.quarkus.deployment.annotations.BuildProducer;
import io.quarkus.deployment.annotations.BuildStep;
import io.quarkus.deployment.builditem.ArchiveRootBuildItem;
import io.quarkus.deployment.builditem.GeneratedFileSystemResourceBuildItem;
import io.quarkus.deployment.pkg.PackageConfig;
import io.quarkus.deployment.pkg.builditem.ArtifactResultBuildItem;
import io.quarkus.deployment.pkg.builditem.CompiledJavaVersionBuildItem;
import io.quarkus.deployment.pkg.builditem.CurateOutcomeBuildItem;
import io.quarkus.deployment.pkg.builditem.JarBuildItem;
import io.quarkus.deployment.pkg.builditem.NativeImageBuildItem;
import io.quarkus.deployment.pkg.builditem.OutputTargetBuildItem;
import io.quarkus.deployment.pkg.steps.NativeBuild;
import io.quarkus.kubernetes.client.deployment.KubernetesClientErrorHandler;
import io.quarkus.kubernetes.client.spi.KubernetesClientBuildItem;
import io.quarkus.kubernetes.spi.DecoratorBuildItem;
import io.quarkus.kubernetes.spi.DeployStrategy;
import io.quarkus.kubernetes.spi.KubernetesCommandBuildItem;
import io.quarkus.kubernetes.spi.KubernetesEnvBuildItem;
public class OpenshiftProcessor {
/** Name of this container-image extension / builder. */
public static final String OPENSHIFT = "openshift";
/** Label openshift puts on Builds to link them back to their BuildConfig. */
private static final String BUILD_CONFIG_NAME = "openshift.io/build-config.name";
/** Build phase value reported by openshift while a build is in progress. */
private static final String RUNNING = "Running";
/** Conventional env var used by the base images to locate the runnable jar. */
private static final String JAVA_APP_JAR = "JAVA_APP_JAR";
/** Number of log lines to re-display when the build log stream closes unexpectedly. */
private static final int LOG_TAIL_SIZE = 10;
private static final Logger LOG = Logger.getLogger(OpenshiftProcessor.class);
/**
 * Advertises the "openshift" extension so the core container-image
 * processing knows this builder is available.
 */
@BuildStep
public AvailableContainerImageExtensionBuildItem availability() {
    return new AvailableContainerImageExtensionBuildItem(OPENSHIFT);
}
/**
 * JVM-mode, docker-strategy builds: points the BuildConfig at the JVM Dockerfile and
 * removes jar-related env vars that cannot be known up front for a docker build.
 */
@BuildStep(onlyIf = { OpenshiftBuild.class }, onlyIfNot = NativeBuild.class)
public void openshiftPrepareJvmDockerBuild(OpenshiftConfig openshiftConfig,
        S2iConfig s2iConfig,
        OutputTargetBuildItem out,
        BuildProducer<DecoratorBuildItem> decorator) {
    // Legacy s2i configuration still wins where set; merge before reading the strategy.
    OpenshiftConfig config = mergeConfig(openshiftConfig, s2iConfig);
    if (config.buildStrategy == BuildStrategy.DOCKER) {
        // NOTE(review): the dockerfile is read from the unmerged openshiftConfig while the
        // strategy comes from the merged config — confirm this asymmetry is intentional.
        decorator.produce(new DecoratorBuildItem(new ApplyDockerfileToBuildConfigDecorator(null,
                findMainSourcesRoot(out.getOutputDirectory()).getValue().resolve(openshiftConfig.jvmDockerfile))));
        //When using the docker build strategy, we can't possibly know these values, so it's the image responsibility to work without them.
        decorator.produce(new DecoratorBuildItem(new RemoveEnvVarDecorator(null, JAVA_APP_JAR)));
        decorator.produce(new DecoratorBuildItem(new RemoveEnvVarDecorator(null, "JAVA_APP_LIB")));
    }
}
/**
 * Native-mode builds: points the BuildConfig at the native Dockerfile (docker strategy
 * only) and always removes the jar-related env vars, which are meaningless for a
 * native binary.
 */
@BuildStep(onlyIf = { OpenshiftBuild.class, NativeBuild.class })
public void openshiftPrepareNativeDockerBuild(OpenshiftConfig openshiftConfig,
        S2iConfig s2iConfig,
        OutputTargetBuildItem out,
        BuildProducer<DecoratorBuildItem> decorator) {
    OpenshiftConfig config = mergeConfig(openshiftConfig, s2iConfig);
    if (config.buildStrategy == BuildStrategy.DOCKER) {
        // NOTE(review): dockerfile read from the unmerged openshiftConfig — confirm intentional.
        decorator.produce(new DecoratorBuildItem(new ApplyDockerfileToBuildConfigDecorator(null,
                findMainSourcesRoot(out.getOutputDirectory()).getValue().resolve(openshiftConfig.nativeDockerfile))));
    }
    //Let's remove this for all kinds of native build
    decorator.produce(new DecoratorBuildItem(new RemoveEnvVarDecorator(null, JAVA_APP_JAR)));
    decorator.produce(new DecoratorBuildItem(new RemoveEnvVarDecorator(null, "JAVA_APP_LIB")));
}
/**
 * JVM-mode: computes the builder base image and, for BINARY builds, the env vars /
 * launch command the image needs to locate and run the application jar.
 */
@BuildStep(onlyIf = { IsNormalNotRemoteDev.class, OpenshiftBuild.class }, onlyIfNot = NativeBuild.class)
public void openshiftRequirementsJvm(OpenshiftConfig openshiftConfig,
        S2iConfig s2iConfig,
        CurateOutcomeBuildItem curateOutcomeBuildItem,
        OutputTargetBuildItem out,
        PackageConfig packageConfig,
        JarBuildItem jarBuildItem,
        CompiledJavaVersionBuildItem compiledJavaVersion,
        BuildProducer<DecoratorBuildItem> decorator,
        BuildProducer<KubernetesEnvBuildItem> envProducer,
        BuildProducer<BaseImageInfoBuildItem> builderImageProducer,
        BuildProducer<KubernetesCommandBuildItem> commandProducer) {
    OpenshiftConfig config = mergeConfig(openshiftConfig, s2iConfig);
    String outputJarFileName = jarBuildItem.getPath().getFileName().toString();
    String jarFileName = config.jarFileName.orElse(outputJarFileName);
    // Default base image is derived from the java version the application was compiled with.
    String baseJvmImage = config.baseJvmImage
            .orElse(OpenshiftConfig.getDefaultJvmImage(compiledJavaVersion.getJavaVersion()));
    boolean hasCustomJarPath = config.jarFileName.isPresent() || config.jarDirectory.isPresent();
    boolean hasCustomJvmArguments = config.jvmArguments.isPresent();
    builderImageProducer.produce(new BaseImageInfoBuildItem(baseJvmImage));
    // Known base images carry metadata (env var names, jar directory) that can be exploited below.
    Optional<OpenshiftBaseJavaImage> baseImage = OpenshiftBaseJavaImage.findMatching(baseJvmImage);
    if (config.buildStrategy == BuildStrategy.BINARY) {
        // Jar directory priorities:
        // 1. explicitly specified by the user.
        // 2. detected via OpenshiftBaseJavaImage
        // 3. fallback value
        String jarDirectory = config.jarDirectory
                .orElse(baseImage.map(i -> i.getJarDirectory()).orElse(config.FALLBACK_JAR_DIRECTORY));
        String pathToJar = concatUnixPaths(jarDirectory, jarFileName);
        // If the image is known, we can define env vars for classpath, jar, lib etc.
        baseImage.ifPresent(b -> {
            envProducer.produce(KubernetesEnvBuildItem.createSimpleVar(b.getJarEnvVar(), pathToJar, null));
            envProducer.produce(KubernetesEnvBuildItem.createSimpleVar(b.getJvmOptionsEnvVar(),
                    String.join(" ", config.getEffectiveJvmArguments()), null));
        });
        //In all other cases its the responsibility of the image to set those up correctly.
        if (hasCustomJarPath || hasCustomJvmArguments) {
            // Custom jar path or jvm arguments: spell out the full launch command explicitly.
            List<String> cmd = new ArrayList<>();
            cmd.add("java");
            cmd.addAll(config.getEffectiveJvmArguments());
            cmd.addAll(Arrays.asList("-jar", pathToJar));
            envProducer.produce(KubernetesEnvBuildItem.createSimpleVar(JAVA_APP_JAR, pathToJar, null));
            commandProducer.produce(KubernetesCommandBuildItem.command(cmd));
        } else if (baseImage.isEmpty()) {
            // Unknown base image with default layout: just point JAVA_APP_JAR at the jar.
            envProducer.produce(KubernetesEnvBuildItem.createSimpleVar(JAVA_APP_JAR, pathToJar, null));
        }
    }
}
/**
 * Native-mode: computes the builder base image and, for BINARY builds, the env vars /
 * launch command needed to run the native binary.
 *
 * Fix: the command was built with {@code config.nativeArguments.get()}, which throws
 * {@link java.util.NoSuchElementException} when only a custom native path (but no
 * native arguments) is configured; an empty argument list is used instead.
 */
@BuildStep(onlyIf = { IsNormalNotRemoteDev.class, OpenshiftBuild.class, NativeBuild.class })
public void openshiftRequirementsNative(OpenshiftConfig openshiftConfig,
        S2iConfig s2iConfig,
        CurateOutcomeBuildItem curateOutcomeBuildItem,
        OutputTargetBuildItem out,
        PackageConfig packageConfig,
        NativeImageBuildItem nativeImage,
        BuildProducer<KubernetesEnvBuildItem> envProducer,
        BuildProducer<BaseImageInfoBuildItem> builderImageProducer,
        BuildProducer<KubernetesCommandBuildItem> commandProducer) {
    OpenshiftConfig config = mergeConfig(openshiftConfig, s2iConfig);
    boolean usingDefaultBuilder = ImageUtil.getRepository(OpenshiftConfig.DEFAULT_BASE_NATIVE_IMAGE)
            .equals(ImageUtil.getRepository(config.baseNativeImage));
    String outputNativeBinaryFileName = nativeImage.getPath().getFileName().toString();
    String nativeBinaryFileName = null;
    boolean hasCustomNativePath = config.nativeBinaryFileName.isPresent() || config.nativeBinaryDirectory.isPresent();
    boolean hasCustomNativeArguments = config.nativeArguments.isPresent();
    //The default openshift builder for native builds, renames the native binary.
    //To make things easier for the user, we need to handle it.
    if (usingDefaultBuilder && !config.nativeBinaryFileName.isPresent()) {
        nativeBinaryFileName = OpenshiftConfig.DEFAULT_NATIVE_TARGET_FILENAME;
    } else {
        nativeBinaryFileName = config.nativeBinaryFileName.orElse(outputNativeBinaryFileName);
    }
    if (config.buildStrategy == BuildStrategy.BINARY) {
        builderImageProducer.produce(new BaseImageInfoBuildItem(config.baseNativeImage));
        Optional<OpenshiftBaseNativeImage> baseImage = OpenshiftBaseNativeImage.findMatching(config.baseNativeImage);
        // Native binary directory priorities:
        // 1. explicitly specified by the user.
        // 2. detected via OpenshiftBaseNativeImage
        // 3. fallback value
        String nativeBinaryDirectory = config.nativeBinaryDirectory
                .orElse(baseImage.map(i -> i.getNativeBinaryDirectory()).orElse(config.FALLBACK_NATIVE_BINARY_DIRECTORY));
        String pathToNativeBinary = concatUnixPaths(nativeBinaryDirectory, nativeBinaryFileName);
        baseImage.ifPresent(b -> {
            envProducer.produce(
                    KubernetesEnvBuildItem.createSimpleVar(b.getHomeDirEnvVar(), nativeBinaryDirectory, OPENSHIFT));
            config.nativeArguments.ifPresent(nativeArguments -> {
                envProducer.produce(KubernetesEnvBuildItem.createSimpleVar(b.getOptsEnvVar(),
                        String.join(" ", nativeArguments), OPENSHIFT));
            });
        });
        if (hasCustomNativePath || hasCustomNativeArguments) {
            // nativeArguments may be absent when only a custom path triggered this branch.
            commandProducer.produce(KubernetesCommandBuildItem.commandWithArgs(pathToNativeBinary,
                    config.nativeArguments.orElse(Collections.emptyList())));
        }
    }
}
/**
 * JVM-mode: performs the actual in-cluster build by archiving the jar (layout depends
 * on the package type) and feeding it to an openshift binary build.
 * Skipped when builds are explicitly disabled, or when neither a build nor a push was
 * enabled or requested by another extension.
 */
@BuildStep(onlyIf = { IsNormalNotRemoteDev.class, OpenshiftBuild.class }, onlyIfNot = NativeBuild.class)
public void openshiftBuildFromJar(OpenshiftConfig openshiftConfig,
        S2iConfig s2iConfig,
        ContainerImageConfig containerImageConfig,
        KubernetesClientBuildItem kubernetesClientBuilder,
        ContainerImageInfoBuildItem containerImage,
        ArchiveRootBuildItem archiveRoot, OutputTargetBuildItem out, PackageConfig packageConfig,
        List<GeneratedFileSystemResourceBuildItem> generatedResources,
        Optional<ContainerImageBuildRequestBuildItem> buildRequest,
        Optional<ContainerImagePushRequestBuildItem> pushRequest,
        BuildProducer<ArtifactResultBuildItem> artifactResultProducer,
        BuildProducer<ContainerImageBuilderBuildItem> containerImageBuilder,
        // used to ensure that the jar has been built
        JarBuildItem jar) {
    OpenshiftConfig config = mergeConfig(openshiftConfig, s2iConfig);
    if (containerImageConfig.isBuildExplicitlyDisabled()) {
        return;
    }
    // Only proceed when a build/push was explicitly enabled or requested by another extension.
    if (!containerImageConfig.isBuildExplicitlyEnabled() && !containerImageConfig.isPushExplicitlyEnabled()
            && !buildRequest.isPresent() && !pushRequest.isPresent()) {
        return;
    }
    // The build is driven by the generated openshift.yml manifest; without it there is nothing to do.
    Optional<GeneratedFileSystemResourceBuildItem> openshiftYml = generatedResources
            .stream()
            .filter(r -> r.getName().endsWith("kubernetes" + File.separator + "openshift.yml"))
            .findFirst();
    if (openshiftYml.isEmpty()) {
        LOG.warn(
                "No Openshift manifests were generated so no openshift build process will be taking place");
        return;
    }
    try (KubernetesClient kubernetesClient = buildClient(kubernetesClientBuilder)) {
        String namespace = Optional.ofNullable(kubernetesClient.getNamespace()).orElse("default");
        LOG.info("Starting (in-cluster) container image build for jar using: " + config.buildStrategy + " on server: "
                + kubernetesClient.getMasterUrl() + " in namespace:" + namespace + ".");
        //The contextRoot is where inside the tarball we will add the jars. A null value means everything will be added under '/' while "target" means everything will be added under '/target'.
        //For docker kind of builds where we use instructions like: `COPY target/*.jar /deployments` it using '/target' is a requirement.
        //For s2i kind of builds where jars are expected directly in the '/' we have to use null.
        String outputDirName = out.getOutputDirectory().getFileName().toString();
        String contextRoot = getContextRoot(outputDirName, packageConfig.isFastJar(), config.buildStrategy);
        // A fresh builder with http2 disabled is used for the build itself (see newClientBuilderWithoutHttp2).
        KubernetesClientBuilder clientBuilder = newClientBuilderWithoutHttp2(kubernetesClient.getConfiguration(),
                kubernetesClientBuilder.getHttpClientFactory());
        if (packageConfig.isFastJar()) {
            // Fast-jar: the whole quarkus-app directory is the payload.
            createContainerImage(clientBuilder, openshiftYml.get(), config, contextRoot, jar.getPath().getParent(),
                    jar.getPath().getParent());
        } else if (jar.getLibraryDir() != null) { //When using uber-jar the libraryDir is going to be null, potentially causing NPE.
            createContainerImage(clientBuilder, openshiftYml.get(), config, contextRoot, jar.getPath().getParent(),
                    jar.getPath(), jar.getLibraryDir());
        } else {
            createContainerImage(clientBuilder, openshiftYml.get(), config, contextRoot, jar.getPath().getParent(),
                    jar.getPath());
        }
        artifactResultProducer.produce(new ArtifactResultBuildItem(null, "jar-container", Collections.emptyMap()));
        containerImageBuilder.produce(new ContainerImageBuilderBuildItem(OPENSHIFT));
    }
}
/**
 * Determines where inside the build tarball the payload should live:
 * {@code null} (tar root) for non-docker strategies, the output directory name for a
 * legacy jar, and output-dir/quarkus-app for a fast-jar.
 */
private String getContextRoot(String outputDirName, boolean isFastJar, BuildStrategy buildStrategy) {
    if (buildStrategy != BuildStrategy.DOCKER) {
        return null;
    }
    return isFastJar
            ? outputDirName + "/" + DEFAULT_FAST_JAR_DIRECTORY_NAME
            : outputDirName;
}
/**
 * Native-mode counterpart of {@code openshiftBuildFromJar}: archives the native binary
 * and feeds it to an openshift binary build.
 * Skipped when builds are explicitly disabled, or when neither a build nor a push was
 * enabled or requested by another extension.
 */
@BuildStep(onlyIf = { IsNormalNotRemoteDev.class, OpenshiftBuild.class, NativeBuild.class })
public void openshiftBuildFromNative(OpenshiftConfig openshiftConfig, S2iConfig s2iConfig,
        ContainerImageConfig containerImageConfig,
        KubernetesClientBuildItem kubernetesClientBuilder,
        ContainerImageInfoBuildItem containerImage,
        ArchiveRootBuildItem archiveRoot, OutputTargetBuildItem out, PackageConfig packageConfig,
        List<GeneratedFileSystemResourceBuildItem> generatedResources,
        Optional<ContainerImageBuildRequestBuildItem> buildRequest,
        Optional<ContainerImagePushRequestBuildItem> pushRequest,
        BuildProducer<ArtifactResultBuildItem> artifactResultProducer,
        BuildProducer<ContainerImageBuilderBuildItem> containerImageBuilder,
        NativeImageBuildItem nativeImage) {
    OpenshiftConfig config = mergeConfig(openshiftConfig, s2iConfig);
    if (containerImageConfig.isBuildExplicitlyDisabled()) {
        return;
    }
    // Only proceed when a build/push was explicitly enabled or requested by another extension.
    if (!containerImageConfig.isBuildExplicitlyEnabled() && !containerImageConfig.isPushExplicitlyEnabled()
            && !buildRequest.isPresent() && !pushRequest.isPresent()) {
        return;
    }
    try (KubernetesClient kubernetesClient = buildClient(kubernetesClientBuilder)) {
        String namespace = Optional.ofNullable(kubernetesClient.getNamespace()).orElse("default");
        LOG.info("Starting (in-cluster) container image build for jar using: " + config.buildStrategy + " on server: "
                + kubernetesClient.getMasterUrl() + " in namespace:" + namespace + ".");
        // NOTE(review): unlike the jar variant, the manifest lookup happens after the client is
        // created; harmless, but the two steps could be aligned for consistency.
        Optional<GeneratedFileSystemResourceBuildItem> openshiftYml = generatedResources
                .stream()
                .filter(r -> r.getName().endsWith("kubernetes" + File.separator + "openshift.yml"))
                .findFirst();
        if (openshiftYml.isEmpty()) {
            LOG.warn(
                    "No Openshift manifests were generated so no openshift build process will be taking place");
            return;
        }
        //The contextRoot is where inside the tarball we will add the jars. A null value means everything will be added under '/' while "target" means everything will be added under '/target'.
        //For docker kind of builds where we use instructions like: `COPY target/*.jar /deployments` it using '/target' is a requirement.
        //For s2i kind of builds where jars are expected directly in the '/' we have to use null.
        String contextRoot = config.buildStrategy == BuildStrategy.DOCKER ? "target" : null;
        createContainerImage(
                newClientBuilderWithoutHttp2(kubernetesClient.getConfiguration(),
                        kubernetesClientBuilder.getHttpClientFactory()),
                openshiftYml.get(), config, contextRoot, out.getOutputDirectory(), nativeImage.getPath());
        artifactResultProducer.produce(new ArtifactResultBuildItem(null, "native-container", Collections.emptyMap()));
        containerImageBuilder.produce(new ContainerImageBuilderBuildItem(OPENSHIFT));
    }
}
/**
 * Packages {@code output} (plus any {@code additional} paths) into a tar archive and
 * triggers an openshift binary build with it.
 *
 * Fix: the temporary archive was never removed; it is now deleted once the build
 * request has completed so repeated builds don't accumulate temp files.
 *
 * @param kubernetesClientBuilder builder used to create a short-lived client
 * @param openshiftManifests the generated openshift.yml content
 * @param openshiftConfig the (already merged) openshift configuration
 * @param base directory inside the tarball under which entries are placed; null means '/'
 * @param output the directory/file to package
 * @param additional extra paths to include in the archive
 */
public static void createContainerImage(KubernetesClientBuilder kubernetesClientBuilder,
        GeneratedFileSystemResourceBuildItem openshiftManifests,
        OpenshiftConfig openshiftConfig,
        String base,
        Path output,
        Path... additional) {
    File tar;
    try {
        File original = Packaging.packageFile(output, base, additional);
        //Let's rename the archive and give it a more descriptive name, as it may appear in the logs.
        tar = Files.createTempFile("quarkus-", "-openshift").toFile();
        Files.move(original.toPath(), tar.toPath(), StandardCopyOption.REPLACE_EXISTING);
    } catch (Exception e) {
        throw new RuntimeException("Error creating the openshift binary build archive.", e);
    }
    try (KubernetesClient client = kubernetesClientBuilder.build()) {
        OpenShiftClient openShiftClient = toOpenshiftClient(client);
        KubernetesList kubernetesList = Serialization
                .unmarshalAsList(new ByteArrayInputStream(openshiftManifests.getData()));
        // Only build-related resources are applied here; deployment resources are handled elsewhere.
        List<HasMetadata> buildResources = kubernetesList.getItems().stream()
                .filter(i -> i instanceof BuildConfig || i instanceof ImageStream || i instanceof Secret)
                .collect(Collectors.toList());
        applyOpenshiftResources(openShiftClient, buildResources);
        openshiftBuild(openShiftClient, buildResources, tar, openshiftConfig);
    } finally {
        // The archive is only needed for the duration of the build; don't leak temp files.
        tar.delete();
    }
}
/**
 * Adapts the generic kubernetes client to an {@link OpenShiftClient}, routing
 * adaptation failures (e.g. the cluster is not openshift) through the shared handler.
 */
private static OpenShiftClient toOpenshiftClient(KubernetesClient client) {
    try {
        return client.adapt(OpenShiftClient.class);
    } catch (KubernetesClientException e) {
        KubernetesClientErrorHandler.handle(e);
        return null; // will never happen
    }
}
/**
 * Apply the openshift resources and wait until ImageStreamTags are created.
 *
 * Fix: the debug message on ImageStream wait failure dropped the exception entirely;
 * the cause is now logged so the failure is diagnosable at debug level.
 *
 * @param client the client instance
 * @param buildResources resources to apply
 */
private static void applyOpenshiftResources(OpenShiftClient client, List<HasMetadata> buildResources) {
    // Apply build resource requirements
    try {
        for (HasMetadata i : distinct(buildResources)) {
            deployResource(client, i);
            LOG.info("Applied: " + i.getKind() + " " + i.getMetadata().getName());
        }
        try {
            OpenshiftUtils.waitForImageStreamTags(client, buildResources, 2, TimeUnit.MINUTES);
        } catch (KubernetesClientException e) {
            //User may not have permission to get / list `ImageStreamTag` or this step may fail for any reason.
            //As this is not an integral part of the build we should catch and log.
            LOG.debug("Waiting for ImageStream tag failed. Ignoring.", e);
        }
    } catch (KubernetesClientException e) {
        KubernetesClientErrorHandler.handle(e);
    }
}
/**
 * Triggers a binary build for every distinct BuildConfig found among the applied
 * build resources.
 */
private static void openshiftBuild(OpenShiftClient client, List<HasMetadata> buildResources, File binaryFile,
        OpenshiftConfig openshiftConfig) {
    for (HasMetadata resource : distinct(buildResources)) {
        if (resource instanceof BuildConfig) {
            openshiftBuild(client, (BuildConfig) resource, binaryFile, openshiftConfig);
        }
    }
}
/**
 * Performs the binary build of the specified {@link BuildConfig} with the given
 * binary input.
 *
 * @param client The openshift client instance
 * @param buildConfig The build config
 * @param binaryFile The binary file
 * @param openshiftConfig The openshift configuration
 */
private static void openshiftBuild(OpenShiftClient client, BuildConfig buildConfig, File binaryFile,
        OpenshiftConfig openshiftConfig) {
    Build build;
    try {
        build = client.buildConfigs().withName(buildConfig.getMetadata().getName())
                .instantiateBinary()
                .withTimeoutInMillis(openshiftConfig.buildTimeout.toMillis())
                .fromFile(binaryFile);
    } catch (Exception e) {
        // Instantiation may fail (e.g. on timeout) even though the build actually started;
        // in that case adopt the already-running build instead of failing outright.
        Optional<Build> running = runningBuildsOf(client, buildConfig).findFirst();
        if (running.isPresent()) {
            LOG.warn("An exception: '" + e.getMessage()
                    + " ' occurred while instantiating the build, however the build has been started.");
            build = running.get();
        } else {
            throw openshiftException(e);
        }
    }
    // Poll the build until it reaches a terminal phase, streaming its log while it runs.
    while (isNew(build) || isPending(build) || isRunning(build)) {
        final String buildName = build.getMetadata().getName();
        Build updated = client.builds().withName(buildName).get();
        if (updated == null) {
            throw new IllegalStateException("Build:" + build.getMetadata().getName() + " is no longer present!");
        } else if (updated.getStatus() == null) {
            throw new IllegalStateException("Build:" + build.getMetadata().getName() + " has no status!");
        } else if (isNew(updated) || isPending(updated) || isRunning(updated)) {
            build = updated;
            // Stream the build log; display() blocks until the server closes the watch.
            try (LogWatch w = client.builds().withName(buildName).withPrettyOutput().watchLog();
                    Reader reader = new InputStreamReader(w.getOutput())) {
                display(reader, openshiftConfig.buildLogLevel);
            } catch (IOException e) {
                // This may happen if the LogWatch is closed while we are still reading.
                // We shouldn't let the build fail, so let's log a warning and display last few lines of the log
                LOG.warn("Log stream closed, redisplaying last " + LOG_TAIL_SIZE + " entries:");
                try {
                    display(client.builds().withName(buildName).tailingLines(LOG_TAIL_SIZE).getLogReader(),
                            Logger.Level.WARN);
                } catch (IOException ex) {
                    // Let's ignore this.
                }
            }
        } else if (isComplete(updated)) {
            return;
        } else if (isCancelled(updated)) {
            throw new IllegalStateException("Build:" + buildName + " cancelled!");
        } else if (isFailed(updated)) {
            throw new IllegalStateException(
                    "Build:" + buildName + " failed! " + updated.getStatus().getMessage());
        } else if (isError(updated)) {
            throw new IllegalStateException(
                    "Build:" + buildName + " encountered error! " + updated.getStatus().getMessage());
        }
    }
}
/**
 * Returns a stateful predicate that accepts each resource only the first time its
 * apiVersion/kind:name key is seen.
 */
public static Predicate<HasMetadata> distinctByResourceKey() {
    Map<Object, Boolean> seen = new ConcurrentHashMap<>();
    return resource -> {
        String key = resource.getApiVersion() + "/" + resource.getKind() + ":" + resource.getMetadata().getName();
        return seen.putIfAbsent(key, Boolean.TRUE) == null;
    };
}
/**
 * Returns the resources with duplicates (same apiVersion/kind:name) removed,
 * keeping the first occurrence of each.
 */
private static Collection<HasMetadata> distinct(Collection<HasMetadata> resources) {
    Predicate<HasMetadata> firstOccurrence = distinctByResourceKey();
    List<HasMetadata> unique = new ArrayList<>();
    for (HasMetadata resource : resources) {
        if (firstOccurrence.test(resource)) {
            unique.add(resource);
        }
    }
    return unique;
}
/** Lists all Builds spawned by the given BuildConfig (matched via the build-config.name label). */
private static List<Build> buildsOf(OpenShiftClient client, BuildConfig config) {
    return client.builds().withLabel(BUILD_CONFIG_NAME, config.getMetadata().getName()).list().getItems();
}
/** Streams the Builds of the given BuildConfig currently in the {@code Running} phase. */
private static Stream<Build> runningBuildsOf(OpenShiftClient client, BuildConfig config) {
    return buildsOf(client, config).stream().filter(b -> RUNNING.equalsIgnoreCase(b.getStatus().getPhase()));
}
/**
 * Wraps a build failure in a RuntimeException; KubernetesClientExceptions are first
 * routed through the shared handler (which may itself throw something more specific,
 * in which case the returned exception is never constructed).
 */
private static RuntimeException openshiftException(Throwable t) {
    if (t instanceof KubernetesClientException) {
        KubernetesClientErrorHandler.handle((KubernetesClientException) t);
    }
    return new RuntimeException("Execution of openshift build failed. See build output for more details", t);
}
/**
 * Copies the log line-by-line to the class logger at the given level.
 * Propagates IOException so callers can handle a prematurely closed stream.
 */
private static void display(Reader logReader, Logger.Level level) throws IOException {
    BufferedReader buffered = new BufferedReader(logReader);
    String line;
    while ((line = buffered.readLine()) != null) {
        LOG.log(level, line);
    }
}
/**
 * Creates a client builder from the given configuration with HTTP/2 disabled.
 * Note: mutates the passed-in configuration object.
 */
private static KubernetesClientBuilder newClientBuilderWithoutHttp2(Config configuration,
        HttpClient.Factory httpClientFactory) {
    //Let's disable http2 as it causes issues with duplicate build triggers.
    configuration.setHttp2Disable(true);
    return new KubernetesClientBuilder().withConfig(configuration).withHttpClientFactory(httpClientFactory);
}
/**
 * Builds a client, first applying any explicitly configured deployment namespace
 * to the client configuration.
 */
private static KubernetesClient buildClient(KubernetesClientBuildItem kubernetesClientBuilder) {
    getNamespace().ifPresent(kubernetesClientBuilder.getConfig()::setNamespace);
    return kubernetesClientBuilder.buildClient();
}
/**
 * Applies a single build-related resource according to the configured deploy strategy,
 * with special handling for BuildConfigs (recreated first) and ImageStreams (skipped
 * when one already points at the same repository).
 */
private static void deployResource(OpenShiftClient client, HasMetadata metadata) {
    DeployStrategy deployStrategy = getDeployStrategy();
    var r = client.resource(metadata);
    // NOTE(review): r is the resource *handle* returned by client.resource(); confirm the
    // instanceof checks below are meant to inspect r rather than the original `metadata`.
    // Delete the build config if it already exists, unless the deploy strategy is CreateOrUpdate.
    if (deployStrategy != DeployStrategy.CreateOrUpdate && r instanceof BuildConfig) {
        deleteBuildConfig(client, metadata, r);
    }
    // If the image stream is already installed, we proceed with the next.
    if (r instanceof ImageStream) {
        ImageStream is = (ImageStream) r;
        ImageStream existing = client.imageStreams().withName(metadata.getMetadata().getName()).get();
        if (existing != null &&
                existing.getSpec() != null &&
                existing.getSpec().getDockerImageRepository() != null &&
                existing.getSpec().getDockerImageRepository().equals(is.getSpec().getDockerImageRepository())) {
            LOG.info("Found: " + metadata.getKind() + " " + metadata.getMetadata().getName() + " repository: "
                    + existing.getSpec().getDockerImageRepository());
            return;
        }
    }
    // Deploy the current resource.
    switch (deployStrategy) {
        case Create:
            r.create();
            break;
        case Replace:
            r.replace();
            break;
        case ServerSideApply:
            r.patch(PatchContext.of(PatchType.SERVER_SIDE_APPLY));
            break;
        default:
            // CreateOrUpdate (and any future strategy) falls back to createOrReplace.
            r.createOrReplace();
            break;
    }
}
/**
 * Cascading-deletes the BuildConfig and waits (up to 10s) for it to disappear
 * before it gets recreated by the caller.
 */
private static void deleteBuildConfig(OpenShiftClient client, HasMetadata metadata, NamespaceableResource<HasMetadata> r) {
    r.cascading(true).delete();
    try {
        // Condition matches the null the client reports once the item is gone.
        client.resource(metadata).waitUntilCondition(d -> d == null, 10, TimeUnit.SECONDS);
    } catch (IllegalArgumentException e) {
        // We should ignore that, as its expected to be thrown when item is actually
        // deleted.
    }
}
// visible for test
/**
 * Joins path elements with '/' unix-style: a single trailing '/' is stripped from each
 * element, empty elements are skipped, and a separator is inserted only when the next
 * element does not already start with '/'.
 */
static String concatUnixPaths(String... elements) {
    StringBuilder joined = new StringBuilder();
    for (String part : elements) {
        String segment = part.endsWith("/") ? part.substring(0, part.length() - 1) : part;
        if (segment.isEmpty()) {
            continue;
        }
        if (joined.length() > 0 && !segment.startsWith("/")) {
            joined.append('/');
        }
        joined.append(segment);
    }
    return joined.toString();
}
/** @return true when the build reports the {@code New} phase. */
static boolean isNew(Build build) {
    return hasPhase(build, BuildStatus.New);
}

/** @return true when the build reports the {@code Pending} phase. */
static boolean isPending(Build build) {
    return hasPhase(build, BuildStatus.Pending);
}

/** @return true when the build reports the {@code Running} phase. */
static boolean isRunning(Build build) {
    return hasPhase(build, BuildStatus.Running);
}

/** @return true when the build reports the {@code Complete} phase. */
static boolean isComplete(Build build) {
    return hasPhase(build, BuildStatus.Complete);
}

/** @return true when the build reports the {@code Failed} phase. */
static boolean isFailed(Build build) {
    return hasPhase(build, BuildStatus.Failed);
}

/** @return true when the build reports the {@code Error} phase. */
static boolean isError(Build build) {
    return hasPhase(build, BuildStatus.Error);
}

/** @return true when the build reports the {@code Cancelled} phase. */
static boolean isCancelled(Build build) {
    return hasPhase(build, BuildStatus.Cancelled);
}

/** Null-safe, case-insensitive check that the build's status phase matches the given one. */
private static boolean hasPhase(Build build, BuildStatus phase) {
    return build != null && build.getStatus() != null
            && phase.name().equalsIgnoreCase(build.getStatus().getPhase());
}
}