This repository has been archived by the owner on Nov 28, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 46
/
kubeutil.ts
351 lines (312 loc) · 14.7 KB
/
kubeutil.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
/*******************************************************************************
* Copyright (c) 2019 IBM Corporation and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v2.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v20.html
*
* Contributors:
* IBM Corporation - initial API and implementation
*******************************************************************************/
"use strict";
import { promisify } from "util";
import { exec } from "child_process";
import { Operation } from "../projects/operation";
// Promisified child_process.exec for async/await callers elsewhere in this module.
const execAsync = promisify(exec);
import * as path from "path";
import * as logger from "./logger";
import { ContainerStates } from "../projects/constants";
import * as processManager from "./processManager";
import { ProcessResult } from "./processManager";
import { ProjectInfo } from "../projects/Project";
import * as projectExtensions from "../extensions/projectExtensions";
const Client = require("kubernetes-client").Client; // tslint:disable-line:no-require-imports
const config = require("kubernetes-client").config; // tslint:disable-line:no-require-imports
// Kubernetes API client. Only initialized when running inside a cluster
// (IN_K8 env var set); otherwise stays undefined and the k8s helpers below
// will fail into their catch blocks.
let k8sClient: any = undefined;
if (process.env.IN_K8) {
k8sClient = new Client({ config: config.getInCluster(), version: "1.9"});
}
// Namespace all pod/service lookups are scoped to; overridable via env.
const KUBE_NAMESPACE = process.env.KUBE_NAMESPACE || "default";
/**
 * Network/identity information for a project's application pod,
 * as resolved by getApplicationContainerInfo().
 */
export interface PodInfo {
// Name of the running, non-terminating pod ("" / undefined if not found).
podName: string;
// Pod IP. NOTE(review): never populated in this file — on Che/K8s the
// service name is used as the hostname instead; confirm before relying on it.
ip: string;
// Name of the Kubernetes service fronting the pod.
serviceName: string;
// NodePort matched to internalPort (stringified).
exposedPort: string;
// Service targetPort matched against the project's app ports (stringified).
internalPort: string;
// All service targetPorts, stringified.
podPorts: string[];
}
/**
 * @function
 * @description Get kube pod info: resolve the project's pod name, service name,
 * and internal/exposed ports by querying the Kubernetes API with the release
 * (or odo deploymentconfig) label selector.
 *
 * @param projectInfo <Required | ProjectInfo> - The metadata information for a project.
 * @param operation <Required | Operation> - The operation to get kube pod info.
 *
 * @returns Promise<PodInfo> - Partially-filled PodInfo; podName stays undefined
 * when no running pod is found (e.g. the helm install failed).
 */
export async function getApplicationContainerInfo(projectInfo: ProjectInfo, operation: Operation): Promise<PodInfo> {
    const projectID = projectInfo.projectID;
    const info: PodInfo = {
        podName: "",
        ip: "",
        serviceName: "",
        exposedPort: "",
        internalPort: "",
        podPorts: []
    };

    // Normalize the project location so path.basename() yields the directory name.
    // BUG FIX: the original called projectLocation.slice(0, -1) and discarded the
    // result (strings are immutable), so a trailing slash was never removed and
    // path.basename() could return "".
    let projectLocation = operation.projectInfo.location;
    if (projectLocation.endsWith("/")) {
        projectLocation = projectLocation.slice(0, -1);
    }

    const releaseName = operation.containerName;
    let releaseLabel = "release=" + releaseName;
    if (projectInfo.projectType == "odo") {
        // odo deployments are labeled by deploymentconfig rather than release.
        const componentName = path.basename(projectInfo.location);
        const projectHandler = await projectExtensions.getProjectHandler(projectInfo);
        const appName = await projectHandler.getAppName(projectID);
        releaseLabel = "deploymentconfig=" + "cw-" + componentName + "-" + appName;
    }
    const projectName = path.basename(projectLocation);

    // Before deploying the application, we added a release label to the deployment, pod, and service.
    // Use that to get the application's pod and service names.
    // If the application is stuck in the starting state with missing ports, it's likely a step below here failed.

    // Get the pod name for the project.
    try {
        let podName;
        const resp = await k8sClient.api.v1.namespaces(KUBE_NAMESPACE).pods.get({ qs: { labelSelector: releaseLabel } });
        // We are getting the list of pods by the release label. Pick a Running,
        // non-terminating pod (a terminating pod has metadata.deletionTimestamp set).
        for (let i = 0; i < resp.body.items.length; i++) {
            const item = resp.body.items[i];
            if (item.status && item.status.phase) {
                if (item.status.phase !== "Running") {
                    logger.logProjectInfo("Application pod is not running, the status is: " + item.status.phase, projectID, projectName);
                    continue;
                } else if (item.metadata.deletionTimestamp == undefined) {
                    podName = item.metadata.name;
                }
            }
        }
        info.podName = podName;
        if (podName) {
            logger.logProjectInfo("Found the pod while looking up service information for project.", projectID, projectName);
            logger.logProjectInfo("Pod name: " + podName, projectID, projectName);
        } else {
            logger.logProjectInfo("Pod name was not found while looking up service information for project.", projectID, projectName);
        }
    } catch (err) {
        logger.logProjectError("Failed to get the pod name for: " + path.basename(projectLocation), projectID, projectName);
        logger.logProjectError(err, projectID, projectName);
    }

    /*
     * IMPORTANT: If the pod name is undefined that means the helm install failed so there isn't actually a release so we should return
     * at this point since it's possible that another release can have a deployment with the same name and we'd end up sending the port/ip
     * information for the wrong deployment and marking the application as running.
     */
    if (info.podName === undefined) {
        return info;
    }

    // Get the internal and exposed ports.
    try {
        const internalPorts: Array<string> = [];
        const exposedPorts: Array<string> = [];
        const appPorts = projectInfo.appPorts;
        let resp: any = undefined;
        // FIX: was `let deploymentconfig: string = undefined` — invalid under strictNullChecks.
        let deploymentconfig: string | undefined;
        if (projectInfo.projectType == "odo") {
            // odo services carry no release label; fetch all and filter by
            // the deploymentconfig selector extracted from the label.
            resp = await k8sClient.api.v1.namespaces(KUBE_NAMESPACE).services.get();
            const res: string[] = releaseLabel.split("=");
            deploymentconfig = res[1].trim();
        } else {
            resp = await k8sClient.api.v1.namespaces(KUBE_NAMESPACE).services.get({ qs: { labelSelector: releaseLabel } });
        }
        // Iterate through the available ports and store both the internal and exposed ports.
        for (let i = 0; i < resp.body.items.length; i++) {
            if (projectInfo.projectType != "odo" || (projectInfo.projectType == "odo" && resp.body.items[i].spec.selector.deploymentconfig == deploymentconfig)) {
                info.serviceName = resp.body.items[i].metadata.name;
                for (let j = 0; j < resp.body.items[i].spec.ports.length; j++) {
                    internalPorts.push(String(resp.body.items[i].spec.ports[j].targetPort));
                    exposedPorts.push(String(resp.body.items[i].spec.ports[j].nodePort));
                }
            }
        }
        info.podPorts = internalPorts;

        let appPortMatched = false;
        // Iterate through the list of ports received to match with the internal ports.
        for (let i = 0; i < appPorts.length; i++) {
            const port = appPorts[i];
            // Check if the appPort received matches any of the available ports.
            if (internalPorts.includes(port)) {
                // Set the internal port to be the matched appPort.
                info.internalPort = port;
                const indexOfPort = internalPorts.indexOf(port);
                // Set the exposed port that matches the matched internal port.
                info.exposedPort = exposedPorts[indexOfPort];
                // Set the app port to be matched only if the exposed port has been set.
                appPortMatched = info.exposedPort ? true : false;
                // Break out of the loop if the port has been assigned.
                break;
            }
        }
        // If the app ports didn't match, assign them to the first available port.
        if (!appPortMatched) {
            info.internalPort = internalPorts[0];
            info.exposedPort = exposedPorts[0];
        }

        // For Che on K8s, we are now using the service name for the application ping hostname.
        // Check projectUtil.isApplicationUp() for relevant logic.
        logger.logProjectInfo("IP address info: " + info.serviceName, projectID, projectName);
        logger.logProjectInfo("Internal port: " + info.internalPort, projectInfo.projectID, projectInfo.projectName);
        logger.logProjectInfo("Exposed port: " + info.exposedPort, projectInfo.projectID, projectInfo.projectName);
        logger.logProjectInfo("All the exposed pod ports: " + JSON.stringify(info.podPorts), projectInfo.projectID, projectInfo.projectName);
    } catch (err) {
        logger.logProjectError("Failed to get the service internal/exposed port for project: " + projectName, projectID, projectName);
        logger.logProjectError(err, projectID, projectName);
    }
    return info;
}
/**
 * @function
 * @description Check to see if the application container (pod) is active by
 * inspecting pod phases and container statuses matching the release label.
 *
 * @param containerName <Required | String> - The kube container name (release name).
 * @param projectInfo <Optional | ProjectInfo> - Project metadata; needed for odo
 * projects to build the deploymentconfig label selector.
 *
 * @returns Promise<any> - { state: ContainerStates } on success, { error } on failure.
 */
export async function isContainerActive(containerName: string, projectInfo?: ProjectInfo): Promise<any> {
    try {
        let releaseLabel = "release=" + containerName;
        // BUG FIX: projectInfo is an optional parameter; the original dereferenced
        // it unconditionally and threw a TypeError when it was omitted.
        if (projectInfo && projectInfo.projectType == "odo") {
            const componentName = path.basename(projectInfo.location);
            const projectHandler = await projectExtensions.getProjectHandler(projectInfo);
            const appName = await projectHandler.getAppName(projectInfo.projectID);
            releaseLabel = "deploymentconfig=" + "cw-" + componentName + "-" + appName;
        }
        let containerState = {state: ContainerStates.containerNotFound};
        // We are getting the list of pods by the release label.
        const resp = await k8sClient.api.v1.namespaces(KUBE_NAMESPACE).pods.get({ qs: { labelSelector: releaseLabel } });
        const podInfo = resp.body;
        if (podInfo && podInfo.items && (Object.keys(podInfo.items).length > 0)) {
            for (const pod of podInfo.items) {
                if (pod.status && pod.status.phase) {
                    const podPhase = pod.status.phase;
                    // Pod phases are Pending, Running, Succeeded (terminated in success),
                    // Failed, Unknown. Only Pending/Running pods are considered.
                    if (podPhase !== "Pending" && podPhase !== "Running") {
                        continue;
                    }
                }
                if (pod.status && pod.status.containerStatuses &&
                    Object.keys(pod.status.containerStatuses).length > 0) {
                    // NOTE: when multiple containers exist, the last status seen wins.
                    for (const containerStatus of pod.status.containerStatuses) {
                        // Container state entries are running, terminated, waiting.
                        // Container waiting reasons are: ContainerCreating, CrashLoopBackOff, ErrImagePull, ImagePullBackOff
                        if (containerStatus.state && containerStatus.state.running) {
                            containerState = {state: ContainerStates.containerActive};
                        } else if (containerStatus.state && containerStatus.state.waiting &&
                            containerStatus.state.waiting.reason &&
                            containerStatus.state.waiting.reason === "ContainerCreating") {
                            containerState = {state: ContainerStates.containerStarting};
                        } else {
                            // No active pods or containers.
                            containerState = {state: ContainerStates.containerStopped};
                        }
                    }
                }
            }
        } else {
            containerState = {state: ContainerStates.containerNotFound};
        }
        return containerState;
    } catch (err) {
        const msg = "Error getting container status: " + err;
        logger.logError(msg);
        return {error: err};
    }
}
/**
 * @function
 * @description Remove the project's Helm release (`helm delete --purge`).
 *
 * @param projectID <Required | String> - An alphanumeric identifier for a project.
 * @param releaseName <Required | String> - The name of the release to delete.
 *
 * @returns Promise<ProcessResult>
 * @throws Rethrows any error from the helm command after logging it.
 */
export async function deleteHelmRelease(projectID: string, releaseName: string): Promise<ProcessResult> {
    try {
        logger.logProjectInfo("Removing Helm release", projectID);
        // --purge removes the release record as well, freeing the name for reuse.
        return await runHelmCommand(projectID, ["delete", "--purge", releaseName]);
    } catch (err) {
        logger.logProjectError("Error removing Helm release", projectID);
        logger.logProjectError(err, projectID);
        throw err;
    }
}
/**
 * @function
 * @description Print the Helm release status (`helm status`) to the project log.
 * Errors are logged and swallowed — this is a best-effort diagnostic.
 *
 * @param projectID <Required | String> - An alphanumeric identifier for a project.
 * @param releaseName <Required | String> - The name of the release.
 *
 * @returns Promise<void>
 */
export async function printHelmStatus(projectID: string, releaseName: string): Promise<void> {
    try {
        logger.logProjectInfo("Printing Helm release", projectID);
        const result = await runHelmCommand(projectID, ["status", releaseName]);
        logger.logProjectInfo(result.stdout, projectID);
    } catch (err) {
        logger.logProjectError("Error getting the Helm release", projectID);
        logger.logProjectError(err, projectID);
    }
}
/**
 * @function
 * @description Install (or upgrade) the project's helm chart, recreating pods
 * and pointing the image at the given deployment registry.
 *
 * @param projectID <Required | String> - An alphanumeric identifier for a project.
 * @param deploymentName <Required | String> - The name of the deployment.
 * @param chartLocation <Required | String> - The location of the chart file.
 * @param deploymentRegistry <Required | String> - Registry prefix for the image repository.
 *
 * @returns Promise<ProcessResult>
 * @throws Rethrows any error from the helm command after logging it.
 */
export async function installChart(projectID: string, deploymentName: string, chartLocation: string, deploymentRegistry: string): Promise<ProcessResult> {
    const helmArgs: string[] = [
        "upgrade",
        "--install",
        deploymentName,
        "--recreate-pods",
        "--values=/file-watcher/scripts/override-values.yaml",
        "--set",
        "image.repository=" + deploymentRegistry + "/" + deploymentName,
        chartLocation
    ];
    try {
        logger.logProjectInfo("Installing helm chart", projectID);
        return await runHelmCommand(projectID, helmArgs);
    } catch (err) {
        logger.logProjectError("Error installing helm chart", projectID);
        logger.logProjectError(err, projectID);
        throw err;
    }
}
/**
 * @function
 * @description Run a helm command as a detached child process.
 *
 * @param projectID <Required | String> - An alphanumeric identifier for a project.
 * @param args <Required | String[]> - List of args to pass to the helm command.
 *
 * @returns Promise<ProcessResult> - Resolves/rejects with the spawned process result.
 */
async function runHelmCommand(projectID: string, args: string[]): Promise<ProcessResult> {
    // FIX: join args with spaces so the log reads like the actual command line;
    // the original concatenated the array, producing "helm delete,--purge,name".
    logger.logProjectInfo("Running helm command: helm " + args.join(" "), projectID);
    // The original wrapped this in a try/catch that only rethrew — removed;
    // callers already handle and log rejections.
    return processManager.spawnDetachedAsync(projectID, "helm", args, {});
}