local_live_log.js
/**
This module implements the local live log feature. It starts a live log
container alongside the task, pipes the task's log stream into it over HTTP,
and exposes the stream as the `public/logs/live.log` artifact; when the task
ends, the artifact is redirected to the S3-backed copy of the log.
*/
import Debug from 'debug';
import http from 'http';
import Promise from 'promise';
import slugid from 'slugid';
import URL from 'url';
import BulkLog from './bulk_log';
import { pullImageStreamTo } from '../pull_image_to_stream';
import waitForEvent from '../wait_for_event';
import waitForPort from '../wait_for_port';
const ARTIFACT_NAME = 'public/logs/live.log';
const BACKING_ARTIFACT_NAME = 'public/logs/live_backing.log';
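// live.log is exposed to consumers as a redirect artifact while the task
// runs; once the task ends it is re-pointed at the S3-backed backing log.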
// Maximum time to wait for the put socket to become available.
const INIT_TIMEOUT = 2000;
let debug = Debug('taskcluster-docker-worker:features:local_live_log');
/** Feature class managing the live log container for a single task run. */
export default class TaskclusterLogs {
  constructor() {
    /**
    Docker container used in the linking process.
    */
    this.container = null;
    this.token = slugid.v4();
  }
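  /**
  Start the live log container for this task, begin piping the task's log
  stream into it, and register the `public/logs/live.log` redirect artifact
  pointing at the live stream.
  */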
  async link(task) {
    debug('create live log container...');
    // Ensure we have a bulk log backing the live log...
    this.bulkLog = new BulkLog(BACKING_ARTIFACT_NAME);
    await this.bulkLog.created(task);
    let docker = task.runtime.docker;
    // Image name for the live log container.
    let image = task.runtime.taskclusterLogImage;
    await pullImageStreamTo(docker, image, process.stdout);
    let envs = [];
    if (process.env.DEBUG) {
      envs.push('DEBUG=' + process.env.DEBUG);
    }
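    // The container is started with the per-task access token; the same token
    // is later embedded in the public log URL handed to readers.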
    // create the container.
    let createConfig = {
      Image: image,
      Tty: false,
      Env: [
        "DEBUG=*",
        `ACCESS_TOKEN=${this.token}`
      ],
      //Env: envs,
      AttachStdin: false,
      AttachStdout: true,
      AttachStderr: true,
      ExposedPorts: {
        '60023/tcp': {}
      },
      HostConfig: {}
    };
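    // When secure live logging is enabled, mount the worker's TLS certificate
    // and key into the container so the log server can serve readers over
    // HTTPS.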
    if (task.runtime.logging.secureLiveLogging) {
      createConfig.Env.push('SERVER_CRT_FILE=/etc/sslcert.crt');
      createConfig.Env.push('SERVER_KEY_FILE=/etc/sslkey.key');
      createConfig.HostConfig.Binds = [
        `${task.runtime.ssl.certificate}:/etc/sslcert.crt:ro`,
        `${task.runtime.ssl.key}:/etc/sslkey.key:ro`
      ];
    }
    this.container = await docker.createContainer(createConfig);
    // Terrible hack to get container promise proxy.
    this.container = docker.getContainer(this.container.id);
    // TODO: In theory the output of the live log container might be useful;
    // consider logging this somehow.
    await this.container.start({
      // bind the reading side to the host so we can expose it to the world...
      PortBindings: {
        "60023/tcp": [{ HostPort: "0" }]
      }
    });
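    // HostPort "0" lets Docker choose a free host port; inspect the running
    // container to learn its internal IP and the port that was assigned.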
    let inspect = await this.container.inspect();
    try {
      // wait for the initial server response...
      await waitForPort(
        inspect.NetworkSettings.IPAddress, '60022', INIT_TIMEOUT
      );
    } catch (e) {
      task.runtime.log('Failed to connect to live log server', {
        taskId: task.status.taskId,
        runId: task.runId
      });
      // The killed method below will handle cleanup of resources...
      return;
    }
    // The log PUT url is only reachable from the worker host itself.
    let putUrl = `http://${inspect.NetworkSettings.IPAddress}:60022/log`;
    let opts = URL.parse(putUrl);
    opts.method = 'put';
    this.stream = http.request(opts);
    // Even if the live logging server or the upload fails we don't care too
    // much, since the backing log should always work... We only handle errors
    // enough that we don't accidentally fall over as we switch to the backing
    // log.
    this.stream.on('error', function(err) {
      task.runtime.log('Error piping data to live log', {
        err: err.toString(),
        taskId: task.status.taskId,
        runId: task.runId
      });
      task.stream.unpipe(this.stream);
    }.bind(this));
    task.stream.pipe(this.stream);
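    // Build the externally reachable URL for readers: the host port Docker
    // published for 60023/tcp, plus the per-task access token in the path.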
    let publicPort = inspect.NetworkSettings.Ports['60023/tcp'][0].HostPort;
    this.publicUrl = URL.format({
      protocol: task.runtime.logging.secureLiveLogging ? 'https' : 'http',
      hostname: task.hostname,
      port: publicPort,
      pathname: `log/${this.token}`
    });
    debug('live log running: putUrl', putUrl);
    debug('live log running: publicUrl', this.publicUrl);
    let queue = task.runtime.queue;
    // Intentionally use the same expiration as the bulkLog.
    let expiration =
      new Date(Date.now() + task.runtime.logging.bulkLogExpires);
    // Create the redirect artifact...
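    // A 'reference' artifact stores only a URL: the queue redirects readers
    // to publicUrl, so the log can be followed live while the task runs.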
    await queue.createArtifact(
      task.status.taskId,
      task.runId,
      ARTIFACT_NAME,
      {
        storageType: 'reference',
        expires: expiration,
        contentType: 'text/plain',
        url: this.publicUrl
      }
    );
    return {
      links: [],
      env: {}
    };
  }
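  /**
  Once the task has been killed, re-point the live.log artifact at the
  uploaded backing log and garbage collect the live log container.
  */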
  async killed(task) {
    debug('switching live log redirect to backing log...');
    // Can't create artifacts for a task that's been canceled.
    if (task.isCanceled()) {
      // Cleanup all references to the live logging server...
      task.runtime.gc.removeContainer(this.container.id);
      return;
    }
    // Note that we don't wait for the live logging to complete correctly; we
    // simply let it pass or fail to finish, since we are going to kill the
    // connection anyway...
    let backingUrl = await this.bulkLog.killed(task);
    // Switch references to the new log file on S3 rather than the local
    // worker server...
    let expiration =
      new Date(Date.now() + task.runtime.logging.bulkLogExpires);
    await task.runtime.queue.createArtifact(
      task.status.taskId,
      task.runId,
      ARTIFACT_NAME,
      {
        storageType: 'reference',
        expires: expiration,
        contentType: 'text/plain',
        url: backingUrl
      }
    );
    // Cleanup all references to the live logging server...
    task.runtime.gc.removeContainer(this.container.id);
  }
}