forked from elastic/kibana
-
Notifications
You must be signed in to change notification settings - Fork 0
/
index.js
110 lines (92 loc) · 3.85 KB
/
index.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
/*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import { combineLatest } from 'rxjs';
import { first, map } from 'rxjs/operators';
import healthCheck from './lib/health_check';
import { Cluster } from './lib/cluster';
import { createProxy } from './lib/create_proxy';
/**
 * Legacy Elasticsearch plugin entry point.
 *
 * Bridges the new-platform Elasticsearch service (legacy config plus the
 * admin/data cluster clients) into the legacy plugin world and exposes
 * `getCluster`, `createCluster` and `waitUntilReady` on the server object.
 */
export default function (kibana) {
  let defaultVars;

  return new kibana.Plugin({
    require: ['kibana'],

    // Lazily read so the vars resolved in `init` below are picked up.
    uiExports: { injectDefaultVars: () => defaultVars },

    async init(server) {
      const { elasticsearch } = server.newPlatform.setup.core;

      // Everything this plugin exposes is synchronous, so resolve the config
      // and cluster-client observables once up front and keep plain values.
      const [esConfig, adminCluster, dataCluster] = await combineLatest(
        elasticsearch.legacy.config$,
        elasticsearch.adminClient$,
        elasticsearch.dataClient$
      ).pipe(
        first(),
        map(([config, adminClusterClient, dataClusterClient]) => [
          config,
          new Cluster(adminClusterClient),
          new Cluster(dataClusterClient)
        ])
      ).toPromise();

      defaultVars = {
        esRequestTimeout: esConfig.requestTimeout.asMilliseconds(),
        esShardTimeout: esConfig.shardTimeout.asMilliseconds(),
        esApiVersion: esConfig.apiVersion,
      };

      // Registry of custom clusters created via `createCluster` below.
      const clusters = new Map();

      server.expose('getCluster', (name) => {
        switch (name) {
          case 'admin':
            return adminCluster;
          case 'data':
            return dataCluster;
          default:
            return clusters.get(name);
        }
      });

      server.expose('createCluster', (name, clientConfig = {}) => {
        // NOTE: Because the core-provided `admin` and `data` clients are kept
        // out of the `clusters` map, callers can implicitly register custom
        // clusters under those names. That is intentional for now — the
        // x-pack/monitoring bulk uploader creates its own `admin` client —
        // and should be forbidden once the uploader is refactored, see
        // https://github.com/elastic/kibana/issues/31934.
        if (clusters.has(name)) {
          throw new Error(`cluster '${name}' already exists`);
        }

        // Backfill any properties missing from `clientConfig` with the
        // default Elasticsearch config so we don't depend on defaults set
        // and controlled by the underlying Elasticsearch JS client.
        const cluster = new Cluster(elasticsearch.createClient(name, {
          ...esConfig,
          ...clientConfig,
        }));

        clusters.set(name, cluster);
        return cluster;
      });

      // Release every custom cluster client when the server shuts down.
      server.events.on('stop', () => {
        clusters.forEach((cluster) => cluster.close());
        clusters.clear();
      });

      createProxy(server);

      // Set up the health check service and start it.
      const { start, waitUntilReady } = healthCheck(this, server, esConfig.healthCheckDelay.asMilliseconds());
      server.expose('waitUntilReady', waitUntilReady);
      start();
    }
  });
}