use futures::stream::{self, StreamExt};
use shipcat_definitions::{BaseManifest, Config, Region, ShipcatConfig};
use shipcat_filebacked::SimpleManifest;

use super::{kubectl, Error, ErrorKind, Result};
use crate::{
    apply, diff, helm,
    kubeapi::ShipKube,
    webhooks::{self, UpgradeState},
};

/// Outcome of diffing a single service: its name plus a minified diff, if any.
struct DiffResult {
    name: String,
    diff: Option<String>,
}
async fn diff_summary(svc: String, conf: &Config, reg: &Region) -> Result<DiffResult> {
    let mut mf = shipcat_filebacked::load_manifest(&svc, &conf, &reg)
        .await?
        .complete(&reg)
        .await?;
    // complete with version and uid from crd
    let s = ShipKube::new(&mf).await?;
    let crd = s.get().await?;
    mf.version = mf.version.or(crd.spec.version);
    mf.uid = crd.metadata.uid;

    info!("diffing {}", mf.name);
    let d = if let Some(kdiffunobfusc) = diff::template_vs_kubectl(&mf).await? {
        let kubediff = diff::obfuscate_secrets(
            kdiffunobfusc, // move this away quickly..
            mf.get_secrets(),
        );
        let smalldiff = diff::minify(&kubediff);
        Some(smalldiff)
    } else {
        None
    };
    Ok(DiffResult {
        name: mf.name,
        diff: d,
    })
}
/// Diffs all services in a region
///
/// Helper that shells out to kubectl diff in parallel.
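///
/// A minimal call sketch (module path illustrative; `conf` must be loaded
/// with secrets, since this asserts `conf.has_secrets()`):
///
/// ```ignore
/// shipcat::cluster::mass_diff(&conf, &reg).await?;
/// ```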
pub async fn mass_diff(conf: &Config, reg: &Region) -> Result<()> {
    let svcs = shipcat_filebacked::available(conf, reg).await?;
    assert!(conf.has_secrets());

    let mut buffered = stream::iter(svcs)
        .map(move |mf| diff_summary(mf.base.name, &conf, &reg))
        .buffer_unordered(10);

    let mut errs = vec![];
    let mut diffs = vec![];
    while let Some(r) = buffered.next().await {
        match r {
            Ok(dr) => diffs.push(dr),
            Err(e) => errs.push(e),
        }
    }
    for dr in diffs {
        if let Some(diff) = dr.diff {
            info!("{} diff output:\n{}", dr.name, diff);
        } else {
            info!("{} unchanged", dr.name)
        }
    }
    if !errs.is_empty() {
        for e in &errs {
            match e {
                Error(ErrorKind::KubeError(e2), _) => {
                    warn!("{}", e2); // probably missing service (undiffeable)
                }
                Error(ErrorKind::MissingRollingVersion(svc), _) => {
                    // This only happens in rolling envs because version is mandatory in other envs
                    warn!("ignored missing service {}: {}", svc, e.description());
                }
                _ => {
                    error!("{}", e);
                    debug!("{:?}", e);
                }
            }
        }
        bail!("Failed to diff {} manifests", errs.len());
    }
    Ok(())
}
async fn check_summary(svc: String, skipped: &[String], conf: &Config, reg: &Region) -> Result<String> {
    let mut mf = shipcat_filebacked::load_manifest(&svc, &conf, &reg)
        .await?
        .stub(&reg)
        .await?;
    mf.version = mf.version.or(Some("latest".to_string()));
    mf.uid = Some("FAKE-GUID".to_string());

    info!("verifying template for {}", mf.name);
    let tpl = helm::template(&mf, None).await?;
    helm::template_check(&mf, reg, skipped, &tpl)?;
    Ok(mf.name)
}
/// Verifies all populated templates for all services in a region
///
/// Helper that shells out to helm template in parallel.
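///
/// A call sketch (module path illustrative; the `skipped` names are passed
/// through to `helm::template_check` to exempt entries from the check):
///
/// ```ignore
/// let skipped: Vec<String> = vec![];
/// shipcat::cluster::mass_template_verify(&conf, &reg, &skipped).await?;
/// ```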
pub async fn mass_template_verify(conf: &Config, reg: &Region, skipped: &[String]) -> Result<()> {
    let svcs = shipcat_filebacked::available(conf, reg).await?;

    let mut buffered = stream::iter(svcs)
        .map(move |mf| check_summary(mf.base.name, &skipped, &conf, &reg))
        .buffer_unordered(100);

    let (mut errs, mut passed): (Vec<Error>, Vec<_>) = (vec![], vec![]);
    while let Some(r) = buffered.next().await {
        match r {
            Ok(p) => passed.push(p),
            Err(e) => errs.push(e),
        }
    }
    for n in passed {
        info!("{} verified", n)
    }
    if !errs.is_empty() {
        for e in &errs {
            error!("{}", e);
            debug!("{:?}", e);
        }
        bail!("Failed to verify templates for {} manifests", errs.len());
    }
    Ok(())
}
/// Apply CRDs in a region
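///
/// Roughly equivalent to:
///
/// ```pseudo
/// for crd in gen_all_crds():
///     kubectl apply -f {crd} -n {region.namespace}
/// ```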
pub async fn crd_install(reg: &Region) -> Result<()> {
    use shipcat_definitions::gen_all_crds;
    for crdef in gen_all_crds() {
        kubectl::apply_resource(&reg.name, crdef, &reg.namespace).await?;
    }
    Ok(())
}
/// Apply all services in the region
///
/// Helper that shells out to kubectl apply in parallel.
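///
/// A call sketch (module path illustrative): `conf_sec` must carry secrets
/// for the upgrade phase while `conf_base` must not, so that the applied
/// CRD objects stay secret-free:
///
/// ```ignore
/// shipcat::cluster::mass_crd(&conf_sec, &conf_base, &reg, 8).await?;
/// ```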
pub async fn mass_crd(conf_sec: &Config, conf_base: &Config, reg: &Region, n_workers: usize) -> Result<()> {
    let svcs = shipcat_filebacked::available(conf_base, reg).await?;
    crd_reconcile(svcs, conf_sec, conf_base, &reg.name, n_workers).await
}
async fn crd_reconcile(
    svcs: Vec<SimpleManifest>,
    config_sec: &Config,
    config_base: &Config,
    region: &str,
    n_workers: usize,
) -> Result<()> {
    // NB: This needs config_base for base crd application.
    // The shipcatconfig crd should not have secrets when applied,
    // and the shipcatmanifest crd should not have secrets when applied (fine as long as manifest is not complete()),
    // but when doing the actual upgrade we need a config + region with secrets.
    assert!(config_sec.has_secrets());
    assert!(!config_base.has_secrets());
    let region_sec = config_sec
        .get_regions()
        .iter()
        .find(|r| r.name == region)
        .unwrap()
        .clone();
    let region_base = config_base
        .get_regions()
        .iter()
        .find(|r| r.name == region)
        .unwrap()
        .clone();

    webhooks::reconcile_event(UpgradeState::Pending, &region_sec).await;
    // Always reconcile the CRDs (definitions themselves) first
    crd_install(&region_base).await?;

    // Make sure config can apply first
    let applycfg: ShipcatConfig = if let Some(ref crs) = &region_base.customResources {
        // special configtype detected - re-populating config object
        Config::new(crs.shipcatConfig.clone(), &region_base.name).await?.0
    } else {
        config_base.clone()
    }
    .into();
    kubectl::apply_resource(&region_base.name, applycfg, &region_base.namespace).await?;

    // Delete excess shipcatmanifests (ones no longer in the service list)
    let svc_names = svcs.iter().map(|x| x.base.name.to_string()).collect::<Vec<_>>();
    let excess = kubectl::find_redundant_manifests(&region_sec.namespace, &svc_names).await?;
    if !excess.is_empty() {
        info!("Will remove excess manifests: {:?}", excess);
    }
    for svc in excess {
        // NB: doing deletion sequentially...
        apply::delete(&svc, &region_sec, &config_sec).await?;
    }

    info!(
        "Spawning {} parallel kube jobs with {} workers",
        svcs.len(),
        n_workers
    );
    webhooks::reconcile_event(UpgradeState::Started, &region_sec).await;

    // then parallel apply the remaining ones
    let force = std::env::var("SHIPCAT_MASS_RECONCILE").unwrap_or("0".into()) == "1";
    let wait_for_rollout = true;
    let conf = config_sec.clone();
    let reg = region_sec.clone();
    let mut buffered = stream::iter(svcs)
        .map(|mf| {
            debug!("Running CRD reconcile for {:?}", mf.base.name);
            apply::apply(mf.base.name, force, &reg, &conf, wait_for_rollout, None)
        })
        .buffer_unordered(n_workers);

    let mut errs = vec![];
    while let Some(r) = buffered.next().await {
        if let Err(e) = r {
            warn!("{}", e);
            errs.push(e);
        }
    }

    // propagate the first non-ignorable error if one exists
    for e in errs {
        match e {
            Error(ErrorKind::MissingRollingVersion(svc), _) => {
                // This only happens in rolling envs because version is mandatory in other envs
                warn!(
                    "'{}' missing version for {} - please add or install",
                    svc, region_sec.name
                );
            }
            // remaining cases are not ignorable
            _ => {
                webhooks::reconcile_event(UpgradeState::Failed, &region_sec).await;
                return Err(e);
            }
        }
    }
    // Otherwise we're good
    webhooks::reconcile_event(UpgradeState::Completed, &region_sec).await;
    Ok(())
}
/// Apply all vault policies in a region
///
/// Generates and writes policies directly to vault, using each team's github team name as the auth mapping.
/// Equivalent to:
///
/// ```pseudo
/// for team in config.teams:
/// shipcat get vaultpolicy {team.name} | vault policy write {team.admins} -
/// vault write auth/github/map/teams/{team.admins} value={team.admins}
/// ```
///
/// using the vault specified in the `Region`.
/// If one vault is reused for all regions, this can be done once.
///
/// Requires a `vault login` outside of this command as a user who
/// is sufficiently elevated to write general policies.
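///
/// A call sketch (module path illustrative; `8` is an arbitrary worker count):
///
/// ```ignore
/// shipcat::cluster::mass_vault(&conf, &reg, 8).await?;
/// ```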
pub async fn mass_vault(conf: &Config, reg: &Region, n_workers: usize) -> Result<()> {
    let svcs = shipcat_filebacked::all(conf).await?;
    vault_reconcile(svcs, conf, reg, n_workers).await
}
async fn vault_reconcile(
    mfs: Vec<BaseManifest>,
    conf: &Config,
    region: &Region,
    n_workers: usize,
) -> Result<()> {
    let n_jobs = conf.owners.squads.len();
    info!(
        "Starting {} parallel vault jobs with {} workers",
        n_jobs, n_workers
    );

    // then parallel apply the remaining ones
    let reg = region.clone();
    let mut buffered = stream::iter(conf.owners.clone().squads)
        .map(|(name, squad)| {
            let mfs = mfs.clone();
            debug!("Running vault reconcile for {}", name);
            vault_reconcile_worker(mfs, name, squad.github.admins, &reg)
        })
        .buffer_unordered(n_workers);

    let mut errs = vec![];
    while let Some(r) = buffered.next().await {
        if let Err(e) = r {
            warn!("{}", e);
            errs.push(e);
        }
    }

    // propagate the first error if one exists (no errors are ignorable at the moment)
    if let Some(e) = errs.into_iter().next() {
        return Err(e);
    }
    Ok(())
}
async fn vault_reconcile_worker(
    svcs: Vec<BaseManifest>,
    team: String,
    admins_opt: Option<String>,
    reg: &Region,
) -> Result<()> {
    use std::{fs::File, io::Write, path::Path};

    if admins_opt.is_none() {
        debug!("'{}' does not have a github admins team - ignoring", team);
        return Ok(()); // nothing to do
    }
    let admins = admins_opt.unwrap();

    let policy = reg
        .vault
        .make_policy(svcs, &team, reg.environment.clone())
        .await?;
    debug!("Vault policy: {}", policy);
    // Write policy to a file named "{admins}-policy.hcl"
    let pth = Path::new(".").join(format!("{}-policy.hcl", admins));
    info!("Writing vault policy for {} to {}", admins, pth.display());
    let mut f = File::create(&pth)?;
    writeln!(f, "{}", policy)?;

    use tokio::process::Command;
    // Write a vault policy with the name equal to the admin team:
    // vault policy write {admins} {admins}-policy.hcl
    {
        info!("Applying vault policy for {} in {}", admins, reg.name);
        let write_args = vec![
            "policy".into(),
            "write".into(),
            admins.clone(),
            format!("{}-policy.hcl", admins),
        ];
        debug!("vault {}", write_args.join(" "));
        let s = Command::new("vault").args(&write_args).status().await?;
        if !s.success() {
            bail!("Subprocess failure from vault: {}", s.code().unwrap_or(1001))
        }
    }
    // Associate the policy with the github team:
    // vault write auth/github/map/teams/{admins} value={admins}
    {
        info!(
            "Associating vault policy for {} with github team {} in {}",
            team, admins, reg.name
        );
        let assoc_args = vec![
            "write".into(),
            format!("auth/github/map/teams/{}", admins),
            format!("value={}", admins),
        ];
        debug!("vault {}", assoc_args.join(" "));
        let s = Command::new("vault").args(&assoc_args).status().await?;
        if !s.success() {
            bail!("Subprocess failure from vault: {}", s.code().unwrap_or(1001))
        }
    }
    Ok(())
}