package rmdata
/*
Copyright 2019 - 2022 Crunchy Data
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"errors"
"fmt"
"strings"
"time"
"github.com/crunchydata/postgres-operator/internal/config"
"github.com/crunchydata/postgres-operator/internal/util"
log "github.com/sirupsen/logrus"
kerror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
MAX_TRIES = 16
pgBackRestPathFormat = "/backrestrepo/%s"
pgBackRestRepoPVC = "%s-pgbr-repo"
pgDumpPVCPrefix = "backup-%s-pgdump"
pgDataPathFormat = "/pgdata/%s"
tablespacePathFormat = "/tablespaces/%s/%s"
// the tablespace PVC on a replica follows the pattern "<replicaName>-tablespace-..."
tablespaceReplicaPVCPattern = "%s-tablespace-"
// the WAL PVC on a replica follows the pattern "<replicaName>-wal"
walReplicaPVCPattern = "%s-wal"
// the following constants define the suffixes for the various configMaps created by Patroni
configConfigMapSuffix = "config"
leaderConfigMapSuffix = "leader"
failoverConfigMapSuffix = "failover"
syncConfigMapSuffix = "sync"
)
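
// As a rough illustration (the cluster and replica names here are
// hypothetical), the format constants above expand like so for a cluster
// named "mycluster" with a replica named "mycluster-repl":
//
//	fmt.Sprintf(pgBackRestRepoPVC, "mycluster")                // "mycluster-pgbr-repo"
//	fmt.Sprintf(pgDumpPVCPrefix, "mycluster")                  // "backup-mycluster-pgdump"
//	fmt.Sprintf(tablespaceReplicaPVCPattern, "mycluster-repl") // "mycluster-repl-tablespace-"
//	fmt.Sprintf(walReplicaPVCPattern, "mycluster-repl")        // "mycluster-repl-wal"

// Delete removes the Kubernetes objects, and optionally the data and backups,
// associated with a PostgreSQL cluster, replica, or pg_dump backup, depending
// on the fields set on the Request.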
func Delete(request Request) {
log.Infof("rmdata.Process %v", request)
// the case of 'pgo scaledown'
if request.IsReplica {
log.Info("rmdata.Process scaledown replica use case")
removeReplicaServices(request)
pvcList, err := getReplicaPVC(request)
if err != nil {
log.Error(err)
}
// delete the pgreplica custom resource
if err := request.Clientset.
CrunchydataV1().Pgreplicas(request.Namespace).
Delete(request.ReplicaName, &metav1.DeleteOptions{}); err != nil {
// if the pgreplica is not found, assume we're scaling down the original primary and
// continue with removing the replica
if !kerror.IsNotFound(err) {
log.Error(err)
} else {
log.Debug("pgreplica not found, assuming scale down of original primary")
}
}
err = removeReplica(request)
if err != nil {
log.Error(err)
}
if request.RemoveData {
removePVCs(pvcList, request)
}
// scale down is its own use case, so we leave when done
return
}
if request.IsBackup {
log.Info("rmdata.Process backup use case")
// the case of removing a backup using 'pgo delete backup'; this only applies
// to "backup-type=pgdump"
removeBackupJobs(request)
removeLogicalBackupPVCs(request)
// this is the special case of removing an ad hoc backup, so we can
// exit here
return
}
log.Info("rmdata.Process cluster use case")
// first, clear out any of the scheduled jobs that may occur, as this would be
// executing asynchronously against any stale data
removeSchedules(request)
// the user had done something like:
// pgo delete cluster mycluster --delete-data
if request.RemoveData {
removeUserSecrets(request)
}
// handle the case of 'pgo delete cluster mycluster'
removeCluster(request)
if err := request.Clientset.
CrunchydataV1().Pgclusters(request.Namespace).
Delete(request.ClusterName, &metav1.DeleteOptions{}); err != nil {
log.Error(err)
}
removeServices(request)
removeAddons(request)
removePgreplicas(request)
removePgtasks(request)
removeClusterConfigmaps(request)
//removeClusterJobs(request)
if request.RemoveData {
if pvcList, err := getInstancePVCs(request); err != nil {
log.Error(err)
} else {
log.Debugf("rmdata pvc list: [%v]", pvcList)
removePVCs(pvcList, request)
}
}
// backups have to be the last thing we remove. We want to ensure that all
// the clusters (well, really, the primary) have stopped. This means that no
// more WAL archives are being pushed, and at this point it is safe for us to
// remove the pgBackRest repo if we have opted to remove all of the backups.
//
// Regardless of the choice the user made, we want to remove all of the
// backup jobs, as those take up space
removeBackupJobs(request)
// Now, even though it appears we are removing the pgBackRest repo here, we
// are **not** removing the physical data unless request.RemoveBackup is true.
// In that case, only the deployment/services for the pgBackRest repo are
// removed
removeBackrestRepo(request)
// now, check to see if the user wants the remainder of the physical data and
// PVCs to be removed
if request.RemoveBackup {
removeBackupSecrets(request)
removeAllBackupPVCs(request)
}
}
// removeBackrestRepo removes the pgBackRest repo deployment and service that
// are associated with the PostgreSQL cluster
func removeBackrestRepo(request Request) {
deploymentName := fmt.Sprintf("%s-backrest-shared-repo", request.ClusterName)
log.Debugf("deleting the pgbackrest repo [%s]", deploymentName)
// now delete the deployment and services
deletePropagation := metav1.DeletePropagationForeground
err := request.Clientset.
AppsV1().Deployments(request.Namespace).
Delete(deploymentName, &metav1.DeleteOptions{PropagationPolicy: &deletePropagation})
if err != nil {
log.Error(err)
}
// delete the service for the backrest repo
err = request.Clientset.
CoreV1().Services(request.Namespace).
Delete(deploymentName, &metav1.DeleteOptions{})
if err != nil {
log.Error(err)
}
}
// removeAllBackupPVCs removes all of the PVCs associated with any kind of
// backup
func removeAllBackupPVCs(request Request) {
// first, ensure that logical backups are removed
removeLogicalBackupPVCs(request)
// finally, we will remove the pgBackRest repo PVC...or PVCs?
removePgBackRestRepoPVCs(request)
}
// removeBackupSecrets removes any secrets that are associated with backups
// for this cluster, in particular the secret that is used by the pgBackRest
// repository for this cluster.
func removeBackupSecrets(request Request) {
// first, derive the secret name of the pgBackRest repo, which is
// "`clusterName`-`LABEL_BACKREST_REPO_SECRET`"
secretName := fmt.Sprintf("%s-%s",
request.ClusterName, config.LABEL_BACKREST_REPO_SECRET)
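// As an illustration (the label value is an assumption here): if
// config.LABEL_BACKREST_REPO_SECRET were "backrest-repo-config" and the
// cluster were named "mycluster", secretName would be
// "mycluster-backrest-repo-config"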
log.Debugf("removeBackupSecrets: %s", secretName)
// we can attempt to delete the secret directly without making any further
// API calls. Even if we did a "get", there could still be a race with some
// independent process (e.g. an external user) deleting the secret before we
// get to it. The main goal is to have the secret deleted
//
// we'll also check to see if there was an error, but if there is we'll only
// log the fact there was an error; this function is just a pass through
if err := request.Clientset.CoreV1().Secrets(request.Namespace).Delete(secretName, &metav1.DeleteOptions{}); err != nil {
log.Error(err)
}
}
// removeClusterConfigmaps deletes the configmaps that are created for each
// cluster. Patroni creates several when it initializes a new cluster:
// <cluster-name>-leader (stores data pertinent to the leader election process)
// <cluster-name>-config (stores global/cluster-wide configuration settings)
// <cluster-name>-failover and <cluster-name>-sync (failover and synchronous
// replication state, when present)
// Additionally, the Postgres Operator creates a configMap for each cluster
// containing a default Patroni configuration file:
// <cluster-name>-pgha-config (stores a Patroni config file in YAML format)
// Finally, if pgBouncer is deployed for the cluster, its configMap is removed
// as well.
func removeClusterConfigmaps(request Request) {
// Store the derived names of the cluster configmaps in a slice
clusterConfigmaps := []string{
// first, derive the name of the PG HA default configmap, which is
// "`clusterName`-`LABEL_PGHA_CONFIGMAP`"
fmt.Sprintf("%s-%s", request.ClusterName, config.LABEL_PGHA_CONFIGMAP),
// next, the name of the leader configmap, which is
// "`clusterName`-leader"
fmt.Sprintf("%s-%s", request.ClusterName, leaderConfigMapSuffix),
// next, the name of the general configuration settings configmap, which is
// "`clusterName`-config"
fmt.Sprintf("%s-%s", request.ClusterName, configConfigMapSuffix),
// next, the name of the failover configmap, which is
// "`clusterName`-failover"
fmt.Sprintf("%s-%s", request.ClusterName, failoverConfigMapSuffix),
// next, if there is a synchronous replication configmap, clean that up
fmt.Sprintf("%s-%s", request.ClusterName, syncConfigMapSuffix),
// finally, if there is a pgbouncer, remove the pgbouncer configmap
util.GeneratePgBouncerConfigMapName(request.ClusterName),
}
// As with similar resources, we can attempt to delete the configmaps directly without
// making any further API calls since the goal is simply to delete the configmap. Race
// conditions are more or less unavoidable but should not cause any additional problems.
// We'll also check to see if there was an error, but if there is we'll only
// log the fact there was an error; this function is just a pass through
for _, cm := range clusterConfigmaps {
if err := request.Clientset.CoreV1().ConfigMaps(request.Namespace).Delete(cm, &metav1.DeleteOptions{}); err != nil && !kerror.IsNotFound(err) {
log.Error(err)
}
}
}
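// removeClusterJobs removes any jobs that are labeled as belonging to the
// cluster. It is currently only referenced by the commented-out call in
// Delete above.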
func removeClusterJobs(request Request) {
selector := config.LABEL_PG_CLUSTER + "=" + request.ClusterName
jobs, err := request.Clientset.
BatchV1().Jobs(request.Namespace).
List(metav1.ListOptions{LabelSelector: selector})
if err != nil {
log.Error(err)
return
}
for i := 0; i < len(jobs.Items); i++ {
deletePropagation := metav1.DeletePropagationForeground
err := request.Clientset.
BatchV1().Jobs(request.Namespace).
Delete(jobs.Items[i].Name, &metav1.DeleteOptions{PropagationPolicy: &deletePropagation})
if err != nil {
log.Error(err)
}
}
}
// removeCluster removes the cluster deployments EXCEPT for the pgBackRest repo
func removeCluster(request Request) {
// ensure we are deleting every deployment EXCEPT for the pgBackRest repo,
// which needs to happen in a separate step to ensure we clear out all the
// data
selector := fmt.Sprintf("%s=%s,%s!=true",
config.LABEL_PG_CLUSTER, request.ClusterName, config.LABEL_PGO_BACKREST_REPO)
deployments, err := request.Clientset.
AppsV1().Deployments(request.Namespace).
List(metav1.ListOptions{LabelSelector: selector})
// if there is an error here, return as we cannot iterate over the deployment
// list
if err != nil {
log.Error(err)
return
}
// iterate through each deployment and delete it
for _, d := range deployments.Items {
deletePropagation := metav1.DeletePropagationForeground
err := request.Clientset.
AppsV1().Deployments(request.Namespace).
Delete(d.Name, &metav1.DeleteOptions{PropagationPolicy: &deletePropagation})
if err != nil {
log.Error(err)
}
}
// wait to ensure the deployments matching the selector have been fully
// deleted
var completed bool
for i := 0; i < MAX_TRIES; i++ {
deployments, err := request.Clientset.
AppsV1().Deployments(request.Namespace).
List(metav1.ListOptions{LabelSelector: selector})
if err != nil {
log.Error(err)
}
if len(deployments.Items) > 0 {
log.Info("sleeping to wait for Deployments to fully terminate")
time.Sleep(4 * time.Second)
} else {
completed = true
break
}
}
if !completed {
log.Error("could not terminate all cluster deployments")
}
}
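// The polling pattern above (also used in removeReplica and removeBackupJobs
// below) could be factored into a helper along these lines. This is only an
// illustrative sketch under the same MAX_TRIES/4-second assumptions; it is
// not called anywhere in this package:
func waitForNoDeployments(request Request, selector string) bool {
for i := 0; i < MAX_TRIES; i++ {
deployments, err := request.Clientset.
AppsV1().Deployments(request.Namespace).
List(metav1.ListOptions{LabelSelector: selector})
// treat an empty, error-free list as success
if err == nil && len(deployments.Items) == 0 {
return true
}
log.Debug("sleeping to wait for Deployments to fully terminate")
time.Sleep(4 * time.Second)
}
return false
}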
func removeReplica(request Request) error {
deletePropagation := metav1.DeletePropagationForeground
err := request.Clientset.
AppsV1().Deployments(request.Namespace).
Delete(request.ReplicaName, &metav1.DeleteOptions{PropagationPolicy: &deletePropagation})
if err != nil {
log.Error(err)
return err
}
// wait for the deployment to go away fully
var completed bool
for i := 0; i < MAX_TRIES; i++ {
_, err = request.Clientset.
AppsV1().Deployments(request.Namespace).
Get(request.ReplicaName, metav1.GetOptions{})
if err == nil {
log.Info("sleeping to wait for Deployments to fully terminate")
time.Sleep(4 * time.Second)
} else {
completed = true
break
}
}
if !completed {
return errors.New("could not delete replica deployment within max tries")
}
return nil
}
func removeUserSecrets(request Request) {
// get all secrets that match pg-cluster=<cluster-name>
selector := config.LABEL_PG_CLUSTER + "=" + request.ClusterName
secrets, err := request.Clientset.
CoreV1().Secrets(request.Namespace).
List(metav1.ListOptions{LabelSelector: selector})
if err != nil {
log.Error(err)
return
}
for _, s := range secrets.Items {
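// skip the pgBackRest repo secret, which is cleaned up separately by
// removeBackupSecrets when backups are being removed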
if s.ObjectMeta.Labels[config.LABEL_PGO_BACKREST_REPO] == "" {
err := request.Clientset.CoreV1().Secrets(request.Namespace).Delete(s.ObjectMeta.Name, &metav1.DeleteOptions{})
if err != nil {
log.Error(err)
}
}
}
}
func removeAddons(request Request) {
// remove pgbouncer
pgbouncerDepName := request.ClusterName + "-pgbouncer"
deletePropagation := metav1.DeletePropagationForeground
_ = request.Clientset.
AppsV1().Deployments(request.Namespace).
Delete(pgbouncerDepName, &metav1.DeleteOptions{PropagationPolicy: &deletePropagation})
// delete the service named <clusterName>-pgbouncer
_ = request.Clientset.
CoreV1().Services(request.Namespace).
Delete(pgbouncerDepName, &metav1.DeleteOptions{})
}
func removeServices(request Request) {
// remove any service for this cluster
selector := config.LABEL_PG_CLUSTER + "=" + request.ClusterName
services, err := request.Clientset.
CoreV1().Services(request.Namespace).
List(metav1.ListOptions{LabelSelector: selector})
if err != nil {
log.Error(err)
return
}
for i := 0; i < len(services.Items); i++ {
err := request.Clientset.
CoreV1().Services(request.Namespace).
Delete(services.Items[i].Name, &metav1.DeleteOptions{})
if err != nil {
log.Error(err)
}
}
}
func removePgreplicas(request Request) {
// get a list of pgreplicas for this cluster
replicaList, err := request.Clientset.CrunchydataV1().Pgreplicas(request.Namespace).List(metav1.ListOptions{
LabelSelector: config.LABEL_PG_CLUSTER + "=" + request.ClusterName,
})
if err != nil {
log.Error(err)
return
}
log.Debugf("pgreplicas found len is %d\n", len(replicaList.Items))
for _, r := range replicaList.Items {
if err := request.Clientset.
CrunchydataV1().Pgreplicas(request.Namespace).
Delete(r.Spec.Name, &metav1.DeleteOptions{}); err != nil {
log.Warn(err)
}
}
}
func removePgtasks(request Request) {
// get a list of pgtasks for this cluster
taskList, err := request.Clientset.
CrunchydataV1().Pgtasks(request.Namespace).
List(metav1.ListOptions{LabelSelector: config.LABEL_PG_CLUSTER + "=" + request.ClusterName})
if err != nil {
log.Error(err)
return
}
log.Debugf("pgtasks to remove is %d\n", len(taskList.Items))
for _, r := range taskList.Items {
if err := request.Clientset.CrunchydataV1().Pgtasks(request.Namespace).Delete(r.Spec.Name, &metav1.DeleteOptions{}); err != nil {
log.Warn(err)
}
}
}
// getInstancePVCs gets all the PVCs that are associated with PostgreSQL
// instances (at least to the best of our knowledge)
func getInstancePVCs(request Request) ([]string, error) {
pvcList := make([]string, 0)
selector := fmt.Sprintf("%s=%s", config.LABEL_PG_CLUSTER, request.ClusterName)
pgDump, pgBackRest := fmt.Sprintf(pgDumpPVCPrefix, request.ClusterName),
fmt.Sprintf(pgBackRestRepoPVC, request.ClusterName)
log.Debugf("instance pvcs overall selector: [%s]", selector)
// get all of the PVCs to analyze (see the step below)
pvcs, err := request.Clientset.
CoreV1().PersistentVolumeClaims(request.Namespace).
List(metav1.ListOptions{LabelSelector: selector})
// if there is an error, return here and log the error in the calling function
if err != nil {
return pvcList, err
}
// ...this will be a bit janky.
//
// ...we are going to go through all of the PVCs that are associated with this
// cluster. We will then compare them against the names of the backup types
// of PVCs. If they do not match any of those names, then we will add them
// to the list.
//
// ...process of elimination until we tighten up the labeling
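//
// For example (names hypothetical): for cluster "mycluster", PVCs named
// "backup-mycluster-pgdump" or "mycluster-pgbr-repo" are skipped as backup
// PVCs, while ones like "mycluster" or "mycluster-repl" are kept as
// instance PVCs.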
for _, pvc := range pvcs.Items {
pvcName := pvc.ObjectMeta.Name
log.Debugf("found pvc: [%s]", pvcName)
if strings.HasPrefix(pvcName, pgDump) || pvcName == pgBackRest {
log.Debug("skipping...")
continue
}
pvcList = append(pvcList, pvcName)
}
log.Debugf("instance pvcs found: [%v]", pvcList)
return pvcList, nil
}
// getReplicaPVC gets the PVCs for this replica deployment
func getReplicaPVC(request Request) ([]string, error) {
pvcList := make([]string, 0)
// at this point, the naming convention is useful: the replica's primary PVC
// shares the name of the replica deployment (request.ReplicaName) when
// isReplica=true
pvcList = append(pvcList, request.ReplicaName)
// see if there are any tablespaces or WAL volumes assigned to this replica,
// and add them to the list.
//
// ...this is a bit janky, as we have to iterate through ALL the PVCs
// associated with this managed cluster, and pull out any that have a name
// with the pattern "<replicaName>-tablespace-" or "<replicaName>-wal"
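//
// For example (names hypothetical): for a replica named "mycluster-repl",
// this matches PVCs such as "mycluster-repl-tablespace-ts1" and
// "mycluster-repl-wal".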
selector := fmt.Sprintf("%s=%s", config.LABEL_PG_CLUSTER, request.ClusterName)
// get all of the PVCs that are specific to this replica and remove them
pvcs, err := request.Clientset.
CoreV1().PersistentVolumeClaims(request.Namespace).
List(metav1.ListOptions{LabelSelector: selector})
// if there is an error, return here and log the error in the calling function
if err != nil {
return pvcList, err
}
// ...and where the fun begins
tablespaceReplicaPVCPrefix := fmt.Sprintf(tablespaceReplicaPVCPattern, request.ReplicaName)
walReplicaPVCName := fmt.Sprintf(walReplicaPVCPattern, request.ReplicaName)
// iterate over the PVC list and append the tablespace PVCs
for _, pvc := range pvcs.Items {
pvcName := pvc.ObjectMeta.Name
// if it does not start with the tablespace replica PVC pattern and does not equal the WAL
// PVC pattern then continue
if !(strings.HasPrefix(pvcName, tablespaceReplicaPVCPrefix) ||
pvcName == walReplicaPVCName) {
continue
}
log.Debugf("found pvc: [%s]", pvcName)
pvcList = append(pvcList, pvcName)
}
return pvcList, nil
}
func removePVCs(pvcList []string, request Request) error {
for _, p := range pvcList {
log.Infof("deleting pvc %s", p)
deletePropagation := metav1.DeletePropagationForeground
err := request.Clientset.
CoreV1().PersistentVolumeClaims(request.Namespace).
Delete(p, &metav1.DeleteOptions{PropagationPolicy: &deletePropagation})
if err != nil {
log.Error(err)
}
}
return nil
}
// removeBackupJobs removes any job associated with a backup. These include:
//
// - pgBackRest
// - pg_dump (logical)
func removeBackupJobs(request Request) {
// make a list of selectors for the different kinds of backup jobs so that
// each group can be found and deleted
selectors := []string{
// pgBackRest
fmt.Sprintf("%s=%s,%s=true", config.LABEL_PG_CLUSTER, request.ClusterName, config.LABEL_BACKREST_JOB),
// pg_dump
fmt.Sprintf("%s=%s,%s=true", config.LABEL_PG_CLUSTER, request.ClusterName, config.LABEL_BACKUP_TYPE_PGDUMP),
}
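// With illustrative values (the real label names and values live in the
// config package), these selectors render as something like
// "pg-cluster=mycluster,<backrest-job-label>=true" and
// "pg-cluster=mycluster,<pgdump-backup-label>=true".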
// iterate through each type of selector and attempt to get all of the jobs
// that are associated with it
for _, selector := range selectors {
log.Debugf("backup job selector: [%s]", selector)
// find all the jobs associated with this selector
jobs, err := request.Clientset.
BatchV1().Jobs(request.Namespace).
List(metav1.ListOptions{LabelSelector: selector})
if err != nil {
log.Error(err)
continue
}
// iterate through the list of jobs and attempt to delete them
for i := 0; i < len(jobs.Items); i++ {
deletePropagation := metav1.DeletePropagationForeground
err := request.Clientset.
BatchV1().Jobs(request.Namespace).
Delete(jobs.Items[i].Name, &metav1.DeleteOptions{PropagationPolicy: &deletePropagation})
if err != nil {
log.Error(err)
}
}
// ...ensure all the jobs are deleted
var completed bool
for i := 0; i < MAX_TRIES; i++ {
jobs, err := request.Clientset.
BatchV1().Jobs(request.Namespace).
List(metav1.ListOptions{LabelSelector: selector})
if err != nil || len(jobs.Items) > 0 {
log.Debug("sleeping to wait for backup jobs to fully terminate")
time.Sleep(4 * time.Second)
} else {
completed = true
break
}
}
if !completed {
log.Error("could not remove all backup jobs for [%s]", selector)
}
}
}
// removeLogicalBackupPVCs removes the logical backup PVCs associated with a
// cluster. This is an "all-or-nothing" solution: because right now it removes
// the PVCs themselves, it removes **all** logical backups
//
// Additionally, as these backups are not actually mounted anywhere, except
// during one-off jobs, we cannot perform a delete of the filesystem (i.e.
// "rm -rf" like in other commands). Well, we could...we could write a job to
// do this, but that will be saved for future work
func removeLogicalBackupPVCs(request Request) {
pvcList := make([]string, 0)
selector := fmt.Sprintf("%s=%s", config.LABEL_PG_CLUSTER, request.ClusterName)
dumpPrefix := fmt.Sprintf(pgDumpPVCPrefix, request.ClusterName)
// get all of the PVCs to analyze (see the step below)
pvcs, err := request.Clientset.
CoreV1().PersistentVolumeClaims(request.Namespace).
List(metav1.ListOptions{LabelSelector: selector})
if err != nil {
log.Error(err)
return
}
// Now iterate through all the PVCs to identify those that are for a logical backup and add
// them to the PVC list for deletion. This pattern matching will be utilized until better
// labeling is in place to uniquely identify logical backup PVCs.
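// For example (name hypothetical): for cluster "mycluster", a PVC named
// "backup-mycluster-pgdump" matches the prefix and is queued for deletion.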
for _, pvc := range pvcs.Items {
pvcName := pvc.GetName()
if !strings.HasPrefix(pvcName, dumpPrefix) {
continue
}
pvcList = append(pvcList, pvcName)
}
log.Debugf("logical backup pvcs found: [%v]", pvcList)
removePVCs(pvcList, request)
}
// removePgBackRestRepoPVCs removes any PVCs that are used by a pgBackRest repo
func removePgBackRestRepoPVCs(request Request) {
// there is only a single PVC for a pgBackRest repo, and it has a well-defined
// name
pvcName := fmt.Sprintf(pgBackRestRepoPVC, request.ClusterName)
log.Debugf("remove backrest pvc name [%s]", pvcName)
// make a simple list of the PVCs that can be removed via removePVCs
pvcList := []string{pvcName}
removePVCs(pvcList, request)
}
// removeReplicaServices removes the replica service if there is currently only a single replica
// in the cluster, i.e. if the last/final replica is being removed with the current rmdata
// job. If more than one replica still exists, then no action is taken.
func removeReplicaServices(request Request) {
// selector in the format "pg-cluster=<cluster-name>,role=replica"
// which will grab any/all replicas
selector := fmt.Sprintf("%s=%s,%s=%s", config.LABEL_PG_CLUSTER, request.ClusterName,
config.LABEL_PGHA_ROLE, config.LABEL_PGHA_ROLE_REPLICA)
replicaList, err := request.Clientset.
CoreV1().Pods(request.Namespace).
List(metav1.ListOptions{LabelSelector: selector})
if err != nil {
log.Error(err)
return
}
switch len(replicaList.Items) {
case 0:
log.Error("no replicas found for this cluster")
return
case 1:
log.Debug("removing replica service when scaling down to 0 replicas")
err := request.Clientset.
CoreV1().Services(request.Namespace).
Delete(request.ClusterName+"-replica", &metav1.DeleteOptions{})
if err != nil {
log.Error(err)
}
return
}
log.Debug("more than one replica detected, replica service will not be deleted")
}
// removeSchedules removes any of the ConfigMap objects that were created to
// execute scheduled tasks, such as backups.
// As these are consistently labeled, we can leverage Kubernetes selectors to
// delete all of them
func removeSchedules(request Request) {
log.Debugf("removing schedules for '%s'", request.ClusterName)
// a ConfigMap used for the schedule uses the following label selector:
// crunchy-scheduler=true,<config.LABEL_PG_CLUSTER>=<request.ClusterName>
selector := fmt.Sprintf("crunchy-scheduler=true,%s=%s",
config.LABEL_PG_CLUSTER, request.ClusterName)
// run the delete that removes all of the scheduler configmaps;
// if there is an error, log it, but continue on without making a big stink
err := request.Clientset.
CoreV1().ConfigMaps(request.Namespace).
DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector})
if err != nil {
log.Error(err)
}
}