// tf_controller_plan.go
package controllers
import (
"context"
"fmt"
eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1"
"github.com/weaveworks/tf-controller/api/planid"
infrav1 "github.com/weaveworks/tf-controller/api/v1alpha2"
"github.com/weaveworks/tf-controller/runner"
"google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
)
// shouldPlan reports whether a new Terraform plan should be generated for
// the object: always when Spec.Force is set, otherwise only when there is
// no plan currently pending approval in the status.
func (r *TerraformReconciler) shouldPlan(terraform infrav1.Terraform) bool {
	// Please do not optimize this logic, as we'd like others to easily understand the logics behind this behaviour.
	switch {
	case terraform.Spec.Force:
		return true
	case terraform.Status.Plan.Pending == "":
		return true
	case terraform.Status.Plan.Pending != "":
		return false
	default:
		return false
	}
}
// plan runs `terraform plan` for the given Terraform object via the runner,
// saves the resulting plan artifact as a secret, and updates the object's
// status conditions to reflect the outcome (planned with changes, planned
// with no changes, or not ready on failure). It returns the updated
// Terraform object along with any error encountered.
func (r *TerraformReconciler) plan(ctx context.Context, terraform infrav1.Terraform, tfInstance string, runnerClient runner.RunnerClient, revision string) (infrav1.Terraform, error) {
	log := ctrl.LoggerFrom(ctx)

	log.Info("calling plan ...")

	// Mark the object as progressing before starting the (potentially long)
	// plan, so observers see an up-to-date status.
	objectKey := types.NamespacedName{Namespace: terraform.Namespace, Name: terraform.Name}
	terraform = infrav1.TerraformProgressing(terraform, "Terraform Planning")
	if err := r.patchStatus(ctx, objectKey, terraform.Status); err != nil {
		log.Error(err, "unable to update status before Terraform planning")
		return terraform, err
	}

	const tfplanFilename = "tfplan"

	planRequest := &runner.PlanRequest{
		TfInstance: tfInstance,
		Out:        tfplanFilename,
		Refresh:    true, // be careful, refresh requires to be true by default
		Targets:    terraform.Spec.Targets,
	}

	// if backend is disabled completely, there will be no plan output file (req.Out = "")
	if r.backendCompletelyDisable(terraform) {
		planRequest.Out = ""
	}

	// check if destroy is set to true or
	// the object is being deleted and DestroyResourcesOnDeletion is set to true
	if terraform.Spec.Destroy || (!terraform.ObjectMeta.DeletionTimestamp.IsZero() && terraform.Spec.DestroyResourcesOnDeletion) {
		log.Info("plan to destroy")
		planRequest.Destroy = true
	}

	planReply, err := runnerClient.Plan(ctx, planRequest)
	if err != nil {
		eventSent := false
		// A state-lock failure is carried as a *runner.PlanReply detail on
		// the gRPC status; surface the lock identifier to the user when
		// present and record the locked condition on the object.
		if st, ok := status.FromError(err); ok {
			for _, detail := range st.Details() {
				if reply, ok := detail.(*runner.PlanReply); ok {
					msg := fmt.Sprintf("Plan error: State locked with Lock Identifier %s", reply.StateLockIdentifier)
					r.event(ctx, terraform, revision, eventv1.EventSeverityError, msg, nil)
					eventSent = true
					terraform = infrav1.TerraformStateLocked(terraform, reply.StateLockIdentifier, fmt.Sprintf("Terraform Locked with Lock Identifier: %s", reply.StateLockIdentifier))
				}
			}
		}

		// Fall back to a generic error event if no lock-specific event fired.
		if !eventSent {
			msg := fmt.Sprintf("Plan error: %s", err.Error())
			r.event(ctx, terraform, revision, eventv1.EventSeverityError, msg, nil)
		}
		// Wrap with %w so callers can unwrap the underlying runner error
		// with errors.Is / errors.As.
		err = fmt.Errorf("error running Plan: %w", err)
		return infrav1.TerraformNotReady(
			terraform,
			revision,
			infrav1.TFExecPlanFailedReason,
			err.Error(),
		), err
	}

	drifted := planReply.Drifted
	log.Info(fmt.Sprintf("plan: %s, found drift: %v", planReply.Message, drifted))

	// currently the PlanCreated flag is only used here to determine if the destroy plan is empty
	if planRequest.Destroy && !planReply.PlanCreated {
		// A corner case
		// If the destroy plan is empty, we should not call apply
		terraform = infrav1.TerraformPlannedNoChanges(terraform, revision, "No objects need to be destroyed")
		return terraform, nil
	}

	if shouldProcessPostPlanningWebhooks(terraform) {
		log.Info("calling post planning webhooks ...")
		terraform, err = r.processPostPlanningWebhooks(ctx, terraform, runnerClient, revision, tfInstance)
		if err != nil {
			log.Error(err, "failed during the process of post planning webhooks")
			return infrav1.TerraformNotReady(
				terraform,
				revision,
				infrav1.PostPlanningWebhookFailedReason,
				err.Error(),
			), err
		}
	}

	// Persist the generated plan so it can be applied later (possibly after
	// manual approval).
	saveTFPlanReply, err := runnerClient.SaveTFPlan(ctx, &runner.SaveTFPlanRequest{
		TfInstance:               tfInstance,
		BackendCompletelyDisable: r.backendCompletelyDisable(terraform),
		Name:                     terraform.Name,
		Namespace:                terraform.Namespace,
		Uuid:                     string(terraform.GetUID()),
		Revision:                 revision,
	})
	if err != nil {
		// Wrap with %w to preserve the underlying error chain for callers.
		err = fmt.Errorf("error saving plan secret: %w", err)
		return infrav1.TerraformNotReady(
			terraform,
			revision,
			infrav1.TFExecPlanFailedReason,
			err.Error(),
		), err
	}
	log.Info(fmt.Sprintf("save tfplan: %s", saveTFPlanReply.Message))

	if drifted {
		forceOrAutoApply := r.forceOrAutoApply(terraform)

		// this is the manual mode, we fire the event to show how to apply the plan
		if !forceOrAutoApply {
			planId := planid.GetPlanID(revision)
			approveMessage := planid.GetApproveMessage(planId, "Plan generated")
			msg := fmt.Sprintf("Planned.\n%s", approveMessage)
			r.event(ctx, terraform, revision, eventv1.EventSeverityInfo, msg, nil)
		}
		terraform = infrav1.TerraformPlannedWithChanges(terraform, revision, forceOrAutoApply, "Plan generated")
	} else {
		terraform = infrav1.TerraformPlannedNoChanges(terraform, revision, "Plan no changes")
	}

	return terraform, nil
}