validateExistingMountTargets.go
package nfsinstance

import (
	"context"
	"fmt"

	efsTypes "github.com/aws/aws-sdk-go-v2/service/efs/types"
	"github.com/elliotchance/pie/v2"
	cloudresourcesv1beta1 "github.com/kyma-project/cloud-manager/api/cloud-control/v1beta1"
	"github.com/kyma-project/cloud-manager/pkg/composed"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/pointer"
)
// validateExistingMountTargets checks whether any existing mount target refers to a subnet that was
// not created by the IpRange. If such a mount target is found, the NfsInstance status is set to the
// error state and reconciliation is stopped. This is necessary because the AWS EFS API allows only
// one mount target per availability zone: if another mount target already exists in some zone,
// retrying to create our own mount target in the IpRange subnet for that zone fails on every
// reconciliation loop, and repeating the loop cannot fix it. A finer-grained validation could load
// the subnet referred to by the mount target and compare its zone, but since nobody should tamper
// with the cloud resources this operator created, and since the number of zones is small and we
// want a mount target in each of them, a foreign subnet most probably occupies a zone that one of
// the IpRange subnets needs. So, to fail fast, as soon as a mount target with a non-IpRange subnet
// is detected, the object is put into the failed state.
func validateExistingMountTargets(ctx context.Context, st composed.State) (error, context.Context) {
	logger := composed.LoggerFromCtx(ctx)
	state := st.(*State)

	// Collect all mount targets whose subnet does not belong to the IpRange.
	var invalidMountTargets []efsTypes.MountTargetDescription
	for _, mt := range state.mountTargets {
		x := state.IpRange().Status.Subnets.SubnetById(pointer.StringDeref(mt.SubnetId, ""))
		if x == nil {
			invalidMountTargets = append(invalidMountTargets, mt)
		}
	}

	if len(invalidMountTargets) == 0 {
		return nil, nil
	}

	logger.WithValues(
		"invalidMountTargets",
		fmt.Sprintf("%v", pie.Map(invalidMountTargets, func(mt efsTypes.MountTargetDescription) string {
			return fmt.Sprintf(
				"(%s %s %s)",
				pointer.StringDeref(mt.MountTargetId, ""),
				pointer.StringDeref(mt.AvailabilityZoneName, ""),
				pointer.StringDeref(mt.SubnetId, ""),
			)
		})),
	).
		Info("Invalid mount targets")

	// Record an error condition listing the offending mount targets.
	meta.SetStatusCondition(state.ObjAsNfsInstance().Conditions(), metav1.Condition{
		Type:   cloudresourcesv1beta1.ConditionTypeError,
		Status: "True",
		Reason: cloudresourcesv1beta1.ReasonInvalidMountTargetsAlreadyExist,
		Message: fmt.Sprintf("Invalid mount targets already exist: %v", pie.Map(invalidMountTargets, func(mt efsTypes.MountTargetDescription) string {
			return pointer.StringDeref(mt.MountTargetId, "")
		})),
	})
	err := state.UpdateObjStatus(ctx)
	if err != nil {
		return composed.LogErrorAndReturn(err, "Error updating NfsInstance status conditions after invalid mount targets found", composed.StopWithRequeue, ctx)
	}

	state.ObjAsNfsInstance().Status.State = cloudresourcesv1beta1.ErrorState
	err = state.UpdateObj(ctx)
	if err != nil {
		return composed.LogErrorAndReturn(err, "Error updating NfsInstance status state after invalid mount targets found", composed.StopWithRequeue, ctx)
	}

	// The situation cannot be fixed by retrying, so stop reconciliation for good.
	return composed.StopAndForget, nil
}
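
For orientation, here is a minimal, hypothetical sketch of how an action like this could be wired into a composed reconciliation flow. composed.ComposeActions comes from the repository's composed package; the neighboring action names (loadMountTargets, createMountTargets) are assumptions for illustration and may not match the actual flow in kyma-project/cloud-manager:

// Hypothetical wiring; the real flow in kyma-project/cloud-manager may differ.
func newNfsInstanceCreateFlow() composed.Action {
	return composed.ComposeActions(
		"awsNfsInstanceValidateAndCreate",
		loadMountTargets,             // assumed action: populates state.mountTargets from the EFS API
		validateExistingMountTargets, // this file: stops and forgets on foreign mount targets
		createMountTargets,           // assumed action: creates the missing per-zone mount targets
	)
}

Because validateExistingMountTargets returns composed.StopAndForget on failure, any actions composed after it never run for an object stuck in this unrecoverable state.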
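
And a hedged sketch of the AWS-side failure the doc comment alludes to: creating a mount target in an availability zone that already has one fails with a MountTargetConflict error, which no retry can fix. Client construction is elided, and efsClient, fileSystemId, and subnetId are illustrative names, not part of this file:

// Illustrative only, assuming an *efs.Client from github.com/aws/aws-sdk-go-v2/service/efs.
func createMountTargetOnce(ctx context.Context, efsClient *efs.Client, fileSystemId, subnetId string) error {
	_, err := efsClient.CreateMountTarget(ctx, &efs.CreateMountTargetInput{
		FileSystemId: &fileSystemId,
		SubnetId:     &subnetId,
	})
	var conflict *efsTypes.MountTargetConflict
	if errors.As(err, &conflict) {
		// A mount target already occupies this zone; retrying cannot succeed,
		// which is why validateExistingMountTargets fails fast instead.
		return err
	}
	return err
}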