-
Notifications
You must be signed in to change notification settings - Fork 23
/
utils.go
308 lines (284 loc) · 10.5 KB
/
utils.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
package utils
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"github.com/ghodss/yaml"
"github.com/hashicorp/go-multierror"
"github.com/xeipuuv/gojsonschema"
appsv1 "k8s.io/api/apps/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
languagev1 "github.com/fluxninja/aperture/v2/api/gen/proto/go/aperture/policy/language/v1"
"github.com/fluxninja/aperture/v2/pkg/log"
"github.com/fluxninja/aperture/v2/pkg/policies/controlplane"
"github.com/fluxninja/aperture/v2/pkg/policies/controlplane/circuitfactory"
"github.com/fluxninja/aperture/v2/pkg/policies/controlplane/runtime"
)
// GenerateDotFile generates a DOT file from the given circuit with the specified depth.
// The depth determines how many levels of components in the tree should be expanded in the graph.
// If depth is set to -1, the function will expand components up to the maximum possible depth.
//
// Parameters:
//   - circuit: A pointer to the circuitfactory.Circuit object to be used for generating the DOT file.
//   - dotFilePath: The file path where the generated DOT file should be saved.
//   - depth: The maximum depth the graph should be expanded to.
//     If set to -1, the function will expand components up to the maximum possible depth.
//
// Returns:
//   - An error if any issues occur during the file creation or writing process, otherwise nil.
//
// Example usage:
//
//	err := GenerateDotFile(circuit, "output.dot", 3)
//	// This will generate a DOT file with components expanded up to a depth of 3.
//
//	err := GenerateDotFile(circuit, "output.dot", -1)
//	// This will generate a DOT file with components expanded up to the maximum possible depth.
func GenerateDotFile(circuit *circuitfactory.Circuit, dotFilePath string, depth int) error {
	graph, err := circuit.Tree.GetSubGraph(runtime.NewComponentID(runtime.RootComponentID), depth)
	if err != nil {
		return err
	}
	dot := circuitfactory.DOTGraph(graph)
	// os.WriteFile performs create+write+close in one call and surfaces
	// close-time write errors, which a deferred Close would silently drop.
	// 0o666 matches the mode previously used by os.Create (before umask).
	if err := os.WriteFile(dotFilePath, []byte(dot), 0o666); err != nil {
		log.Error().Err(err).Msg("error writing to file")
		return err
	}
	return nil
}
// GenerateMermaidFile generates a Mermaid file from the given circuit with the specified depth.
// The depth determines how many levels of components in the tree should be expanded in the graph.
// If depth is set to -1, the function will expand components up to the maximum possible depth.
//
// Parameters:
//   - circuit: A pointer to the circuitfactory.Circuit object to be used for generating the Mermaid file.
//   - mermaidFile: The file path where the generated Mermaid file should be saved.
//   - depth: The maximum depth the graph should be expanded to.
//     If set to -1, the function will expand components up to the maximum possible depth.
//
// Returns:
//   - An error if any issues occur during the file creation or writing process, otherwise nil.
//
// Example usage:
//
//	err := GenerateMermaidFile(circuit, "output.mmd", 3)
//	// This will generate a Mermaid file with components expanded up to a depth of 3.
//
//	err := GenerateMermaidFile(circuit, "output.mmd", -1)
//	// This will generate a Mermaid file with components expanded up to the maximum possible depth.
func GenerateMermaidFile(circuit *circuitfactory.Circuit, mermaidFile string, depth int) error {
	graph, err := circuit.Tree.GetSubGraph(runtime.NewComponentID(runtime.RootComponentID), depth)
	if err != nil {
		return err
	}
	mermaid := circuitfactory.MermaidGraph(graph)
	// os.WriteFile performs create+write+close in one call and surfaces
	// close-time write errors, which a deferred Close would silently drop.
	// 0o666 matches the mode previously used by os.Create (before umask).
	if err := os.WriteFile(mermaidFile, []byte(mermaid), 0o666); err != nil {
		log.Error().Err(err).Msg("error writing to file")
		return err
	}
	return nil
}
// CompilePolicy compiles the policy and returns the circuit.
func CompilePolicy(name string, policyBytes []byte) (*circuitfactory.Circuit, *languagev1.Policy, error) {
	// FIXME This ValidateAndCompile function validates the policy as a whole –
	// circuit, but also the other resource classifiers, fluxmeters. This
	// command is called "circuit-compiler" though, so it is bit... surprising.
	// If we compiled just a circuit, we could drop dependency on
	// `controlplane` package.
	compiledCircuit, compiledPolicy, err := controlplane.ValidateAndCompile(context.Background(), name, policyBytes)
	if err != nil {
		return nil, nil, err
	}
	return compiledCircuit, compiledPolicy, nil
}
// FetchPolicyFromCR extracts the spec key from a CR and saves it to a temp file.
//
// It reads the CR file at crPath, unmarshals it as YAML, extracts the "spec"
// key, and writes it (re-marshaled as YAML) to a newly created temp file.
// It returns the path of the temp file; the caller is responsible for
// deleting it when done.
func FetchPolicyFromCR(crPath string) (string, error) {
	crFile, err := os.ReadFile(crPath)
	if err != nil {
		log.Error().Err(err).Msg("failed to read CR file")
		return "", err
	}
	// unmarshal yaml to map struct and extract spec key
	var cr map[string]interface{}
	err = yaml.Unmarshal(crFile, &cr)
	if err != nil {
		log.Error().Err(err).Msg("failed to unmarshal CR file")
		return "", err
	}
	spec, ok := cr["spec"]
	if !ok {
		// BUG fix: previously returned a nil error here (the stale err from
		// Unmarshal), so callers saw success with an empty path.
		err = errors.New("failed to find spec key in CR file")
		log.Error().Err(err).Msg("failed to find spec key in CR file")
		return "", err
	}
	// marshal spec back to yaml for the compiler to consume
	specYaml, err := yaml.Marshal(spec)
	if err != nil {
		log.Error().Err(err).Msg("failed to marshal spec key in CR file")
		return "", err
	}
	// create a temp file named after the CR file
	tmpfile, err := os.CreateTemp("", filepath.Base(crPath))
	if err != nil {
		log.Error().Err(err).Msg("failed to create temp file")
		return "", err
	}
	_, err = tmpfile.Write(specYaml)
	if err != nil {
		log.Error().Err(err).Msg("failed to write to temp file")
		// don't leak the temp file on failure
		tmpfile.Close()
		os.Remove(tmpfile.Name())
		return "", err
	}
	err = tmpfile.Close()
	if err != nil {
		log.Error().Err(err).Msg("failed to close temp file")
		os.Remove(tmpfile.Name())
		return "", err
	}
	return tmpfile.Name(), nil
}
// GetKubeConfig prepares Kubernetes config to connect with the cluster using provided or default kube config file location.
//
// Resolution order when kubeConfig is empty: the KUBECONFIG environment
// variable, then $HOME/.kube/config.
func GetKubeConfig(kubeConfig string) (*rest.Config, error) {
	if kubeConfig == "" {
		if kubeConfigEnv, exists := os.LookupEnv("KUBECONFIG"); exists {
			kubeConfig = kubeConfigEnv
		} else {
			homeDir, err := os.UserHomeDir()
			if err != nil {
				return nil, err
			}
			kubeConfig = filepath.Join(homeDir, ".kube", "config")
		}
		log.Trace().Msgf("Using Kubernetes config '%s'", kubeConfig)
	}
	restConfig, err := clientcmd.BuildConfigFromFlags("", kubeConfig)
	if err != nil {
		// Wrap with %w so callers can inspect the underlying error via
		// errors.Is / errors.As (the previous %s flattened it to a string).
		return nil, fmt.Errorf("failed to connect to Kubernetes: %w", err)
	}
	return restConfig, nil
}
// ValidateWithJSONSchema validates the given document (YAML) against the given JSON schema.
//
// Each path in schemas is added to the schema loader before rootSchema is
// compiled. If any schema file (root or auxiliary) does not exist, validation
// is skipped with a warning and nil is returned. On validation failure, all
// individual violations are aggregated into a single multierror.
func ValidateWithJSONSchema(rootSchema string, schemas []string, documentFile string) error {
	// load schema
	schemaLoader := gojsonschema.NewSchemaLoader()
	// check whether schemas exist
	for _, schema := range schemas {
		if _, err := os.Stat(schema); os.IsNotExist(err) {
			log.Warn().Msgf("Schema %s does not exist. Skipping validation", schema)
			return nil
		}
		err := schemaLoader.AddSchemas(gojsonschema.NewReferenceLoader("file://" + schema))
		if err != nil {
			log.Error().Err(err).Msgf("Failed to add schema %s", schema)
			return err
		}
	}
	if _, err := os.Stat(rootSchema); os.IsNotExist(err) {
		log.Warn().Msgf("Schema %s does not exist. Skipping validation", rootSchema)
		return nil
	}
	schema, err := schemaLoader.Compile(gojsonschema.NewReferenceLoader("file://" + rootSchema))
	if err != nil {
		log.Error().Err(err).Msgf("Failed to compile schema %s", rootSchema)
		return err
	}
	// marshal documentFile to json and load it
	documentYamlBytes, err := os.ReadFile(documentFile)
	if err != nil {
		log.Error().Err(err).Msgf("Failed to read document %s", documentFile)
		return err
	}
	documentJSON, err := yaml.YAMLToJSON(documentYamlBytes)
	if err != nil {
		log.Error().Err(err).Msgf("Failed to convert document %s to JSON", documentFile)
		return err
	}
	documentLoader := gojsonschema.NewBytesLoader(documentJSON)
	// validate document
	result, err := schema.Validate(documentLoader)
	if err != nil {
		log.Error().Err(err).Msgf("Failed to validate document %s", documentFile)
		return err
	}
	if !result.Valid() {
		merr := fmt.Errorf("the document %s is not valid", documentFile)
		for _, desc := range result.Errors() {
			validationErr := fmt.Errorf("- %s", desc)
			merr = multierror.Append(merr, validationErr)
			// BUG fix: previously logged the stale nil err from Validate
			// instead of the actual validation failure.
			log.Error().Err(validationErr).Msg(validationErr.Error())
		}
		return merr
	}
	return nil
}
// IsBlueprintDeprecated reports whether the policyDir is deprecated.
// It reads metadata.yaml and checks for the deprecated key;
// the value of that key is the deprecation message.
//
// If metadata.yaml cannot be read or parsed, the blueprint is treated as
// not deprecated and (false, "") is returned.
func IsBlueprintDeprecated(policyDir string) (bool, string) {
	metadataPath := filepath.Join(policyDir, "metadata.yaml")
	metadataFile, err := os.ReadFile(metadataPath)
	if err != nil {
		log.Warn().Err(err).Msgf("failed to read metadata.yaml file in %s", policyDir)
		return false, ""
	}
	var metadata map[string]interface{}
	err = yaml.Unmarshal(metadataFile, &metadata)
	if err != nil {
		log.Warn().Err(err).Msgf("failed to unmarshal metadata.yaml file in %s", policyDir)
		return false, ""
	}
	deprecated, ok := metadata["deprecated"]
	if !ok {
		return false, ""
	}
	// BUG fix: guard the type assertion; a non-string value (e.g.
	// `deprecated: true`) previously panicked.
	message, ok := deprecated.(string)
	if !ok {
		log.Warn().Msgf("unexpected type %T for deprecated key in %s", deprecated, metadataPath)
		return true, ""
	}
	return true, message
}
// GetControllerDeployment returns the deployment of the Aperture Controller.
//
// It lists deployments in the given namespace labeled
// app.kubernetes.io/component=aperture-controller and requires exactly one
// match; zero or multiple matches are reported as distinct errors.
func GetControllerDeployment(kubeRestConfig *rest.Config, namespace string) (*appsv1.Deployment, error) {
	clientSet, err := kubernetes.NewForConfig(kubeRestConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create new ClientSet: %w", err)
	}
	deployment, err := clientSet.AppsV1().Deployments(namespace).List(context.Background(), metav1.ListOptions{
		LabelSelector: labels.Set{"app.kubernetes.io/component": "aperture-controller"}.String(),
	})
	if err != nil {
		if apierrors.IsNotFound(err) {
			return nil, fmt.Errorf(
				"no deployment with name 'aperture-controller' found on the Kubernetes cluster. The policy can be only applied in the namespace where the Aperture Controller is running")
		}
		return nil, fmt.Errorf("failed to fetch namespace of Aperture Controller in Kubernetes: %w", err)
	}
	if len(deployment.Items) == 0 {
		return nil, errors.New("no deployment with name 'aperture-controller' found on the Kubernetes cluster. The policy can be only applied in the namespace where the Aperture Controller is running")
	}
	if len(deployment.Items) > 1 {
		// BUG fix: previously this case reported "no deployment found",
		// which is misleading when multiple deployments matched the label.
		return nil, fmt.Errorf("found %d deployments matching the Aperture Controller label in namespace %q; expected exactly one", len(deployment.Items), namespace)
	}
	return &deployment.Items[0], nil
}