createcontext.go (forked from kubernetes-sigs/kind)
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package create

import (
	"fmt"
	"os"
	"strings"
	"time"

	log "github.com/sirupsen/logrus"

	"sigs.k8s.io/kind/pkg/cluster/config"
	"sigs.k8s.io/kind/pkg/cluster/internal/meta"
	"sigs.k8s.io/kind/pkg/cluster/nodes"
	"sigs.k8s.io/kind/pkg/docker"
	logutil "sigs.k8s.io/kind/pkg/log"
)

// Context is a superset of cluster.Context implementing helpers internal to
// Context.Create()
type Context struct {
	*meta.ClusterMeta
	// other fields
	Status *logutil.Status
	Config *config.Config
	*DerivedConfig
	Retain       bool          // whether to retain nodes after failing to create
	WaitForReady time.Duration // how long to wait for the control plane node to be ready
}
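
// A minimal construction sketch (hypothetical: the real wiring happens in
// cluster.Context.Create(), and the field values below are illustrative only):
//
//	cc := &Context{
//		Config:       cfg,              // a parsed *config.Config
//		Retain:       false,            // delete nodes if creation fails
//		WaitForReady: 30 * time.Second, // how long to wait for readiness
//	}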

// Exec runs actions on the kubernetes-in-docker cluster.
// TODO(bentheelder): refactor this further
// Actions are repetitive, high-level abstractions/workflows composed
// of one or more lower-level tasks that automatically adapt to the
// current cluster topology.
func (cc *Context) Exec(nodeList map[string]*nodes.Node, actions []string, wait time.Duration) error {
	// init the exec context and logging
	ec := &execContext{
		Context:      cc,
		nodes:        nodeList,
		waitForReady: wait,
	}
	ec.status = logutil.NewStatus(os.Stdout)
	ec.status.MaybeWrapLogrus(log.StandardLogger())
	// mark the status as failed on early return; End(true) on the success
	// path below runs first and makes the deferred call a no-op
	defer ec.status.End(false)

	// create an ExecutionPlan that applies the given actions to the
	// topology defined in the config
	executionPlan, err := newExecutionPlan(ec.DerivedConfig, actions)
	if err != nil {
		return err
	}

	// execute all the selected actions
	for _, plannedTask := range executionPlan {
		ec.status.Start(fmt.Sprintf("[%s] %s", plannedTask.Node.Name, plannedTask.Task.Description))
		err := plannedTask.Task.Run(ec, plannedTask.Node)
		if err != nil {
			// in case of error, the execution plan is halted
			log.Error(err)
			return err
		}
	}
	ec.status.End(true)
	return nil
}
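
// A usage sketch (hypothetical caller; the action names here are illustrative
// placeholders, not necessarily the ones this package registers):
//
//	nodeList, err := cc.ProvisionNodes()
//	if err != nil {
//		return err
//	}
//	if err := cc.Exec(nodeList, []string{"config", "init"}, cc.WaitForReady); err != nil {
//		return err
//	}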

// EnsureNodeImages ensures that the node images used by the create
// configuration are present
func (cc *Context) EnsureNodeImages() {
	images := map[string]bool{}
	// for all the nodes defined in the `kind` config
	for _, configNode := range cc.AllReplicas() {
		// skip images we have already ensured
		if _, ok := images[configNode.Image]; ok {
			continue
		}
		// strip the digest, if any, for a user-friendly message
		image := configNode.Image
		if strings.Contains(image, "@sha256:") {
			image = strings.Split(image, "@sha256:")[0]
		}
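		// For example (hypothetical reference), "kindest/node:v1.13.2@sha256:…"
		// is displayed as "kindest/node:v1.13.2"; the full digested reference
		// is still what gets pulled below.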
		cc.Status.Start(fmt.Sprintf("Ensuring node image (%s) 🖼", image))
		// attempt to explicitly pull the image (with up to 4 retries) if it
		// doesn't exist locally; we don't care if this errors, we'll still
		// try to run, which also pulls
		_, _ = docker.PullIfNotPresent(configNode.Image, 4)
		// mark the image as already ensured
		images[configNode.Image] = true
	}
}

// ProvisionNodes takes care of creating all the containers
// that will host `kind` nodes
func (cc *Context) ProvisionNodes() (nodeList map[string]*nodes.Node, err error) {
	nodeList = map[string]*nodes.Node{}
	// for all the nodes defined in the `kind` config
	for _, configNode := range cc.AllReplicas() {
		cc.Status.Start(fmt.Sprintf("[%s] Creating node container 📦", configNode.Name))
		// create the node into a container (docker run, but it is paused, see createNode)
		name := fmt.Sprintf("kind-%s-%s", cc.Name(), configNode.Name)
		var node *nodes.Node
		switch configNode.Role {
		case config.ControlPlaneRole:
			node, err = nodes.CreateControlPlaneNode(name, configNode.Image, cc.ClusterLabel())
		case config.WorkerRole:
			node, err = nodes.CreateWorkerNode(name, configNode.Image, cc.ClusterLabel())
		default:
			// guard against an unhandled role, which would leave node nil below
			return nodeList, fmt.Errorf("unknown node role: %v", configNode.Role)
		}
		if err != nil {
			return nodeList, err
		}
		nodeList[configNode.Name] = node

		cc.Status.Start(fmt.Sprintf("[%s] Fixing mounts 🗻", configNode.Name))
		// we need to change a few mounts once we have the container;
		// we'd do this ahead of time if we could, but --privileged implies things
		// that don't seem to be configurable, and we need that flag
		if err := node.FixMounts(); err != nil {
			// TODO(bentheelder): logging here
			return nodeList, err
		}

		cc.Status.Start(fmt.Sprintf("[%s] Starting systemd 🖥", configNode.Name))
		// signal the node container entrypoint to continue booting into systemd
		if err := node.SignalStart(); err != nil {
			// TODO(bentheelder): logging here
			return nodeList, err
		}

		cc.Status.Start(fmt.Sprintf("[%s] Waiting for docker to be ready 🐋", configNode.Name))
		// wait for docker to be ready
		if !node.WaitForDocker(time.Now().Add(time.Second * 30)) {
			// TODO(bentheelder): logging here
			return nodeList, fmt.Errorf("timed out waiting for docker to be ready on node %s", configNode.Name)
		}

		// load the docker image artifacts into the docker daemon
		cc.Status.Start(fmt.Sprintf("[%s] Pre-loading images 🐋", configNode.Name))
		node.LoadImages()
	}
	return nodeList, nil
}
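
// Putting the helpers above together, a hypothetical create flow might look
// like this (a sketch only; the real call sites live elsewhere in this
// package, with Retain-aware cleanup on failure):
//
//	cc.EnsureNodeImages()                // pre-pull node images
//	nodeList, err := cc.ProvisionNodes() // start the node containers
//	if err != nil {
//		return err
//	}
//	return cc.Exec(nodeList, actions, cc.WaitForReady)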