Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

pre-load CNI #331

Merged
merged 2 commits into from Feb 23, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
248 changes: 248 additions & 0 deletions pkg/build/node/const.go
@@ -0,0 +1,248 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package node

// Well-known paths within the node image.

// kubernetesVersionLocation is the file inside the node image that records
// which Kubernetes version was installed (read back at build time via `cat`).
// TODO: refactor kubernetesVersionLocation to a common internal package
const kubernetesVersionLocation = "/kind/version"

// defaultCNIManifestLocation is where the default CNI manifest is written
// inside the node image.
const defaultCNIManifestLocation = "/kind/manifests/default-cni.yaml"

/*
The default CNI manifest and images are from weave currently.

To update these:
- find the latest stable release at https://github.com/weaveworks/weave/releases
- copy the weave-daemonset-k8s-1.8.yaml to the defaultCNIManifest string
- add a comment to the beginning of the string with the source URL
- update the defaultCNIImages array to include the images in the manifest
- update the comment below with the release URL

Current version: https://github.com/weaveworks/weave/releases/tag/v2.5.1
*/

var defaultCNIImages = []string{"weaveworks/weave-kube:2.5.1", "weaveworks/weave-npc:2.5.1"}

const defaultCNIManifest = `# https://github.com/weaveworks/weave/releases/download/v2.5.1/weave-daemonset-k8s-1.8.yaml
apiVersion: v1
kind: List
items:
- apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
labels:
name: weave-net
namespace: kube-system
- apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: weave-net
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- pods
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- 'networking.k8s.io'
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- nodes/status
verbs:
- patch
- update
- apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: weave-net
labels:
name: weave-net
roleRef:
kind: ClusterRole
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
- apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- weave-net
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
- apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
roleRef:
kind: Role
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
- apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: weave-net
labels:
name: weave-net
namespace: kube-system
spec:
# Wait 5 seconds to let pod connect before rolling next pod
minReadySeconds: 5
template:
metadata:
labels:
name: weave-net
spec:
containers:
- name: weave
command:
- /home/weave/launch.sh
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'weaveworks/weave-kube:2.5.1'
readinessProbe:
httpGet:
host: 127.0.0.1
path: /status
port: 6784
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- name: weavedb
mountPath: /weavedb
- name: cni-bin
mountPath: /host/opt
- name: cni-bin2
mountPath: /host/home
- name: cni-conf
mountPath: /host/etc
- name: dbus
mountPath: /host/var/lib/dbus
- name: lib-modules
mountPath: /lib/modules
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
- name: weave-npc
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'weaveworks/weave-npc:2.5.1'
#npc-args
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
hostNetwork: true
hostPID: true
restartPolicy: Always
securityContext:
seLinuxOptions: {}
serviceAccountName: weave-net
tolerations:
- effect: NoSchedule
operator: Exists
volumes:
- name: weavedb
hostPath:
path: /var/lib/weave
- name: cni-bin
hostPath:
path: /opt
- name: cni-bin2
hostPath:
path: /home
- name: cni-conf
hostPath:
path: /etc
- name: dbus
hostPath:
path: /var/lib/dbus
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
updateStrategy:
type: RollingUpdate
`
70 changes: 40 additions & 30 deletions pkg/build/node/node.go
Expand Up @@ -322,40 +322,26 @@ func (c *BuildContext) prePullImages(dir, containerID string) error {
// first get the images we actually built
builtImages, err := c.getBuiltImages()
if err != nil {
log.Errorf("Image build Failed! %v", err)
log.Errorf("Image build Failed! Failed to get built images: %v", err)
return err
}

// helpers to run things in the build container
execInBuild := func(command ...string) error {
cmd := exec.Command("docker",
append(
[]string{"exec", containerID},
command...,
)...,
)
cmder := docker.ContainerCmder(containerID)
inheritOutputAndRun := func(cmd exec.Cmd) error {
exec.InheritOutput(cmd)
return cmd.Run()
}
combinedOutputLinesInBuild := func(command ...string) ([]string, error) {
cmd := exec.Command("docker",
append(
[]string{"exec", containerID},
command...,
)...,
)
return exec.CombinedOutputLines(cmd)
}

// get the Kubernetes version we installed on the node
// we need this to ask kubeadm what images we need
rawVersion, err := combinedOutputLinesInBuild("cat", "/kind/version")
rawVersion, err := exec.CombinedOutputLines(cmder.Command("cat", kubernetesVersionLocation))
if err != nil {
log.Errorf("Image build Failed! %v", err)
log.Errorf("Image build Failed! Failed to get Kubernetes version: %v", err)
return err
}
if len(rawVersion) != 1 {
log.Errorf("Image build Failed! %v", err)
log.Errorf("Image build Failed! Failed to get Kubernetes version: %v", err)
return errors.New("invalid kubernetes version file")
}

Expand All @@ -376,21 +362,44 @@ func (c *BuildContext) prePullImages(dir, containerID string) error {
}
}

// write the default CNI manifest
// NOTE: the paths inside the container should use the path package
// and not filepath (!), we want posixy paths in the linux container, NOT
// whatever path format the host uses. For paths on the host we use filepath
if err := inheritOutputAndRun(cmder.Command(
"mkdir", "-p", path.Dir(defaultCNIManifestLocation),
)); err != nil {
log.Errorf("Image build Failed! Failed write default CNI Manifest: %v", err)
return err
}
if err := cmder.Command(
"cp", "/dev/stdin", defaultCNIManifestLocation,
).SetStdin(
strings.NewReader(defaultCNIManifest),
).Run(); err != nil {
log.Errorf("Image build Failed! Failed write default CNI Manifest: %v", err)
return err
}

// gets the list of images required by kubeadm
requiredImages, err := combinedOutputLinesInBuild(
requiredImages, err := exec.CombinedOutputLines(cmder.Command(
"kubeadm", "config", "images", "list", "--kubernetes-version", rawVersion[0],
)
))
if err != nil {
return err
}

// all builds should install the default CNI images currently
requiredImages = append(requiredImages, defaultCNIImages...)

// Create "images" subdir.
imagesDir := path.Join(dir, "bits", "images")
if err := os.MkdirAll(imagesDir, 0777); err != nil {
log.Errorf("Image build Failed! Failed create local images dir: %v", err)
return errors.Wrap(err, "failed to make images dir")
}

movePulled := []string{"mv"}
pulled := []string{}
for i, image := range requiredImages {
if !builtImages.Has(image) {
fmt.Printf("Pulling: %s\n", image)
Expand All @@ -405,23 +414,24 @@ func (c *BuildContext) prePullImages(dir, containerID string) error {
if err != nil {
return err
}
movePulled = append(movePulled, fmt.Sprintf("/build/bits/images/%s", pullName))
pulled = append(pulled, fmt.Sprintf("/build/bits/images/%s", pullName))
}
}

// Create the /kind/images directory inside the container.
if err = execInBuild("mkdir", "-p", DockerImageArchives); err != nil {
log.Errorf("Image build Failed! %v", err)
if err = inheritOutputAndRun(cmder.Command("mkdir", "-p", DockerImageArchives)); err != nil {
log.Errorf("Image build Failed! Failed create images dir: %v", err)
return err
}
movePulled = append(movePulled, DockerImageArchives)
if err := execInBuild(movePulled...); err != nil {
pulled = append(pulled, DockerImageArchives)
if err := inheritOutputAndRun(cmder.Command("mv", pulled...)); err != nil {
return err
}

// make sure we own the tarballs
// TODO(bentheelder): someday we might need a different user ...
if err = execInBuild("chown", "-R", "root:root", DockerImageArchives); err != nil {
log.Errorf("Image build Failed! %v", err)
if err = inheritOutputAndRun(cmder.Command("chown", "-R", "root:root", DockerImageArchives)); err != nil {
log.Errorf("Image build Failed! Failed chown images dir %v", err)
return err
}
return nil
Expand Down