Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Pre-pull cilium and kube-proxy in warming mode #11258

Merged
merged 2 commits on Apr 19, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
10 changes: 10 additions & 0 deletions nodeup/pkg/model/context.go
Expand Up @@ -644,3 +644,13 @@ func (c *NodeupModelContext) CNIBinDir() string {
func (c *NodeupModelContext) CNIConfDir() string {
return "/etc/cni/net.d/"
}

// WarmPullImage registers a task that pre-pulls the given container image,
// but only when nodeup is running in warm-pool ("Warming") configuration
// mode; in normal mode it is a no-op.
func (c *NodeupModelContext) WarmPullImage(ctx *fi.ModelBuilderContext, imageName string) {
	if c.ConfigurationMode != "Warming" {
		return
	}
	ctx.AddTask(&nodetasks.PullImageTask{
		Name:    imageName,
		Runtime: c.Cluster.Spec.ContainerRuntime,
	})
}
16 changes: 11 additions & 5 deletions nodeup/pkg/model/kube_proxy.go
Expand Up @@ -61,6 +61,8 @@ func (b *KubeProxyBuilder) Build(c *fi.ModelBuilderContext) error {
}
}

b.WarmPullImage(c, kubeProxyImage(b.NodeupModelContext))

{
pod, err := b.buildPod()
if err != nil {
Expand Down Expand Up @@ -185,11 +187,7 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {
flags = append(flags, `--resource-container=""`)
}

image := c.Image
if b.Architecture != architectures.ArchitectureAmd64 {
image = strings.Replace(image, "-amd64", "-"+string(b.Architecture), 1)
}

image := kubeProxyImage(b.NodeupModelContext)
container := &v1.Container{
Name: "kube-proxy",
Image: image,
Expand Down Expand Up @@ -312,3 +310,11 @@ func tolerateMasterTaints() []v1.Toleration {

return tolerations
}

// kubeProxyImage returns the kube-proxy container image for this node,
// rewriting the "-amd64" suffix of the configured image when the node's
// architecture is not amd64.
func kubeProxyImage(b *NodeupModelContext) string {
	image := b.Cluster.Spec.KubeProxy.Image
	if arch := b.Architecture; arch != architectures.ArchitectureAmd64 {
		return strings.Replace(image, "-amd64", "-"+string(arch), 1)
	}
	return image
}
8 changes: 8 additions & 0 deletions nodeup/pkg/model/kube_proxy_test.go
Expand Up @@ -177,3 +177,11 @@ func TestKubeProxyBuilderARM64(t *testing.T) {
return builder.Build(target)
})
}
// TestKubeProxyBuilderWarmPool checks the golden task output when building
// kube-proxy in warm-pool ("Warming") mode on an arm64 node, which should
// include an image pre-pull task.
func TestKubeProxyBuilderWarmPool(t *testing.T) {
	RunGoldenTest(t, "tests/golden/minimal", "warmpool", func(ctx *NodeupModelContext, target *fi.ModelBuilderContext) error {
		ctx.ConfigurationMode = "Warming"
		b := KubeProxyBuilder{NodeupModelContext: ctx}
		b.Architecture = architectures.ArchitectureArm64
		return b.Build(target)
	})
}
8 changes: 6 additions & 2 deletions nodeup/pkg/model/networking/cilium.go
Expand Up @@ -36,7 +36,7 @@ var _ fi.ModelBuilder = &CiliumBuilder{}

// Build is responsible for configuring the network cni
func (b *CiliumBuilder) Build(c *fi.ModelBuilderContext) error {
networking := b.Cluster.Spec.Networking
cilium := b.Cluster.Spec.Networking.Cilium

// As long as the Cilium Etcd cluster exists, we should do this
if apiModel.UseCiliumEtcd(b.Cluster) {
Expand All @@ -45,14 +45,18 @@ func (b *CiliumBuilder) Build(c *fi.ModelBuilderContext) error {
}
}

if networking.Cilium == nil {
if cilium == nil {
return nil
}

if err := b.buildBPFMount(c); err != nil {
return err
}

image := "docker.io/cilium/cilium:" + cilium.Version

b.WarmPullImage(c, image)

return nil

}
Expand Down
143 changes: 143 additions & 0 deletions nodeup/pkg/model/tests/golden/minimal/tasks-warmpool.yaml
@@ -0,0 +1,143 @@
contents: |
apiVersion: v1
kind: Pod
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-app: kube-proxy
tier: node
name: kube-proxy
namespace: kube-system
spec:
containers:
- args:
- --cluster-cidr=100.96.0.0/11
- --conntrack-max-per-core=131072
- --hostname-override=@aws
- --kubeconfig=/var/lib/kube-proxy/kubeconfig
- --master=https://127.0.0.1
- --oom-score-adj=-998
- --v=2
- --logtostderr=false
- --alsologtostderr
- --log-file=/var/log/kube-proxy.log
command:
- /usr/local/bin/kube-proxy
image: k8s.gcr.io/kube-proxy:v1.18.0
name: kube-proxy
resources:
requests:
cpu: 100m
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/log/kube-proxy.log
name: logfile
- mountPath: /var/lib/kube-proxy/kubeconfig
name: kubeconfig
readOnly: true
- mountPath: /lib/modules
name: modules
readOnly: true
- mountPath: /etc/ssl/certs
name: ssl-certs-hosts
readOnly: true
- mountPath: /run/xtables.lock
name: iptableslock
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /var/log/kube-proxy.log
name: logfile
- hostPath:
path: /var/lib/kube-proxy/kubeconfig
name: kubeconfig
- hostPath:
path: /lib/modules
name: modules
- hostPath:
path: /usr/share/ca-certificates
name: ssl-certs-hosts
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: iptableslock
status: {}
path: /etc/kubernetes/manifests/kube-proxy.manifest
type: file
---
beforeServices:
- kubelet.service
contents:
task:
CA:
task:
Name: kube-proxy
signer: ca
subject:
CommonName: system:kube-proxy
type: client
Cert:
task:
Name: kube-proxy
signer: ca
subject:
CommonName: system:kube-proxy
type: client
Key:
task:
Name: kube-proxy
signer: ca
subject:
CommonName: system:kube-proxy
type: client
Name: kube-proxy
ServerURL: https://127.0.0.1
mode: "0400"
path: /var/lib/kube-proxy/kubeconfig
type: file
---
contents: ""
ifNotExists: true
mode: "0400"
path: /var/log/kube-proxy.log
type: file
---
Name: kube-proxy
signer: ca
subject:
CommonName: system:kube-proxy
type: client
---
CA:
task:
Name: kube-proxy
signer: ca
subject:
CommonName: system:kube-proxy
type: client
Cert:
task:
Name: kube-proxy
signer: ca
subject:
CommonName: system:kube-proxy
type: client
Key:
task:
Name: kube-proxy
signer: ca
subject:
CommonName: system:kube-proxy
type: client
Name: kube-proxy
ServerURL: https://127.0.0.1
---
Name: k8s.gcr.io/kube-proxy:v1.18.0
Runtime: docker
1 change: 1 addition & 0 deletions upup/pkg/fi/nodeup/nodetasks/BUILD.bazel

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

86 changes: 86 additions & 0 deletions upup/pkg/fi/nodeup/nodetasks/pull_image.go
@@ -0,0 +1,86 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodetasks

import (
"fmt"
"os/exec"
"strings"

"k8s.io/klog/v2"
"k8s.io/kops/upup/pkg/fi"
)

// PullImageTask is responsible for pulling a container image onto the node
// so it is already cached locally before any pod needs it (used when
// warming a warm-pool instance).
type PullImageTask struct {
	// Name is the full image reference to pull (e.g. "k8s.gcr.io/kube-proxy:v1.18.0").
	// It also serves as the task name.
	Name string
	// Runtime selects the pull mechanism: "docker" or "containerd".
	Runtime string
}

// Compile-time checks that PullImageTask satisfies the task interfaces.
var _ fi.Task = &PullImageTask{}
var _ fi.HasDependencies = &PullImageTask{}

// GetDependencies returns the container runtime service tasks (containerd
// or docker) so that images are pulled only after the container runtime is
// completely installed and configured.
func (t *PullImageTask) GetDependencies(tasks map[string]fi.Task) []fi.Task {
	var deps []fi.Task
	for _, task := range tasks {
		svc, ok := task.(*Service)
		if !ok {
			continue
		}
		if svc.Name == containerdService || svc.Name == dockerService {
			deps = append(deps, task)
		}
	}
	return deps
}

// GetName returns the task name (the image reference), or nil when no
// image has been set.
func (t *PullImageTask) GetName() *string {
	if t.Name != "" {
		return &t.Name
	}
	return nil
}

// Run pulls the image by shelling out to the configured container runtime:
// "docker pull" for docker, or "ctr images pull" (in the k8s.io namespace,
// so kubelet/CRI can see the image) for containerd. The combined command
// output is included in any returned error.
func (t *PullImageTask) Run(c *fi.Context) error {
	// Build the pull command; validation of the runtime happens here in one
	// place rather than in a separate up-front check.
	var args []string
	switch t.Runtime {
	case "docker":
		args = []string{"docker", "pull", t.Name}
	case "containerd":
		args = []string{"ctr", "--namespace", "k8s.io", "images", "pull", t.Name}
	case "":
		return fmt.Errorf("no runtime specified")
	default:
		return fmt.Errorf("unknown container runtime: %s", t.Runtime)
	}
	human := strings.Join(args, " ")

	klog.Infof("running command %s", human)
	cmd := exec.Command(args[0], args[1:]...)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("error pulling container image with '%s': %v: %s", human, err, string(output))
	}

	return nil
}
2 changes: 1 addition & 1 deletion upup/pkg/fi/nodeup/nodetasks/service.go
Expand Up @@ -75,7 +75,7 @@ func (p *Service) GetDependencies(tasks map[string]fi.Task) []fi.Task {
switch v := v.(type) {
case *Package, *UpdatePackages, *UserTask, *GroupTask, *Chattr, *BindMount, *Archive:
deps = append(deps, v)
case *Service, *LoadImageTask, *IssueCert, *BootstrapClientTask, *KubeConfig:
case *Service, *LoadImageTask, *PullImageTask, *IssueCert, *BootstrapClientTask, *KubeConfig:
// ignore
case *File:
if len(v.BeforeServices) > 0 {
Expand Down