From 668f3fc6976742bcdb927395a421eef87dd444ff Mon Sep 17 00:00:00 2001 From: Sascha Grunert Date: Wed, 25 Aug 2021 16:17:22 +0200 Subject: [PATCH] Support CRI v1 and v1alpha2 at the same time The idea is to use intermediate internal types to be able to switch between the `v1` and `v1alpha2` protocol versions. The kubelet selects the correct version during connection establishment with the remote runtime. `v1` is the preferred version, whereas `v1alpha2` is now being deprecated. Additional things done: - Added APIVersion() to runtime service - Add --cri-version kubelet configuration flag - Move cri-api types from staging to internal Signed-off-by: Sascha Grunert --- cmd/kubemark/hollow-node.go | 6 +- pkg/kubelet/apis/cri/conversion_v1.go | 950 +++++ pkg/kubelet/apis/cri/conversion_v1alpha2.go | 950 +++++ .../apis => pkg/kubelet/apis/cri}/services.go | 64 +- .../apis/cri}/testing/fake_image_service.go | 40 +- .../apis/cri}/testing/fake_runtime_service.go | 110 +- .../kubelet/apis/cri}/testing/utils.go | 6 +- pkg/kubelet/apis/cri/types.go | 3742 +++++++++++++++++ pkg/kubelet/cm/container_manager.go | 2 +- pkg/kubelet/cm/container_manager_linux.go | 2 +- pkg/kubelet/cm/container_manager_stub.go | 2 +- .../cm/container_manager_unsupported.go | 2 +- pkg/kubelet/cm/container_manager_windows.go | 2 +- pkg/kubelet/cm/cpumanager/cpu_manager.go | 6 +- pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 4 +- pkg/kubelet/cm/fake_container_manager.go | 2 +- .../cm/fake_internal_container_lifecycle.go | 4 +- .../cm/internal_container_lifecycle.go | 4 +- .../cm/internal_container_lifecycle_linux.go | 4 +- ...nternal_container_lifecycle_unsupported.go | 4 +- .../internal_container_lifecycle_windows.go | 4 +- .../cm/memorymanager/memory_manager.go | 4 +- .../cm/memorymanager/memory_manager_test.go | 4 +- pkg/kubelet/config/flags.go | 4 + pkg/kubelet/container/helpers.go | 12 +- pkg/kubelet/container/runtime.go | 8 +- pkg/kubelet/container/testing/fake_runtime.go | 4 +- 
.../container/testing/fake_runtime_helper.go | 6 +- pkg/kubelet/container/testing/runtime_mock.go | 6 +- .../cri/remote/fake/fake_image_service.go | 22 +- pkg/kubelet/cri/remote/fake/fake_runtime.go | 6 +- pkg/kubelet/cri/remote/remote_image.go | 174 +- pkg/kubelet/cri/remote/remote_runtime.go | 739 +++- pkg/kubelet/cri/remote/remote_runtime_test.go | 14 +- pkg/kubelet/cri/remote/utils.go | 6 +- pkg/kubelet/cri/remote/utils_test.go | 46 +- .../cri/streaming/.import-restrictions | 1 + pkg/kubelet/cri/streaming/server.go | 30 +- pkg/kubelet/cri/streaming/server_test.go | 26 +- pkg/kubelet/dockershim/docker_service.go | 2 +- pkg/kubelet/dockershim/docker_streaming.go | 2 +- pkg/kubelet/dockershim/streaming/errors.go | 59 + .../dockershim/streaming/request_cache.go | 149 + pkg/kubelet/dockershim/streaming/server.go | 385 ++ pkg/kubelet/images/helpers.go | 4 +- pkg/kubelet/images/image_manager.go | 4 +- pkg/kubelet/images/puller.go | 10 +- pkg/kubelet/images/types.go | 4 +- pkg/kubelet/kubelet.go | 19 +- pkg/kubelet/kubelet_dockershim.go | 16 + pkg/kubelet/kubelet_network.go | 4 +- pkg/kubelet/kubelet_pods.go | 16 +- pkg/kubelet/kubelet_pods_linux_test.go | 16 +- pkg/kubelet/kuberuntime/convert.go | 8 +- pkg/kubelet/kuberuntime/convert_test.go | 26 +- .../kuberuntime/fake_kuberuntime_manager.go | 2 +- pkg/kubelet/kuberuntime/helpers.go | 100 +- pkg/kubelet/kuberuntime/helpers_test.go | 106 +- .../kuberuntime/instrumented_services.go | 51 +- .../kuberuntime/instrumented_services_test.go | 18 +- .../kuberuntime/kuberuntime_container.go | 54 +- .../kuberuntime_container_linux.go | 26 +- .../kuberuntime_container_linux_test.go | 62 +- .../kuberuntime/kuberuntime_container_test.go | 26 +- .../kuberuntime_container_unsupported.go | 6 +- .../kuberuntime_container_windows.go | 14 +- pkg/kubelet/kuberuntime/kuberuntime_gc.go | 11 +- .../kuberuntime/kuberuntime_gc_test.go | 168 +- pkg/kubelet/kuberuntime/kuberuntime_image.go | 8 +- .../kuberuntime/kuberuntime_image_test.go | 12 
+- .../kuberuntime/kuberuntime_manager.go | 19 +- .../kuberuntime/kuberuntime_manager_test.go | 96 +- .../kuberuntime/kuberuntime_sandbox.go | 54 +- .../kuberuntime/kuberuntime_sandbox_linux.go | 10 +- .../kuberuntime_sandbox_linux_test.go | 18 +- .../kuberuntime/kuberuntime_sandbox_test.go | 22 +- .../kuberuntime_sandbox_unsupported.go | 4 +- .../kuberuntime_sandbox_windows.go | 4 +- pkg/kubelet/kuberuntime/logs/logs.go | 21 +- pkg/kubelet/kuberuntime/logs/logs_test.go | 40 +- pkg/kubelet/kuberuntime/security_context.go | 30 +- pkg/kubelet/logs/container_log_manager.go | 7 +- .../logs/container_log_manager_test.go | 32 +- pkg/kubelet/network/dns/dns.go | 14 +- pkg/kubelet/network/dns/dns_test.go | 16 +- pkg/kubelet/pleg/generic.go | 4 +- pkg/kubelet/pod_workers.go | 4 +- pkg/kubelet/server/server_test.go | 8 +- pkg/kubelet/stats/cri_stats_provider.go | 63 +- pkg/kubelet/stats/cri_stats_provider_test.go | 142 +- pkg/kubelet/stats/provider.go | 2 +- pkg/kubemark/hollow_kubelet.go | 2 +- staging/src/k8s.io/cri-api/go.mod | 6 - staging/src/k8s.io/cri-api/go.sum | 17 - test/e2e_node/container_manager_test.go | 8 +- test/e2e_node/cpu_manager_test.go | 4 +- test/e2e_node/garbage_collector_test.go | 7 +- test/e2e_node/image_list.go | 7 +- test/e2e_node/topology_manager_test.go | 4 +- test/e2e_node/util.go | 6 +- vendor/modules.txt | 4 +- 101 files changed, 7982 insertions(+), 1103 deletions(-) create mode 100644 pkg/kubelet/apis/cri/conversion_v1.go create mode 100644 pkg/kubelet/apis/cri/conversion_v1alpha2.go rename {staging/src/k8s.io/cri-api/pkg/apis => pkg/kubelet/apis/cri}/services.go (67%) rename {staging/src/k8s.io/cri-api/pkg/apis => pkg/kubelet/apis/cri}/testing/fake_image_service.go (79%) rename {staging/src/k8s.io/cri-api/pkg/apis => pkg/kubelet/apis/cri}/testing/fake_runtime_service.go (82%) rename {staging/src/k8s.io/cri-api/pkg/apis => pkg/kubelet/apis/cri}/testing/utils.go (84%) create mode 100644 pkg/kubelet/apis/cri/types.go create mode 100644 
pkg/kubelet/dockershim/streaming/errors.go create mode 100644 pkg/kubelet/dockershim/streaming/request_cache.go create mode 100644 pkg/kubelet/dockershim/streaming/server.go diff --git a/cmd/kubemark/hollow-node.go b/cmd/kubemark/hollow-node.go index ee6690ff82ef..3cbd6c2fa64d 100644 --- a/cmd/kubemark/hollow-node.go +++ b/cmd/kubemark/hollow-node.go @@ -25,8 +25,8 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" - internalapi "k8s.io/cri-api/pkg/apis" "k8s.io/klog/v2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -228,14 +228,14 @@ func run(cmd *cobra.Command, config *hollowNodeConfig) error { return fmt.Errorf("Failed to start fake runtime, error: %w", err) } defer fakeRemoteRuntime.Stop() - runtimeService, err := remote.NewRemoteRuntimeService(endpoint, 15*time.Second) + runtimeService, err := remote.NewRemoteRuntimeService(endpoint, 15*time.Second, "") if err != nil { return fmt.Errorf("Failed to init runtime service, error: %w", err) } var imageService internalapi.ImageManagerService = fakeRemoteRuntime.ImageService if config.UseHostImageService { - imageService, err = remote.NewRemoteImageService(f.RemoteImageEndpoint, 15*time.Second) + imageService, err = remote.NewRemoteImageService(f.RemoteImageEndpoint, 15*time.Second, runtimeService.APIVersion()) if err != nil { return fmt.Errorf("Failed to init image service, error: %w", err) } diff --git a/pkg/kubelet/apis/cri/conversion_v1.go b/pkg/kubelet/apis/cri/conversion_v1.go new file mode 100644 index 000000000000..a9e9fc70a4ab --- /dev/null +++ b/pkg/kubelet/apis/cri/conversion_v1.go @@ -0,0 +1,950 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is meant to provide runtime/v1 type conversions into the +// internal intermediate types of this package. +package cri + +import v1 "k8s.io/cri-api/pkg/apis/runtime/v1" + +func FromV1VersionResponse(from *v1.VersionResponse) *VersionResponse { + if from == nil { + return nil + } + + return &VersionResponse{ + Version: from.Version, + RuntimeName: from.RuntimeName, + RuntimeVersion: from.RuntimeVersion, + RuntimeApiVersion: from.RuntimeApiVersion, + } +} + +func FromV1PodSandboxStatus(from *v1.PodSandboxStatus) *PodSandboxStatus { + if from == nil { + return nil + } + + to := &PodSandboxStatus{ + Id: from.Id, + State: PodSandboxState(from.State), + CreatedAt: from.CreatedAt, + Labels: from.Labels, + Annotations: from.Annotations, + RuntimeHandler: from.RuntimeHandler, + } + + if from.Metadata != nil { + to.Metadata = FromV1PodSandboxMetadata(from.Metadata) + } + if from.Network != nil { + to.Network = &PodSandboxNetworkStatus{ + Ip: from.Network.Ip, + } + additionalIps := []*PodIP{} + for _, x := range from.Network.AdditionalIps { + additionalIps = append(additionalIps, &PodIP{Ip: x.Ip}) + } + to.Network.AdditionalIps = additionalIps + } + if from.Linux != nil { + to.Linux = &LinuxPodSandboxStatus{} + if from.Linux.Namespaces != nil { + to.Linux.Namespaces = &Namespace{} + if from.Linux.Namespaces.Options != nil { + to.Linux.Namespaces.Options = &NamespaceOption{ + Network: NamespaceMode(from.Linux.Namespaces.Options.Network), + Pid: NamespaceMode(from.Linux.Namespaces.Options.Pid), + Ipc: NamespaceMode(from.Linux.Namespaces.Options.Ipc), + TargetId: 
from.Linux.Namespaces.Options.TargetId, + } + } + } + } + + return to +} + +func FromV1PodSandboxMetadata(from *v1.PodSandboxMetadata) *PodSandboxMetadata { + if from == nil { + return nil + } + return &PodSandboxMetadata{ + Name: from.Name, + Uid: from.Uid, + Namespace: from.Namespace, + Attempt: from.Attempt, + } +} + +func FromV1ContainerMetadata(from *v1.ContainerMetadata) *ContainerMetadata { + if from == nil { + return nil + } + return &ContainerMetadata{ + Name: from.Name, + Attempt: from.Attempt, + } +} + +func FromV1PodSandboxes(from []*v1.PodSandbox) (items []*PodSandbox) { + for _, x := range from { + if x == nil { + continue + } + sandbox := &PodSandbox{ + Id: x.Id, + State: PodSandboxState(x.State), + CreatedAt: x.CreatedAt, + Labels: x.Labels, + Annotations: x.Annotations, + RuntimeHandler: x.RuntimeHandler, + } + if x.Metadata != nil { + sandbox.Metadata = FromV1PodSandboxMetadata(x.Metadata) + } + + items = append(items, sandbox) + } + return items +} + +func FromV1Containers(from []*v1.Container) (items []*Container) { + for _, x := range from { + if x == nil { + continue + } + container := &Container{ + Id: x.Id, + PodSandboxId: x.PodSandboxId, + State: ContainerState(x.State), + ImageRef: x.ImageRef, + CreatedAt: x.CreatedAt, + Labels: x.Labels, + Annotations: x.Annotations, + } + if x.Metadata != nil { + container.Metadata = FromV1ContainerMetadata(x.Metadata) + } + if x.Image != nil { + container.Image = &ImageSpec{ + Image: x.Image.Image, + Annotations: x.Image.Annotations, + } + } + + items = append(items, container) + } + return items +} + +func FromV1ContainerStatus(from *v1.ContainerStatus) *ContainerStatus { + if from == nil { + return nil + } + + to := &ContainerStatus{ + Id: from.Id, + State: ContainerState(from.State), + CreatedAt: from.CreatedAt, + StartedAt: from.StartedAt, + FinishedAt: from.FinishedAt, + ExitCode: from.ExitCode, + ImageRef: from.ImageRef, + Reason: from.Reason, + Message: from.Message, + Labels: from.Labels, + 
Annotations: from.Annotations, + LogPath: from.LogPath, + Metadata: &ContainerMetadata{}, + Image: &ImageSpec{}, + } + if from.Image != nil { + to.Image = &ImageSpec{ + Image: from.Image.Image, + Annotations: from.Image.Annotations, + } + } + if from.Metadata != nil { + to.Metadata = FromV1ContainerMetadata(from.Metadata) + } + + mounts := []*Mount{} + for _, x := range from.Mounts { + mounts = append(mounts, &Mount{ + ContainerPath: x.ContainerPath, + HostPath: x.HostPath, + Readonly: x.Readonly, + SelinuxRelabel: x.SelinuxRelabel, + Propagation: MountPropagation(x.Propagation), + }) + } + to.Mounts = mounts + + return to +} + +func FromV1ExecResponse(from *v1.ExecResponse) *ExecResponse { + if from == nil { + return nil + } + + return &ExecResponse{Url: from.Url} +} + +func FromV1AttachResponse(from *v1.AttachResponse) *AttachResponse { + if from == nil { + return nil + } + + return &AttachResponse{Url: from.Url} +} + +func FromV1PortForwardResponse(from *v1.PortForwardResponse) *PortForwardResponse { + if from == nil { + return nil + } + + return &PortForwardResponse{Url: from.Url} +} + +func FromV1RuntimeStatus(from *v1.RuntimeStatus) *RuntimeStatus { + if from == nil { + return nil + } + + conditions := []*RuntimeCondition{} + for _, x := range from.Conditions { + conditions = append(conditions, &RuntimeCondition{ + Type: x.Type, + Status: x.Status, + Reason: x.Reason, + Message: x.Message, + }) + } + + return &RuntimeStatus{ + Conditions: conditions, + } +} + +func FromV1ContainerStats(from *v1.ContainerStats) *ContainerStats { + if from == nil { + return nil + } + + to := &ContainerStats{} + if from.Attributes != nil { + to.Attributes = &ContainerAttributes{ + Id: from.Attributes.Id, + Labels: from.Attributes.Labels, + Annotations: from.Attributes.Annotations, + } + if from.Attributes.Metadata != nil { + to.Attributes.Metadata = FromV1ContainerMetadata(from.Attributes.Metadata) + } + } + if from.Cpu != nil { + to.Cpu = &CpuUsage{ + Timestamp: 
from.Cpu.Timestamp, + } + if from.Cpu.UsageCoreNanoSeconds != nil { + to.Cpu.UsageCoreNanoSeconds = &UInt64Value{ + Value: from.Cpu.UsageCoreNanoSeconds.Value, + } + } + } + if from.Memory != nil { + to.Memory = &MemoryUsage{ + Timestamp: from.Memory.Timestamp, + } + if from.Memory.WorkingSetBytes != nil { + to.Memory.WorkingSetBytes = &UInt64Value{ + Value: from.Memory.WorkingSetBytes.Value, + } + } + } + if from.WritableLayer != nil { + to.WritableLayer = FromV1FilesystemUsage(from.WritableLayer) + } + + return to +} + +func FromV1FilesystemUsage(from *v1.FilesystemUsage) *FilesystemUsage { + if from == nil { + return nil + } + + to := &FilesystemUsage{ + Timestamp: from.Timestamp, + } + if from.FsId != nil { + to.FsId = &FilesystemIdentifier{ + Mountpoint: from.FsId.Mountpoint, + } + } + if from.UsedBytes != nil { + to.UsedBytes = &UInt64Value{ + Value: from.UsedBytes.Value, + } + } + if from.InodesUsed != nil { + to.InodesUsed = &UInt64Value{ + Value: from.InodesUsed.Value, + } + } + + return to +} + +func FromV1FilesystemUsageList(from []*v1.FilesystemUsage) (items []*FilesystemUsage) { + for _, x := range from { + if x == nil { + continue + } + items = append(items, FromV1FilesystemUsage(x)) + } + + return items +} + +func FromV1ContainerStatsList(from []*v1.ContainerStats) (items []*ContainerStats) { + for _, x := range from { + if x == nil { + continue + } + items = append(items, FromV1ContainerStats(x)) + } + + return items +} + +func FromV1PodSandboxStats(from *v1.PodSandboxStats) *PodSandboxStats { + if from == nil { + return nil + } + + to := &PodSandboxStats{} + if from.Attributes != nil { + to.Attributes = &PodSandboxAttributes{ + Id: from.Attributes.Id, + Labels: from.Attributes.Labels, + Annotations: from.Attributes.Annotations, + } + if from.Attributes.Metadata != nil { + to.Attributes.Metadata = FromV1PodSandboxMetadata(from.Attributes.Metadata) + } + } + if from.Linux != nil { + to.Linux = &LinuxPodSandboxStats{} + + if from.Linux.Cpu != nil { + 
to.Linux.Cpu = &CpuUsage{ + Timestamp: from.Linux.Cpu.Timestamp, + } + if from.Linux.Cpu.UsageCoreNanoSeconds != nil { + to.Linux.Cpu.UsageCoreNanoSeconds = &UInt64Value{ + Value: from.Linux.Cpu.UsageCoreNanoSeconds.Value, + } + } + } + if from.Linux.Memory != nil { + to.Linux.Memory = &MemoryUsage{ + Timestamp: from.Linux.Memory.Timestamp, + } + if from.Linux.Memory.WorkingSetBytes != nil { + to.Linux.Memory.WorkingSetBytes = &UInt64Value{ + Value: from.Linux.Memory.WorkingSetBytes.Value, + } + } + } + if from.Linux.Network != nil { + to.Linux.Network = &NetworkUsage{ + Timestamp: from.Linux.Network.Timestamp, + } + if from.Linux.Network.DefaultInterface != nil { + to.Linux.Network.DefaultInterface = &NetworkInterfaceUsage{ + Name: from.Linux.Network.DefaultInterface.Name, + } + if from.Linux.Network.DefaultInterface.RxBytes != nil { + to.Linux.Network.DefaultInterface.RxBytes = &UInt64Value{ + Value: from.Linux.Network.DefaultInterface.RxBytes.Value, + } + } + if from.Linux.Network.DefaultInterface.RxErrors != nil { + to.Linux.Network.DefaultInterface.RxErrors = &UInt64Value{ + Value: from.Linux.Network.DefaultInterface.RxErrors.Value, + } + } + if from.Linux.Network.DefaultInterface.TxBytes != nil { + to.Linux.Network.DefaultInterface.TxBytes = &UInt64Value{ + Value: from.Linux.Network.DefaultInterface.TxBytes.Value, + } + } + if from.Linux.Network.DefaultInterface.TxErrors != nil { + to.Linux.Network.DefaultInterface.TxErrors = &UInt64Value{ + Value: from.Linux.Network.DefaultInterface.TxErrors.Value, + } + } + } + } + if from.Linux.Process != nil { + to.Linux.Process = &ProcessUsage{ + Timestamp: from.Linux.Process.Timestamp, + } + if from.Linux.Process.ProcessCount != nil { + to.Linux.Process.ProcessCount = &UInt64Value{ + Value: from.Linux.Process.ProcessCount.Value, + } + } + } + } + + return to +} + +func FromV1PodSandboxStatsList(from []*v1.PodSandboxStats) (items []*PodSandboxStats) { + for _, x := range from { + if x == nil { + continue + } + items = 
append(items, FromV1PodSandboxStats(x)) + } + + return items +} + +func FromV1Image(from *v1.Image) *Image { + if from == nil { + return nil + } + + to := &Image{ + Id: from.Id, + RepoTags: from.RepoTags, + RepoDigests: from.RepoDigests, + Size_: from.Size_, + Username: from.Username, + } + if from.Uid != nil { + to.Uid = &Int64Value{ + Value: from.Uid.Value, + } + } + if from.Spec != nil { + to.Spec = &ImageSpec{ + Image: from.Spec.Image, + Annotations: from.Spec.Annotations, + } + } + + return to +} + +func FromV1ImageList(from []*v1.Image) (items []*Image) { + for _, x := range from { + if x == nil { + continue + } + items = append(items, FromV1Image(x)) + } + + return items +} + +func V1PodSandboxConfig(from *PodSandboxConfig) *v1.PodSandboxConfig { + if from == nil { + return nil + } + to := &v1.PodSandboxConfig{ + Hostname: from.Hostname, + LogDirectory: from.LogDirectory, + Labels: from.Labels, + Annotations: from.Annotations, + Linux: &v1.LinuxPodSandboxConfig{ + SecurityContext: NewV1LinuxSandboxSecurityContext(), + }, + } + + if from.DnsConfig != nil { + to.DnsConfig = &v1.DNSConfig{ + Servers: from.DnsConfig.Servers, + Searches: from.DnsConfig.Searches, + Options: from.DnsConfig.Options, + } + } + if from.Metadata != nil { + to.Metadata = &v1.PodSandboxMetadata{ + Name: from.Metadata.Name, + Uid: from.Metadata.Uid, + Namespace: from.Metadata.Namespace, + Attempt: from.Metadata.Attempt, + } + } + portMappings := []*v1.PortMapping{} + for _, x := range from.PortMappings { + portMappings = append(portMappings, &v1.PortMapping{ + Protocol: v1.Protocol(x.Protocol), + ContainerPort: x.ContainerPort, + HostPort: x.HostPort, + HostIp: x.HostIp, + }) + } + to.PortMappings = portMappings + if from.Linux != nil { // nolint: dupl + to.Linux = &v1.LinuxPodSandboxConfig{ + CgroupParent: from.Linux.CgroupParent, + Sysctls: from.Linux.Sysctls, + SecurityContext: NewV1LinuxSandboxSecurityContext(), + } + if from.Linux.Overhead != nil { + to.Linux.Overhead = 
&v1.LinuxContainerResources{ + CpuPeriod: from.Linux.Overhead.CpuPeriod, + CpuQuota: from.Linux.Overhead.CpuQuota, + CpuShares: from.Linux.Overhead.CpuShares, + MemoryLimitInBytes: from.Linux.Overhead.MemoryLimitInBytes, + OomScoreAdj: from.Linux.Overhead.OomScoreAdj, + CpusetCpus: from.Linux.Overhead.CpusetCpus, + CpusetMems: from.Linux.Overhead.CpusetMems, + } + hugepageLimits := []*v1.HugepageLimit{} + for _, x := range from.Linux.Overhead.HugepageLimits { + hugepageLimits = append(hugepageLimits, &v1.HugepageLimit{ + PageSize: x.PageSize, + Limit: x.Limit, + }) + } + to.Linux.Overhead.HugepageLimits = hugepageLimits + } + if from.Linux.Resources != nil { + to.Linux.Resources = &v1.LinuxContainerResources{ + CpuPeriod: from.Linux.Resources.CpuPeriod, + CpuQuota: from.Linux.Resources.CpuQuota, + CpuShares: from.Linux.Resources.CpuShares, + MemoryLimitInBytes: from.Linux.Resources.MemoryLimitInBytes, + OomScoreAdj: from.Linux.Resources.OomScoreAdj, + CpusetCpus: from.Linux.Resources.CpusetCpus, + CpusetMems: from.Linux.Resources.CpusetMems, + } + hugepageLimits := []*v1.HugepageLimit{} + for _, x := range from.Linux.Resources.HugepageLimits { + hugepageLimits = append(hugepageLimits, &v1.HugepageLimit{ + PageSize: x.PageSize, + Limit: x.Limit, + }) + } + to.Linux.Resources.HugepageLimits = hugepageLimits + } + if from.Linux.SecurityContext != nil { + to.Linux.SecurityContext = &v1.LinuxSandboxSecurityContext{ + SeccompProfilePath: from.Linux.SecurityContext.SeccompProfilePath, + SupplementalGroups: from.Linux.SecurityContext.SupplementalGroups, + ReadonlyRootfs: from.Linux.SecurityContext.ReadonlyRootfs, + Privileged: from.Linux.SecurityContext.Privileged, + NamespaceOptions: &v1.NamespaceOption{}, + SelinuxOptions: &v1.SELinuxOption{}, + } + if from.Linux.SecurityContext.Seccomp != nil { + to.Linux.SecurityContext.Seccomp = &v1.SecurityProfile{ + ProfileType: v1.SecurityProfile_ProfileType(from.Linux.SecurityContext.Seccomp.ProfileType), + LocalhostRef: 
from.Linux.SecurityContext.Seccomp.LocalhostRef, + } + } + if from.Linux.SecurityContext.Apparmor != nil { + to.Linux.SecurityContext.Apparmor = &v1.SecurityProfile{ + ProfileType: v1.SecurityProfile_ProfileType(from.Linux.SecurityContext.Apparmor.ProfileType), + LocalhostRef: from.Linux.SecurityContext.Apparmor.LocalhostRef, + } + } + if from.Linux.SecurityContext.NamespaceOptions != nil { + to.Linux.SecurityContext.NamespaceOptions = &v1.NamespaceOption{ + Network: v1.NamespaceMode(from.Linux.SecurityContext.NamespaceOptions.Network), + Pid: v1.NamespaceMode(from.Linux.SecurityContext.NamespaceOptions.Pid), + Ipc: v1.NamespaceMode(from.Linux.SecurityContext.NamespaceOptions.Ipc), + TargetId: from.Linux.SecurityContext.NamespaceOptions.TargetId, + } + } + if from.Linux.SecurityContext.SelinuxOptions != nil { + to.Linux.SecurityContext.SelinuxOptions = &v1.SELinuxOption{ + User: from.Linux.SecurityContext.SelinuxOptions.User, + Role: from.Linux.SecurityContext.SelinuxOptions.Role, + Type: from.Linux.SecurityContext.SelinuxOptions.Type, + Level: from.Linux.SecurityContext.SelinuxOptions.Level, + } + } + if from.Linux.SecurityContext.RunAsUser != nil { + to.Linux.SecurityContext.RunAsUser = &v1.Int64Value{ + Value: from.Linux.SecurityContext.RunAsUser.Value, + } + } + if from.Linux.SecurityContext.RunAsGroup != nil { + to.Linux.SecurityContext.RunAsGroup = &v1.Int64Value{ + Value: from.Linux.SecurityContext.RunAsGroup.Value, + } + } + } + } + + return to +} + +func NewV1LinuxSandboxSecurityContext() *v1.LinuxSandboxSecurityContext { + return &v1.LinuxSandboxSecurityContext{ + NamespaceOptions: &v1.NamespaceOption{}, + SelinuxOptions: &v1.SELinuxOption{}, + RunAsUser: &v1.Int64Value{}, + RunAsGroup: &v1.Int64Value{}, + } +} + +func V1PodSandboxFilter(from *PodSandboxFilter) *v1.PodSandboxFilter { + if from == nil { + return nil + } + to := &v1.PodSandboxFilter{ + Id: from.Id, + LabelSelector: from.LabelSelector, + } + if from.State != nil { + to.State = 
&v1.PodSandboxStateValue{ + State: v1.PodSandboxState(from.State.State), + } + } + + return to +} + +func V1ContainerConfig(from *ContainerConfig) *v1.ContainerConfig { + if from == nil { + return nil + } + to := &v1.ContainerConfig{ + Command: from.Command, + Args: from.Args, + WorkingDir: from.WorkingDir, + Labels: from.Labels, + Annotations: from.Annotations, + LogPath: from.LogPath, + Stdin: from.Stdin, + StdinOnce: from.StdinOnce, + Tty: from.Tty, + Linux: NewV1LinuxContainerConfig(), + } + if from.Metadata != nil { + to.Metadata = &v1.ContainerMetadata{ + Name: from.Metadata.Name, + Attempt: from.Metadata.Attempt, + } + } + if from.Image != nil { + to.Image = &v1.ImageSpec{ + Image: from.Image.Image, + Annotations: from.Image.Annotations, + } + } + if from.Linux != nil { + to.Linux = NewV1LinuxContainerConfig() + if from.Linux.Resources != nil { + to.Linux.Resources = &v1.LinuxContainerResources{ + CpuPeriod: from.Linux.Resources.CpuPeriod, + CpuQuota: from.Linux.Resources.CpuQuota, + CpuShares: from.Linux.Resources.CpuShares, + MemoryLimitInBytes: from.Linux.Resources.MemoryLimitInBytes, + OomScoreAdj: from.Linux.Resources.OomScoreAdj, + CpusetCpus: from.Linux.Resources.CpusetCpus, + CpusetMems: from.Linux.Resources.CpusetMems, + } + hugepageLimits := []*v1.HugepageLimit{} + for _, x := range from.Linux.Resources.HugepageLimits { + hugepageLimits = append(hugepageLimits, &v1.HugepageLimit{ + PageSize: x.PageSize, + Limit: x.Limit, + }) + } + to.Linux.Resources.HugepageLimits = hugepageLimits + } + if from.Linux.SecurityContext != nil { + to.Linux.SecurityContext = &v1.LinuxContainerSecurityContext{ + RunAsUsername: from.Linux.SecurityContext.RunAsUsername, + ApparmorProfile: from.Linux.SecurityContext.ApparmorProfile, + SeccompProfilePath: from.Linux.SecurityContext.SeccompProfilePath, + MaskedPaths: from.Linux.SecurityContext.MaskedPaths, + ReadonlyPaths: from.Linux.SecurityContext.ReadonlyPaths, + SupplementalGroups: 
from.Linux.SecurityContext.SupplementalGroups, + Privileged: from.Linux.SecurityContext.Privileged, + ReadonlyRootfs: from.Linux.SecurityContext.ReadonlyRootfs, + NoNewPrivs: from.Linux.SecurityContext.NoNewPrivs, + Capabilities: &v1.Capability{}, + NamespaceOptions: &v1.NamespaceOption{}, + SelinuxOptions: &v1.SELinuxOption{}, + } + if from.Linux.SecurityContext.Capabilities != nil { + to.Linux.SecurityContext.Capabilities = &v1.Capability{ + AddCapabilities: from.Linux.SecurityContext.Capabilities.AddCapabilities, + DropCapabilities: from.Linux.SecurityContext.Capabilities.DropCapabilities, + } + } + if from.Linux.SecurityContext.Seccomp != nil { + to.Linux.SecurityContext.Seccomp = &v1.SecurityProfile{ + ProfileType: v1.SecurityProfile_ProfileType(from.Linux.SecurityContext.Seccomp.ProfileType), + LocalhostRef: from.Linux.SecurityContext.Seccomp.LocalhostRef, + } + } + if from.Linux.SecurityContext.Apparmor != nil { + to.Linux.SecurityContext.Apparmor = &v1.SecurityProfile{ + ProfileType: v1.SecurityProfile_ProfileType(from.Linux.SecurityContext.Apparmor.ProfileType), + LocalhostRef: from.Linux.SecurityContext.Apparmor.LocalhostRef, + } + } + if from.Linux.SecurityContext.NamespaceOptions != nil { + to.Linux.SecurityContext.NamespaceOptions = &v1.NamespaceOption{ + Network: v1.NamespaceMode(from.Linux.SecurityContext.NamespaceOptions.Network), + Pid: v1.NamespaceMode(from.Linux.SecurityContext.NamespaceOptions.Pid), + Ipc: v1.NamespaceMode(from.Linux.SecurityContext.NamespaceOptions.Ipc), + TargetId: from.Linux.SecurityContext.NamespaceOptions.TargetId, + } + } + if from.Linux.SecurityContext.SelinuxOptions != nil { + to.Linux.SecurityContext.SelinuxOptions = &v1.SELinuxOption{ + User: from.Linux.SecurityContext.SelinuxOptions.User, + Role: from.Linux.SecurityContext.SelinuxOptions.Role, + Type: from.Linux.SecurityContext.SelinuxOptions.Type, + Level: from.Linux.SecurityContext.SelinuxOptions.Level, + } + } + if from.Linux.SecurityContext.RunAsUser != nil { + 
to.Linux.SecurityContext.RunAsUser = &v1.Int64Value{ + Value: from.Linux.SecurityContext.RunAsUser.Value, + } + } + if from.Linux.SecurityContext.RunAsGroup != nil { + to.Linux.SecurityContext.RunAsGroup = &v1.Int64Value{ + Value: from.Linux.SecurityContext.RunAsGroup.Value, + } + } + } + } + envs := []*v1.KeyValue{} + for _, x := range from.Envs { + envs = append(envs, &v1.KeyValue{ + Key: x.Key, + Value: x.Value, + }) + } + to.Envs = envs + + mounts := []*v1.Mount{} + for _, x := range from.Mounts { + mounts = append(mounts, &v1.Mount{ + ContainerPath: x.ContainerPath, + HostPath: x.HostPath, + Readonly: x.Readonly, + SelinuxRelabel: x.SelinuxRelabel, + Propagation: v1.MountPropagation(x.Propagation), + }) + } + to.Mounts = mounts + + devices := []*v1.Device{} + for _, x := range from.Devices { + devices = append(devices, &v1.Device{ + ContainerPath: x.ContainerPath, + HostPath: x.HostPath, + Permissions: x.Permissions, + }) + } + to.Devices = devices + + return to +} + +func NewV1LinuxContainerConfig() *v1.LinuxContainerConfig { + return &v1.LinuxContainerConfig{ + Resources: &v1.LinuxContainerResources{}, + SecurityContext: NewV1LinuxContainerSecurityContext(), + } +} + +func NewV1LinuxContainerSecurityContext() *v1.LinuxContainerSecurityContext { + return &v1.LinuxContainerSecurityContext{ + Capabilities: &v1.Capability{}, + NamespaceOptions: &v1.NamespaceOption{}, + SelinuxOptions: &v1.SELinuxOption{}, + RunAsUser: &v1.Int64Value{}, + RunAsGroup: &v1.Int64Value{}, + } +} + +func V1ContainerFilter(from *ContainerFilter) *v1.ContainerFilter { + if from == nil { + return nil + } + to := &v1.ContainerFilter{ + Id: from.Id, + LabelSelector: from.LabelSelector, + PodSandboxId: from.PodSandboxId, + } + if from.State != nil { + to.State = &v1.ContainerStateValue{ + State: v1.ContainerState(from.State.State), + } + } + + return to +} + +func V1ContainerResources(from *LinuxContainerResources) *v1.LinuxContainerResources { + if from == nil { + return nil + } + to := 
&v1.LinuxContainerResources{ + CpuPeriod: from.CpuPeriod, + CpuQuota: from.CpuQuota, + CpuShares: from.CpuShares, + MemoryLimitInBytes: from.MemoryLimitInBytes, + OomScoreAdj: from.OomScoreAdj, + CpusetCpus: from.CpusetCpus, + CpusetMems: from.CpusetMems, + } + hugePageLimits := []*v1.HugepageLimit{} + for _, x := range from.HugepageLimits { + hugePageLimits = append(hugePageLimits, &v1.HugepageLimit{ + PageSize: x.PageSize, + Limit: x.Limit, + }) + } + to.HugepageLimits = hugePageLimits + + return to +} + +func V1ExecRequest(from *ExecRequest) *v1.ExecRequest { + if from == nil { + return nil + } + + return &v1.ExecRequest{ + ContainerId: from.ContainerId, + Cmd: from.Cmd, + Tty: from.Tty, + Stdin: from.Stdin, + Stdout: from.Stdout, + Stderr: from.Stderr, + } +} + +func V1AttachRequest(from *AttachRequest) *v1.AttachRequest { + if from == nil { + return nil + } + + return &v1.AttachRequest{ + ContainerId: from.ContainerId, + Stdin: from.Stdin, + Tty: from.Tty, + Stdout: from.Stdout, + Stderr: from.Stderr, + } +} + +func V1PortForwardRequest(from *PortForwardRequest) *v1.PortForwardRequest { + if from == nil { + return nil + } + + return &v1.PortForwardRequest{ + PodSandboxId: from.PodSandboxId, + Port: from.Port, + } +} + +func V1RuntimeConfig(from *RuntimeConfig) *v1.RuntimeConfig { + if from == nil { + return nil + } + + to := &v1.RuntimeConfig{} + + if from.NetworkConfig != nil { + to.NetworkConfig = &v1.NetworkConfig{PodCidr: from.NetworkConfig.PodCidr} + } + + return to +} + +func V1ContainerStatsFilter(from *ContainerStatsFilter) *v1.ContainerStatsFilter { + if from == nil { + return nil + } + + return &v1.ContainerStatsFilter{ + Id: from.Id, + LabelSelector: from.LabelSelector, + PodSandboxId: from.PodSandboxId, + } +} + +func V1PodSandboxStatsFilter(from *PodSandboxStatsFilter) *v1.PodSandboxStatsFilter { + if from == nil { + return nil + } + + return &v1.PodSandboxStatsFilter{ + Id: from.Id, + LabelSelector: from.LabelSelector, + } +} + +func 
V1ImageFilter(from *ImageFilter) *v1.ImageFilter { + if from == nil { + return nil + } + + to := &v1.ImageFilter{} + + if from.Image != nil { + to.Image = V1ImageSpec(from.Image) + } + + return to +} + +func V1ImageSpec(from *ImageSpec) *v1.ImageSpec { + if from == nil { + return nil + } + + return &v1.ImageSpec{ + Image: from.Image, + Annotations: from.Annotations, + } +} + +func V1AuthConfig(from *AuthConfig) *v1.AuthConfig { + if from == nil { + return nil + } + + return &v1.AuthConfig{ + Username: from.Username, + Password: from.Password, + Auth: from.Auth, + ServerAddress: from.ServerAddress, + IdentityToken: from.IdentityToken, + RegistryToken: from.RegistryToken, + } +} diff --git a/pkg/kubelet/apis/cri/conversion_v1alpha2.go b/pkg/kubelet/apis/cri/conversion_v1alpha2.go new file mode 100644 index 000000000000..6d81e55a35ea --- /dev/null +++ b/pkg/kubelet/apis/cri/conversion_v1alpha2.go @@ -0,0 +1,950 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is meant to provide runtime/v1alpha2 type conversions into the +// internal intermediate types of this package. 
+package cri + +import "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + +func FromV1alpha2VersionResponse(from *v1alpha2.VersionResponse) *VersionResponse { + if from == nil { + return nil + } + + return &VersionResponse{ + Version: from.Version, + RuntimeName: from.RuntimeName, + RuntimeVersion: from.RuntimeVersion, + RuntimeApiVersion: from.RuntimeApiVersion, + } +} + +func FromV1alpha2PodSandboxStatus(from *v1alpha2.PodSandboxStatus) *PodSandboxStatus { + if from == nil { + return nil + } + + to := &PodSandboxStatus{ + Id: from.Id, + State: PodSandboxState(from.State), + CreatedAt: from.CreatedAt, + Labels: from.Labels, + Annotations: from.Annotations, + RuntimeHandler: from.RuntimeHandler, + } + + if from.Metadata != nil { + to.Metadata = FromV1alpha2PodSandboxMetadata(from.Metadata) + } + if from.Network != nil { + to.Network = &PodSandboxNetworkStatus{ + Ip: from.Network.Ip, + } + additionalIps := []*PodIP{} + for _, x := range from.Network.AdditionalIps { + additionalIps = append(additionalIps, &PodIP{Ip: x.Ip}) + } + to.Network.AdditionalIps = additionalIps + } + if from.Linux != nil { + to.Linux = &LinuxPodSandboxStatus{} + if from.Linux.Namespaces != nil { + to.Linux.Namespaces = &Namespace{} + if from.Linux.Namespaces.Options != nil { + to.Linux.Namespaces.Options = &NamespaceOption{ + Network: NamespaceMode(from.Linux.Namespaces.Options.Network), + Pid: NamespaceMode(from.Linux.Namespaces.Options.Pid), + Ipc: NamespaceMode(from.Linux.Namespaces.Options.Ipc), + TargetId: from.Linux.Namespaces.Options.TargetId, + } + } + } + } + + return to +} + +func FromV1alpha2PodSandboxMetadata(from *v1alpha2.PodSandboxMetadata) *PodSandboxMetadata { + if from == nil { + return nil + } + return &PodSandboxMetadata{ + Name: from.Name, + Uid: from.Uid, + Namespace: from.Namespace, + Attempt: from.Attempt, + } +} + +func FromV1alpha2ContainerMetadata(from *v1alpha2.ContainerMetadata) *ContainerMetadata { + if from == nil { + return nil + } + return &ContainerMetadata{ + 
Name: from.Name, + Attempt: from.Attempt, + } +} + +func FromV1alpha2PodSandboxes(from []*v1alpha2.PodSandbox) (items []*PodSandbox) { + for _, x := range from { + if x == nil { + continue + } + sandbox := &PodSandbox{ + Id: x.Id, + State: PodSandboxState(x.State), + CreatedAt: x.CreatedAt, + Labels: x.Labels, + Annotations: x.Annotations, + RuntimeHandler: x.RuntimeHandler, + } + if x.Metadata != nil { + sandbox.Metadata = FromV1alpha2PodSandboxMetadata(x.Metadata) + } + + items = append(items, sandbox) + } + return items +} + +func FromV1alpha2Containers(from []*v1alpha2.Container) (items []*Container) { + for _, x := range from { + if x == nil { + continue + } + container := &Container{ + Id: x.Id, + PodSandboxId: x.PodSandboxId, + State: ContainerState(x.State), + ImageRef: x.ImageRef, + CreatedAt: x.CreatedAt, + Labels: x.Labels, + Annotations: x.Annotations, + } + if x.Metadata != nil { + container.Metadata = FromV1alpha2ContainerMetadata(x.Metadata) + } + if x.Image != nil { + container.Image = &ImageSpec{ + Image: x.Image.Image, + Annotations: x.Image.Annotations, + } + } + + items = append(items, container) + } + return items +} + +func FromV1alpha2ContainerStatus(from *v1alpha2.ContainerStatus) *ContainerStatus { + if from == nil { + return nil + } + + to := &ContainerStatus{ + Id: from.Id, + State: ContainerState(from.State), + CreatedAt: from.CreatedAt, + StartedAt: from.StartedAt, + FinishedAt: from.FinishedAt, + ExitCode: from.ExitCode, + ImageRef: from.ImageRef, + Reason: from.Reason, + Message: from.Message, + Labels: from.Labels, + Annotations: from.Annotations, + LogPath: from.LogPath, + Metadata: &ContainerMetadata{}, + Image: &ImageSpec{}, + } + if from.Image != nil { + to.Image = &ImageSpec{ + Image: from.Image.Image, + Annotations: from.Image.Annotations, + } + } + if from.Metadata != nil { + to.Metadata = FromV1alpha2ContainerMetadata(from.Metadata) + } + + mounts := []*Mount{} + for _, x := range from.Mounts { + mounts = append(mounts, 
&Mount{ + ContainerPath: x.ContainerPath, + HostPath: x.HostPath, + Readonly: x.Readonly, + SelinuxRelabel: x.SelinuxRelabel, + Propagation: MountPropagation(x.Propagation), + }) + } + to.Mounts = mounts + + return to +} + +func FromV1alpha2ExecResponse(from *v1alpha2.ExecResponse) *ExecResponse { + if from == nil { + return nil + } + + return &ExecResponse{Url: from.Url} +} + +func FromV1alpha2AttachResponse(from *v1alpha2.AttachResponse) *AttachResponse { + if from == nil { + return nil + } + + return &AttachResponse{Url: from.Url} +} + +func FromV1alpha2PortForwardResponse(from *v1alpha2.PortForwardResponse) *PortForwardResponse { + if from == nil { + return nil + } + + return &PortForwardResponse{Url: from.Url} +} + +func FromV1alpha2RuntimeStatus(from *v1alpha2.RuntimeStatus) *RuntimeStatus { + if from == nil { + return nil + } + + conditions := []*RuntimeCondition{} + for _, x := range from.Conditions { + conditions = append(conditions, &RuntimeCondition{ + Type: x.Type, + Status: x.Status, + Reason: x.Reason, + Message: x.Message, + }) + } + + return &RuntimeStatus{ + Conditions: conditions, + } +} + +func FromV1alpha2ContainerStats(from *v1alpha2.ContainerStats) *ContainerStats { + if from == nil { + return nil + } + + to := &ContainerStats{} + if from.Attributes != nil { + to.Attributes = &ContainerAttributes{ + Id: from.Attributes.Id, + Labels: from.Attributes.Labels, + Annotations: from.Attributes.Annotations, + } + if from.Attributes.Metadata != nil { + to.Attributes.Metadata = FromV1alpha2ContainerMetadata(from.Attributes.Metadata) + } + } + if from.Cpu != nil { + to.Cpu = &CpuUsage{ + Timestamp: from.Cpu.Timestamp, + } + if from.Cpu.UsageCoreNanoSeconds != nil { + to.Cpu.UsageCoreNanoSeconds = &UInt64Value{ + Value: from.Cpu.UsageCoreNanoSeconds.Value, + } + } + } + if from.Memory != nil { + to.Memory = &MemoryUsage{ + Timestamp: from.Memory.Timestamp, + } + if from.Memory.WorkingSetBytes != nil { + to.Memory.WorkingSetBytes = &UInt64Value{ + Value: 
from.Memory.WorkingSetBytes.Value, + } + } + } + if from.WritableLayer != nil { + to.WritableLayer = FromV1alpha2FilesystemUsage(from.WritableLayer) + } + + return to +} + +func FromV1alpha2FilesystemUsage(from *v1alpha2.FilesystemUsage) *FilesystemUsage { + if from == nil { + return nil + } + + to := &FilesystemUsage{ + Timestamp: from.Timestamp, + } + if from.FsId != nil { + to.FsId = &FilesystemIdentifier{ + Mountpoint: from.FsId.Mountpoint, + } + } + if from.UsedBytes != nil { + to.UsedBytes = &UInt64Value{ + Value: from.UsedBytes.Value, + } + } + if from.InodesUsed != nil { + to.InodesUsed = &UInt64Value{ + Value: from.InodesUsed.Value, + } + } + + return to +} + +func FromV1alpha2FilesystemUsageList(from []*v1alpha2.FilesystemUsage) (items []*FilesystemUsage) { + for _, x := range from { + if x == nil { + continue + } + items = append(items, FromV1alpha2FilesystemUsage(x)) + } + + return items +} + +func FromV1alpha2ContainerStatsList(from []*v1alpha2.ContainerStats) (items []*ContainerStats) { + for _, x := range from { + if x == nil { + continue + } + items = append(items, FromV1alpha2ContainerStats(x)) + } + + return items +} + +func FromV1alpha2PodSandboxStats(from *v1alpha2.PodSandboxStats) *PodSandboxStats { + if from == nil { + return nil + } + + to := &PodSandboxStats{} + if from.Attributes != nil { + to.Attributes = &PodSandboxAttributes{ + Id: from.Attributes.Id, + Labels: from.Attributes.Labels, + Annotations: from.Attributes.Annotations, + } + if from.Attributes.Metadata != nil { + to.Attributes.Metadata = FromV1alpha2PodSandboxMetadata(from.Attributes.Metadata) + } + } + if from.Linux != nil { + to.Linux = &LinuxPodSandboxStats{} + + if from.Linux.Cpu != nil { + to.Linux.Cpu = &CpuUsage{ + Timestamp: from.Linux.Cpu.Timestamp, + } + if from.Linux.Cpu.UsageCoreNanoSeconds != nil { + to.Linux.Cpu.UsageCoreNanoSeconds = &UInt64Value{ + Value: from.Linux.Cpu.UsageCoreNanoSeconds.Value, + } + } + } + if from.Linux.Memory != nil { + to.Linux.Memory = 
&MemoryUsage{ + Timestamp: from.Linux.Memory.Timestamp, + } + if from.Linux.Memory.WorkingSetBytes != nil { + to.Linux.Memory.WorkingSetBytes = &UInt64Value{ + Value: from.Linux.Memory.WorkingSetBytes.Value, + } + } + } + if from.Linux.Network != nil { + to.Linux.Network = &NetworkUsage{ + Timestamp: from.Linux.Network.Timestamp, + } + if from.Linux.Network.DefaultInterface != nil { + to.Linux.Network.DefaultInterface = &NetworkInterfaceUsage{ + Name: from.Linux.Network.DefaultInterface.Name, + } + if from.Linux.Network.DefaultInterface.RxBytes != nil { + to.Linux.Network.DefaultInterface.RxBytes = &UInt64Value{ + Value: from.Linux.Network.DefaultInterface.RxBytes.Value, + } + } + if from.Linux.Network.DefaultInterface.RxErrors != nil { + to.Linux.Network.DefaultInterface.RxErrors = &UInt64Value{ + Value: from.Linux.Network.DefaultInterface.RxErrors.Value, + } + } + if from.Linux.Network.DefaultInterface.TxBytes != nil { + to.Linux.Network.DefaultInterface.TxBytes = &UInt64Value{ + Value: from.Linux.Network.DefaultInterface.TxBytes.Value, + } + } + if from.Linux.Network.DefaultInterface.TxErrors != nil { + to.Linux.Network.DefaultInterface.TxErrors = &UInt64Value{ + Value: from.Linux.Network.DefaultInterface.TxErrors.Value, + } + } + } + } + if from.Linux.Process != nil { + to.Linux.Process = &ProcessUsage{ + Timestamp: from.Linux.Process.Timestamp, + } + if from.Linux.Process.ProcessCount != nil { + to.Linux.Process.ProcessCount = &UInt64Value{ + Value: from.Linux.Process.ProcessCount.Value, + } + } + } + } + + return to +} + +func FromV1alpha2PodSandboxStatsList(from []*v1alpha2.PodSandboxStats) (items []*PodSandboxStats) { + for _, x := range from { + if x == nil { + continue + } + items = append(items, FromV1alpha2PodSandboxStats(x)) + } + + return items +} + +func FromV1alpha2Image(from *v1alpha2.Image) *Image { + if from == nil { + return nil + } + + to := &Image{ + Id: from.Id, + RepoTags: from.RepoTags, + RepoDigests: from.RepoDigests, + Size_: from.Size_,
+ Username: from.Username, + } + if from.Uid != nil { + to.Uid = &Int64Value{ + Value: from.Uid.Value, + } + } + if from.Spec != nil { + to.Spec = &ImageSpec{ + Image: from.Spec.Image, + Annotations: from.Spec.Annotations, + } + } + + return to +} + +func FromV1alpha2ImageList(from []*v1alpha2.Image) (items []*Image) { + for _, x := range from { + if x == nil { + continue + } + items = append(items, FromV1alpha2Image(x)) + } + + return items +} + +func V1alpha2PodSandboxConfig(from *PodSandboxConfig) *v1alpha2.PodSandboxConfig { + if from == nil { + return nil + } + to := &v1alpha2.PodSandboxConfig{ + Hostname: from.Hostname, + LogDirectory: from.LogDirectory, + Labels: from.Labels, + Annotations: from.Annotations, + Linux: &v1alpha2.LinuxPodSandboxConfig{ + SecurityContext: NewV1alpha2LinuxSandboxSecurityContext(), + }, + } + + if from.DnsConfig != nil { + to.DnsConfig = &v1alpha2.DNSConfig{ + Servers: from.DnsConfig.Servers, + Searches: from.DnsConfig.Searches, + Options: from.DnsConfig.Options, + } + } + if from.Metadata != nil { + to.Metadata = &v1alpha2.PodSandboxMetadata{ + Name: from.Metadata.Name, + Uid: from.Metadata.Uid, + Namespace: from.Metadata.Namespace, + Attempt: from.Metadata.Attempt, + } + } + portMappings := []*v1alpha2.PortMapping{} + for _, x := range from.PortMappings { + portMappings = append(portMappings, &v1alpha2.PortMapping{ + Protocol: v1alpha2.Protocol(x.Protocol), + ContainerPort: x.ContainerPort, + HostPort: x.HostPort, + HostIp: x.HostIp, + }) + } + to.PortMappings = portMappings + if from.Linux != nil { // nolint: dupl + to.Linux = &v1alpha2.LinuxPodSandboxConfig{ + CgroupParent: from.Linux.CgroupParent, + Sysctls: from.Linux.Sysctls, + SecurityContext: NewV1alpha2LinuxSandboxSecurityContext(), + } + if from.Linux.Overhead != nil { + to.Linux.Overhead = &v1alpha2.LinuxContainerResources{ + CpuPeriod: from.Linux.Overhead.CpuPeriod, + CpuQuota: from.Linux.Overhead.CpuQuota, + CpuShares: from.Linux.Overhead.CpuShares, + 
MemoryLimitInBytes: from.Linux.Overhead.MemoryLimitInBytes, + OomScoreAdj: from.Linux.Overhead.OomScoreAdj, + CpusetCpus: from.Linux.Overhead.CpusetCpus, + CpusetMems: from.Linux.Overhead.CpusetMems, + } + hugepageLimits := []*v1alpha2.HugepageLimit{} + for _, x := range from.Linux.Overhead.HugepageLimits { + hugepageLimits = append(hugepageLimits, &v1alpha2.HugepageLimit{ + PageSize: x.PageSize, + Limit: x.Limit, + }) + } + to.Linux.Overhead.HugepageLimits = hugepageLimits + } + if from.Linux.Resources != nil { + to.Linux.Resources = &v1alpha2.LinuxContainerResources{ + CpuPeriod: from.Linux.Resources.CpuPeriod, + CpuQuota: from.Linux.Resources.CpuQuota, + CpuShares: from.Linux.Resources.CpuShares, + MemoryLimitInBytes: from.Linux.Resources.MemoryLimitInBytes, + OomScoreAdj: from.Linux.Resources.OomScoreAdj, + CpusetCpus: from.Linux.Resources.CpusetCpus, + CpusetMems: from.Linux.Resources.CpusetMems, + } + hugepageLimits := []*v1alpha2.HugepageLimit{} + for _, x := range from.Linux.Resources.HugepageLimits { + hugepageLimits = append(hugepageLimits, &v1alpha2.HugepageLimit{ + PageSize: x.PageSize, + Limit: x.Limit, + }) + } + to.Linux.Resources.HugepageLimits = hugepageLimits + } + if from.Linux.SecurityContext != nil { + to.Linux.SecurityContext = &v1alpha2.LinuxSandboxSecurityContext{ + SeccompProfilePath: from.Linux.SecurityContext.SeccompProfilePath, + SupplementalGroups: from.Linux.SecurityContext.SupplementalGroups, + ReadonlyRootfs: from.Linux.SecurityContext.ReadonlyRootfs, + Privileged: from.Linux.SecurityContext.Privileged, + NamespaceOptions: &v1alpha2.NamespaceOption{}, + SelinuxOptions: &v1alpha2.SELinuxOption{}, + } + if from.Linux.SecurityContext.Seccomp != nil { + to.Linux.SecurityContext.Seccomp = &v1alpha2.SecurityProfile{ + ProfileType: v1alpha2.SecurityProfile_ProfileType(from.Linux.SecurityContext.Seccomp.ProfileType), + LocalhostRef: from.Linux.SecurityContext.Seccomp.LocalhostRef, + } + } + if from.Linux.SecurityContext.Apparmor != nil { + 
to.Linux.SecurityContext.Apparmor = &v1alpha2.SecurityProfile{ + ProfileType: v1alpha2.SecurityProfile_ProfileType(from.Linux.SecurityContext.Apparmor.ProfileType), + LocalhostRef: from.Linux.SecurityContext.Apparmor.LocalhostRef, + } + } + if from.Linux.SecurityContext.NamespaceOptions != nil { + to.Linux.SecurityContext.NamespaceOptions = &v1alpha2.NamespaceOption{ + Network: v1alpha2.NamespaceMode(from.Linux.SecurityContext.NamespaceOptions.Network), + Pid: v1alpha2.NamespaceMode(from.Linux.SecurityContext.NamespaceOptions.Pid), + Ipc: v1alpha2.NamespaceMode(from.Linux.SecurityContext.NamespaceOptions.Ipc), + TargetId: from.Linux.SecurityContext.NamespaceOptions.TargetId, + } + } + if from.Linux.SecurityContext.SelinuxOptions != nil { + to.Linux.SecurityContext.SelinuxOptions = &v1alpha2.SELinuxOption{ + User: from.Linux.SecurityContext.SelinuxOptions.User, + Role: from.Linux.SecurityContext.SelinuxOptions.Role, + Type: from.Linux.SecurityContext.SelinuxOptions.Type, + Level: from.Linux.SecurityContext.SelinuxOptions.Level, + } + } + if from.Linux.SecurityContext.RunAsUser != nil { + to.Linux.SecurityContext.RunAsUser = &v1alpha2.Int64Value{ + Value: from.Linux.SecurityContext.RunAsUser.Value, + } + } + if from.Linux.SecurityContext.RunAsGroup != nil { + to.Linux.SecurityContext.RunAsGroup = &v1alpha2.Int64Value{ + Value: from.Linux.SecurityContext.RunAsGroup.Value, + } + } + } + } + + return to +} + +func NewV1alpha2LinuxSandboxSecurityContext() *v1alpha2.LinuxSandboxSecurityContext { + return &v1alpha2.LinuxSandboxSecurityContext{ + NamespaceOptions: &v1alpha2.NamespaceOption{}, + SelinuxOptions: &v1alpha2.SELinuxOption{}, + RunAsUser: &v1alpha2.Int64Value{}, + RunAsGroup: &v1alpha2.Int64Value{}, + } +} + +func V1alpha2PodSandboxFilter(from *PodSandboxFilter) *v1alpha2.PodSandboxFilter { + if from == nil { + return nil + } + to := &v1alpha2.PodSandboxFilter{ + Id: from.Id, + LabelSelector: from.LabelSelector, + } + if from.State != nil { + to.State = 
&v1alpha2.PodSandboxStateValue{ + State: v1alpha2.PodSandboxState(from.State.State), + } + } + + return to +} + +func V1alpha2ContainerConfig(from *ContainerConfig) *v1alpha2.ContainerConfig { + if from == nil { + return nil + } + to := &v1alpha2.ContainerConfig{ + Command: from.Command, + Args: from.Args, + WorkingDir: from.WorkingDir, + Labels: from.Labels, + Annotations: from.Annotations, + LogPath: from.LogPath, + Stdin: from.Stdin, + StdinOnce: from.StdinOnce, + Tty: from.Tty, + Linux: NewV1alpha2LinuxContainerConfig(), + } + if from.Metadata != nil { + to.Metadata = &v1alpha2.ContainerMetadata{ + Name: from.Metadata.Name, + Attempt: from.Metadata.Attempt, + } + } + if from.Image != nil { + to.Image = &v1alpha2.ImageSpec{ + Image: from.Image.Image, + Annotations: from.Image.Annotations, + } + } + if from.Linux != nil { + to.Linux = NewV1alpha2LinuxContainerConfig() + if from.Linux.Resources != nil { + to.Linux.Resources = &v1alpha2.LinuxContainerResources{ + CpuPeriod: from.Linux.Resources.CpuPeriod, + CpuQuota: from.Linux.Resources.CpuQuota, + CpuShares: from.Linux.Resources.CpuShares, + MemoryLimitInBytes: from.Linux.Resources.MemoryLimitInBytes, + OomScoreAdj: from.Linux.Resources.OomScoreAdj, + CpusetCpus: from.Linux.Resources.CpusetCpus, + CpusetMems: from.Linux.Resources.CpusetMems, + } + hugepageLimits := []*v1alpha2.HugepageLimit{} + for _, x := range from.Linux.Resources.HugepageLimits { + hugepageLimits = append(hugepageLimits, &v1alpha2.HugepageLimit{ + PageSize: x.PageSize, + Limit: x.Limit, + }) + } + to.Linux.Resources.HugepageLimits = hugepageLimits + } + if from.Linux.SecurityContext != nil { + to.Linux.SecurityContext = &v1alpha2.LinuxContainerSecurityContext{ + RunAsUsername: from.Linux.SecurityContext.RunAsUsername, + ApparmorProfile: from.Linux.SecurityContext.ApparmorProfile, + SeccompProfilePath: from.Linux.SecurityContext.SeccompProfilePath, + MaskedPaths: from.Linux.SecurityContext.MaskedPaths, + ReadonlyPaths: 
from.Linux.SecurityContext.ReadonlyPaths, + SupplementalGroups: from.Linux.SecurityContext.SupplementalGroups, + Privileged: from.Linux.SecurityContext.Privileged, + ReadonlyRootfs: from.Linux.SecurityContext.ReadonlyRootfs, + NoNewPrivs: from.Linux.SecurityContext.NoNewPrivs, + Capabilities: &v1alpha2.Capability{}, + NamespaceOptions: &v1alpha2.NamespaceOption{}, + SelinuxOptions: &v1alpha2.SELinuxOption{}, + } + if from.Linux.SecurityContext.Capabilities != nil { + to.Linux.SecurityContext.Capabilities = &v1alpha2.Capability{ + AddCapabilities: from.Linux.SecurityContext.Capabilities.AddCapabilities, + DropCapabilities: from.Linux.SecurityContext.Capabilities.DropCapabilities, + } + } + if from.Linux.SecurityContext.Seccomp != nil { + to.Linux.SecurityContext.Seccomp = &v1alpha2.SecurityProfile{ + ProfileType: v1alpha2.SecurityProfile_ProfileType(from.Linux.SecurityContext.Seccomp.ProfileType), + LocalhostRef: from.Linux.SecurityContext.Seccomp.LocalhostRef, + } + } + if from.Linux.SecurityContext.Apparmor != nil { + to.Linux.SecurityContext.Apparmor = &v1alpha2.SecurityProfile{ + ProfileType: v1alpha2.SecurityProfile_ProfileType(from.Linux.SecurityContext.Apparmor.ProfileType), + LocalhostRef: from.Linux.SecurityContext.Apparmor.LocalhostRef, + } + } + if from.Linux.SecurityContext.NamespaceOptions != nil { + to.Linux.SecurityContext.NamespaceOptions = &v1alpha2.NamespaceOption{ + Network: v1alpha2.NamespaceMode(from.Linux.SecurityContext.NamespaceOptions.Network), + Pid: v1alpha2.NamespaceMode(from.Linux.SecurityContext.NamespaceOptions.Pid), + Ipc: v1alpha2.NamespaceMode(from.Linux.SecurityContext.NamespaceOptions.Ipc), + TargetId: from.Linux.SecurityContext.NamespaceOptions.TargetId, + } + } + if from.Linux.SecurityContext.SelinuxOptions != nil { + to.Linux.SecurityContext.SelinuxOptions = &v1alpha2.SELinuxOption{ + User: from.Linux.SecurityContext.SelinuxOptions.User, + Role: from.Linux.SecurityContext.SelinuxOptions.Role, + Type: 
from.Linux.SecurityContext.SelinuxOptions.Type, + Level: from.Linux.SecurityContext.SelinuxOptions.Level, + } + } + if from.Linux.SecurityContext.RunAsUser != nil { + to.Linux.SecurityContext.RunAsUser = &v1alpha2.Int64Value{ + Value: from.Linux.SecurityContext.RunAsUser.Value, + } + } + if from.Linux.SecurityContext.RunAsGroup != nil { + to.Linux.SecurityContext.RunAsGroup = &v1alpha2.Int64Value{ + Value: from.Linux.SecurityContext.RunAsGroup.Value, + } + } + } + } + envs := []*v1alpha2.KeyValue{} + for _, x := range from.Envs { + envs = append(envs, &v1alpha2.KeyValue{ + Key: x.Key, + Value: x.Value, + }) + } + to.Envs = envs + + mounts := []*v1alpha2.Mount{} + for _, x := range from.Mounts { + mounts = append(mounts, &v1alpha2.Mount{ + ContainerPath: x.ContainerPath, + HostPath: x.HostPath, + Readonly: x.Readonly, + SelinuxRelabel: x.SelinuxRelabel, + Propagation: v1alpha2.MountPropagation(x.Propagation), + }) + } + to.Mounts = mounts + + devices := []*v1alpha2.Device{} + for _, x := range from.Devices { + devices = append(devices, &v1alpha2.Device{ + ContainerPath: x.ContainerPath, + HostPath: x.HostPath, + Permissions: x.Permissions, + }) + } + to.Devices = devices + + return to +} + +func NewV1alpha2LinuxContainerConfig() *v1alpha2.LinuxContainerConfig { + return &v1alpha2.LinuxContainerConfig{ + Resources: &v1alpha2.LinuxContainerResources{}, + SecurityContext: NewV1alpha2LinuxContainerSecurityContext(), + } +} + +func NewV1alpha2LinuxContainerSecurityContext() *v1alpha2.LinuxContainerSecurityContext { + return &v1alpha2.LinuxContainerSecurityContext{ + Capabilities: &v1alpha2.Capability{}, + NamespaceOptions: &v1alpha2.NamespaceOption{}, + SelinuxOptions: &v1alpha2.SELinuxOption{}, + RunAsUser: &v1alpha2.Int64Value{}, + RunAsGroup: &v1alpha2.Int64Value{}, + } +} + +func V1alpha2ContainerFilter(from *ContainerFilter) *v1alpha2.ContainerFilter { + if from == nil { + return nil + } + to := &v1alpha2.ContainerFilter{ + Id: from.Id, + LabelSelector: 
from.LabelSelector, + PodSandboxId: from.PodSandboxId, + } + if from.State != nil { + to.State = &v1alpha2.ContainerStateValue{ + State: v1alpha2.ContainerState(from.State.State), + } + } + + return to +} + +func V1alpha2ContainerResources(from *LinuxContainerResources) *v1alpha2.LinuxContainerResources { + if from == nil { + return nil + } + to := &v1alpha2.LinuxContainerResources{ + CpuPeriod: from.CpuPeriod, + CpuQuota: from.CpuQuota, + CpuShares: from.CpuShares, + MemoryLimitInBytes: from.MemoryLimitInBytes, + OomScoreAdj: from.OomScoreAdj, + CpusetCpus: from.CpusetCpus, + CpusetMems: from.CpusetMems, + } + hugePageLimits := []*v1alpha2.HugepageLimit{} + for _, x := range from.HugepageLimits { + hugePageLimits = append(hugePageLimits, &v1alpha2.HugepageLimit{ + PageSize: x.PageSize, + Limit: x.Limit, + }) + } + to.HugepageLimits = hugePageLimits + + return to +} + +func V1alpha2ExecRequest(from *ExecRequest) *v1alpha2.ExecRequest { + if from == nil { + return nil + } + + return &v1alpha2.ExecRequest{ + ContainerId: from.ContainerId, + Cmd: from.Cmd, + Tty: from.Tty, + Stdin: from.Stdin, + Stdout: from.Stdout, + Stderr: from.Stderr, + } +} + +func V1alpha2AttachRequest(from *AttachRequest) *v1alpha2.AttachRequest { + if from == nil { + return nil + } + + return &v1alpha2.AttachRequest{ + ContainerId: from.ContainerId, + Stdin: from.Stdin, + Tty: from.Tty, + Stdout: from.Stdout, + Stderr: from.Stderr, + } +} + +func V1alpha2PortForwardRequest(from *PortForwardRequest) *v1alpha2.PortForwardRequest { + if from == nil { + return nil + } + + return &v1alpha2.PortForwardRequest{ + PodSandboxId: from.PodSandboxId, + Port: from.Port, + } +} + +func V1alpha2RuntimeConfig(from *RuntimeConfig) *v1alpha2.RuntimeConfig { + if from == nil { + return nil + } + + to := &v1alpha2.RuntimeConfig{} + + if from.NetworkConfig != nil { + to.NetworkConfig = &v1alpha2.NetworkConfig{PodCidr: from.NetworkConfig.PodCidr} + } + + return to +} + +func V1alpha2ContainerStatsFilter(from 
*ContainerStatsFilter) *v1alpha2.ContainerStatsFilter { + if from == nil { + return nil + } + + return &v1alpha2.ContainerStatsFilter{ + Id: from.Id, + LabelSelector: from.LabelSelector, + PodSandboxId: from.PodSandboxId, + } +} + +func V1alpha2PodSandboxStatsFilter(from *PodSandboxStatsFilter) *v1alpha2.PodSandboxStatsFilter { + if from == nil { + return nil + } + + return &v1alpha2.PodSandboxStatsFilter{ + Id: from.Id, + LabelSelector: from.LabelSelector, + } +} + +func V1alpha2ImageFilter(from *ImageFilter) *v1alpha2.ImageFilter { + if from == nil { + return nil + } + + to := &v1alpha2.ImageFilter{} + + if from.Image != nil { + to.Image = V1alpha2ImageSpec(from.Image) + } + + return to +} + +func V1alpha2ImageSpec(from *ImageSpec) *v1alpha2.ImageSpec { + if from == nil { + return nil + } + + return &v1alpha2.ImageSpec{ + Image: from.Image, + Annotations: from.Annotations, + } +} + +func V1alpha2AuthConfig(from *AuthConfig) *v1alpha2.AuthConfig { + if from == nil { + return nil + } + + return &v1alpha2.AuthConfig{ + Username: from.Username, + Password: from.Password, + Auth: from.Auth, + ServerAddress: from.ServerAddress, + IdentityToken: from.IdentityToken, + RegistryToken: from.RegistryToken, + } +} diff --git a/staging/src/k8s.io/cri-api/pkg/apis/services.go b/pkg/kubelet/apis/cri/services.go similarity index 67% rename from staging/src/k8s.io/cri-api/pkg/apis/services.go rename to pkg/kubelet/apis/cri/services.go index 608b8190c948..5f5cf36a138b 100644 --- a/staging/src/k8s.io/cri-api/pkg/apis/services.go +++ b/pkg/kubelet/apis/cri/services.go @@ -14,25 +14,28 @@ See the License for the specific language governing permissions and limitations under the License. */ +// This package is originally inherited by k8s.io/cri-api/pkg/apis: +// https://github.com/kubernetes/kubernetes/blob/2dc2b1e/staging/src/k8s.io/cri-api/pkg/apis/services.go +// +// We made this API kubelet internal, because it maps to the internal +// intermediate CRI types of the kubelet. 
package cri import ( "time" - - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" ) // RuntimeVersioner contains methods for runtime name, version and API version. type RuntimeVersioner interface { // Version returns the runtime name, runtime version and runtime API version - Version(apiVersion string) (*runtimeapi.VersionResponse, error) + Version(apiVersion string) (*VersionResponse, error) } // ContainerManager contains methods to manipulate containers managed by a // container runtime. The methods are thread-safe. type ContainerManager interface { // CreateContainer creates a new container in specified PodSandbox. - CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) + CreateContainer(podSandboxID string, config *ContainerConfig, sandboxConfig *PodSandboxConfig) (string, error) // StartContainer starts the container. StartContainer(containerID string) error // StopContainer stops a running container with a grace period (i.e., timeout). @@ -40,18 +43,18 @@ type ContainerManager interface { // RemoveContainer removes the container. RemoveContainer(containerID string) error // ListContainers lists all containers by filters. - ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) + ListContainers(filter *ContainerFilter) ([]*Container, error) // ContainerStatus returns the status of the container. - ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) + ContainerStatus(containerID string) (*ContainerStatus, error) // UpdateContainerResources updates the cgroup resources for the container. - UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources) error + UpdateContainerResources(containerID string, resources *LinuxContainerResources) error // ExecSync executes a command in the container, and returns the stdout output. // If command exits with a non-zero exit code, an error is returned. 
ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) // Exec prepares a streaming endpoint to execute a command in the container, and returns the address. - Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) + Exec(*ExecRequest) (*ExecResponse, error) // Attach prepares a streaming endpoint to attach to a running container, and returns the address. - Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) + Attach(req *AttachRequest) (*AttachResponse, error) // ReopenContainerLog asks runtime to reopen the stdout/stderr log file // for the container. If it returns error, new container log file MUST NOT // be created. @@ -63,7 +66,7 @@ type ContainerManager interface { type PodSandboxManager interface { // RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure // the sandbox is in ready state. - RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) + RunPodSandbox(config *PodSandboxConfig, runtimeHandler string) (string, error) // StopPodSandbox stops the sandbox. If there are any running containers in the // sandbox, they should be force terminated. StopPodSandbox(podSandboxID string) error @@ -71,11 +74,11 @@ type PodSandboxManager interface { // sandbox, they should be forcibly removed. RemovePodSandbox(podSandboxID string) error // PodSandboxStatus returns the Status of the PodSandbox. - PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) + PodSandboxStatus(podSandboxID string) (*PodSandboxStatus, error) // ListPodSandbox returns a list of Sandbox. - ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) + ListPodSandbox(filter *PodSandboxFilter) ([]*PodSandbox, error) // PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address. 
- PortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) + PortForward(*PortForwardRequest) (*PortForwardResponse, error) } // ContainerStatsManager contains methods for retrieving the container @@ -83,14 +86,14 @@ type PodSandboxManager interface { type ContainerStatsManager interface { // ContainerStats returns stats of the container. If the container does not // exist, the call returns an error. - ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) + ContainerStats(containerID string) (*ContainerStats, error) // ListContainerStats returns stats of all running containers. - ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) + ListContainerStats(filter *ContainerStatsFilter) ([]*ContainerStats, error) // PodSandboxStats returns stats of the pod. If the pod does not // exist, the call returns an error. - PodSandboxStats(podSandboxID string) (*runtimeapi.PodSandboxStats, error) + PodSandboxStats(podSandboxID string) (*PodSandboxStats, error) // ListPodSandboxStats returns stats of all running pods. - ListPodSandboxStats(filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) + ListPodSandboxStats(filter *PodSandboxStatsFilter) ([]*PodSandboxStats, error) } // RuntimeService interface should be implemented by a container runtime. @@ -102,23 +105,36 @@ type RuntimeService interface { ContainerStatsManager // UpdateRuntimeConfig updates runtime configuration if specified - UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error + UpdateRuntimeConfig(runtimeConfig *RuntimeConfig) error // Status returns the status of the runtime. - Status() (*runtimeapi.RuntimeStatus, error) + Status() (*RuntimeStatus, error) + // Retrieve the currently used CRI API version. + APIVersion() APIVersion } +// APIVersion is the type for the CRI API version. +type APIVersion string + +const ( + // APIVersionV1 references the v1 CRI API. 
+ APIVersionV1 APIVersion = "v1" + + // APIVersionV1alpha2 references the v1alpha2 CRI API. + APIVersionV1alpha2 APIVersion = "v1alpha2" +) + // ImageManagerService interface should be implemented by a container image // manager. // The methods should be thread-safe. type ImageManagerService interface { // ListImages lists the existing images. - ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) + ListImages(filter *ImageFilter) ([]*Image, error) // ImageStatus returns the status of the image. - ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) + ImageStatus(image *ImageSpec) (*Image, error) // PullImage pulls an image with the authentication config. - PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) + PullImage(image *ImageSpec, auth *AuthConfig, podSandboxConfig *PodSandboxConfig) (string, error) // RemoveImage removes the image. - RemoveImage(image *runtimeapi.ImageSpec) error + RemoveImage(image *ImageSpec) error // ImageFsInfo returns information of the filesystem that is used to store images. - ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) + ImageFsInfo() ([]*FilesystemUsage, error) } diff --git a/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_image_service.go b/pkg/kubelet/apis/cri/testing/fake_image_service.go similarity index 79% rename from staging/src/k8s.io/cri-api/pkg/apis/testing/fake_image_service.go rename to pkg/kubelet/apis/cri/testing/fake_image_service.go index 17100abd3e5d..35dea75a52b2 100644 --- a/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_image_service.go +++ b/pkg/kubelet/apis/cri/testing/fake_image_service.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" ) // FakeImageService fakes the image service.
@@ -32,11 +32,11 @@ type FakeImageService struct { FakeImageSize uint64 Called []string Errors map[string][]error - Images map[string]*runtimeapi.Image + Images map[string]*internalapi.Image pulledImages []*pulledImage - FakeFilesystemUsage []*runtimeapi.FilesystemUsage + FakeFilesystemUsage []*internalapi.FilesystemUsage } // SetFakeImages sets the list of fake images for the FakeImageService. @@ -44,21 +44,21 @@ func (r *FakeImageService) SetFakeImages(images []string) { r.Lock() defer r.Unlock() - r.Images = make(map[string]*runtimeapi.Image) + r.Images = make(map[string]*internalapi.Image) for _, image := range images { r.Images[image] = r.makeFakeImage( - &runtimeapi.ImageSpec{ + &internalapi.ImageSpec{ Image: image, Annotations: make(map[string]string)}) } } // SetFakeImagesWithAnnotations sets the list of fake images for the FakeImageService with annotations. -func (r *FakeImageService) SetFakeImagesWithAnnotations(imageSpecs []*runtimeapi.ImageSpec) { +func (r *FakeImageService) SetFakeImagesWithAnnotations(imageSpecs []*internalapi.ImageSpec) { r.Lock() defer r.Unlock() - r.Images = make(map[string]*runtimeapi.Image) + r.Images = make(map[string]*internalapi.Image) for _, imageSpec := range imageSpecs { r.Images[imageSpec.Image] = r.makeFakeImage(imageSpec) } @@ -73,7 +73,7 @@ func (r *FakeImageService) SetFakeImageSize(size uint64) { } // SetFakeFilesystemUsage sets the FilesystemUsage for FakeImageService. 
-func (r *FakeImageService) SetFakeFilesystemUsage(usage []*runtimeapi.FilesystemUsage) { +func (r *FakeImageService) SetFakeFilesystemUsage(usage []*internalapi.FilesystemUsage) { r.Lock() defer r.Unlock() @@ -85,12 +85,12 @@ func NewFakeImageService() *FakeImageService { return &FakeImageService{ Called: make([]string, 0), Errors: make(map[string][]error), - Images: make(map[string]*runtimeapi.Image), + Images: make(map[string]*internalapi.Image), } } -func (r *FakeImageService) makeFakeImage(image *runtimeapi.ImageSpec) *runtimeapi.Image { - return &runtimeapi.Image{ +func (r *FakeImageService) makeFakeImage(image *internalapi.ImageSpec) *internalapi.Image { + return &internalapi.Image{ Id: image.Image, Size_: r.FakeImageSize, Spec: image, @@ -131,7 +131,7 @@ func (r *FakeImageService) popError(f string) error { } // ListImages returns the list of images from FakeImageService or error if it was previously set. -func (r *FakeImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) { +func (r *FakeImageService) ListImages(filter *internalapi.ImageFilter) ([]*internalapi.Image, error) { r.Lock() defer r.Unlock() @@ -140,7 +140,7 @@ func (r *FakeImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtim return nil, err } - images := make([]*runtimeapi.Image, 0) + images := make([]*internalapi.Image, 0) for _, img := range r.Images { if filter != nil && filter.Image != nil { if !stringInSlice(filter.Image.Image, img.RepoTags) { @@ -154,7 +154,7 @@ func (r *FakeImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtim } // ImageStatus returns the status of the image from the FakeImageService. 
-func (r *FakeImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) { +func (r *FakeImageService) ImageStatus(image *internalapi.ImageSpec) (*internalapi.Image, error) { r.Lock() defer r.Unlock() @@ -167,7 +167,7 @@ func (r *FakeImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi } // PullImage emulate pulling the image from the FakeImageService. -func (r *FakeImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (r *FakeImageService) PullImage(image *internalapi.ImageSpec, auth *internalapi.AuthConfig, podSandboxConfig *internalapi.PodSandboxConfig) (string, error) { r.Lock() defer r.Unlock() @@ -188,7 +188,7 @@ func (r *FakeImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimea } // RemoveImage removes image from the FakeImageService. -func (r *FakeImageService) RemoveImage(image *runtimeapi.ImageSpec) error { +func (r *FakeImageService) RemoveImage(image *internalapi.ImageSpec) error { r.Lock() defer r.Unlock() @@ -204,7 +204,7 @@ func (r *FakeImageService) RemoveImage(image *runtimeapi.ImageSpec) error { } // ImageFsInfo returns information of the filesystem that is used to store images. -func (r *FakeImageService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) { +func (r *FakeImageService) ImageFsInfo() ([]*internalapi.FilesystemUsage, error) { r.Lock() defer r.Unlock() @@ -217,7 +217,7 @@ func (r *FakeImageService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) } // AssertImagePulledWithAuth validates whether the image was pulled with auth and asserts if it wasn't. 
-func (r *FakeImageService) AssertImagePulledWithAuth(t *testing.T, image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, failMsg string) { +func (r *FakeImageService) AssertImagePulledWithAuth(t *testing.T, image *internalapi.ImageSpec, auth *internalapi.AuthConfig, failMsg string) { r.Lock() defer r.Unlock() expected := &pulledImage{imageSpec: image, authConfig: auth} @@ -225,6 +225,6 @@ func (r *FakeImageService) AssertImagePulledWithAuth(t *testing.T, image *runtim } type pulledImage struct { - imageSpec *runtimeapi.ImageSpec - authConfig *runtimeapi.AuthConfig + imageSpec *internalapi.ImageSpec + authConfig *internalapi.AuthConfig } diff --git a/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go b/pkg/kubelet/apis/cri/testing/fake_runtime_service.go similarity index 82% rename from staging/src/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go rename to pkg/kubelet/apis/cri/testing/fake_runtime_service.go index 3df0ec96ee56..07dfb6a95586 100644 --- a/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go +++ b/pkg/kubelet/apis/cri/testing/fake_runtime_service.go @@ -22,7 +22,7 @@ import ( "sync" "time" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" ) var ( @@ -36,10 +36,10 @@ var ( FakePodSandboxIPs = []string{"192.168.192.168"} ) -// FakePodSandbox is the fake implementation of runtimeapi.PodSandboxStatus. +// FakePodSandbox is the fake implementation of internalapi.PodSandboxStatus. type FakePodSandbox struct { // PodSandboxStatus contains the runtime information for a sandbox. - runtimeapi.PodSandboxStatus + internalapi.PodSandboxStatus // RuntimeHandler is the runtime handler that was issued with the RunPodSandbox request. RuntimeHandler string } @@ -47,10 +47,10 @@ type FakePodSandbox struct { // FakeContainer is a fake container. type FakeContainer struct { // ContainerStatus contains the runtime information for a container. 
- runtimeapi.ContainerStatus + internalapi.ContainerStatus // LinuxResources contains the resources specific to linux containers. - LinuxResources *runtimeapi.LinuxContainerResources + LinuxResources *internalapi.LinuxContainerResources // the sandbox id of this container SandboxID string @@ -63,11 +63,11 @@ type FakeRuntimeService struct { Called []string Errors map[string][]error - FakeStatus *runtimeapi.RuntimeStatus + FakeStatus *internalapi.RuntimeStatus Containers map[string]*FakeContainer Sandboxes map[string]*FakePodSandbox - FakeContainerStats map[string]*runtimeapi.ContainerStats - FakePodSandboxStats map[string]*runtimeapi.PodSandboxStats + FakeContainerStats map[string]*internalapi.ContainerStats + FakePodSandboxStats map[string]*internalapi.PodSandboxStats ErrorOnSandboxCreate bool } @@ -156,13 +156,13 @@ func NewFakeRuntimeService() *FakeRuntimeService { Errors: make(map[string][]error), Containers: make(map[string]*FakeContainer), Sandboxes: make(map[string]*FakePodSandbox), - FakeContainerStats: make(map[string]*runtimeapi.ContainerStats), - FakePodSandboxStats: make(map[string]*runtimeapi.PodSandboxStats), + FakeContainerStats: make(map[string]*internalapi.ContainerStats), + FakePodSandboxStats: make(map[string]*internalapi.PodSandboxStats), } } // Version returns version information from the FakeRuntimeService. 
-func (r *FakeRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) { +func (r *FakeRuntimeService) Version(apiVersion string) (*internalapi.VersionResponse, error) { r.Lock() defer r.Unlock() @@ -171,7 +171,7 @@ func (r *FakeRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResp return nil, err } - return &runtimeapi.VersionResponse{ + return &internalapi.VersionResponse{ Version: FakeVersion, RuntimeName: FakeRuntimeName, RuntimeVersion: FakeVersion, @@ -179,8 +179,12 @@ func (r *FakeRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResp }, nil } +func (r *FakeRuntimeService) APIVersion() internalapi.APIVersion { + return internalapi.APIVersionV1 +} + // Status returns runtime status of the FakeRuntimeService. -func (r *FakeRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) { +func (r *FakeRuntimeService) Status() (*internalapi.RuntimeStatus, error) { r.Lock() defer r.Unlock() @@ -193,7 +197,7 @@ func (r *FakeRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) { } // RunPodSandbox emulates the run of the pod sandbox in the FakeRuntimeService. 
-func (r *FakeRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) { +func (r *FakeRuntimeService) RunPodSandbox(config *internalapi.PodSandboxConfig, runtimeHandler string) (string, error) { r.Lock() defer r.Unlock() @@ -211,19 +215,19 @@ func (r *FakeRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, podSandboxID := BuildSandboxName(config.Metadata) createdAt := time.Now().UnixNano() r.Sandboxes[podSandboxID] = &FakePodSandbox{ - PodSandboxStatus: runtimeapi.PodSandboxStatus{ + PodSandboxStatus: internalapi.PodSandboxStatus{ Id: podSandboxID, Metadata: config.Metadata, - State: runtimeapi.PodSandboxState_SANDBOX_READY, + State: internalapi.PodSandboxState_SANDBOX_READY, CreatedAt: createdAt, - Network: &runtimeapi.PodSandboxNetworkStatus{ + Network: &internalapi.PodSandboxNetworkStatus{ Ip: FakePodSandboxIPs[0], }, // Without setting sandboxStatus's Linux.Namespaces.Options, kubeGenericRuntimeManager's podSandboxChanged will consider it as network // namespace changed and always recreate sandbox which causes pod creation failed. // Ref `sandboxStatus.GetLinux().GetNamespaces().GetOptions().GetNetwork() != networkNamespaceForPod(pod)` in podSandboxChanged function. 
- Linux: &runtimeapi.LinuxPodSandboxStatus{ - Namespaces: &runtimeapi.Namespace{ + Linux: &internalapi.LinuxPodSandboxStatus{ + Namespaces: &internalapi.Namespace{ Options: config.GetLinux().GetSecurityContext().GetNamespaceOptions(), }, }, @@ -235,9 +239,9 @@ func (r *FakeRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, } // assign additional IPs additionalIPs := FakePodSandboxIPs[1:] - additionalPodIPs := make([]*runtimeapi.PodIP, 0, len(additionalIPs)) + additionalPodIPs := make([]*internalapi.PodIP, 0, len(additionalIPs)) for _, ip := range additionalIPs { - additionalPodIPs = append(additionalPodIPs, &runtimeapi.PodIP{ + additionalPodIPs = append(additionalPodIPs, &internalapi.PodIP{ Ip: ip, }) } @@ -256,7 +260,7 @@ func (r *FakeRuntimeService) StopPodSandbox(podSandboxID string) error { } if s, ok := r.Sandboxes[podSandboxID]; ok { - s.State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY + s.State = internalapi.PodSandboxState_SANDBOX_NOTREADY } else { return fmt.Errorf("pod sandbox %s not found", podSandboxID) } @@ -281,7 +285,7 @@ func (r *FakeRuntimeService) RemovePodSandbox(podSandboxID string) error { } // PodSandboxStatus returns pod sandbox status from the FakeRuntimeService. -func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) { +func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string) (*internalapi.PodSandboxStatus, error) { r.Lock() defer r.Unlock() @@ -300,7 +304,7 @@ func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeapi. } // ListPodSandbox returns the list of pod sandboxes in the FakeRuntimeService. 
-func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) { +func (r *FakeRuntimeService) ListPodSandbox(filter *internalapi.PodSandboxFilter) ([]*internalapi.PodSandbox, error) { r.Lock() defer r.Unlock() @@ -309,7 +313,7 @@ func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) return nil, err } - result := make([]*runtimeapi.PodSandbox, 0) + result := make([]*internalapi.PodSandbox, 0) for id, s := range r.Sandboxes { if filter != nil { if filter.Id != "" && filter.Id != id { @@ -323,7 +327,7 @@ func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) } } - result = append(result, &runtimeapi.PodSandbox{ + result = append(result, &internalapi.PodSandbox{ Id: s.Id, Metadata: s.Metadata, State: s.State, @@ -338,7 +342,7 @@ func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) } // PortForward emulates the set up of port forward in the FakeRuntimeService. -func (r *FakeRuntimeService) PortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { +func (r *FakeRuntimeService) PortForward(*internalapi.PortForwardRequest) (*internalapi.PortForwardResponse, error) { r.Lock() defer r.Unlock() @@ -347,11 +351,11 @@ func (r *FakeRuntimeService) PortForward(*runtimeapi.PortForwardRequest) (*runti return nil, err } - return &runtimeapi.PortForwardResponse{}, nil + return &internalapi.PortForwardResponse{}, nil } // CreateContainer emulates container creation in the FakeRuntimeService. 
-func (r *FakeRuntimeService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (r *FakeRuntimeService) CreateContainer(podSandboxID string, config *internalapi.ContainerConfig, sandboxConfig *internalapi.PodSandboxConfig) (string, error) { r.Lock() defer r.Unlock() @@ -364,10 +368,10 @@ func (r *FakeRuntimeService) CreateContainer(podSandboxID string, config *runtim // fixed BuildContainerName() for easily making fake containers. containerID := BuildContainerName(config.Metadata, podSandboxID) createdAt := time.Now().UnixNano() - createdState := runtimeapi.ContainerState_CONTAINER_CREATED + createdState := internalapi.ContainerState_CONTAINER_CREATED imageRef := config.Image.Image r.Containers[containerID] = &FakeContainer{ - ContainerStatus: runtimeapi.ContainerStatus{ + ContainerStatus: internalapi.ContainerStatus{ Id: containerID, Metadata: config.Metadata, Image: config.Image, @@ -400,7 +404,7 @@ func (r *FakeRuntimeService) StartContainer(containerID string) error { } // Set container to running. - c.State = runtimeapi.ContainerState_CONTAINER_RUNNING + c.State = internalapi.ContainerState_CONTAINER_RUNNING c.StartedAt = time.Now().UnixNano() return nil @@ -423,7 +427,7 @@ func (r *FakeRuntimeService) StopContainer(containerID string, timeout int64) er // Set container to exited state. finishedAt := time.Now().UnixNano() - exitedState := runtimeapi.ContainerState_CONTAINER_EXITED + exitedState := internalapi.ContainerState_CONTAINER_EXITED c.State = exitedState c.FinishedAt = finishedAt @@ -447,7 +451,7 @@ func (r *FakeRuntimeService) RemoveContainer(containerID string) error { } // ListContainers returns the list of containers in the FakeRuntimeService. 
-func (r *FakeRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) { +func (r *FakeRuntimeService) ListContainers(filter *internalapi.ContainerFilter) ([]*internalapi.Container, error) { r.Lock() defer r.Unlock() @@ -456,7 +460,7 @@ func (r *FakeRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) return nil, err } - result := make([]*runtimeapi.Container, 0) + result := make([]*internalapi.Container, 0) for _, s := range r.Containers { if filter != nil { if filter.Id != "" && filter.Id != s.Id { @@ -473,7 +477,7 @@ func (r *FakeRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) } } - result = append(result, &runtimeapi.Container{ + result = append(result, &internalapi.Container{ Id: s.Id, CreatedAt: s.CreatedAt, PodSandboxId: s.SandboxID, @@ -490,7 +494,7 @@ func (r *FakeRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) } // ContainerStatus returns the container status given the container ID in FakeRuntimeService. -func (r *FakeRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) { +func (r *FakeRuntimeService) ContainerStatus(containerID string) (*internalapi.ContainerStatus, error) { r.Lock() defer r.Unlock() @@ -509,7 +513,7 @@ func (r *FakeRuntimeService) ContainerStatus(containerID string) (*runtimeapi.Co } // UpdateContainerResources returns the container resource in the FakeRuntimeService. -func (r *FakeRuntimeService) UpdateContainerResources(string, *runtimeapi.LinuxContainerResources) error { +func (r *FakeRuntimeService) UpdateContainerResources(string, *internalapi.LinuxContainerResources) error { r.Lock() defer r.Unlock() @@ -528,7 +532,7 @@ func (r *FakeRuntimeService) ExecSync(containerID string, cmd []string, timeout } // Exec emulates the execution of a command in a container in the FakeRuntimeService. 
-func (r *FakeRuntimeService) Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { +func (r *FakeRuntimeService) Exec(*internalapi.ExecRequest) (*internalapi.ExecResponse, error) { r.Lock() defer r.Unlock() @@ -537,11 +541,11 @@ func (r *FakeRuntimeService) Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResp return nil, err } - return &runtimeapi.ExecResponse{}, nil + return &internalapi.ExecResponse{}, nil } // Attach emulates the attach request in the FakeRuntimeService. -func (r *FakeRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { +func (r *FakeRuntimeService) Attach(req *internalapi.AttachRequest) (*internalapi.AttachResponse, error) { r.Lock() defer r.Unlock() @@ -550,11 +554,11 @@ func (r *FakeRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi. return nil, err } - return &runtimeapi.AttachResponse{}, nil + return &internalapi.AttachResponse{}, nil } // UpdateRuntimeConfig emulates the update of a runtime config for the FakeRuntimeService. -func (r *FakeRuntimeService) UpdateRuntimeConfig(runtimeCOnfig *runtimeapi.RuntimeConfig) error { +func (r *FakeRuntimeService) UpdateRuntimeConfig(runtimeCOnfig *internalapi.RuntimeConfig) error { r.Lock() defer r.Unlock() @@ -563,18 +567,18 @@ func (r *FakeRuntimeService) UpdateRuntimeConfig(runtimeCOnfig *runtimeapi.Runti } // SetFakeContainerStats sets the fake container stats in the FakeRuntimeService. -func (r *FakeRuntimeService) SetFakeContainerStats(containerStats []*runtimeapi.ContainerStats) { +func (r *FakeRuntimeService) SetFakeContainerStats(containerStats []*internalapi.ContainerStats) { r.Lock() defer r.Unlock() - r.FakeContainerStats = make(map[string]*runtimeapi.ContainerStats) + r.FakeContainerStats = make(map[string]*internalapi.ContainerStats) for _, s := range containerStats { r.FakeContainerStats[s.Attributes.Id] = s } } // ContainerStats returns the container stats in the FakeRuntimeService. 
-func (r *FakeRuntimeService) ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) { +func (r *FakeRuntimeService) ContainerStats(containerID string) (*internalapi.ContainerStats, error) { r.Lock() defer r.Unlock() @@ -591,7 +595,7 @@ func (r *FakeRuntimeService) ContainerStats(containerID string) (*runtimeapi.Con } // ListContainerStats returns the list of all container stats given the filter in the FakeRuntimeService. -func (r *FakeRuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { +func (r *FakeRuntimeService) ListContainerStats(filter *internalapi.ContainerStatsFilter) ([]*internalapi.ContainerStats, error) { r.Lock() defer r.Unlock() @@ -600,7 +604,7 @@ func (r *FakeRuntimeService) ListContainerStats(filter *runtimeapi.ContainerStat return nil, err } - var result []*runtimeapi.ContainerStats + var result []*internalapi.ContainerStats for _, c := range r.Containers { if filter != nil { if filter.Id != "" && filter.Id != c.Id { @@ -624,18 +628,18 @@ func (r *FakeRuntimeService) ListContainerStats(filter *runtimeapi.ContainerStat } // SetFakePodSandboxStats sets the fake pod sandbox stats in the FakeRuntimeService. -func (r *FakeRuntimeService) SetFakePodSandboxStats(podStats []*runtimeapi.PodSandboxStats) { +func (r *FakeRuntimeService) SetFakePodSandboxStats(podStats []*internalapi.PodSandboxStats) { r.Lock() defer r.Unlock() - r.FakePodSandboxStats = make(map[string]*runtimeapi.PodSandboxStats) + r.FakePodSandboxStats = make(map[string]*internalapi.PodSandboxStats) for _, s := range podStats { r.FakePodSandboxStats[s.Attributes.Id] = s } } // PodSandboxStats returns the sandbox stats in the FakeRuntimeService. 
-func (r *FakeRuntimeService) PodSandboxStats(podSandboxID string) (*runtimeapi.PodSandboxStats, error) { +func (r *FakeRuntimeService) PodSandboxStats(podSandboxID string) (*internalapi.PodSandboxStats, error) { r.Lock() defer r.Unlock() @@ -652,7 +656,7 @@ func (r *FakeRuntimeService) PodSandboxStats(podSandboxID string) (*runtimeapi.P } // ListPodSandboxStats returns the list of all pod sandbox stats given the filter in the FakeRuntimeService. -func (r *FakeRuntimeService) ListPodSandboxStats(filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) { +func (r *FakeRuntimeService) ListPodSandboxStats(filter *internalapi.PodSandboxStatsFilter) ([]*internalapi.PodSandboxStats, error) { r.Lock() defer r.Unlock() @@ -661,7 +665,7 @@ func (r *FakeRuntimeService) ListPodSandboxStats(filter *runtimeapi.PodSandboxSt return nil, err } - var result []*runtimeapi.PodSandboxStats + var result []*internalapi.PodSandboxStats for _, sb := range r.Sandboxes { if filter != nil { if filter.Id != "" && filter.Id != sb.Id { diff --git a/staging/src/k8s.io/cri-api/pkg/apis/testing/utils.go b/pkg/kubelet/apis/cri/testing/utils.go similarity index 84% rename from staging/src/k8s.io/cri-api/pkg/apis/testing/utils.go rename to pkg/kubelet/apis/cri/testing/utils.go index 5b3814e9de37..020da194dc21 100644 --- a/staging/src/k8s.io/cri-api/pkg/apis/testing/utils.go +++ b/pkg/kubelet/apis/cri/testing/utils.go @@ -19,17 +19,17 @@ package testing import ( "fmt" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" ) // BuildContainerName creates a unique container name string. -func BuildContainerName(metadata *runtimeapi.ContainerMetadata, sandboxID string) string { +func BuildContainerName(metadata *internalapi.ContainerMetadata, sandboxID string) string { // include the sandbox ID to make the container ID unique. 
return fmt.Sprintf("%s_%s_%d", sandboxID, metadata.Name, metadata.Attempt) } // BuildSandboxName creates a unique sandbox name string. -func BuildSandboxName(metadata *runtimeapi.PodSandboxMetadata) string { +func BuildSandboxName(metadata *internalapi.PodSandboxMetadata) string { return fmt.Sprintf("%s_%s_%s_%d", metadata.Name, metadata.Namespace, metadata.Uid, metadata.Attempt) } diff --git a/pkg/kubelet/apis/cri/types.go b/pkg/kubelet/apis/cri/types.go new file mode 100644 index 000000000000..349b05ea64ca --- /dev/null +++ b/pkg/kubelet/apis/cri/types.go @@ -0,0 +1,3742 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package contains intermediate types which will be mapped to their +// corresponding CRI `v1` or `v1alpha2` types. Base for this package are the v1 +// definitions. +package cri + +// This file contains all constants defined in CRI. + +// Required runtime condition type. +const ( + // RuntimeReady means the runtime is up and ready to accept basic containers. + RuntimeReady = "RuntimeReady" + // NetworkReady means the runtime network is up and ready to accept containers which require network. + NetworkReady = "NetworkReady" +) + +// LogStreamType is the type of the stream in CRI container log. +type LogStreamType string + +const ( + // Stdout is the stream type for stdout. + Stdout LogStreamType = "stdout" + // Stderr is the stream type for stderr. 
+ Stderr LogStreamType = "stderr" +) + +// LogTag is the tag of a log line in CRI container log. +// Currently defined log tags: +// * First tag: Partial/Full - P/F. +// The field in the container log format can be extended to include multiple +// tags by using a delimiter, but changes should be rare. If it becomes clear +// that better extensibility is desired, a more extensible format (e.g., json) +// should be adopted as a replacement and/or addition. +type LogTag string + +const ( + // LogTagPartial means the line is part of multiple lines. + LogTagPartial LogTag = "P" + // LogTagFull means the line is a single full line or the end of multiple lines. + LogTagFull LogTag = "F" + // LogTagDelimiter is the delimiter for different log tags. + LogTagDelimiter = ":" +) + +type Protocol int32 + +const ( + Protocol_TCP Protocol = 0 + Protocol_UDP Protocol = 1 + Protocol_SCTP Protocol = 2 +) + +var Protocol_name = map[int32]string{ + 0: "TCP", + 1: "UDP", + 2: "SCTP", +} + +var Protocol_value = map[string]int32{ + "TCP": 0, + "UDP": 1, + "SCTP": 2, +} + +type MountPropagation int32 + +const ( + // No mount propagation ("private" in Linux terminology). + MountPropagation_PROPAGATION_PRIVATE MountPropagation = 0 + // Mounts get propagated from the host to the container ("rslave" in Linux). + MountPropagation_PROPAGATION_HOST_TO_CONTAINER MountPropagation = 1 + // Mounts get propagated from the host to the container and from the + // container to the host ("rshared" in Linux). 
+ MountPropagation_PROPAGATION_BIDIRECTIONAL MountPropagation = 2 +) + +var MountPropagation_name = map[int32]string{ + 0: "PROPAGATION_PRIVATE", + 1: "PROPAGATION_HOST_TO_CONTAINER", + 2: "PROPAGATION_BIDIRECTIONAL", +} + +var MountPropagation_value = map[string]int32{ + "PROPAGATION_PRIVATE": 0, + "PROPAGATION_HOST_TO_CONTAINER": 1, + "PROPAGATION_BIDIRECTIONAL": 2, +} + +// A NamespaceMode describes the intended namespace configuration for each +// of the namespaces (Network, PID, IPC) in NamespaceOption. Runtimes should +// map these modes as appropriate for the technology underlying the runtime. +type NamespaceMode int32 + +const ( + // A POD namespace is common to all containers in a pod. + // For example, a container with a PID namespace of POD expects to view + // all of the processes in all of the containers in the pod. + NamespaceMode_POD NamespaceMode = 0 + // A CONTAINER namespace is restricted to a single container. + // For example, a container with a PID namespace of CONTAINER expects to + // view only the processes in that container. + NamespaceMode_CONTAINER NamespaceMode = 1 + // A NODE namespace is the namespace of the Kubernetes node. + // For example, a container with a PID namespace of NODE expects to view + // all of the processes on the host running the kubelet. + NamespaceMode_NODE NamespaceMode = 2 + // TARGET targets the namespace of another container. When this is specified, + // a target_id must be specified in NamespaceOption and refer to a container + // previously created with NamespaceMode CONTAINER. This containers namespace + // will be made to match that of container target_id. + // For example, a container with a PID namespace of TARGET expects to view + // all of the processes that container target_id can view. 
+ NamespaceMode_TARGET NamespaceMode = 3 +) + +var NamespaceMode_name = map[int32]string{ + 0: "POD", + 1: "CONTAINER", + 2: "NODE", + 3: "TARGET", +} + +var NamespaceMode_value = map[string]int32{ + "POD": 0, + "CONTAINER": 1, + "NODE": 2, + "TARGET": 3, +} + +type PodSandboxState int32 + +const ( + PodSandboxState_SANDBOX_READY PodSandboxState = 0 + PodSandboxState_SANDBOX_NOTREADY PodSandboxState = 1 +) + +var PodSandboxState_name = map[int32]string{ + 0: "SANDBOX_READY", + 1: "SANDBOX_NOTREADY", +} + +var PodSandboxState_value = map[string]int32{ + "SANDBOX_READY": 0, + "SANDBOX_NOTREADY": 1, +} + +type ContainerState int32 + +const ( + ContainerState_CONTAINER_CREATED ContainerState = 0 + ContainerState_CONTAINER_RUNNING ContainerState = 1 + ContainerState_CONTAINER_EXITED ContainerState = 2 + ContainerState_CONTAINER_UNKNOWN ContainerState = 3 +) + +var ContainerState_name = map[int32]string{ + 0: "CONTAINER_CREATED", + 1: "CONTAINER_RUNNING", + 2: "CONTAINER_EXITED", + 3: "CONTAINER_UNKNOWN", +} + +var ContainerState_value = map[string]int32{ + "CONTAINER_CREATED": 0, + "CONTAINER_RUNNING": 1, + "CONTAINER_EXITED": 2, + "CONTAINER_UNKNOWN": 3, +} + +// Available profile types. +type SecurityProfile_ProfileType int32 + +const ( + // The container runtime default profile should be used. + SecurityProfile_RuntimeDefault SecurityProfile_ProfileType = 0 + // Disable the feature for the sandbox or the container. + SecurityProfile_Unconfined SecurityProfile_ProfileType = 1 + // A pre-defined profile on the node should be used. + SecurityProfile_Localhost SecurityProfile_ProfileType = 2 +) + +var SecurityProfile_ProfileType_name = map[int32]string{ + 0: "RuntimeDefault", + 1: "Unconfined", + 2: "Localhost", +} + +var SecurityProfile_ProfileType_value = map[string]int32{ + "RuntimeDefault": 0, + "Unconfined": 1, + "Localhost": 2, +} + +type VersionRequest struct { + // Version of the kubelet runtime API. 
+ Version string `json:"version,omitempty"` +} + +type VersionResponse struct { + // Version of the kubelet runtime API. + Version string `json:"version,omitempty"` + // Name of the container runtime. + RuntimeName string `json:"runtime_name,omitempty"` + // Version of the container runtime. The string must be + // semver-compatible. + RuntimeVersion string `json:"runtime_version,omitempty"` + // API version of the container runtime. The string must be + // semver-compatible. + RuntimeApiVersion string `json:"runtime_api_version,omitempty"` +} + +// DNSConfig specifies the DNS servers and search domains of a sandbox. +type DNSConfig struct { + // List of DNS servers of the cluster. + Servers []string `json:"servers,omitempty"` + // List of DNS search domains of the cluster. + Searches []string `json:"searches,omitempty"` + // List of DNS options. See https://linux.die.net/man/5/resolv.conf + // for all available options. + Options []string `json:"options,omitempty"` +} + +// PortMapping specifies the port mapping configurations of a sandbox. +type PortMapping struct { + // Protocol of the port mapping. + Protocol Protocol `json:"protocol,omitempty"` + // Port number within the container. Default: 0 (not specified). + ContainerPort int32 `json:"container_port,omitempty"` + // Port number on the host. Default: 0 (not specified). + HostPort int32 `json:"host_port,omitempty"` + // Host IP. + HostIp string `json:"host_ip,omitempty"` +} + +// Mount specifies a host volume to mount into a container. +type Mount struct { + // Path of the mount within the container. + ContainerPath string `json:"container_path,omitempty"` + // Path of the mount on the host. If the hostPath doesn't exist, then runtimes + // should report error. If the hostpath is a symbolic link, runtimes should + // follow the symlink and mount the real destination to container. + HostPath string `json:"host_path,omitempty"` + // If set, the mount is read-only. 
+ Readonly bool `json:"readonly,omitempty"` + // If set, the mount needs SELinux relabeling. + SelinuxRelabel bool `json:"selinux_relabel,omitempty"` + // Requested propagation mode. + Propagation MountPropagation `json:"propagation,omitempty"` +} + +// NamespaceOption provides options for Linux namespaces. +type NamespaceOption struct { + // Network namespace for this container/sandbox. + // Note: There is currently no way to set CONTAINER scoped network in the Kubernetes API. + // Namespaces currently set by the kubelet: POD, NODE + Network NamespaceMode `json:"network,omitempty"` + // PID namespace for this container/sandbox. + // Note: The CRI default is POD, but the v1.PodSpec default is CONTAINER. + // The kubelet's runtime manager will set this to CONTAINER explicitly for v1 pods. + // Namespaces currently set by the kubelet: POD, CONTAINER, NODE, TARGET + Pid NamespaceMode `json:"pid,omitempty"` + // IPC namespace for this container/sandbox. + // Note: There is currently no way to set CONTAINER scoped IPC in the Kubernetes API. + // Namespaces currently set by the kubelet: POD, NODE + Ipc NamespaceMode `json:"ipc,omitempty"` + // Target Container ID for NamespaceMode of TARGET. This container must have been + // previously created in the same pod. It is not possible to specify different targets + // for each namespace. + TargetId string `json:"target_id,omitempty"` +} + +// Int64Value is the wrapper of int64. +type Int64Value struct { + // The value. + Value int64 `json:"value,omitempty"` +} + +// LinuxSandboxSecurityContext holds linux security configuration that will be +// applied to a sandbox. Note that: +// 1) It does not apply to containers in the pods. +// 2) It may not be applicable to a PodSandbox which does not contain any running +// process. +type LinuxSandboxSecurityContext struct { + // Configurations for the sandbox's namespaces. + // This will be used only if the PodSandbox uses namespace for isolation. 
+ NamespaceOptions *NamespaceOption `json:"namespace_options,omitempty"`
+ // Optional SELinux context to be applied.
+ SelinuxOptions *SELinuxOption `json:"selinux_options,omitempty"`
+ // UID to run sandbox processes as, when applicable.
+ RunAsUser *Int64Value `json:"run_as_user,omitempty"`
+ // GID to run sandbox processes as, when applicable. run_as_group should only
+ // be specified when run_as_user is specified; otherwise, the runtime MUST error.
+ RunAsGroup *Int64Value `json:"run_as_group,omitempty"`
+ // If set, the root filesystem of the sandbox is read-only.
+ ReadonlyRootfs bool `json:"readonly_rootfs,omitempty"`
+ // List of groups applied to the first process run in the sandbox, in
+ // addition to the sandbox's primary GID.
+ SupplementalGroups []int64 `json:"supplemental_groups,omitempty"`
+ // Indicates whether the sandbox will be asked to run a privileged
+ // container. If a privileged container is to be executed within it, this
+ // MUST be true.
+ // This allows a sandbox to take additional security precautions if no
+ // privileged containers are expected to be run.
+ Privileged bool `json:"privileged,omitempty"`
+ // Seccomp profile for the sandbox.
+ Seccomp *SecurityProfile `json:"seccomp,omitempty"`
+ // AppArmor profile for the sandbox.
+ Apparmor *SecurityProfile `json:"apparmor,omitempty"`
+ // Seccomp profile for the sandbox, candidate values are:
+ // * runtime/default: the default profile for the container runtime
+ // * unconfined: unconfined profile, ie, no seccomp sandboxing
+ // * localhost/<full-path-to-profile>: the profile installed on the node.
+ // <full-path-to-profile> is the full path of the profile.
+ // Default: "", which is identical with unconfined.
+ SeccompProfilePath string `json:"seccomp_profile_path,omitempty"` // Deprecated: Do not use.
+}
+
+// A security profile which can be used for sandboxes and containers.
+type SecurityProfile struct {
+ // Indicator which `ProfileType` should be applied.
+ ProfileType SecurityProfile_ProfileType `json:"profile_type,omitempty"` + // Indicates that a pre-defined profile on the node should be used. + // Must only be set if `ProfileType` is `Localhost`. + // For seccomp, it must be an absolute path to the seccomp profile. + // For AppArmor, this field is the AppArmor `/` + LocalhostRef string `json:"localhost_ref,omitempty"` +} + +// LinuxPodSandboxConfig holds platform-specific configurations for Linux +// host platforms and Linux-based containers. +type LinuxPodSandboxConfig struct { + // Parent cgroup of the PodSandbox. + // The cgroupfs style syntax will be used, but the container runtime can + // convert it to systemd semantics if needed. + CgroupParent string `json:"cgroup_parent,omitempty"` + // LinuxSandboxSecurityContext holds sandbox security attributes. + SecurityContext *LinuxSandboxSecurityContext `json:"security_context,omitempty"` + // Sysctls holds linux sysctls config for the sandbox. + Sysctls map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` + // Optional overhead represents the overheads associated with this sandbox + Overhead *LinuxContainerResources `json:"overhead,omitempty"` + // Optional resources represents the sum of container resources for this sandbox + Resources *LinuxContainerResources `json:"resources,omitempty"` +} + +// PodSandboxMetadata holds all necessary information for building the sandbox name. +// The container runtime is encouraged to expose the metadata associated with the +// PodSandbox in its user interface for better user experience. For example, +// the runtime can construct a unique PodSandboxName based on the metadata. +type PodSandboxMetadata struct { + // Pod name of the sandbox. Same as the pod name in the Pod ObjectMeta. + Name string `json:"name,omitempty"` + // Pod UID of the sandbox. Same as the pod UID in the Pod ObjectMeta. + Uid string `json:"uid,omitempty"` + // Pod namespace of the sandbox. Same as the pod namespace in the Pod ObjectMeta. 
+ Namespace string `json:"namespace,omitempty"` + // Attempt number of creating the sandbox. Default: 0. + Attempt uint32 `json:"attempt,omitempty"` +} + +// PodSandboxConfig holds all the required and optional fields for creating a +// sandbox. +type PodSandboxConfig struct { + // Metadata of the sandbox. This information will uniquely identify the + // sandbox, and the runtime should leverage this to ensure correct + // operation. The runtime may also use this information to improve UX, such + // as by constructing a readable name. + Metadata *PodSandboxMetadata `json:"metadata,omitempty"` + // Hostname of the sandbox. Hostname could only be empty when the pod + // network namespace is NODE. + Hostname string `json:"hostname,omitempty"` + // Path to the directory on the host in which container log files are + // stored. + // By default the log of a container going into the LogDirectory will be + // hooked up to STDOUT and STDERR. However, the LogDirectory may contain + // binary log files with structured logging data from the individual + // containers. For example, the files might be newline separated JSON + // structured logs, systemd-journald journal files, gRPC trace files, etc. + // E.g., + // PodSandboxConfig.LogDirectory = `/var/log/pods//` + // ContainerConfig.LogPath = `containerName/Instance#.log` + // + // WARNING: Log management and how kubelet should interface with the + // container logs are under active discussion in + // https://issues.k8s.io/24677. There *may* be future change of direction + // for logging as the discussion carries on. + LogDirectory string `json:"log_directory,omitempty"` + // DNS config for the sandbox. + DnsConfig *DNSConfig `json:"dns_config,omitempty"` + // Port mappings for the sandbox. + PortMappings []*PortMapping `json:"port_mappings,omitempty"` + // Key-value pairs that may be used to scope and select individual resources. 
+ Labels map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map that may be set by the kubelet to store and + // retrieve arbitrary metadata. This will include any annotations set on a + // pod through the Kubernetes API. + // + // Annotations MUST NOT be altered by the runtime; the annotations stored + // here MUST be returned in the PodSandboxStatus associated with the pod + // this PodSandboxConfig creates. + // + // In general, in order to preserve a well-defined interface between the + // kubelet and the container runtime, annotations SHOULD NOT influence + // runtime behaviour. + // + // Annotations can also be useful for runtime authors to experiment with + // new features that are opaque to the Kubernetes APIs (both user-facing + // and the CRI). Whenever possible, however, runtime authors SHOULD + // consider proposing new typed fields for any new features instead. + Annotations map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` + // Optional configurations specific to Linux hosts. + Linux *LinuxPodSandboxConfig `json:"linux,omitempty"` + // Optional configurations specific to Windows hosts. + Windows *WindowsPodSandboxConfig `json:"windows,omitempty"` +} + +type RunPodSandboxRequest struct { + // Configuration for creating a PodSandbox. + Config *PodSandboxConfig `json:"config,omitempty"` + // Named runtime configuration to use for this PodSandbox. + // If the runtime handler is unknown, this request should be rejected. An + // empty string should select the default handler, equivalent to the + // behavior before this feature was added. + // See https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + RuntimeHandler string `json:"runtime_handler,omitempty"` +} + +type RunPodSandboxResponse struct { + // ID of the PodSandbox to run. + PodSandboxId string `json:"pod_sandbox_id,omitempty"` +} + +type StopPodSandboxRequest struct { + // ID of the PodSandbox to stop. 
+ PodSandboxId string `json:"pod_sandbox_id,omitempty"` +} + +type StopPodSandboxResponse struct { +} + +type RemovePodSandboxRequest struct { + // ID of the PodSandbox to remove. + PodSandboxId string `json:"pod_sandbox_id,omitempty"` +} + +type RemovePodSandboxResponse struct { +} + +type PodSandboxStatusRequest struct { + // ID of the PodSandbox for which to retrieve status. + PodSandboxId string `json:"pod_sandbox_id,omitempty"` + // Verbose indicates whether to return extra information about the pod sandbox. + Verbose bool `json:"verbose,omitempty"` +} + +// PodIP represents an ip of a Pod +type PodIP struct { + // an ip is a string representation of an IPv4 or an IPv6 + Ip string `json:"ip,omitempty"` +} + +// PodSandboxNetworkStatus is the status of the network for a PodSandbox. +type PodSandboxNetworkStatus struct { + // IP address of the PodSandbox. + Ip string `json:"ip,omitempty"` + // list of additional ips (not inclusive of PodSandboxNetworkStatus.Ip) of the PodSandBoxNetworkStatus + AdditionalIps []*PodIP `json:"additional_ips,omitempty"` +} + +// Namespace contains paths to the namespaces. +type Namespace struct { + // Namespace options for Linux namespaces. + Options *NamespaceOption `json:"options,omitempty"` +} + +// LinuxSandboxStatus contains status specific to Linux sandboxes. +type LinuxPodSandboxStatus struct { + // Paths to the sandbox's namespaces. + Namespaces *Namespace `json:"namespaces,omitempty"` +} + +// PodSandboxStatus contains the status of the PodSandbox. +type PodSandboxStatus struct { + // ID of the sandbox. + Id string `json:"id,omitempty"` + // Metadata of the sandbox. + Metadata *PodSandboxMetadata `json:"metadata,omitempty"` + // State of the sandbox. + State PodSandboxState `json:"state,omitempty"` + // Creation timestamp of the sandbox in nanoseconds. Must be > 0. + CreatedAt int64 `json:"created_at,omitempty"` + // Network contains network status if network is handled by the runtime. 
+ Network *PodSandboxNetworkStatus `json:"network,omitempty"` + // Linux-specific status to a pod sandbox. + Linux *LinuxPodSandboxStatus `json:"linux,omitempty"` + // Labels are key-value pairs that may be used to scope and select individual resources. + Labels map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map holding arbitrary metadata. + // Annotations MUST NOT be altered by the runtime; the value of this field + // MUST be identical to that of the corresponding PodSandboxConfig used to + // instantiate the pod sandbox this status represents. + Annotations map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` + // runtime configuration used for this PodSandbox. + RuntimeHandler string `json:"runtime_handler,omitempty"` +} + +type PodSandboxStatusResponse struct { + // Status of the PodSandbox. + Status *PodSandboxStatus `json:"status,omitempty"` + // Info is extra information of the PodSandbox. The key could be arbitrary string, and + // value should be in json format. The information could include anything useful for + // debug, e.g. network namespace for linux container based container runtime. + // It should only be returned non-empty when Verbose is true. + Info map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +// PodSandboxStateValue is the wrapper of PodSandboxState. +type PodSandboxStateValue struct { + // State of the sandbox. + State PodSandboxState `json:"state,omitempty"` +} + +// PodSandboxFilter is used to filter a list of PodSandboxes. +// All those fields are combined with 'AND' +type PodSandboxFilter struct { + // ID of the sandbox. + Id string `json:"id,omitempty"` + // State of the sandbox. + State *PodSandboxStateValue `json:"state,omitempty"` + // LabelSelector to select matches. + // Only api.MatchLabels is supported for now and the requirements + // are ANDed. MatchExpressions is not supported yet. 
+ LabelSelector map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +type ListPodSandboxRequest struct { + // PodSandboxFilter to filter a list of PodSandboxes. + Filter *PodSandboxFilter `json:"filter,omitempty"` +} + +// PodSandbox contains minimal information about a sandbox. +type PodSandbox struct { + // ID of the PodSandbox. + Id string `json:"id,omitempty"` + // Metadata of the PodSandbox. + Metadata *PodSandboxMetadata `json:"metadata,omitempty"` + // State of the PodSandbox. + State PodSandboxState `json:"state,omitempty"` + // Creation timestamps of the PodSandbox in nanoseconds. Must be > 0. + CreatedAt int64 `json:"created_at,omitempty"` + // Labels of the PodSandbox. + Labels map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map holding arbitrary metadata. + // Annotations MUST NOT be altered by the runtime; the value of this field + // MUST be identical to that of the corresponding PodSandboxConfig used to + // instantiate this PodSandbox. + Annotations map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` + // runtime configuration used for this PodSandbox. + RuntimeHandler string `json:"runtime_handler,omitempty"` +} + +type ListPodSandboxResponse struct { + // List of PodSandboxes. + Items []*PodSandbox `json:"items,omitempty"` +} + +type PodSandboxStatsRequest struct { + // ID of the pod sandbox for which to retrieve stats. + PodSandboxId string `json:"pod_sandbox_id,omitempty"` +} + +type PodSandboxStatsResponse struct { + Stats *PodSandboxStats `json:"stats,omitempty"` +} + +// PodSandboxStatsFilter is used to filter pod sandboxes. +// All those fields are combined with 'AND'. +type PodSandboxStatsFilter struct { + // ID of the pod sandbox. + Id string `json:"id,omitempty"` + // LabelSelector to select matches. + // Only api.MatchLabels is supported for now and the requirements + // are ANDed. MatchExpressions is not supported yet. 
+ LabelSelector map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +type ListPodSandboxStatsRequest struct { + // Filter for the list request. + Filter *PodSandboxStatsFilter `json:"filter,omitempty"` +} + +type ListPodSandboxStatsResponse struct { + // Stats of the pod sandbox. + Stats []*PodSandboxStats `json:"stats,omitempty"` +} + +// PodSandboxAttributes provides basic information of the pod sandbox. +type PodSandboxAttributes struct { + // ID of the pod sandbox. + Id string `json:"id,omitempty"` + // Metadata of the pod sandbox. + Metadata *PodSandboxMetadata `json:"metadata,omitempty"` + // Key-value pairs that may be used to scope and select individual resources. + Labels map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map holding arbitrary metadata. + // Annotations MUST NOT be altered by the runtime; the value of this field + // MUST be identical to that of the corresponding PodSandboxStatus used to + // instantiate the PodSandbox this status represents. + Annotations map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +// PodSandboxStats provides the resource usage statistics for a pod. +// The linux or windows field will be populated depending on the platform. +type PodSandboxStats struct { + // Information of the pod. + Attributes *PodSandboxAttributes `json:"attributes,omitempty"` + // Stats from linux. + Linux *LinuxPodSandboxStats `json:"linux,omitempty"` + // Stats from windows. + Windows *WindowsPodSandboxStats `json:"windows,omitempty"` +} + +// LinuxPodSandboxStats provides the resource usage statistics for a pod sandbox on linux. +type LinuxPodSandboxStats struct { + // CPU usage gathered for the pod sandbox. + Cpu *CpuUsage `json:"cpu,omitempty"` + // Memory usage gathered for the pod sandbox. 
+ Memory *MemoryUsage `json:"memory,omitempty"` + // Network usage gathered for the pod sandbox + Network *NetworkUsage `json:"network,omitempty"` + // Stats pertaining to processes in the pod sandbox. + Process *ProcessUsage `json:"process,omitempty"` + // Stats of containers in the measured pod sandbox. + Containers []*ContainerStats `json:"containers,omitempty"` +} + +// WindowsPodSandboxStats provides the resource usage statistics for a pod sandbox on windows +type WindowsPodSandboxStats struct { +} + +// NetworkUsage contains data about network resources. +type NetworkUsage struct { + // The time at which these stats were updated. + Timestamp int64 `json:"timestamp,omitempty"` + // Stats for the default network interface. + DefaultInterface *NetworkInterfaceUsage `json:"default_interface,omitempty"` + // Stats for all found network interfaces, excluding the default. + Interfaces []*NetworkInterfaceUsage `json:"interfaces,omitempty"` +} + +// NetworkInterfaceUsage contains resource value data about a network interface. +type NetworkInterfaceUsage struct { + // The name of the network interface. + Name string `json:"name,omitempty"` + // Cumulative count of bytes received. + RxBytes *UInt64Value `json:"rx_bytes,omitempty"` + // Cumulative count of receive errors encountered. + RxErrors *UInt64Value `json:"rx_errors,omitempty"` + // Cumulative count of bytes transmitted. + TxBytes *UInt64Value `json:"tx_bytes,omitempty"` + // Cumulative count of transmit errors encountered. + TxErrors *UInt64Value `json:"tx_errors,omitempty"` +} + +// ProcessUsage are stats pertaining to processes. +type ProcessUsage struct { + // The time at which these stats were updated. + Timestamp int64 `json:"timestamp,omitempty"` + // Number of processes. + ProcessCount *UInt64Value `json:"process_count,omitempty"` +} + +// ImageSpec is an internal representation of an image. +type ImageSpec struct { + // Container's Image field (e.g. imageID or imageDigest). 
+ Image string `json:"image,omitempty"` + // Unstructured key-value map holding arbitrary metadata. + // ImageSpec Annotations can be used to help the runtime target specific + // images in multi-arch images. + Annotations map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +type KeyValue struct { + Key string `json:"key,omitempty"` + Value string `json:"value,omitempty"` +} + +// LinuxContainerResources specifies Linux specific configuration for +// resources. +// TODO: Consider using Resources from opencontainers/runtime-spec/specs-go +// directly. +type LinuxContainerResources struct { + // CPU CFS (Completely Fair Scheduler) period. Default: 0 (not specified). + CpuPeriod int64 `json:"cpu_period,omitempty"` + // CPU CFS (Completely Fair Scheduler) quota. Default: 0 (not specified). + CpuQuota int64 `json:"cpu_quota,omitempty"` + // CPU shares (relative weight vs. other containers). Default: 0 (not specified). + CpuShares int64 `json:"cpu_shares,omitempty"` + // Memory limit in bytes. Default: 0 (not specified). + MemoryLimitInBytes int64 `json:"memory_limit_in_bytes,omitempty"` + // OOMScoreAdj adjusts the oom-killer score. Default: 0 (not specified). + OomScoreAdj int64 `json:"oom_score_adj,omitempty"` + // CpusetCpus constrains the allowed set of logical CPUs. Default: "" (not specified). + CpusetCpus string `json:"cpuset_cpus,omitempty"` + // CpusetMems constrains the allowed set of memory nodes. Default: "" (not specified). + CpusetMems string `json:"cpuset_mems,omitempty"` + // List of HugepageLimits to limit the HugeTLB usage of container per page size. Default: nil (not specified). + HugepageLimits []*HugepageLimit `json:"hugepage_limits,omitempty"` + // Unified resources for cgroup v2. Default: nil (not specified). + // Each key/value in the map refers to the cgroup v2. + // e.g. "memory.max": "6937202688" or "io.weight": "default 100". 
+ Unified map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Memory swap limit in bytes. Default 0 (not specified).
+ MemorySwapLimitInBytes int64 `json:"memory_swap_limit_in_bytes,omitempty"`
+}
+
+// HugepageLimit corresponds to the file`hugetlb.<hugepagesize>.limit_in_byte` in container level cgroup.
+// For example, `PageSize=1GB`, `Limit=1073741824` means setting `1073741824` bytes to hugetlb.1GB.limit_in_bytes.
+type HugepageLimit struct {
+ // The value of PageSize has the format <size><unit-prefix>B (2MB, 1GB),
+ // and must match the <hugepagesize> of the corresponding control file found in `hugetlb.<hugepagesize>.limit_in_bytes`.
+ // The values of <size> are intended to be parsed using base 1024("1KB" = 1024, "1MB" = 1048576, etc).
+ PageSize string `json:"page_size,omitempty"`
+ // limit in bytes of hugepagesize HugeTLB usage.
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+// SELinuxOption are the labels to be applied to the container.
+type SELinuxOption struct {
+ User string `json:"user,omitempty"`
+ Role string `json:"role,omitempty"`
+ Type string `json:"type,omitempty"`
+ Level string `json:"level,omitempty"`
+}
+
+// Capability contains the container capabilities to add or drop
+type Capability struct {
+ // List of capabilities to add.
+ AddCapabilities []string `json:"add_capabilities,omitempty"`
+ // List of capabilities to drop.
+ DropCapabilities []string `json:"drop_capabilities,omitempty"`
+}
+
+// LinuxContainerSecurityContext holds linux security configuration that will be applied to a container.
+type LinuxContainerSecurityContext struct {
+ // Capabilities to add or drop.
+ Capabilities *Capability `json:"capabilities,omitempty"`
+ // If set, run container in privileged mode.
+ // Privileged mode is incompatible with the following options. If
+ // privileged is set, the following features MAY have no effect:
+ // 1. capabilities
+ // 2. selinux_options
+ // 4. seccomp
+ // 5. apparmor
+ //
+ // Privileged mode implies the following specific options are applied:
+ // 1. 
All capabilities are added. + // 2. Sensitive paths, such as kernel module paths within sysfs, are not masked. + // 3. Any sysfs and procfs mounts are mounted RW. + // 4. AppArmor confinement is not applied. + // 5. Seccomp restrictions are not applied. + // 6. The device cgroup does not restrict access to any devices. + // 7. All devices from the host's /dev are available within the container. + // 8. SELinux restrictions are not applied (e.g. label=disabled). + Privileged bool `json:"privileged,omitempty"` + // Configurations for the container's namespaces. + // Only used if the container uses namespace for isolation. + NamespaceOptions *NamespaceOption `json:"namespace_options,omitempty"` + // SELinux context to be optionally applied. + SelinuxOptions *SELinuxOption `json:"selinux_options,omitempty"` + // UID to run the container process as. Only one of run_as_user and + // run_as_username can be specified at a time. + RunAsUser *Int64Value `json:"run_as_user,omitempty"` + // GID to run the container process as. run_as_group should only be specified + // when run_as_user or run_as_username is specified; otherwise, the runtime + // MUST error. + RunAsGroup *Int64Value `json:"run_as_group,omitempty"` + // User name to run the container process as. If specified, the user MUST + // exist in the container image (i.e. in the /etc/passwd inside the image), + // and be resolved there by the runtime; otherwise, the runtime MUST error. + RunAsUsername string `json:"run_as_username,omitempty"` + // If set, the root filesystem of the container is read-only. + ReadonlyRootfs bool `json:"readonly_rootfs,omitempty"` + // List of groups applied to the first process run in the container, in + // addition to the container's primary GID. + SupplementalGroups []int64 `json:"supplemental_groups,omitempty"` + // no_new_privs defines if the flag for no_new_privs should be set on the + // container. 
+ NoNewPrivs bool `json:"no_new_privs,omitempty"`
+ // masked_paths is a slice of paths that should be masked by the container
+ // runtime, this can be passed directly to the OCI spec.
+ MaskedPaths []string `json:"masked_paths,omitempty"`
+ // readonly_paths is a slice of paths that should be set as readonly by the
+ // container runtime, this can be passed directly to the OCI spec.
+ ReadonlyPaths []string `json:"readonly_paths,omitempty"`
+ // Seccomp profile for the container.
+ Seccomp *SecurityProfile `json:"seccomp,omitempty"`
+ // AppArmor profile for the container.
+ Apparmor *SecurityProfile `json:"apparmor,omitempty"`
+ // AppArmor profile for the container, candidate values are:
+ // * runtime/default: equivalent to not specifying a profile.
+ // * unconfined: no profiles are loaded
+ // * localhost/<profile_name>: profile loaded on the node
+ // (localhost) by name. The possible profile names are detailed at
+ // https://gitlab.com/apparmor/apparmor/-/wikis/AppArmor_Core_Policy_Reference
+ ApparmorProfile string `json:"apparmor_profile,omitempty"` // Deprecated: Do not use.
+ // Seccomp profile for the container, candidate values are:
+ // * runtime/default: the default profile for the container runtime
+ // * unconfined: unconfined profile, ie, no seccomp sandboxing
+ // * localhost/<full-path-to-profile>: the profile installed on the node.
+ // <full-path-to-profile> is the full path of the profile.
+ // Default: "", which is identical with unconfined.
+ SeccompProfilePath string `json:"seccomp_profile_path,omitempty"` // Deprecated: Do not use.
+}
+
+// LinuxContainerConfig contains platform-specific configuration for
+// Linux-based containers.
+type LinuxContainerConfig struct {
+ // Resources specification for the container.
+ Resources *LinuxContainerResources `json:"resources,omitempty"`
+ // LinuxContainerSecurityContext configuration for the container.
+ SecurityContext *LinuxContainerSecurityContext `json:"security_context,omitempty"` +} + +// WindowsSandboxSecurityContext holds platform-specific configurations that will be +// applied to a sandbox. +// These settings will only apply to the sandbox container. +type WindowsSandboxSecurityContext struct { + // User name to run the container process as. If specified, the user MUST + // exist in the container image and be resolved there by the runtime; + // otherwise, the runtime MUST return error. + RunAsUsername string `json:"run_as_username,omitempty"` + // The contents of the GMSA credential spec to use to run this container. + CredentialSpec string `json:"credential_spec,omitempty"` + // Indicates whether the container be asked to run as a HostProcess container. + HostProcess bool `json:"host_process,omitempty"` +} + +// WindowsPodSandboxConfig holds platform-specific configurations for Windows +// host platforms and Windows-based containers. +type WindowsPodSandboxConfig struct { + // WindowsSandboxSecurityContext holds sandbox security attributes. + SecurityContext *WindowsSandboxSecurityContext `json:"security_context,omitempty"` +} + +// WindowsContainerSecurityContext holds windows security configuration that will be applied to a container. +type WindowsContainerSecurityContext struct { + // User name to run the container process as. If specified, the user MUST + // exist in the container image and be resolved there by the runtime; + // otherwise, the runtime MUST return error. + RunAsUsername string `json:"run_as_username,omitempty"` + // The contents of the GMSA credential spec to use to run this container. + CredentialSpec string `json:"credential_spec,omitempty"` + // Indicates whether a container is to be run as a HostProcess container. + HostProcess bool `json:"host_process,omitempty"` +} + +// WindowsContainerConfig contains platform-specific configuration for +// Windows-based containers. 
+type WindowsContainerConfig struct { + // Resources specification for the container. + Resources *WindowsContainerResources `json:"resources,omitempty"` + // WindowsContainerSecurityContext configuration for the container. + SecurityContext *WindowsContainerSecurityContext `json:"security_context,omitempty"` +} + +// WindowsContainerResources specifies Windows specific configuration for +// resources. +type WindowsContainerResources struct { + // CPU shares (relative weight vs. other containers). Default: 0 (not specified). + CpuShares int64 `json:"cpu_shares,omitempty"` + // Number of CPUs available to the container. Default: 0 (not specified). + CpuCount int64 `json:"cpu_count,omitempty"` + // Specifies the portion of processor cycles that this container can use as a percentage times 100. + CpuMaximum int64 `json:"cpu_maximum,omitempty"` + // Memory limit in bytes. Default: 0 (not specified). + MemoryLimitInBytes int64 `json:"memory_limit_in_bytes,omitempty"` +} + +// ContainerMetadata holds all necessary information for building the container +// name. The container runtime is encouraged to expose the metadata in its user +// interface for better user experience. E.g., runtime can construct a unique +// container name based on the metadata. Note that (name, attempt) is unique +// within a sandbox for the entire lifetime of the sandbox. +type ContainerMetadata struct { + // Name of the container. Same as the container name in the PodSpec. + Name string `json:"name,omitempty"` + // Attempt number of creating the container. Default: 0. + Attempt uint32 `json:"attempt,omitempty"` +} + +// Device specifies a host device to mount into a container. +type Device struct { + // Path of the device within the container. + ContainerPath string `json:"container_path,omitempty"` + // Path of the device on the host. 
+ HostPath string `json:"host_path,omitempty"` + // Cgroups permissions of the device, candidates are one or more of + // * r - allows container to read from the specified device. + // * w - allows container to write to the specified device. + // * m - allows container to create device files that do not yet exist. + Permissions string `json:"permissions,omitempty"` +} + +// ContainerConfig holds all the required and optional fields for creating a +// container. +type ContainerConfig struct { + // Metadata of the container. This information will uniquely identify the + // container, and the runtime should leverage this to ensure correct + // operation. The runtime may also use this information to improve UX, such + // as by constructing a readable name. + Metadata *ContainerMetadata `json:"metadata,omitempty"` + // Image to use. + Image *ImageSpec `json:"image,omitempty"` + // Command to execute (i.e., entrypoint for docker) + Command []string `json:"command,omitempty"` + // Args for the Command (i.e., command for docker) + Args []string `json:"args,omitempty"` + // Current working directory of the command. + WorkingDir string `json:"working_dir,omitempty"` + // List of environment variable to set in the container. + Envs []*KeyValue `json:"envs,omitempty"` + // Mounts for the container. + Mounts []*Mount `json:"mounts,omitempty"` + // Devices for the container. + Devices []*Device `json:"devices,omitempty"` + // Key-value pairs that may be used to scope and select individual resources. + // Label keys are of the form: + // label-key ::= prefixed-name | name + // prefixed-name ::= prefix '/' name + // prefix ::= DNS_SUBDOMAIN + // name ::= DNS_LABEL + Labels map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map that may be used by the kubelet to store and + // retrieve arbitrary metadata. 
+ // + // Annotations MUST NOT be altered by the runtime; the annotations stored + // here MUST be returned in the ContainerStatus associated with the container + // this ContainerConfig creates. + // + // In general, in order to preserve a well-defined interface between the + // kubelet and the container runtime, annotations SHOULD NOT influence + // runtime behaviour. + Annotations map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` + // Path relative to PodSandboxConfig.LogDirectory for container to store + // the log (STDOUT and STDERR) on the host. + // E.g., + // PodSandboxConfig.LogDirectory = `/var/log/pods//` + // ContainerConfig.LogPath = `containerName/Instance#.log` + // + // WARNING: Log management and how kubelet should interface with the + // container logs are under active discussion in + // https://issues.k8s.io/24677. There *may* be future change of direction + // for logging as the discussion carries on. + LogPath string `json:"log_path,omitempty"` + // Variables for interactive containers, these have very specialized + // use-cases (e.g. debugging). + // TODO: Determine if we need to continue supporting these fields that are + // part of Kubernetes's Container Spec. + Stdin bool `json:"stdin,omitempty"` + StdinOnce bool `json:"stdin_once,omitempty"` + Tty bool `json:"tty,omitempty"` + // Configuration specific to Linux containers. + Linux *LinuxContainerConfig `json:"linux,omitempty"` + // Configuration specific to Windows containers. + Windows *WindowsContainerConfig `json:"windows,omitempty"` +} + +type CreateContainerRequest struct { + // ID of the PodSandbox in which the container should be created. + PodSandboxId string `json:"pod_sandbox_id,omitempty"` + // Config of the container. + Config *ContainerConfig `json:"config,omitempty"` + // Config of the PodSandbox. This is the same config that was passed + // to RunPodSandboxRequest to create the PodSandbox. It is passed again + // here just for easy reference. 
The PodSandboxConfig is immutable and + // remains the same throughout the lifetime of the pod. + SandboxConfig *PodSandboxConfig `json:"sandbox_config,omitempty"` +} + +type CreateContainerResponse struct { + // ID of the created container. + ContainerId string `json:"container_id,omitempty"` +} + +type StartContainerRequest struct { + // ID of the container to start. + ContainerId string `json:"container_id,omitempty"` +} + +type StartContainerResponse struct { +} + +type StopContainerRequest struct { + // ID of the container to stop. + ContainerId string `json:"container_id,omitempty"` + // Timeout in seconds to wait for the container to stop before forcibly + // terminating it. Default: 0 (forcibly terminate the container immediately) + Timeout int64 `json:"timeout,omitempty"` +} + +type StopContainerResponse struct { +} + +type RemoveContainerRequest struct { + // ID of the container to remove. + ContainerId string `json:"container_id,omitempty"` +} + +type RemoveContainerResponse struct { +} + +// ContainerStateValue is the wrapper of ContainerState. +type ContainerStateValue struct { + // State of the container. + State ContainerState `json:"state,omitempty"` +} + +// ContainerFilter is used to filter containers. +// All those fields are combined with 'AND' +type ContainerFilter struct { + // ID of the container. + Id string `json:"id,omitempty"` + // State of the container. + State *ContainerStateValue `json:"state,omitempty"` + // ID of the PodSandbox. + PodSandboxId string `json:"pod_sandbox_id,omitempty"` + // LabelSelector to select matches. + // Only api.MatchLabels is supported for now and the requirements + // are ANDed. MatchExpressions is not supported yet. + LabelSelector map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +type ListContainersRequest struct { + Filter *ContainerFilter `json:"filter,omitempty"` +} + +// Container provides the runtime information for a container, such as ID, hash, +// state of the container. 
+type Container struct { + // ID of the container, used by the container runtime to identify + // a container. + Id string `json:"id,omitempty"` + // ID of the sandbox to which this container belongs. + PodSandboxId string `json:"pod_sandbox_id,omitempty"` + // Metadata of the container. + Metadata *ContainerMetadata `json:"metadata,omitempty"` + // Spec of the image. + Image *ImageSpec `json:"image,omitempty"` + // Reference to the image in use. For most runtimes, this should be an + // image ID. + ImageRef string `json:"image_ref,omitempty"` + // State of the container. + State ContainerState `json:"state,omitempty"` + // Creation time of the container in nanoseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Key-value pairs that may be used to scope and select individual resources. + Labels map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map holding arbitrary metadata. + // Annotations MUST NOT be altered by the runtime; the value of this field + // MUST be identical to that of the corresponding ContainerConfig used to + // instantiate this Container. + Annotations map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +type ListContainersResponse struct { + // List of containers. + Containers []*Container `json:"containers,omitempty"` +} + +type ContainerStatusRequest struct { + // ID of the container for which to retrieve status. + ContainerId string `json:"container_id,omitempty"` + // Verbose indicates whether to return extra information about the container. + Verbose bool `json:"verbose,omitempty"` +} + +// ContainerStatus represents the status of a container. +type ContainerStatus struct { + // ID of the container. + Id string `json:"id,omitempty"` + // Metadata of the container. + Metadata *ContainerMetadata `json:"metadata,omitempty"` + // Status of the container. + State ContainerState `json:"state,omitempty"` + // Creation time of the container in nanoseconds. 
+ CreatedAt int64 `json:"created_at,omitempty"` + // Start time of the container in nanoseconds. Default: 0 (not specified). + StartedAt int64 `json:"started_at,omitempty"` + // Finish time of the container in nanoseconds. Default: 0 (not specified). + FinishedAt int64 `json:"finished_at,omitempty"` + // Exit code of the container. Only required when finished_at != 0. Default: 0. + ExitCode int32 `json:"exit_code,omitempty"` + // Spec of the image. + Image *ImageSpec `json:"image,omitempty"` + // Reference to the image in use. For most runtimes, this should be an + // image ID + ImageRef string `json:"image_ref,omitempty"` + // Brief CamelCase string explaining why container is in its current state. + Reason string `json:"reason,omitempty"` + // Human-readable message indicating details about why container is in its + // current state. + Message string `json:"message,omitempty"` + // Key-value pairs that may be used to scope and select individual resources. + Labels map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map holding arbitrary metadata. + // Annotations MUST NOT be altered by the runtime; the value of this field + // MUST be identical to that of the corresponding ContainerConfig used to + // instantiate the Container this status represents. + Annotations map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` + // Mounts for the container. + Mounts []*Mount `json:"mounts,omitempty"` + // Log path of container. + LogPath string `json:"log_path,omitempty"` +} + +type ContainerStatusResponse struct { + // Status of the container. + Status *ContainerStatus `json:"status,omitempty"` + // Info is extra information of the Container. The key could be arbitrary string, and + // value should be in json format. The information could include anything useful for + // debug, e.g. pid for linux container based container runtime. + // It should only be returned non-empty when Verbose is true. 
+ Info map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +type UpdateContainerResourcesRequest struct { + // ID of the container to update. + ContainerId string `json:"container_id,omitempty"` + // Resource configuration specific to Linux containers. + Linux *LinuxContainerResources `json:"linux,omitempty"` + // Resource configuration specific to Windows containers. + Windows *WindowsContainerResources `json:"windows,omitempty"` + // Unstructured key-value map holding arbitrary additional information for + // container resources updating. This can be used for specifying experimental + // resources to update or other options to use when updating the container. + Annotations map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +type UpdateContainerResourcesResponse struct { +} + +type ExecSyncRequest struct { + // ID of the container. + ContainerId string `json:"container_id,omitempty"` + // Command to execute. + Cmd []string `json:"cmd,omitempty"` + // Timeout in seconds to stop the command. Default: 0 (run forever). + Timeout int64 `json:"timeout,omitempty"` +} + +type ExecSyncResponse struct { + // Captured command stdout output. + Stdout []byte `json:"stdout,omitempty"` + // Captured command stderr output. + Stderr []byte `json:"stderr,omitempty"` + // Exit code the command finished with. Default: 0 (success). + ExitCode int32 `json:"exit_code,omitempty"` +} + +type ExecRequest struct { + // ID of the container in which to execute the command. + ContainerId string `json:"container_id,omitempty"` + // Command to execute. + Cmd []string `json:"cmd,omitempty"` + // Whether to exec the command in a TTY. + Tty bool `json:"tty,omitempty"` + // Whether to stream stdin. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + Stdin bool `json:"stdin,omitempty"` + // Whether to stream stdout. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + Stdout bool `json:"stdout,omitempty"` + // Whether to stream stderr. 
+ // One of `stdin`, `stdout`, and `stderr` MUST be true. + // If `tty` is true, `stderr` MUST be false. Multiplexing is not supported + // in this case. The output of stdout and stderr will be combined to a + // single stream. + Stderr bool `json:"stderr,omitempty"` +} + +type ExecResponse struct { + // Fully qualified URL of the exec streaming server. + Url string `json:"url,omitempty"` +} + +type AttachRequest struct { + // ID of the container to which to attach. + ContainerId string `json:"container_id,omitempty"` + // Whether to stream stdin. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + Stdin bool `json:"stdin,omitempty"` + // Whether the process being attached is running in a TTY. + // This must match the TTY setting in the ContainerConfig. + Tty bool `json:"tty,omitempty"` + // Whether to stream stdout. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + Stdout bool `json:"stdout,omitempty"` + // Whether to stream stderr. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + // If `tty` is true, `stderr` MUST be false. Multiplexing is not supported + // in this case. The output of stdout and stderr will be combined to a + // single stream. + Stderr bool `json:"stderr,omitempty"` +} + +type AttachResponse struct { + // Fully qualified URL of the attach streaming server. + Url string `json:"url,omitempty"` +} + +type PortForwardRequest struct { + // ID of the container to which to forward the port. + PodSandboxId string `json:"pod_sandbox_id,omitempty"` + // Port to forward. + Port []int32 `json:"port,omitempty"` +} + +type PortForwardResponse struct { + // Fully qualified URL of the port-forward streaming server. + Url string `json:"url,omitempty"` +} + +type ImageFilter struct { + // Spec of the image. + Image *ImageSpec `json:"image,omitempty"` +} + +type ListImagesRequest struct { + // Filter to list images. + Filter *ImageFilter `json:"filter,omitempty"` +} + +// Basic information about a container image. 
+type Image struct { + // ID of the image. + Id string `json:"id,omitempty"` + // Other names by which this image is known. + RepoTags []string `json:"repo_tags,omitempty"` + // Digests by which this image is known. + RepoDigests []string `json:"repo_digests,omitempty"` + // Size of the image in bytes. Must be > 0. + Size_ uint64 `json:"size,omitempty"` + // UID that will run the command(s). This is used as a default if no user is + // specified when creating the container. UID and the following user name + // are mutually exclusive. + Uid *Int64Value `json:"uid,omitempty"` + // User name that will run the command(s). This is used if UID is not set + // and no user is specified when creating container. + Username string `json:"username,omitempty"` + // ImageSpec for image which includes annotations + Spec *ImageSpec `json:"spec,omitempty"` +} + +type ListImagesResponse struct { + // List of images. + Images []*Image `json:"images,omitempty"` +} + +type ImageStatusRequest struct { + // Spec of the image. + Image *ImageSpec `json:"image,omitempty"` + // Verbose indicates whether to return extra information about the image. + Verbose bool `json:"verbose,omitempty"` +} + +type ImageStatusResponse struct { + // Status of the image. + Image *Image `json:"image,omitempty"` + // Info is extra information of the Image. The key could be arbitrary string, and + // value should be in json format. The information could include anything useful + // for debug, e.g. image config for oci image based container runtime. + // It should only be returned non-empty when Verbose is true. + Info map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +// AuthConfig contains authorization information for connecting to a registry. 
+type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + ServerAddress string `json:"server_address,omitempty"` + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identity_token,omitempty"` + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registry_token,omitempty"` +} + +type PullImageRequest struct { + // Spec of the image. + Image *ImageSpec `json:"image,omitempty"` + // Authentication configuration for pulling the image. + Auth *AuthConfig `json:"auth,omitempty"` + // Config of the PodSandbox, which is used to pull image in PodSandbox context. + SandboxConfig *PodSandboxConfig `json:"sandbox_config,omitempty"` +} + +type PullImageResponse struct { + // Reference to the image in use. For most runtimes, this should be an + // image ID or digest. + ImageRef string `json:"image_ref,omitempty"` +} + +type RemoveImageRequest struct { + // Spec of the image to remove. + Image *ImageSpec `json:"image,omitempty"` +} + +type RemoveImageResponse struct { +} + +type NetworkConfig struct { + // CIDR to use for pod IP addresses. If the CIDR is empty, runtimes + // should omit it. + PodCidr string `json:"pod_cidr,omitempty"` +} + +type RuntimeConfig struct { + NetworkConfig *NetworkConfig `json:"network_config,omitempty"` +} + +type UpdateRuntimeConfigRequest struct { + RuntimeConfig *RuntimeConfig `json:"runtime_config,omitempty"` +} + +type UpdateRuntimeConfigResponse struct { +} + +// RuntimeCondition contains condition information for the runtime. +// There are 2 kinds of runtime conditions: +// 1. Required conditions: Conditions are required for kubelet to work +// properly. If any required condition is unmet, the node will be not ready. 
+// The required conditions include: +// * RuntimeReady: RuntimeReady means the runtime is up and ready to accept +// basic containers e.g. container only needs host network. +// * NetworkReady: NetworkReady means the runtime network is up and ready to +// accept containers which require container network. +// 2. Optional conditions: Conditions are informative to the user, but kubelet +// will not rely on. Since condition type is an arbitrary string, all conditions +// not required are optional. These conditions will be exposed to users to help +// them understand the status of the system. +type RuntimeCondition struct { + // Type of runtime condition. + Type string `json:"type,omitempty"` + // Status of the condition, one of true/false. Default: false. + Status bool `json:"status,omitempty"` + // Brief CamelCase string containing reason for the condition's last transition. + Reason string `json:"reason,omitempty"` + // Human-readable message indicating details about last transition. + Message string `json:"message,omitempty"` +} + +// RuntimeStatus is information about the current status of the runtime. +type RuntimeStatus struct { + // List of current observed runtime conditions. + Conditions []*RuntimeCondition `json:"conditions,omitempty"` +} + +type StatusRequest struct { + // Verbose indicates whether to return extra information about the runtime. + Verbose bool `json:"verbose,omitempty"` +} + +type StatusResponse struct { + // Status of the Runtime. + Status *RuntimeStatus `json:"status,omitempty"` + // Info is extra information of the Runtime. The key could be arbitrary string, and + // value should be in json format. The information could include anything useful for + // debug, e.g. plugins used by the container runtime. + // It should only be returned non-empty when Verbose is true. + Info map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +type ImageFsInfoRequest struct { +} + +// UInt64Value is the wrapper of uint64. 
+type UInt64Value struct { + // The value. + Value uint64 `json:"value,omitempty"` +} + +// FilesystemIdentifier uniquely identify the filesystem. +type FilesystemIdentifier struct { + // Mountpoint of a filesystem. + Mountpoint string `json:"mountpoint,omitempty"` +} + +// FilesystemUsage provides the filesystem usage information. +type FilesystemUsage struct { + // Timestamp in nanoseconds at which the information were collected. Must be > 0. + Timestamp int64 `json:"timestamp,omitempty"` + // The unique identifier of the filesystem. + FsId *FilesystemIdentifier `json:"fs_id,omitempty"` + // UsedBytes represents the bytes used for images on the filesystem. + // This may differ from the total bytes used on the filesystem and may not + // equal CapacityBytes - AvailableBytes. + UsedBytes *UInt64Value `json:"used_bytes,omitempty"` + // InodesUsed represents the inodes used by the images. + // This may not equal InodesCapacity - InodesAvailable because the underlying + // filesystem may also be used for purposes other than storing images. + InodesUsed *UInt64Value `json:"inodes_used,omitempty"` +} + +type ImageFsInfoResponse struct { + // Information of image filesystem(s). + ImageFilesystems []*FilesystemUsage `json:"image_filesystems,omitempty"` +} + +type ContainerStatsRequest struct { + // ID of the container for which to retrieve stats. + ContainerId string `json:"container_id,omitempty"` +} + +type ContainerStatsResponse struct { + // Stats of the container. + Stats *ContainerStats `json:"stats,omitempty"` +} + +type ListContainerStatsRequest struct { + // Filter for the list request. + Filter *ContainerStatsFilter `json:"filter,omitempty"` +} + +// ContainerStatsFilter is used to filter containers. +// All those fields are combined with 'AND' +type ContainerStatsFilter struct { + // ID of the container. + Id string `json:"id,omitempty"` + // ID of the PodSandbox. + PodSandboxId string `json:"pod_sandbox_id,omitempty"` + // LabelSelector to select matches. 
+ // Only api.MatchLabels is supported for now and the requirements + // are ANDed. MatchExpressions is not supported yet. + LabelSelector map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +type ListContainerStatsResponse struct { + // Stats of the container. + Stats []*ContainerStats `json:"stats,omitempty"` +} + +// ContainerAttributes provides basic information of the container. +type ContainerAttributes struct { + // ID of the container. + Id string `json:"id,omitempty"` + // Metadata of the container. + Metadata *ContainerMetadata `json:"metadata,omitempty"` + // Key-value pairs that may be used to scope and select individual resources. + Labels map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map holding arbitrary metadata. + // Annotations MUST NOT be altered by the runtime; the value of this field + // MUST be identical to that of the corresponding ContainerConfig used to + // instantiate the Container this status represents. + Annotations map[string]string `protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +// ContainerStats provides the resource usage statistics for a container. +type ContainerStats struct { + // Information of the container. + Attributes *ContainerAttributes `json:"attributes,omitempty"` + // CPU usage gathered from the container. + Cpu *CpuUsage `json:"cpu,omitempty"` + // Memory usage gathered from the container. + Memory *MemoryUsage `json:"memory,omitempty"` + // Usage of the writable layer. + WritableLayer *FilesystemUsage `json:"writable_layer,omitempty"` +} + +// CpuUsage provides the CPU usage information. +type CpuUsage struct { + // Timestamp in nanoseconds at which the information were collected. Must be > 0. + Timestamp int64 `json:"timestamp,omitempty"` + // Cumulative CPU usage (sum across all cores) since object creation. 
+ UsageCoreNanoSeconds *UInt64Value `json:"usage_core_nano_seconds,omitempty"`
+ // Total CPU usage (sum of all cores) averaged over the sample window.
+ // The "core" unit can be interpreted as CPU core-nanoseconds per second.
+ UsageNanoCores *UInt64Value `json:"usage_nano_cores,omitempty"`
+}
+
+// MemoryUsage provides the memory usage information.
+type MemoryUsage struct {
+ // Timestamp in nanoseconds at which the information were collected. Must be > 0.
+ Timestamp int64 `json:"timestamp,omitempty"`
+ // The amount of working set memory in bytes.
+ WorkingSetBytes *UInt64Value `json:"working_set_bytes,omitempty"`
+ // Available memory for use. This is defined as the memory limit - workingSetBytes.
+ AvailableBytes *UInt64Value `json:"available_bytes,omitempty"`
+ // Total memory in use. This includes all memory regardless of when it was accessed.
+ UsageBytes *UInt64Value `json:"usage_bytes,omitempty"`
+ // The amount of anonymous and swap cache memory (includes transparent hugepages).
+ RssBytes *UInt64Value `json:"rss_bytes,omitempty"`
+ // Cumulative number of minor page faults.
+ PageFaults *UInt64Value `json:"page_faults,omitempty"`
+ // Cumulative number of major page faults.
+ MajorPageFaults *UInt64Value `json:"major_page_faults,omitempty"`
+}
+
+type ReopenContainerLogRequest struct {
+ // ID of the container for which to reopen the log. 
+ ContainerId string `json:"container_id,omitempty"` +} + +type ReopenContainerLogResponse struct{} + +func (m *VersionRequest) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *VersionResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *VersionResponse) GetRuntimeName() string { + if m != nil { + return m.RuntimeName + } + return "" +} + +func (m *VersionResponse) GetRuntimeVersion() string { + if m != nil { + return m.RuntimeVersion + } + return "" +} + +func (m *VersionResponse) GetRuntimeApiVersion() string { + if m != nil { + return m.RuntimeApiVersion + } + return "" +} + +func (m *DNSConfig) GetServers() []string { + if m != nil { + return m.Servers + } + return nil +} + +func (m *DNSConfig) GetSearches() []string { + if m != nil { + return m.Searches + } + return nil +} + +func (m *DNSConfig) GetOptions() []string { + if m != nil { + return m.Options + } + return nil +} + +func (m *PortMapping) GetProtocol() Protocol { + if m != nil { + return m.Protocol + } + return Protocol_TCP +} + +func (m *PortMapping) GetContainerPort() int32 { + if m != nil { + return m.ContainerPort + } + return 0 +} + +func (m *PortMapping) GetHostPort() int32 { + if m != nil { + return m.HostPort + } + return 0 +} + +func (m *PortMapping) GetHostIp() string { + if m != nil { + return m.HostIp + } + return "" +} + +func (m *Mount) GetContainerPath() string { + if m != nil { + return m.ContainerPath + } + return "" +} + +func (m *Mount) GetHostPath() string { + if m != nil { + return m.HostPath + } + return "" +} + +func (m *Mount) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *Mount) GetSelinuxRelabel() bool { + if m != nil { + return m.SelinuxRelabel + } + return false +} + +func (m *Mount) GetPropagation() MountPropagation { + if m != nil { + return m.Propagation + } + return MountPropagation_PROPAGATION_PRIVATE +} + +func (m *Namespace) GetOptions() 
*NamespaceOption { + if m != nil { + return m.Options + } + return nil +} + +func (m *NamespaceOption) GetNetwork() NamespaceMode { + if m != nil { + return m.Network + } + return NamespaceMode_POD +} + +func (m *NamespaceOption) GetPid() NamespaceMode { + if m != nil { + return m.Pid + } + return NamespaceMode_POD +} + +func (m *NamespaceOption) GetIpc() NamespaceMode { + if m != nil { + return m.Ipc + } + return NamespaceMode_POD +} + +func (m *NamespaceOption) GetTargetId() string { + if m != nil { + return m.TargetId + } + return "" +} + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *LinuxSandboxSecurityContext) GetNamespaceOptions() *NamespaceOption { + if m != nil { + return m.NamespaceOptions + } + return nil +} + +func (m *LinuxSandboxSecurityContext) GetSelinuxOptions() *SELinuxOption { + if m != nil { + return m.SelinuxOptions + } + return nil +} + +func (m *LinuxSandboxSecurityContext) GetRunAsUser() *Int64Value { + if m != nil { + return m.RunAsUser + } + return nil +} + +func (m *LinuxSandboxSecurityContext) GetRunAsGroup() *Int64Value { + if m != nil { + return m.RunAsGroup + } + return nil +} + +func (m *LinuxSandboxSecurityContext) GetReadonlyRootfs() bool { + if m != nil { + return m.ReadonlyRootfs + } + return false +} + +func (m *LinuxSandboxSecurityContext) GetSupplementalGroups() []int64 { + if m != nil { + return m.SupplementalGroups + } + return nil +} + +func (m *LinuxSandboxSecurityContext) GetPrivileged() bool { + if m != nil { + return m.Privileged + } + return false +} + +func (m *LinuxSandboxSecurityContext) GetSeccomp() *SecurityProfile { + if m != nil { + return m.Seccomp + } + return nil +} + +func (m *LinuxSandboxSecurityContext) GetApparmor() *SecurityProfile { + if m != nil { + return m.Apparmor + } + return nil +} + +// Deprecated: Do not use. 
+func (m *LinuxSandboxSecurityContext) GetSeccompProfilePath() string { + if m != nil { + return m.SeccompProfilePath + } + return "" +} + +func (m *SecurityProfile) GetProfileType() SecurityProfile_ProfileType { + if m != nil { + return m.ProfileType + } + return SecurityProfile_RuntimeDefault +} + +func (m *SecurityProfile) GetLocalhostRef() string { + if m != nil { + return m.LocalhostRef + } + return "" +} + +func (m *LinuxPodSandboxConfig) GetCgroupParent() string { + if m != nil { + return m.CgroupParent + } + return "" +} + +func (m *LinuxPodSandboxConfig) GetSecurityContext() *LinuxSandboxSecurityContext { + if m != nil { + return m.SecurityContext + } + return nil +} + +func (m *LinuxPodSandboxConfig) GetSysctls() map[string]string { + if m != nil { + return m.Sysctls + } + return nil +} + +func (m *LinuxPodSandboxConfig) GetOverhead() *LinuxContainerResources { + if m != nil { + return m.Overhead + } + return nil +} + +func (m *LinuxPodSandboxConfig) GetResources() *LinuxContainerResources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *PodSandboxMetadata) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PodSandboxMetadata) GetUid() string { + if m != nil { + return m.Uid + } + return "" +} + +func (m *PodSandboxMetadata) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *PodSandboxMetadata) GetAttempt() uint32 { + if m != nil { + return m.Attempt + } + return 0 +} + +func (m *PodSandboxConfig) GetMetadata() *PodSandboxMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *PodSandboxConfig) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +func (m *PodSandboxConfig) GetLogDirectory() string { + if m != nil { + return m.LogDirectory + } + return "" +} + +func (m *PodSandboxConfig) GetDnsConfig() *DNSConfig { + if m != nil { + return m.DnsConfig + } + return nil +} + +func (m *PodSandboxConfig) 
GetPortMappings() []*PortMapping { + if m != nil { + return m.PortMappings + } + return nil +} + +func (m *PodSandboxConfig) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *PodSandboxConfig) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *PodSandboxConfig) GetLinux() *LinuxPodSandboxConfig { + if m != nil { + return m.Linux + } + return nil +} + +func (m *PodSandboxConfig) GetWindows() *WindowsPodSandboxConfig { + if m != nil { + return m.Windows + } + return nil +} + +func (m *RunPodSandboxRequest) GetConfig() *PodSandboxConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *RunPodSandboxRequest) GetRuntimeHandler() string { + if m != nil { + return m.RuntimeHandler + } + return "" +} + +func (m *RunPodSandboxResponse) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *StopPodSandboxRequest) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *RemovePodSandboxRequest) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *PodSandboxStatusRequest) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *PodSandboxStatusRequest) GetVerbose() bool { + if m != nil { + return m.Verbose + } + return false +} + +func (m *PodIP) GetIp() string { + if m != nil { + return m.Ip + } + return "" +} + +func (m *PodSandboxNetworkStatus) GetIp() string { + if m != nil { + return m.Ip + } + return "" +} + +func (m *PodSandboxNetworkStatus) GetAdditionalIps() []*PodIP { + if m != nil { + return m.AdditionalIps + } + return nil +} + +func (m *LinuxPodSandboxStatus) GetNamespaces() *Namespace { + if m != nil { + return m.Namespaces + } + return nil +} + +func (m *PodSandboxStatus) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *PodSandboxStatus) 
GetMetadata() *PodSandboxMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *PodSandboxStatus) GetState() PodSandboxState { + if m != nil { + return m.State + } + return PodSandboxState_SANDBOX_READY +} + +func (m *PodSandboxStatus) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *PodSandboxStatus) GetNetwork() *PodSandboxNetworkStatus { + if m != nil { + return m.Network + } + return nil +} + +func (m *PodSandboxStatus) GetLinux() *LinuxPodSandboxStatus { + if m != nil { + return m.Linux + } + return nil +} + +func (m *PodSandboxStatus) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *PodSandboxStatus) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *PodSandboxStatus) GetRuntimeHandler() string { + if m != nil { + return m.RuntimeHandler + } + return "" +} + +func (m *PodSandboxStatusResponse) GetStatus() *PodSandboxStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *PodSandboxStatusResponse) GetInfo() map[string]string { + if m != nil { + return m.Info + } + return nil +} + +func (m *PodSandboxFilter) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *PodSandboxFilter) GetState() *PodSandboxStateValue { + if m != nil { + return m.State + } + return nil +} + +func (m *PodSandboxFilter) GetLabelSelector() map[string]string { + if m != nil { + return m.LabelSelector + } + return nil +} + +func (m *ListPodSandboxRequest) GetFilter() *PodSandboxFilter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *PodSandbox) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *PodSandbox) GetMetadata() *PodSandboxMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *PodSandbox) GetState() PodSandboxState { + if m != nil { + return m.State + } + return PodSandboxState_SANDBOX_READY +} + +func 
(m *PodSandbox) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *PodSandbox) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *PodSandbox) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *PodSandbox) GetRuntimeHandler() string { + if m != nil { + return m.RuntimeHandler + } + return "" +} + +func (m *PodSandboxStatsRequest) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *PodSandboxStatsResponse) GetStats() *PodSandboxStats { + if m != nil { + return m.Stats + } + return nil +} + +func (m *PodSandboxStatsFilter) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *PodSandboxStatsFilter) GetLabelSelector() map[string]string { + if m != nil { + return m.LabelSelector + } + return nil +} + +func (m *ListPodSandboxStatsRequest) GetFilter() *PodSandboxStatsFilter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *ListPodSandboxStatsResponse) GetStats() []*PodSandboxStats { + if m != nil { + return m.Stats + } + return nil +} + +func (m *PodSandboxStats) GetAttributes() *PodSandboxAttributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *PodSandboxStats) GetLinux() *LinuxPodSandboxStats { + if m != nil { + return m.Linux + } + return nil +} + +func (m *PodSandboxStats) GetWindows() *WindowsPodSandboxStats { + if m != nil { + return m.Windows + } + return nil +} + +func (m *LinuxPodSandboxStats) GetCpu() *CpuUsage { + if m != nil { + return m.Cpu + } + return nil +} + +func (m *LinuxPodSandboxStats) GetMemory() *MemoryUsage { + if m != nil { + return m.Memory + } + return nil +} + +func (m *LinuxPodSandboxStats) GetNetwork() *NetworkUsage { + if m != nil { + return m.Network + } + return nil +} + +func (m *LinuxPodSandboxStats) GetProcess() *ProcessUsage { + if m != nil { + return m.Process + } + return nil +} + 
+func (m *LinuxPodSandboxStats) GetContainers() []*ContainerStats { + if m != nil { + return m.Containers + } + return nil +} + +func (m *NetworkUsage) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *NetworkUsage) GetDefaultInterface() *NetworkInterfaceUsage { + if m != nil { + return m.DefaultInterface + } + return nil +} + +func (m *NetworkUsage) GetInterfaces() []*NetworkInterfaceUsage { + if m != nil { + return m.Interfaces + } + return nil +} + +func (m *NetworkInterfaceUsage) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NetworkInterfaceUsage) GetRxBytes() *UInt64Value { + if m != nil { + return m.RxBytes + } + return nil +} + +func (m *NetworkInterfaceUsage) GetRxErrors() *UInt64Value { + if m != nil { + return m.RxErrors + } + return nil +} + +func (m *NetworkInterfaceUsage) GetTxBytes() *UInt64Value { + if m != nil { + return m.TxBytes + } + return nil +} + +func (m *NetworkInterfaceUsage) GetTxErrors() *UInt64Value { + if m != nil { + return m.TxErrors + } + return nil +} + +func (m *ProcessUsage) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *ProcessUsage) GetProcessCount() *UInt64Value { + if m != nil { + return m.ProcessCount + } + return nil +} + +func (m *ImageSpec) GetImage() string { + if m != nil { + return m.Image + } + return "" +} + +func (m *ImageSpec) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *KeyValue) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *KeyValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *LinuxContainerResources) GetCpuPeriod() int64 { + if m != nil { + return m.CpuPeriod + } + return 0 +} + +func (m *LinuxContainerResources) GetCpuQuota() int64 { + if m != nil { + return m.CpuQuota + } + return 0 +} + +func (m *LinuxContainerResources) GetCpuShares() int64 { + if m != nil { 
+ return m.CpuShares + } + return 0 +} + +func (m *LinuxContainerResources) GetMemoryLimitInBytes() int64 { + if m != nil { + return m.MemoryLimitInBytes + } + return 0 +} + +func (m *LinuxContainerResources) GetOomScoreAdj() int64 { + if m != nil { + return m.OomScoreAdj + } + return 0 +} + +func (m *LinuxContainerResources) GetCpusetCpus() string { + if m != nil { + return m.CpusetCpus + } + return "" +} + +func (m *LinuxContainerResources) GetCpusetMems() string { + if m != nil { + return m.CpusetMems + } + return "" +} + +func (m *LinuxContainerResources) GetHugepageLimits() []*HugepageLimit { + if m != nil { + return m.HugepageLimits + } + return nil +} + +func (m *LinuxContainerResources) GetUnified() map[string]string { + if m != nil { + return m.Unified + } + return nil +} + +func (m *LinuxContainerResources) GetMemorySwapLimitInBytes() int64 { + if m != nil { + return m.MemorySwapLimitInBytes + } + return 0 +} + +func (m *HugepageLimit) GetPageSize() string { + if m != nil { + return m.PageSize + } + return "" +} + +func (m *HugepageLimit) GetLimit() uint64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *SELinuxOption) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *SELinuxOption) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *SELinuxOption) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *SELinuxOption) GetLevel() string { + if m != nil { + return m.Level + } + return "" +} + +func (m *Capability) GetAddCapabilities() []string { + if m != nil { + return m.AddCapabilities + } + return nil +} + +func (m *Capability) GetDropCapabilities() []string { + if m != nil { + return m.DropCapabilities + } + return nil +} + +func (m *LinuxContainerSecurityContext) GetCapabilities() *Capability { + if m != nil { + return m.Capabilities + } + return nil +} + +func (m *LinuxContainerSecurityContext) GetPrivileged() bool { + if m != nil { + return 
m.Privileged + } + return false +} + +func (m *LinuxContainerSecurityContext) GetNamespaceOptions() *NamespaceOption { + if m != nil { + return m.NamespaceOptions + } + return nil +} + +func (m *LinuxContainerSecurityContext) GetSelinuxOptions() *SELinuxOption { + if m != nil { + return m.SelinuxOptions + } + return nil +} + +func (m *LinuxContainerSecurityContext) GetRunAsUser() *Int64Value { + if m != nil { + return m.RunAsUser + } + return nil +} + +func (m *LinuxContainerSecurityContext) GetRunAsGroup() *Int64Value { + if m != nil { + return m.RunAsGroup + } + return nil +} + +func (m *LinuxContainerSecurityContext) GetRunAsUsername() string { + if m != nil { + return m.RunAsUsername + } + return "" +} + +func (m *LinuxContainerSecurityContext) GetReadonlyRootfs() bool { + if m != nil { + return m.ReadonlyRootfs + } + return false +} + +func (m *LinuxContainerSecurityContext) GetSupplementalGroups() []int64 { + if m != nil { + return m.SupplementalGroups + } + return nil +} + +func (m *LinuxContainerSecurityContext) GetNoNewPrivs() bool { + if m != nil { + return m.NoNewPrivs + } + return false +} + +func (m *LinuxContainerSecurityContext) GetMaskedPaths() []string { + if m != nil { + return m.MaskedPaths + } + return nil +} + +func (m *LinuxContainerSecurityContext) GetReadonlyPaths() []string { + if m != nil { + return m.ReadonlyPaths + } + return nil +} + +func (m *LinuxContainerSecurityContext) GetSeccomp() *SecurityProfile { + if m != nil { + return m.Seccomp + } + return nil +} + +func (m *LinuxContainerSecurityContext) GetApparmor() *SecurityProfile { + if m != nil { + return m.Apparmor + } + return nil +} + +// Deprecated: Do not use. +func (m *LinuxContainerSecurityContext) GetApparmorProfile() string { + if m != nil { + return m.ApparmorProfile + } + return "" +} + +// Deprecated: Do not use. 
+func (m *LinuxContainerSecurityContext) GetSeccompProfilePath() string { + if m != nil { + return m.SeccompProfilePath + } + return "" +} + +func (m *LinuxContainerConfig) GetResources() *LinuxContainerResources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *LinuxContainerConfig) GetSecurityContext() *LinuxContainerSecurityContext { + if m != nil { + return m.SecurityContext + } + return nil +} + +func (m *WindowsSandboxSecurityContext) GetRunAsUsername() string { + if m != nil { + return m.RunAsUsername + } + return "" +} + +func (m *WindowsSandboxSecurityContext) GetCredentialSpec() string { + if m != nil { + return m.CredentialSpec + } + return "" +} + +func (m *WindowsSandboxSecurityContext) GetHostProcess() bool { + if m != nil { + return m.HostProcess + } + return false +} + +func (m *WindowsPodSandboxConfig) GetSecurityContext() *WindowsSandboxSecurityContext { + if m != nil { + return m.SecurityContext + } + return nil +} + +func (m *WindowsContainerSecurityContext) GetRunAsUsername() string { + if m != nil { + return m.RunAsUsername + } + return "" +} + +func (m *WindowsContainerSecurityContext) GetCredentialSpec() string { + if m != nil { + return m.CredentialSpec + } + return "" +} + +func (m *WindowsContainerSecurityContext) GetHostProcess() bool { + if m != nil { + return m.HostProcess + } + return false +} + +func (m *WindowsContainerConfig) GetResources() *WindowsContainerResources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *WindowsContainerConfig) GetSecurityContext() *WindowsContainerSecurityContext { + if m != nil { + return m.SecurityContext + } + return nil +} + +func (m *WindowsContainerResources) GetCpuShares() int64 { + if m != nil { + return m.CpuShares + } + return 0 +} + +func (m *WindowsContainerResources) GetCpuCount() int64 { + if m != nil { + return m.CpuCount + } + return 0 +} + +func (m *WindowsContainerResources) GetCpuMaximum() int64 { + if m != nil { + return m.CpuMaximum + } + 
return 0 +} + +func (m *WindowsContainerResources) GetMemoryLimitInBytes() int64 { + if m != nil { + return m.MemoryLimitInBytes + } + return 0 +} + +func (m *ContainerMetadata) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ContainerMetadata) GetAttempt() uint32 { + if m != nil { + return m.Attempt + } + return 0 +} + +func (m *Device) GetContainerPath() string { + if m != nil { + return m.ContainerPath + } + return "" +} + +func (m *Device) GetHostPath() string { + if m != nil { + return m.HostPath + } + return "" +} + +func (m *Device) GetPermissions() string { + if m != nil { + return m.Permissions + } + return "" +} + +func (m *ContainerConfig) GetMetadata() *ContainerMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *ContainerConfig) GetImage() *ImageSpec { + if m != nil { + return m.Image + } + return nil +} + +func (m *ContainerConfig) GetCommand() []string { + if m != nil { + return m.Command + } + return nil +} + +func (m *ContainerConfig) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *ContainerConfig) GetWorkingDir() string { + if m != nil { + return m.WorkingDir + } + return "" +} + +func (m *ContainerConfig) GetEnvs() []*KeyValue { + if m != nil { + return m.Envs + } + return nil +} + +func (m *ContainerConfig) GetMounts() []*Mount { + if m != nil { + return m.Mounts + } + return nil +} + +func (m *ContainerConfig) GetDevices() []*Device { + if m != nil { + return m.Devices + } + return nil +} + +func (m *ContainerConfig) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ContainerConfig) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *ContainerConfig) GetLogPath() string { + if m != nil { + return m.LogPath + } + return "" +} + +func (m *ContainerConfig) GetStdin() bool { + if m != nil { + return m.Stdin + } + return false +} + +func (m 
*ContainerConfig) GetStdinOnce() bool { + if m != nil { + return m.StdinOnce + } + return false +} + +func (m *ContainerConfig) GetTty() bool { + if m != nil { + return m.Tty + } + return false +} + +func (m *ContainerConfig) GetLinux() *LinuxContainerConfig { + if m != nil { + return m.Linux + } + return nil +} + +func (m *ContainerConfig) GetWindows() *WindowsContainerConfig { + if m != nil { + return m.Windows + } + return nil +} + +func (m *CreateContainerRequest) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *CreateContainerRequest) GetConfig() *ContainerConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *CreateContainerRequest) GetSandboxConfig() *PodSandboxConfig { + if m != nil { + return m.SandboxConfig + } + return nil +} + +func (m *StartContainerRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +func (m *StopContainerRequest) GetTimeout() int64 { + if m != nil { + return m.Timeout + } + return 0 +} + +func (m *ContainerFilter) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ContainerFilter) GetState() *ContainerStateValue { + if m != nil { + return m.State + } + return nil +} + +func (m *ContainerFilter) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *ContainerFilter) GetLabelSelector() map[string]string { + if m != nil { + return m.LabelSelector + } + return nil +} + +func (m *ListContainersRequest) GetFilter() *ContainerFilter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Container) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Container) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *Container) GetMetadata() *ContainerMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Container) GetImage() *ImageSpec { + if m != nil { + 
return m.Image + } + return nil +} + +func (m *Container) GetImageRef() string { + if m != nil { + return m.ImageRef + } + return "" +} + +func (m *Container) GetState() ContainerState { + if m != nil { + return m.State + } + return ContainerState_CONTAINER_CREATED +} + +func (m *Container) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *Container) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Container) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *ListContainersResponse) GetContainers() []*Container { + if m != nil { + return m.Containers + } + return nil +} + +func (m *ContainerStatusRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +func (m *ContainerStatus) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ContainerStatus) GetMetadata() *ContainerMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *ContainerStatus) GetState() ContainerState { + if m != nil { + return m.State + } + return ContainerState_CONTAINER_CREATED +} + +func (m *ContainerStatus) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *ContainerStatus) GetStartedAt() int64 { + if m != nil { + return m.StartedAt + } + return 0 +} + +func (m *ContainerStatus) GetFinishedAt() int64 { + if m != nil { + return m.FinishedAt + } + return 0 +} + +func (m *ContainerStatus) GetExitCode() int32 { + if m != nil { + return m.ExitCode + } + return 0 +} + +func (m *ContainerStatus) GetImage() *ImageSpec { + if m != nil { + return m.Image + } + return nil +} + +func (m *ContainerStatus) GetImageRef() string { + if m != nil { + return m.ImageRef + } + return "" +} + +func (m *ContainerStatus) GetReason() string { + if m != nil { + return m.Reason + } + return "" +} + +func (m *ContainerStatus) GetMessage() string { + if m 
!= nil { + return m.Message + } + return "" +} + +func (m *ContainerStatus) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ContainerStatus) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *ContainerStatus) GetMounts() []*Mount { + if m != nil { + return m.Mounts + } + return nil +} + +func (m *ContainerStatus) GetLogPath() string { + if m != nil { + return m.LogPath + } + return "" +} + +func (m *ContainerStatusResponse) GetStatus() *ContainerStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *ContainerStatusResponse) GetInfo() map[string]string { + if m != nil { + return m.Info + } + return nil +} + +func (m *UpdateContainerResourcesRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +func (m *UpdateContainerResourcesRequest) GetLinux() *LinuxContainerResources { + if m != nil { + return m.Linux + } + return nil +} + +func (m *UpdateContainerResourcesRequest) GetWindows() *WindowsContainerResources { + if m != nil { + return m.Windows + } + return nil +} + +func (m *UpdateContainerResourcesRequest) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *ExecSyncRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +func (m *ExecSyncRequest) GetCmd() []string { + if m != nil { + return m.Cmd + } + return nil +} + +func (m *ExecSyncRequest) GetTimeout() int64 { + if m != nil { + return m.Timeout + } + return 0 +} + +func (m *ExecSyncResponse) GetStdout() []byte { + if m != nil { + return m.Stdout + } + return nil +} + +func (m *ExecSyncResponse) GetStderr() []byte { + if m != nil { + return m.Stderr + } + return nil +} + +func (m *ExecSyncResponse) GetExitCode() int32 { + if m != nil { + return m.ExitCode + } + return 0 +} + +func (m *ExecRequest) GetContainerId() string { + if m != nil { + return 
m.ContainerId + } + return "" +} + +func (m *ExecRequest) GetCmd() []string { + if m != nil { + return m.Cmd + } + return nil +} + +func (m *ExecRequest) GetTty() bool { + if m != nil { + return m.Tty + } + return false +} + +func (m *ExecRequest) GetStdin() bool { + if m != nil { + return m.Stdin + } + return false +} + +func (m *ExecRequest) GetStdout() bool { + if m != nil { + return m.Stdout + } + return false +} + +func (m *ExecRequest) GetStderr() bool { + if m != nil { + return m.Stderr + } + return false +} + +func (m *ExecResponse) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *AttachRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +func (m *AttachRequest) GetStdin() bool { + if m != nil { + return m.Stdin + } + return false +} + +func (m *AttachRequest) GetTty() bool { + if m != nil { + return m.Tty + } + return false +} + +func (m *AttachRequest) GetStdout() bool { + if m != nil { + return m.Stdout + } + return false +} + +func (m *AttachRequest) GetStderr() bool { + if m != nil { + return m.Stderr + } + return false +} + +func (m *AttachResponse) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *PortForwardRequest) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *PortForwardRequest) GetPort() []int32 { + if m != nil { + return m.Port + } + return nil +} + +func (m *PortForwardResponse) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *ImageFilter) GetImage() *ImageSpec { + if m != nil { + return m.Image + } + return nil +} + +func (m *ListImagesRequest) GetFilter() *ImageFilter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Image) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Image) GetRepoTags() []string { + if m != nil { + return m.RepoTags + } + return nil +} + +func (m *Image) GetRepoDigests() []string { + if m 
!= nil { + return m.RepoDigests + } + return nil +} + +func (m *Image) GetSize_() uint64 { + if m != nil { + return m.Size_ + } + return 0 +} + +func (m *Image) GetUid() *Int64Value { + if m != nil { + return m.Uid + } + return nil +} + +func (m *Image) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *Image) GetSpec() *ImageSpec { + if m != nil { + return m.Spec + } + return nil +} + +func (m *ListImagesResponse) GetImages() []*Image { + if m != nil { + return m.Images + } + return nil +} + +func (m *ImageStatusRequest) GetImage() *ImageSpec { + if m != nil { + return m.Image + } + return nil +} + +func (m *ImageStatusRequest) GetVerbose() bool { + if m != nil { + return m.Verbose + } + return false +} + +func (m *ImageStatusResponse) GetImage() *Image { + if m != nil { + return m.Image + } + return nil +} + +func (m *ImageStatusResponse) GetInfo() map[string]string { + if m != nil { + return m.Info + } + return nil +} + +func (m *AuthConfig) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *AuthConfig) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *AuthConfig) GetAuth() string { + if m != nil { + return m.Auth + } + return "" +} + +func (m *AuthConfig) GetServerAddress() string { + if m != nil { + return m.ServerAddress + } + return "" +} + +func (m *AuthConfig) GetIdentityToken() string { + if m != nil { + return m.IdentityToken + } + return "" +} + +func (m *AuthConfig) GetRegistryToken() string { + if m != nil { + return m.RegistryToken + } + return "" +} + +func (m *PullImageRequest) GetImage() *ImageSpec { + if m != nil { + return m.Image + } + return nil +} + +func (m *PullImageRequest) GetAuth() *AuthConfig { + if m != nil { + return m.Auth + } + return nil +} + +func (m *PullImageRequest) GetSandboxConfig() *PodSandboxConfig { + if m != nil { + return m.SandboxConfig + } + return nil +} + +func (m *PullImageResponse) GetImageRef() 
string { + if m != nil { + return m.ImageRef + } + return "" +} + +func (m *RemoveImageRequest) GetImage() *ImageSpec { + if m != nil { + return m.Image + } + return nil +} + +func (m *NetworkConfig) GetPodCidr() string { + if m != nil { + return m.PodCidr + } + return "" +} + +func (m *RuntimeConfig) GetNetworkConfig() *NetworkConfig { + if m != nil { + return m.NetworkConfig + } + return nil +} + +func (m *UpdateRuntimeConfigRequest) GetRuntimeConfig() *RuntimeConfig { + if m != nil { + return m.RuntimeConfig + } + return nil +} + +func (m *RuntimeCondition) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *RuntimeCondition) GetStatus() bool { + if m != nil { + return m.Status + } + return false +} + +func (m *RuntimeCondition) GetReason() string { + if m != nil { + return m.Reason + } + return "" +} + +func (m *RuntimeCondition) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *RuntimeStatus) GetConditions() []*RuntimeCondition { + if m != nil { + return m.Conditions + } + return nil +} + +func (m *StatusRequest) GetVerbose() bool { + if m != nil { + return m.Verbose + } + return false +} + +func (m *StatusResponse) GetStatus() *RuntimeStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *StatusResponse) GetInfo() map[string]string { + if m != nil { + return m.Info + } + return nil +} + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *FilesystemIdentifier) GetMountpoint() string { + if m != nil { + return m.Mountpoint + } + return "" +} + +func (m *FilesystemUsage) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *FilesystemUsage) GetFsId() *FilesystemIdentifier { + if m != nil { + return m.FsId + } + return nil +} + +func (m *FilesystemUsage) GetUsedBytes() *UInt64Value { + if m != nil { + return m.UsedBytes + } + return nil +} + +func (m *FilesystemUsage) GetInodesUsed() 
*UInt64Value { + if m != nil { + return m.InodesUsed + } + return nil +} + +func (m *ImageFsInfoResponse) GetImageFilesystems() []*FilesystemUsage { + if m != nil { + return m.ImageFilesystems + } + return nil +} + +func (m *ContainerStatsRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +func (m *ContainerStatsResponse) GetStats() *ContainerStats { + if m != nil { + return m.Stats + } + return nil +} + +func (m *ListContainerStatsRequest) GetFilter() *ContainerStatsFilter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *ContainerStatsFilter) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ContainerStatsFilter) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *ContainerStatsFilter) GetLabelSelector() map[string]string { + if m != nil { + return m.LabelSelector + } + return nil +} + +func (m *ListContainerStatsResponse) GetStats() []*ContainerStats { + if m != nil { + return m.Stats + } + return nil +} + +func (m *ContainerAttributes) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ContainerAttributes) GetMetadata() *ContainerMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *ContainerAttributes) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ContainerAttributes) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *ContainerStats) GetAttributes() *ContainerAttributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *ContainerStats) GetCpu() *CpuUsage { + if m != nil { + return m.Cpu + } + return nil +} + +func (m *ContainerStats) GetMemory() *MemoryUsage { + if m != nil { + return m.Memory + } + return nil +} + +func (m *ContainerStats) GetWritableLayer() *FilesystemUsage { + if m != nil { + return m.WritableLayer + } + return nil +} + +func (m 
*CpuUsage) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *CpuUsage) GetUsageCoreNanoSeconds() *UInt64Value { + if m != nil { + return m.UsageCoreNanoSeconds + } + return nil +} + +func (m *CpuUsage) GetUsageNanoCores() *UInt64Value { + if m != nil { + return m.UsageNanoCores + } + return nil +} + +func (m *MemoryUsage) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *MemoryUsage) GetWorkingSetBytes() *UInt64Value { + if m != nil { + return m.WorkingSetBytes + } + return nil +} + +func (m *MemoryUsage) GetAvailableBytes() *UInt64Value { + if m != nil { + return m.AvailableBytes + } + return nil +} + +func (m *MemoryUsage) GetUsageBytes() *UInt64Value { + if m != nil { + return m.UsageBytes + } + return nil +} + +func (m *MemoryUsage) GetRssBytes() *UInt64Value { + if m != nil { + return m.RssBytes + } + return nil +} + +func (m *MemoryUsage) GetPageFaults() *UInt64Value { + if m != nil { + return m.PageFaults + } + return nil +} + +func (m *MemoryUsage) GetMajorPageFaults() *UInt64Value { + if m != nil { + return m.MajorPageFaults + } + return nil +} + +func (m *ReopenContainerLogRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} diff --git a/pkg/kubelet/cm/container_manager.go b/pkg/kubelet/cm/container_manager.go index 5dbe856094cb..3f6a8d5029f7 100644 --- a/pkg/kubelet/cm/container_manager.go +++ b/pkg/kubelet/cm/container_manager.go @@ -25,9 +25,9 @@ import ( "k8s.io/apimachinery/pkg/util/sets" // TODO: Migrate kubelet to either use its own internal objects or client library. 
v1 "k8s.io/api/core/v1" - internalapi "k8s.io/cri-api/pkg/apis" podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/apis/podresources" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager" diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index 09bd752d5565..4ba1816e220b 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -49,9 +49,9 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" utilsysctl "k8s.io/component-helpers/node/utils/sysctl" - internalapi "k8s.io/cri-api/pkg/apis" podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1" kubefeatures "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/cm/admission" "k8s.io/kubernetes/pkg/kubelet/cm/containermap" diff --git a/pkg/kubelet/cm/container_manager_stub.go b/pkg/kubelet/cm/container_manager_stub.go index a5b0e523b137..730d634370f2 100644 --- a/pkg/kubelet/cm/container_manager_stub.go +++ b/pkg/kubelet/cm/container_manager_stub.go @@ -21,8 +21,8 @@ import ( "k8s.io/klog/v2" "k8s.io/apimachinery/pkg/api/resource" - internalapi "k8s.io/cri-api/pkg/apis" podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" diff --git a/pkg/kubelet/cm/container_manager_unsupported.go b/pkg/kubelet/cm/container_manager_unsupported.go index 1a0587c36dca..2c3ce011f205 100644 --- a/pkg/kubelet/cm/container_manager_unsupported.go +++ b/pkg/kubelet/cm/container_manager_unsupported.go @@ -26,7 +26,7 @@ 
import ( v1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" - internalapi "k8s.io/cri-api/pkg/apis" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/status" diff --git a/pkg/kubelet/cm/container_manager_windows.go b/pkg/kubelet/cm/container_manager_windows.go index ec56fcf2c73b..149615ca097c 100644 --- a/pkg/kubelet/cm/container_manager_windows.go +++ b/pkg/kubelet/cm/container_manager_windows.go @@ -32,9 +32,9 @@ import ( "k8s.io/apimachinery/pkg/api/resource" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" - internalapi "k8s.io/cri-api/pkg/apis" podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1" kubefeatures "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/cm/admission" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go index e95f77e32b0f..2c5167013cdd 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_manager.go +++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cm/containermap" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" @@ -42,7 +42,7 @@ import ( type ActivePodsFunc func() []*v1.Pod type runtimeService interface { - UpdateContainerResources(id string, resources *runtimeapi.LinuxContainerResources) error + UpdateContainerResources(id string, resources *internalapi.LinuxContainerResources) error } type policyName string @@ -505,7 +505,7 @@ func (m *manager) updateContainerCPUSet(containerID string, cpus cpuset.CPUSet) // this patch-like 
partial resources. return m.containerRuntime.UpdateContainerResources( containerID, - &runtimeapi.LinuxContainerResources{ + &internalapi.LinuxContainerResources{ CpusetCpus: cpus.String(), }) } diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go index db0a3560a856..9aba337f1c19 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go +++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cm/containermap" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" @@ -128,7 +128,7 @@ type mockRuntimeService struct { err error } -func (rt mockRuntimeService) UpdateContainerResources(id string, resources *runtimeapi.LinuxContainerResources) error { +func (rt mockRuntimeService) UpdateContainerResources(id string, resources *internalapi.LinuxContainerResources) error { return rt.err } diff --git a/pkg/kubelet/cm/fake_container_manager.go b/pkg/kubelet/cm/fake_container_manager.go index c907301a6d4e..670e70d48425 100644 --- a/pkg/kubelet/cm/fake_container_manager.go +++ b/pkg/kubelet/cm/fake_container_manager.go @@ -22,8 +22,8 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - internalapi "k8s.io/cri-api/pkg/apis" podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" diff --git a/pkg/kubelet/cm/fake_internal_container_lifecycle.go b/pkg/kubelet/cm/fake_internal_container_lifecycle.go index 153f3377d9aa..7db6d7f01f20 100644 --- 
a/pkg/kubelet/cm/fake_internal_container_lifecycle.go +++ b/pkg/kubelet/cm/fake_internal_container_lifecycle.go @@ -18,7 +18,7 @@ package cm import ( "k8s.io/api/core/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" ) func NewFakeInternalContainerLifecycle() *fakeInternalContainerLifecycle { @@ -27,7 +27,7 @@ func NewFakeInternalContainerLifecycle() *fakeInternalContainerLifecycle { type fakeInternalContainerLifecycle struct{} -func (f *fakeInternalContainerLifecycle) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error { +func (f *fakeInternalContainerLifecycle) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *internalapi.ContainerConfig) error { return nil } diff --git a/pkg/kubelet/cm/internal_container_lifecycle.go b/pkg/kubelet/cm/internal_container_lifecycle.go index 92b36c2f9af8..f27e9b7294f0 100644 --- a/pkg/kubelet/cm/internal_container_lifecycle.go +++ b/pkg/kubelet/cm/internal_container_lifecycle.go @@ -19,15 +19,15 @@ package cm import ( "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" kubefeatures "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" ) type InternalContainerLifecycle interface { - PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error + PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *internalapi.ContainerConfig) error PreStartContainer(pod *v1.Pod, container *v1.Container, containerID string) error PreStopContainer(containerID string) error PostStopContainer(containerID string) error diff --git a/pkg/kubelet/cm/internal_container_lifecycle_linux.go 
b/pkg/kubelet/cm/internal_container_lifecycle_linux.go index 2d4ff55f6d6b..3b4217954c2e 100644 --- a/pkg/kubelet/cm/internal_container_lifecycle_linux.go +++ b/pkg/kubelet/cm/internal_container_lifecycle_linux.go @@ -24,10 +24,10 @@ import ( "strings" "k8s.io/api/core/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" ) -func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error { +func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *internalapi.ContainerConfig) error { if i.cpuManager != nil { allocatedCPUs := i.cpuManager.GetCPUAffinity(string(pod.UID), container.Name) if !allocatedCPUs.IsEmpty() { diff --git a/pkg/kubelet/cm/internal_container_lifecycle_unsupported.go b/pkg/kubelet/cm/internal_container_lifecycle_unsupported.go index 0b898cfa90b3..21676a4a7ca9 100644 --- a/pkg/kubelet/cm/internal_container_lifecycle_unsupported.go +++ b/pkg/kubelet/cm/internal_container_lifecycle_unsupported.go @@ -21,9 +21,9 @@ package cm import ( "k8s.io/api/core/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" ) -func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error { +func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *internalapi.ContainerConfig) error { return nil } diff --git a/pkg/kubelet/cm/internal_container_lifecycle_windows.go b/pkg/kubelet/cm/internal_container_lifecycle_windows.go index fb64faf7623e..3aea34d79fe5 100644 --- a/pkg/kubelet/cm/internal_container_lifecycle_windows.go +++ b/pkg/kubelet/cm/internal_container_lifecycle_windows.go @@ -21,9 +21,9 @@ package cm import ( "k8s.io/api/core/v1" - runtimeapi 
"k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" ) -func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error { +func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *internalapi.ContainerConfig) error { return nil } diff --git a/pkg/kubelet/cm/memorymanager/memory_manager.go b/pkg/kubelet/cm/memorymanager/memory_manager.go index 3bf4dc0bf0e9..5ffea950342a 100644 --- a/pkg/kubelet/cm/memorymanager/memory_manager.go +++ b/pkg/kubelet/cm/memorymanager/memory_manager.go @@ -25,10 +25,10 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog/v2" corev1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cm/containermap" "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" @@ -43,7 +43,7 @@ const memoryManagerStateFileName = "memory_manager_state" type ActivePodsFunc func() []*v1.Pod type runtimeService interface { - UpdateContainerResources(id string, resources *runtimeapi.LinuxContainerResources) error + UpdateContainerResources(id string, resources *internalapi.LinuxContainerResources) error } type sourcesReadyStub struct{} diff --git a/pkg/kubelet/cm/memorymanager/memory_manager_test.go b/pkg/kubelet/cm/memorymanager/memory_manager_test.go index 846b63c51f0d..d309d33fd179 100644 --- a/pkg/kubelet/cm/memorymanager/memory_manager_test.go +++ b/pkg/kubelet/cm/memorymanager/memory_manager_test.go @@ -33,8 +33,8 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - runtimeapi 
"k8s.io/cri-api/pkg/apis/runtime/v1alpha2" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cm/containermap" "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" @@ -122,7 +122,7 @@ type mockRuntimeService struct { err error } -func (rt mockRuntimeService) UpdateContainerResources(id string, resources *runtimeapi.LinuxContainerResources) error { +func (rt mockRuntimeService) UpdateContainerResources(id string, resources *internalapi.LinuxContainerResources) error { return rt.err } diff --git a/pkg/kubelet/config/flags.go b/pkg/kubelet/config/flags.go index 80d0c77f41d9..628187dd8b1a 100644 --- a/pkg/kubelet/config/flags.go +++ b/pkg/kubelet/config/flags.go @@ -21,6 +21,7 @@ import ( "github.com/spf13/pflag" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + criapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" ) // ContainerRuntimeOptions defines options for the container runtime. @@ -31,6 +32,8 @@ type ContainerRuntimeOptions struct { ContainerRuntime string // RuntimeCgroups that container runtime is expected to be isolated in. RuntimeCgroups string + // CRIVersion specifies the Container Runtime Interface (CRI) version to be used. + CRIVersion string // Docker-specific options. @@ -88,6 +91,7 @@ func (s *ContainerRuntimeOptions) AddFlags(fs *pflag.FlagSet) { // General settings. fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'remote'.") fs.StringVar(&s.RuntimeCgroups, "runtime-cgroups", s.RuntimeCgroups, "Optional absolute name of cgroups to create and run the runtime in.") + fs.StringVar(&s.CRIVersion, "cri-version", s.CRIVersion, fmt.Sprintf("Specify the CRI version for the remote runtime and image endpoint to be used. 
Can be either empty (chosen automatically on connection) or forced to %q and %q (deprecated)", criapi.APIVersionV1, criapi.APIVersionV1alpha2)) // Docker-specific settings. fs.StringVar(&s.DockershimRootDirectory, "experimental-dockershim-root-directory", s.DockershimRootDirectory, "Path to the dockershim root directory.") diff --git a/pkg/kubelet/container/helpers.go b/pkg/kubelet/container/helpers.go index b48ba532db0d..638e93f7d43c 100644 --- a/pkg/kubelet/container/helpers.go +++ b/pkg/kubelet/container/helpers.go @@ -29,8 +29,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" podutil "k8s.io/kubernetes/pkg/api/v1/pod" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" sc "k8s.io/kubernetes/pkg/securitycontext" hashutil "k8s.io/kubernetes/pkg/util/hash" "k8s.io/kubernetes/third_party/forked/golang/expansion" @@ -46,7 +46,7 @@ type HandlerRunner interface { // able to get necessary informations like the RunContainerOptions, DNS settings, Host IP. type RuntimeHelper interface { GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (contOpts *RunContainerOptions, cleanupAction func(), err error) - GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error) + GetPodDNS(pod *v1.Pod) (dnsConfig *internalapi.DNSConfig, err error) // GetPodCgroupParent returns the CgroupName identifier, and its literal cgroupfs form on the host // of a pod. GetPodCgroupParent(pod *v1.Pod) string @@ -262,16 +262,16 @@ func ConvertPodStatusToRunningPod(runtimeName string, podStatus *PodStatus) Pod return runningPod } -// SandboxToContainerState converts runtimeapi.PodSandboxState to +// SandboxToContainerState converts internalapi.PodSandboxState to // kubecontainer.State. // This is only needed because we need to return sandboxes as if they were // kubecontainer.Containers to avoid substantial changes to PLEG. 
// TODO: Remove this once it becomes obsolete. -func SandboxToContainerState(state runtimeapi.PodSandboxState) State { +func SandboxToContainerState(state internalapi.PodSandboxState) State { switch state { - case runtimeapi.PodSandboxState_SANDBOX_READY: + case internalapi.PodSandboxState_SANDBOX_READY: return ContainerStateRunning - case runtimeapi.PodSandboxState_SANDBOX_NOTREADY: + case internalapi.PodSandboxState_SANDBOX_NOTREADY: return ContainerStateExited } return ContainerStateUnknown diff --git a/pkg/kubelet/container/runtime.go b/pkg/kubelet/container/runtime.go index 43d180ec6d3a..452723d46ee6 100644 --- a/pkg/kubelet/container/runtime.go +++ b/pkg/kubelet/container/runtime.go @@ -30,8 +30,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/remotecommand" "k8s.io/client-go/util/flowcontrol" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog/v2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/volume" ) @@ -137,7 +137,7 @@ type StreamingRuntime interface { type ImageService interface { // PullImage pulls an image from the network to local storage using the supplied // secrets if necessary. It returns a reference (digest or ID) to the pulled image. - PullImage(image ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) + PullImage(image ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *internalapi.PodSandboxConfig) (string, error) // GetImageRef gets the reference (digest or ID) of the image which has already been in // the local storage. It returns ("", nil) if the image isn't in the local storage. GetImageRef(image ImageSpec) (string, error) @@ -302,7 +302,7 @@ type PodStatus struct { ContainerStatuses []*Status // Status of the pod sandbox. // Only for kuberuntime now, other runtime may keep it nil. 
- SandboxStatuses []*runtimeapi.PodSandboxStatus + SandboxStatuses []*internalapi.PodSandboxStatus } // Status represents the status of a container. @@ -400,7 +400,7 @@ type Mount struct { // Whether the mount needs SELinux relabeling SELinuxRelabel bool // Requested propagation mode - Propagation runtimeapi.MountPropagation + Propagation internalapi.MountPropagation } // PortMapping contains information about the port mapping. diff --git a/pkg/kubelet/container/testing/fake_runtime.go b/pkg/kubelet/container/testing/fake_runtime.go index 4e50b2c53c71..e51ec08d6a38 100644 --- a/pkg/kubelet/container/testing/fake_runtime.go +++ b/pkg/kubelet/container/testing/fake_runtime.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/flowcontrol" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/volume" ) @@ -297,7 +297,7 @@ func (f *FakeRuntime) GetContainerLogs(_ context.Context, pod *v1.Pod, container return f.Err } -func (f *FakeRuntime) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (f *FakeRuntime) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *internalapi.PodSandboxConfig) (string, error) { f.Lock() defer f.Unlock() diff --git a/pkg/kubelet/container/testing/fake_runtime_helper.go b/pkg/kubelet/container/testing/fake_runtime_helper.go index 0009ee61e0b8..4cab65983d54 100644 --- a/pkg/kubelet/container/testing/fake_runtime_helper.go +++ b/pkg/kubelet/container/testing/fake_runtime_helper.go @@ -19,7 +19,7 @@ package testing import ( "k8s.io/api/core/v1" kubetypes "k8s.io/apimachinery/pkg/types" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer 
"k8s.io/kubernetes/pkg/kubelet/container" ) @@ -46,8 +46,8 @@ func (f *FakeRuntimeHelper) GetPodCgroupParent(pod *v1.Pod) string { return "" } -func (f *FakeRuntimeHelper) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) { - return &runtimeapi.DNSConfig{ +func (f *FakeRuntimeHelper) GetPodDNS(pod *v1.Pod) (*internalapi.DNSConfig, error) { + return &internalapi.DNSConfig{ Servers: f.DNSServers, Searches: f.DNSSearches, Options: f.DNSOptions}, f.Err diff --git a/pkg/kubelet/container/testing/runtime_mock.go b/pkg/kubelet/container/testing/runtime_mock.go index 999d3bda5068..3d4c47fd12b0 100644 --- a/pkg/kubelet/container/testing/runtime_mock.go +++ b/pkg/kubelet/container/testing/runtime_mock.go @@ -28,7 +28,7 @@ import ( types "k8s.io/apimachinery/pkg/types" remotecommand "k8s.io/client-go/tools/remotecommand" flowcontrol "k8s.io/client-go/util/flowcontrol" - v1alpha2 "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + cri "k8s.io/kubernetes/pkg/kubelet/apis/cri" container "k8s.io/kubernetes/pkg/kubelet/container" url "net/url" reflect "reflect" @@ -284,7 +284,7 @@ func (mr *MockRuntimeMockRecorder) DeleteContainer(containerID interface{}) *gom } // PullImage mocks base method -func (m *MockRuntime) PullImage(image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *v1alpha2.PodSandboxConfig) (string, error) { +func (m *MockRuntime) PullImage(image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *cri.PodSandboxConfig) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PullImage", image, pullSecrets, podSandboxConfig) ret0, _ := ret[0].(string) @@ -463,7 +463,7 @@ func (m *MockImageService) EXPECT() *MockImageServiceMockRecorder { } // PullImage mocks base method -func (m *MockImageService) PullImage(image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *v1alpha2.PodSandboxConfig) (string, error) { +func (m *MockImageService) PullImage(image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig 
*cri.PodSandboxConfig) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PullImage", image, pullSecrets, podSandboxConfig) ret0, _ := ret[0].(string) diff --git a/pkg/kubelet/cri/remote/fake/fake_image_service.go b/pkg/kubelet/cri/remote/fake/fake_image_service.go index c85f9ecf9e8a..2469307f3a0b 100644 --- a/pkg/kubelet/cri/remote/fake/fake_image_service.go +++ b/pkg/kubelet/cri/remote/fake/fake_image_service.go @@ -19,17 +19,17 @@ package fake import ( "context" - kubeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" ) // ListImages lists existing images. -func (f *RemoteRuntime) ListImages(ctx context.Context, req *kubeapi.ListImagesRequest) (*kubeapi.ListImagesResponse, error) { +func (f *RemoteRuntime) ListImages(ctx context.Context, req *internalapi.ListImagesRequest) (*internalapi.ListImagesResponse, error) { images, err := f.ImageService.ListImages(req.Filter) if err != nil { return nil, err } - return &kubeapi.ListImagesResponse{ + return &internalapi.ListImagesResponse{ Images: images, }, nil } @@ -37,23 +37,23 @@ func (f *RemoteRuntime) ListImages(ctx context.Context, req *kubeapi.ListImagesR // ImageStatus returns the status of the image. If the image is not // present, returns a response with ImageStatusResponse.Image set to // nil. -func (f *RemoteRuntime) ImageStatus(ctx context.Context, req *kubeapi.ImageStatusRequest) (*kubeapi.ImageStatusResponse, error) { +func (f *RemoteRuntime) ImageStatus(ctx context.Context, req *internalapi.ImageStatusRequest) (*internalapi.ImageStatusResponse, error) { status, err := f.ImageService.ImageStatus(req.Image) if err != nil { return nil, err } - return &kubeapi.ImageStatusResponse{Image: status}, nil + return &internalapi.ImageStatusResponse{Image: status}, nil } // PullImage pulls an image with authentication config. 
-func (f *RemoteRuntime) PullImage(ctx context.Context, req *kubeapi.PullImageRequest) (*kubeapi.PullImageResponse, error) { +func (f *RemoteRuntime) PullImage(ctx context.Context, req *internalapi.PullImageRequest) (*internalapi.PullImageResponse, error) { image, err := f.ImageService.PullImage(req.Image, req.Auth, req.SandboxConfig) if err != nil { return nil, err } - return &kubeapi.PullImageResponse{ + return &internalapi.PullImageResponse{ ImageRef: image, }, nil } @@ -61,21 +61,21 @@ func (f *RemoteRuntime) PullImage(ctx context.Context, req *kubeapi.PullImageReq // RemoveImage removes the image. // This call is idempotent, and must not return an error if the image has // already been removed. -func (f *RemoteRuntime) RemoveImage(ctx context.Context, req *kubeapi.RemoveImageRequest) (*kubeapi.RemoveImageResponse, error) { +func (f *RemoteRuntime) RemoveImage(ctx context.Context, req *internalapi.RemoveImageRequest) (*internalapi.RemoveImageResponse, error) { err := f.ImageService.RemoveImage(req.Image) if err != nil { return nil, err } - return &kubeapi.RemoveImageResponse{}, nil + return &internalapi.RemoveImageResponse{}, nil } // ImageFsInfo returns information of the filesystem that is used to store images. 
-func (f *RemoteRuntime) ImageFsInfo(ctx context.Context, req *kubeapi.ImageFsInfoRequest) (*kubeapi.ImageFsInfoResponse, error) { +func (f *RemoteRuntime) ImageFsInfo(ctx context.Context, req *internalapi.ImageFsInfoRequest) (*internalapi.ImageFsInfoResponse, error) { fsUsage, err := f.ImageService.ImageFsInfo() if err != nil { return nil, err } - return &kubeapi.ImageFsInfoResponse{ImageFilesystems: fsUsage}, nil + return &internalapi.ImageFsInfoResponse{ImageFilesystems: fsUsage}, nil } diff --git a/pkg/kubelet/cri/remote/fake/fake_runtime.go b/pkg/kubelet/cri/remote/fake/fake_runtime.go index 6329480cdc14..40dac1be2bee 100644 --- a/pkg/kubelet/cri/remote/fake/fake_runtime.go +++ b/pkg/kubelet/cri/remote/fake/fake_runtime.go @@ -22,8 +22,8 @@ import ( "time" "google.golang.org/grpc" - kubeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - apitest "k8s.io/cri-api/pkg/apis/testing" + kubeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" + apitest "k8s.io/kubernetes/pkg/kubelet/apis/cri/testing" "k8s.io/kubernetes/pkg/kubelet/cri/remote/util" utilexec "k8s.io/utils/exec" ) @@ -47,8 +47,6 @@ func NewFakeRemoteRuntime() *RemoteRuntime { RuntimeService: fakeRuntimeService, ImageService: fakeImageService, } - kubeapi.RegisterRuntimeServiceServer(f.server, f) - kubeapi.RegisterImageServiceServer(f.server, f) return f } diff --git a/pkg/kubelet/cri/remote/remote_image.go b/pkg/kubelet/cri/remote/remote_image.go index 45845a1efecb..9aaeac1a90d3 100644 --- a/pkg/kubelet/cri/remote/remote_image.go +++ b/pkg/kubelet/cri/remote/remote_image.go @@ -25,19 +25,21 @@ import ( "google.golang.org/grpc" "k8s.io/klog/v2" - internalapi "k8s.io/cri-api/pkg/apis" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtimeapiV1 "k8s.io/cri-api/pkg/apis/runtime/v1" + runtimeapiV1alpha2 "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cri/remote/util" ) // remoteImageService is a gRPC implementation of 
internalapi.ImageManagerService. type remoteImageService struct { - timeout time.Duration - imageClient runtimeapi.ImageServiceClient + timeout time.Duration + imageClientV1alpha2 runtimeapiV1alpha2.ImageServiceClient + imageClientV1 runtimeapiV1.ImageServiceClient } // NewRemoteImageService creates a new internalapi.ImageManagerService. -func NewRemoteImageService(endpoint string, connectionTimeout time.Duration) (internalapi.ImageManagerService, error) { +func NewRemoteImageService(endpoint string, connectionTimeout time.Duration, apiVersion internalapi.APIVersion) (internalapi.ImageManagerService, error) { klog.V(3).InfoS("Connecting to image service", "endpoint", endpoint) addr, dialer, err := util.GetAddressAndDialer(endpoint) if err != nil { @@ -53,35 +55,77 @@ func NewRemoteImageService(endpoint string, connectionTimeout time.Duration) (in return nil, err } - return &remoteImageService{ - timeout: connectionTimeout, - imageClient: runtimeapi.NewImageServiceClient(conn), - }, nil + service := &remoteImageService{timeout: connectionTimeout} + + if apiVersion == internalapi.APIVersionV1 { + service.imageClientV1 = runtimeapiV1.NewImageServiceClient(conn) + } else { + service.imageClientV1alpha2 = runtimeapiV1alpha2.NewImageServiceClient(conn) + } + + return service, nil +} + +// useV1API returns true if the v1 CRI API should be used instead of v1alpha2. +func (r *remoteImageService) useV1API() bool { + return r.imageClientV1 != nil } // ListImages lists available images. 
-func (r *remoteImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) { +func (r *remoteImageService) ListImages(filter *internalapi.ImageFilter) ([]*internalapi.Image, error) { ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.imageClient.ListImages(ctx, &runtimeapi.ListImagesRequest{ - Filter: filter, + if r.useV1API() { + return r.listImagesV1(ctx, filter) + } + + return r.listImagesV1alpha2(ctx, filter) +} + +func (r *remoteImageService) listImagesV1alpha2(ctx context.Context, filter *internalapi.ImageFilter) ([]*internalapi.Image, error) { + resp, err := r.imageClientV1alpha2.ListImages(ctx, &runtimeapiV1alpha2.ListImagesRequest{ + Filter: internalapi.V1alpha2ImageFilter(filter), }) if err != nil { klog.ErrorS(err, "ListImages with filter from image service failed", "filter", filter) return nil, err } - return resp.Images, nil + return internalapi.FromV1alpha2ImageList(resp.Images), nil +} + +func (r *remoteImageService) listImagesV1(ctx context.Context, filter *internalapi.ImageFilter) ([]*internalapi.Image, error) { + resp, err := r.imageClientV1.ListImages(ctx, &runtimeapiV1.ListImagesRequest{ + Filter: internalapi.V1ImageFilter(filter), + }) + if err != nil { + klog.ErrorS(err, "ListImages with filter from image service failed", "filter", filter) + return nil, err + } + + return internalapi.FromV1ImageList(resp.Images), nil } // ImageStatus returns the status of the image. 
-func (r *remoteImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) { +func (r *remoteImageService) ImageStatus(image *internalapi.ImageSpec) (*internalapi.Image, error) { ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.imageClient.ImageStatus(ctx, &runtimeapi.ImageStatusRequest{ - Image: image, + // TODO: for refactoring common code blocks between the cri versions into + // one code block in the internal where possible examples: + // https://github.com/kubernetes/kubernetes/pull/104575/files#r705600987 + // https://github.com/kubernetes/kubernetes/pull/104575/files#r696793706 + if r.useV1API() { + return r.imageStatusV1(ctx, image) + } + + return r.imageStatusV1alpha2(ctx, image) +} + +func (r *remoteImageService) imageStatusV1alpha2(ctx context.Context, image *internalapi.ImageSpec) (*internalapi.Image, error) { + resp, err := r.imageClientV1alpha2.ImageStatus(ctx, &runtimeapiV1alpha2.ImageStatusRequest{ + Image: internalapi.V1alpha2ImageSpec(image), }) if err != nil { klog.ErrorS(err, "Get ImageStatus from image service failed", "image", image.Image) @@ -97,18 +141,67 @@ func (r *remoteImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimea } } - return resp.Image, nil + return internalapi.FromV1alpha2Image(resp.Image), nil +} + +func (r *remoteImageService) imageStatusV1(ctx context.Context, image *internalapi.ImageSpec) (*internalapi.Image, error) { + resp, err := r.imageClientV1.ImageStatus(ctx, &runtimeapiV1.ImageStatusRequest{ + Image: internalapi.V1ImageSpec(image), + }) + if err != nil { + klog.ErrorS(err, "Get ImageStatus from image service failed", "image", image.Image) + return nil, err + } + + if resp.Image != nil { + if resp.Image.Id == "" || resp.Image.Size_ == 0 { + errorMessage := fmt.Sprintf("Id or size of image %q is not set", image.Image) + err := errors.New(errorMessage) + klog.ErrorS(err, "ImageStatus failed", "image", image.Image) + return nil, err + } + } + + return 
internalapi.FromV1Image(resp.Image), nil } // PullImage pulls an image with authentication config. -func (r *remoteImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (r *remoteImageService) PullImage(image *internalapi.ImageSpec, auth *internalapi.AuthConfig, podSandboxConfig *internalapi.PodSandboxConfig) (string, error) { ctx, cancel := getContextWithCancel() defer cancel() - resp, err := r.imageClient.PullImage(ctx, &runtimeapi.PullImageRequest{ - Image: image, - Auth: auth, - SandboxConfig: podSandboxConfig, + if r.useV1API() { + return r.pullImageV1(ctx, image, auth, podSandboxConfig) + } + + return r.pullImageV1alpha2(ctx, image, auth, podSandboxConfig) +} + +func (r *remoteImageService) pullImageV1alpha2(ctx context.Context, image *internalapi.ImageSpec, auth *internalapi.AuthConfig, podSandboxConfig *internalapi.PodSandboxConfig) (string, error) { + resp, err := r.imageClientV1alpha2.PullImage(ctx, &runtimeapiV1alpha2.PullImageRequest{ + Image: internalapi.V1alpha2ImageSpec(image), + Auth: internalapi.V1alpha2AuthConfig(auth), + SandboxConfig: internalapi.V1alpha2PodSandboxConfig(podSandboxConfig), + }) + if err != nil { + klog.ErrorS(err, "PullImage from image service failed", "image", image.Image) + return "", err + } + + if resp.ImageRef == "" { + klog.ErrorS(errors.New("PullImage failed"), "ImageRef of image is not set", "image", image.Image) + errorMessage := fmt.Sprintf("imageRef of image %q is not set", image.Image) + return "", errors.New(errorMessage) + } + + return resp.ImageRef, nil +} + +func (r *remoteImageService) pullImageV1(ctx context.Context, image *internalapi.ImageSpec, auth *internalapi.AuthConfig, podSandboxConfig *internalapi.PodSandboxConfig) (string, error) { + resp, err := r.imageClientV1.PullImage(ctx, &runtimeapiV1.PullImageRequest{ + Image: internalapi.V1ImageSpec(image), + Auth: internalapi.V1AuthConfig(auth), + SandboxConfig: 
internalapi.V1PodSandboxConfig(podSandboxConfig), }) if err != nil { klog.ErrorS(err, "PullImage from image service failed", "image", image.Image) @@ -125,13 +218,19 @@ func (r *remoteImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtim } // RemoveImage removes the image. -func (r *remoteImageService) RemoveImage(image *runtimeapi.ImageSpec) error { +func (r *remoteImageService) RemoveImage(image *internalapi.ImageSpec) (err error) { ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - _, err := r.imageClient.RemoveImage(ctx, &runtimeapi.RemoveImageRequest{ - Image: image, - }) + if r.useV1API() { + _, err = r.imageClientV1.RemoveImage(ctx, &runtimeapiV1.RemoveImageRequest{ + Image: internalapi.V1ImageSpec(image), + }) + } else { + _, err = r.imageClientV1alpha2.RemoveImage(ctx, &runtimeapiV1alpha2.RemoveImageRequest{ + Image: internalapi.V1alpha2ImageSpec(image), + }) + } if err != nil { klog.ErrorS(err, "RemoveImage from image service failed", "image", image.Image) return err @@ -141,16 +240,33 @@ func (r *remoteImageService) RemoveImage(image *runtimeapi.ImageSpec) error { } // ImageFsInfo returns information of the filesystem that is used to store images. -func (r *remoteImageService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) { +func (r *remoteImageService) ImageFsInfo() ([]*internalapi.FilesystemUsage, error) { // Do not set timeout, because `ImageFsInfo` takes time. // TODO(random-liu): Should we assume runtime should cache the result, and set timeout here? 
ctx, cancel := getContextWithCancel() defer cancel() - resp, err := r.imageClient.ImageFsInfo(ctx, &runtimeapi.ImageFsInfoRequest{}) + if r.useV1API() { + return r.imageFsInfoV1(ctx) + } + + return r.imageFsInfoV1alpha2(ctx) +} + +func (r *remoteImageService) imageFsInfoV1alpha2(ctx context.Context) ([]*internalapi.FilesystemUsage, error) { + resp, err := r.imageClientV1alpha2.ImageFsInfo(ctx, &runtimeapiV1alpha2.ImageFsInfoRequest{}) + if err != nil { + klog.ErrorS(err, "ImageFsInfo from image service failed") + return nil, err + } + return internalapi.FromV1alpha2FilesystemUsageList(resp.GetImageFilesystems()), nil +} + +func (r *remoteImageService) imageFsInfoV1(ctx context.Context) ([]*internalapi.FilesystemUsage, error) { + resp, err := r.imageClientV1.ImageFsInfo(ctx, &runtimeapiV1.ImageFsInfoRequest{}) if err != nil { klog.ErrorS(err, "ImageFsInfo from image service failed") return nil, err } - return resp.GetImageFilesystems(), nil + return internalapi.FromV1FilesystemUsageList(resp.GetImageFilesystems()), nil } diff --git a/pkg/kubelet/cri/remote/remote_runtime.go b/pkg/kubelet/cri/remote/remote_runtime.go index 15c84a921f5b..c269ee194125 100644 --- a/pkg/kubelet/cri/remote/remote_runtime.go +++ b/pkg/kubelet/cri/remote/remote_runtime.go @@ -29,8 +29,9 @@ import ( "k8s.io/klog/v2" "k8s.io/component-base/logs/logreduction" - internalapi "k8s.io/cri-api/pkg/apis" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + runtimeapiV1 "k8s.io/cri-api/pkg/apis/runtime/v1" + runtimeapiV1alpha2 "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cri/remote/util" "k8s.io/kubernetes/pkg/probe/exec" utilexec "k8s.io/utils/exec" @@ -38,8 +39,9 @@ import ( // remoteRuntimeService is a gRPC implementation of internalapi.RuntimeService. 
type remoteRuntimeService struct { - timeout time.Duration - runtimeClient runtimeapi.RuntimeServiceClient + timeout time.Duration + runtimeClientV1alpha2 runtimeapiV1alpha2.RuntimeServiceClient + runtimeClientV1 runtimeapiV1.RuntimeServiceClient // Cache last per-container error message to reduce log spam logReduction *logreduction.LogReduction } @@ -50,7 +52,7 @@ const ( ) // NewRemoteRuntimeService creates a new internalapi.RuntimeService. -func NewRemoteRuntimeService(endpoint string, connectionTimeout time.Duration) (internalapi.RuntimeService, error) { +func NewRemoteRuntimeService(endpoint string, connectionTimeout time.Duration, apiVersion internalapi.APIVersion) (internalapi.RuntimeService, error) { klog.V(3).InfoS("Connecting to runtime service", "endpoint", endpoint) addr, dialer, err := util.GetAddressAndDialer(endpoint) if err != nil { @@ -65,21 +67,111 @@ func NewRemoteRuntimeService(endpoint string, connectionTimeout time.Duration) ( return nil, err } - return &remoteRuntimeService{ - timeout: connectionTimeout, - runtimeClient: runtimeapi.NewRuntimeServiceClient(conn), - logReduction: logreduction.NewLogReduction(identicalErrorDelay), - }, nil + service := &remoteRuntimeService{ + timeout: connectionTimeout, + logReduction: logreduction.NewLogReduction(identicalErrorDelay), + } + + if err := service.determineAPIVersion(conn, apiVersion); err != nil { + return nil, err + } + + return service, nil +} + +// determineAPIVersion tries to connect to the remote runtime by using the +// provided apiVersion string. If the string is empty, it tries to use v1 over +// v1alpha2. +// +// A GRPC redial will always use the initially selected (or automatically +// determined) CRI API version. If the redial was due to the container runtime +// being upgraded, then the container runtime must also support the initially +// selected version or the redial is expected to fail, which requires a restart +// of kubelet. 
+func (r *remoteRuntimeService) determineAPIVersion(conn *grpc.ClientConn, apiVersion internalapi.APIVersion) error { + ctx, cancel := getContextWithTimeout(r.timeout) + defer cancel() + + switch apiVersion { + case internalapi.APIVersionV1: + klog.V(2).InfoS("Using selected CRI v1 API") + r.runtimeClientV1 = runtimeapiV1.NewRuntimeServiceClient(conn) + + case internalapi.APIVersionV1alpha2: + klog.V(2).InfoS("Using selected CRI v1alpha2 API") + r.runtimeClientV1alpha2 = runtimeapiV1alpha2.NewRuntimeServiceClient(conn) + + case "": // Automatically determine the API version while preferring V1 + klog.V(4).InfoS("Finding the CRI API version") + r.runtimeClientV1 = runtimeapiV1.NewRuntimeServiceClient(conn) + + if _, err := r.runtimeClientV1.Version(ctx, &runtimeapiV1.VersionRequest{}); err == nil { + klog.V(2).InfoS("Using CRI v1 API") + + } else if status.Code(err) == codes.Unimplemented { + klog.V(2).InfoS("Falling back to CRI v1alpha2 API (deprecated)") + r.runtimeClientV1alpha2 = runtimeapiV1alpha2.NewRuntimeServiceClient(conn) + + } else { + return fmt.Errorf("unable to determine runtime API version: %w", err) + } + + default: + return fmt.Errorf( + "invalid CRI version, must be empty, %q or %q", + internalapi.APIVersionV1, internalapi.APIVersionV1alpha2, + ) + } + + return nil +} + +// useV1API returns true if the v1 CRI API should be used instead of v1alpha2. +func (r *remoteRuntimeService) useV1API() bool { + return r.runtimeClientV1alpha2 == nil +} + +func (r *remoteRuntimeService) APIVersion() internalapi.APIVersion { + if r.useV1API() { + return internalapi.APIVersionV1 + } + return internalapi.APIVersionV1alpha2 } // Version returns the runtime name, runtime version and runtime API version. 
-func (r *remoteRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) { +func (r *remoteRuntimeService) Version(apiVersion string) (*internalapi.VersionResponse, error) { klog.V(10).InfoS("[RemoteRuntimeService] Version", "apiVersion", apiVersion, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - typedVersion, err := r.runtimeClient.Version(ctx, &runtimeapi.VersionRequest{ + if r.useV1API() { + return r.versionV1(ctx, apiVersion) + } + + return r.versionV1alpha2(ctx, apiVersion) +} + +func (r *remoteRuntimeService) versionV1(ctx context.Context, apiVersion string) (*internalapi.VersionResponse, error) { + typedVersion, err := r.runtimeClientV1.Version(ctx, &runtimeapiV1.VersionRequest{ + Version: apiVersion, + }) + if err != nil { + klog.ErrorS(err, "Version from runtime service failed") + return nil, err + } + + klog.V(10).InfoS("[RemoteRuntimeService] Version Response", "apiVersion", typedVersion) + + if typedVersion.Version == "" || typedVersion.RuntimeName == "" || typedVersion.RuntimeApiVersion == "" || typedVersion.RuntimeVersion == "" { + return nil, fmt.Errorf("not all fields are set in VersionResponse (%q)", *typedVersion) + } + + return internalapi.FromV1VersionResponse(typedVersion), err +} + +func (r *remoteRuntimeService) versionV1alpha2(ctx context.Context, apiVersion string) (*internalapi.VersionResponse, error) { + typedVersion, err := r.runtimeClientV1alpha2.Version(ctx, &runtimeapiV1alpha2.VersionRequest{ Version: apiVersion, }) if err != nil { @@ -93,12 +185,12 @@ func (r *remoteRuntimeService) Version(apiVersion string) (*runtimeapi.VersionRe return nil, fmt.Errorf("not all fields are set in VersionResponse (%q)", *typedVersion) } - return typedVersion, err + return internalapi.FromV1alpha2VersionResponse(typedVersion), err } // RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure // the sandbox is in ready state. 
-func (r *remoteRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) { +func (r *remoteRuntimeService) RunPodSandbox(config *internalapi.PodSandboxConfig, runtimeHandler string) (string, error) { // Use 2 times longer timeout for sandbox operation (4 mins by default) // TODO: Make the pod sandbox timeout configurable. timeout := r.timeout * 2 @@ -108,38 +200,60 @@ func (r *remoteRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig ctx, cancel := getContextWithTimeout(timeout) defer cancel() - resp, err := r.runtimeClient.RunPodSandbox(ctx, &runtimeapi.RunPodSandboxRequest{ - Config: config, - RuntimeHandler: runtimeHandler, - }) - if err != nil { - klog.ErrorS(err, "RunPodSandbox from runtime service failed") - return "", err + var podSandboxID string + if r.useV1API() { + resp, err := r.runtimeClientV1.RunPodSandbox(ctx, &runtimeapiV1.RunPodSandboxRequest{ + Config: internalapi.V1PodSandboxConfig(config), + RuntimeHandler: runtimeHandler, + }) + + if err != nil { + klog.ErrorS(err, "RunPodSandbox from runtime service failed") + return "", err + } + podSandboxID = resp.PodSandboxId + } else { + resp, err := r.runtimeClientV1alpha2.RunPodSandbox(ctx, &runtimeapiV1alpha2.RunPodSandboxRequest{ + Config: internalapi.V1alpha2PodSandboxConfig(config), + RuntimeHandler: runtimeHandler, + }) + + if err != nil { + klog.ErrorS(err, "RunPodSandbox from runtime service failed") + return "", err + } + podSandboxID = resp.PodSandboxId } - if resp.PodSandboxId == "" { - errorMessage := fmt.Sprintf("PodSandboxId is not set for sandbox %q", config.GetMetadata()) + if podSandboxID == "" { + errorMessage := fmt.Sprintf("PodSandboxId is not set for sandbox %q", config.Metadata) err := errors.New(errorMessage) klog.ErrorS(err, "RunPodSandbox failed") return "", err } - klog.V(10).InfoS("[RemoteRuntimeService] RunPodSandbox Response", "podSandboxID", resp.PodSandboxId) + klog.V(10).InfoS("[RemoteRuntimeService] 
RunPodSandbox Response", "podSandboxID", podSandboxID) - return resp.PodSandboxId, nil + return podSandboxID, nil } // StopPodSandbox stops the sandbox. If there are any running containers in the // sandbox, they should be forced to termination. -func (r *remoteRuntimeService) StopPodSandbox(podSandBoxID string) error { +func (r *remoteRuntimeService) StopPodSandbox(podSandBoxID string) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] StopPodSandbox", "podSandboxID", podSandBoxID, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - _, err := r.runtimeClient.StopPodSandbox(ctx, &runtimeapi.StopPodSandboxRequest{ - PodSandboxId: podSandBoxID, - }) + if r.useV1API() { + _, err = r.runtimeClientV1.StopPodSandbox(ctx, &runtimeapiV1.StopPodSandboxRequest{ + PodSandboxId: podSandBoxID, + }) + } else { + _, err = r.runtimeClientV1alpha2.StopPodSandbox(ctx, &runtimeapiV1alpha2.StopPodSandboxRequest{ + PodSandboxId: podSandBoxID, + }) + } if err != nil { klog.ErrorS(err, "StopPodSandbox from runtime service failed", "podSandboxID", podSandBoxID) return err @@ -152,14 +266,20 @@ func (r *remoteRuntimeService) StopPodSandbox(podSandBoxID string) error { // RemovePodSandbox removes the sandbox. If there are any containers in the // sandbox, they should be forcibly removed. 
-func (r *remoteRuntimeService) RemovePodSandbox(podSandBoxID string) error { +func (r *remoteRuntimeService) RemovePodSandbox(podSandBoxID string) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] RemovePodSandbox", "podSandboxID", podSandBoxID, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - _, err := r.runtimeClient.RemovePodSandbox(ctx, &runtimeapi.RemovePodSandboxRequest{ - PodSandboxId: podSandBoxID, - }) + if r.useV1API() { + _, err = r.runtimeClientV1.RemovePodSandbox(ctx, &runtimeapiV1.RemovePodSandboxRequest{ + PodSandboxId: podSandBoxID, + }) + } else { + _, err = r.runtimeClientV1alpha2.RemovePodSandbox(ctx, &runtimeapiV1alpha2.RemovePodSandboxRequest{ + PodSandboxId: podSandBoxID, + }) + } if err != nil { klog.ErrorS(err, "RemovePodSandbox from runtime service failed", "podSandboxID", podSandBoxID) return err @@ -171,12 +291,20 @@ func (r *remoteRuntimeService) RemovePodSandbox(podSandBoxID string) error { } // PodSandboxStatus returns the status of the PodSandbox. 
-func (r *remoteRuntimeService) PodSandboxStatus(podSandBoxID string) (*runtimeapi.PodSandboxStatus, error) { +func (r *remoteRuntimeService) PodSandboxStatus(podSandBoxID string) (*internalapi.PodSandboxStatus, error) { klog.V(10).InfoS("[RemoteRuntimeService] PodSandboxStatus", "podSandboxID", podSandBoxID, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.PodSandboxStatus(ctx, &runtimeapi.PodSandboxStatusRequest{ + if r.useV1API() { + return r.podSandboxStatusV1(ctx, podSandBoxID) + } + + return r.podSandboxStatusV1alpha2(ctx, podSandBoxID) +} + +func (r *remoteRuntimeService) podSandboxStatusV1alpha2(ctx context.Context, podSandBoxID string) (*internalapi.PodSandboxStatus, error) { + resp, err := r.runtimeClientV1alpha2.PodSandboxStatus(ctx, &runtimeapiV1alpha2.PodSandboxStatusRequest{ PodSandboxId: podSandBoxID, }) if err != nil { @@ -185,23 +313,52 @@ func (r *remoteRuntimeService) PodSandboxStatus(podSandBoxID string) (*runtimeap klog.V(10).InfoS("[RemoteRuntimeService] PodSandboxStatus Response", "podSandboxID", podSandBoxID, "status", resp.Status) + status := internalapi.FromV1alpha2PodSandboxStatus(resp.Status) if resp.Status != nil { - if err := verifySandboxStatus(resp.Status); err != nil { + if err := verifySandboxStatus(status); err != nil { return nil, err } } - return resp.Status, nil + return status, nil +} + +func (r *remoteRuntimeService) podSandboxStatusV1(ctx context.Context, podSandBoxID string) (*internalapi.PodSandboxStatus, error) { + resp, err := r.runtimeClientV1.PodSandboxStatus(ctx, &runtimeapiV1.PodSandboxStatusRequest{ + PodSandboxId: podSandBoxID, + }) + if err != nil { + return nil, err + } + + klog.V(10).InfoS("[RemoteRuntimeService] PodSandboxStatus Response", "podSandboxID", podSandBoxID, "status", resp.Status) + + status := internalapi.FromV1PodSandboxStatus(resp.Status) + if resp.Status != nil { + if err := verifySandboxStatus(status); err != nil { + return nil, 
err + } + } + + return status, nil } // ListPodSandbox returns a list of PodSandboxes. -func (r *remoteRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) { +func (r *remoteRuntimeService) ListPodSandbox(filter *internalapi.PodSandboxFilter) ([]*internalapi.PodSandbox, error) { klog.V(10).InfoS("[RemoteRuntimeService] ListPodSandbox", "filter", filter, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.ListPodSandbox(ctx, &runtimeapi.ListPodSandboxRequest{ - Filter: filter, + if r.useV1API() { + return r.listPodSandboxV1(ctx, filter) + } + + return r.listPodSandboxV1alpha2(ctx, filter) +} + +func (r *remoteRuntimeService) listPodSandboxV1alpha2(ctx context.Context, filter *internalapi.PodSandboxFilter) ([]*internalapi.PodSandbox, error) { + resp, err := r.runtimeClientV1alpha2.ListPodSandbox(ctx, &runtimeapiV1alpha2.ListPodSandboxRequest{ + Filter: internalapi.V1alpha2PodSandboxFilter(filter), }) if err != nil { klog.ErrorS(err, "ListPodSandbox with filter from runtime service failed", "filter", filter) @@ -210,19 +367,41 @@ func (r *remoteRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilte klog.V(10).InfoS("[RemoteRuntimeService] ListPodSandbox Response", "filter", filter, "items", resp.Items) - return resp.Items, nil + return internalapi.FromV1alpha2PodSandboxes(resp.Items), nil +} + +func (r *remoteRuntimeService) listPodSandboxV1(ctx context.Context, filter *internalapi.PodSandboxFilter) ([]*internalapi.PodSandbox, error) { + resp, err := r.runtimeClientV1.ListPodSandbox(ctx, &runtimeapiV1.ListPodSandboxRequest{ + Filter: internalapi.V1PodSandboxFilter(filter), + }) + if err != nil { + klog.ErrorS(err, "ListPodSandbox with filter from runtime service failed", "filter", filter) + return nil, err + } + + klog.V(10).InfoS("[RemoteRuntimeService] ListPodSandbox Response", "filter", filter, "items", resp.Items) + + return 
internalapi.FromV1PodSandboxes(resp.Items), nil } // CreateContainer creates a new container in the specified PodSandbox. -func (r *remoteRuntimeService) CreateContainer(podSandBoxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (r *remoteRuntimeService) CreateContainer(podSandBoxID string, config *internalapi.ContainerConfig, sandboxConfig *internalapi.PodSandboxConfig) (string, error) { klog.V(10).InfoS("[RemoteRuntimeService] CreateContainer", "podSandboxID", podSandBoxID, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.CreateContainer(ctx, &runtimeapi.CreateContainerRequest{ + if r.useV1API() { + return r.createContainerV1(ctx, podSandBoxID, config, sandboxConfig) + } + + return r.createContainerV1alpha2(ctx, podSandBoxID, config, sandboxConfig) +} + +func (r *remoteRuntimeService) createContainerV1alpha2(ctx context.Context, podSandBoxID string, config *internalapi.ContainerConfig, sandboxConfig *internalapi.PodSandboxConfig) (string, error) { + resp, err := r.runtimeClientV1alpha2.CreateContainer(ctx, &runtimeapiV1alpha2.CreateContainerRequest{ PodSandboxId: podSandBoxID, - Config: config, - SandboxConfig: sandboxConfig, + Config: internalapi.V1alpha2ContainerConfig(config), + SandboxConfig: internalapi.V1alpha2PodSandboxConfig(sandboxConfig), }) if err != nil { klog.ErrorS(err, "CreateContainer in sandbox from runtime service failed", "podSandboxID", podSandBoxID) @@ -231,7 +410,29 @@ func (r *remoteRuntimeService) CreateContainer(podSandBoxID string, config *runt klog.V(10).InfoS("[RemoteRuntimeService] CreateContainer", "podSandboxID", podSandBoxID, "containerID", resp.ContainerId) if resp.ContainerId == "" { - errorMessage := fmt.Sprintf("ContainerId is not set for container %q", config.GetMetadata()) + errorMessage := fmt.Sprintf("ContainerId is not set for container %q", config.Metadata) + err := errors.New(errorMessage) 
+ klog.ErrorS(err, "CreateContainer failed") + return "", err + } + + return resp.ContainerId, nil +} + +func (r *remoteRuntimeService) createContainerV1(ctx context.Context, podSandBoxID string, config *internalapi.ContainerConfig, sandboxConfig *internalapi.PodSandboxConfig) (string, error) { + resp, err := r.runtimeClientV1.CreateContainer(ctx, &runtimeapiV1.CreateContainerRequest{ + PodSandboxId: podSandBoxID, + Config: internalapi.V1ContainerConfig(config), + SandboxConfig: internalapi.V1PodSandboxConfig(sandboxConfig), + }) + if err != nil { + klog.ErrorS(err, "CreateContainer in sandbox from runtime service failed", "podSandboxID", podSandBoxID) + return "", err + } + + klog.V(10).InfoS("[RemoteRuntimeService] CreateContainer", "podSandboxID", podSandBoxID, "containerID", resp.ContainerId) + if resp.ContainerId == "" { + errorMessage := fmt.Sprintf("ContainerId is not set for container %q", config.Metadata) err := errors.New(errorMessage) klog.ErrorS(err, "CreateContainer failed") return "", err @@ -241,14 +442,21 @@ func (r *remoteRuntimeService) CreateContainer(podSandBoxID string, config *runt } // StartContainer starts the container. 
-func (r *remoteRuntimeService) StartContainer(containerID string) error { +func (r *remoteRuntimeService) StartContainer(containerID string) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] StartContainer", "containerID", containerID, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - _, err := r.runtimeClient.StartContainer(ctx, &runtimeapi.StartContainerRequest{ - ContainerId: containerID, - }) + if r.useV1API() { + _, err = r.runtimeClientV1.StartContainer(ctx, &runtimeapiV1.StartContainerRequest{ + ContainerId: containerID, + }) + } else { + _, err = r.runtimeClientV1alpha2.StartContainer(ctx, &runtimeapiV1alpha2.StartContainerRequest{ + ContainerId: containerID, + }) + } + if err != nil { klog.ErrorS(err, "StartContainer from runtime service failed", "containerID", containerID) return err @@ -259,7 +467,7 @@ func (r *remoteRuntimeService) StartContainer(containerID string) error { } // StopContainer stops a running container with a grace period (i.e., timeout). -func (r *remoteRuntimeService) StopContainer(containerID string, timeout int64) error { +func (r *remoteRuntimeService) StopContainer(containerID string, timeout int64) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] StopContainer", "containerID", containerID, "timeout", timeout) // Use timeout + default timeout (2 minutes) as timeout to leave extra time // for SIGKILL container and request latency. 
@@ -268,10 +476,18 @@ func (r *remoteRuntimeService) StopContainer(containerID string, timeout int64) defer cancel() r.logReduction.ClearID(containerID) - _, err := r.runtimeClient.StopContainer(ctx, &runtimeapi.StopContainerRequest{ - ContainerId: containerID, - Timeout: timeout, - }) + + if r.useV1API() { + _, err = r.runtimeClientV1.StopContainer(ctx, &runtimeapiV1.StopContainerRequest{ + ContainerId: containerID, + Timeout: timeout, + }) + } else { + _, err = r.runtimeClientV1alpha2.StopContainer(ctx, &runtimeapiV1alpha2.StopContainerRequest{ + ContainerId: containerID, + Timeout: timeout, + }) + } if err != nil { klog.ErrorS(err, "StopContainer from runtime service failed", "containerID", containerID) return err @@ -283,15 +499,21 @@ func (r *remoteRuntimeService) StopContainer(containerID string, timeout int64) // RemoveContainer removes the container. If the container is running, the container // should be forced to removal. -func (r *remoteRuntimeService) RemoveContainer(containerID string) error { +func (r *remoteRuntimeService) RemoveContainer(containerID string) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] RemoveContainer", "containerID", containerID, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() r.logReduction.ClearID(containerID) - _, err := r.runtimeClient.RemoveContainer(ctx, &runtimeapi.RemoveContainerRequest{ - ContainerId: containerID, - }) + if r.useV1API() { + _, err = r.runtimeClientV1.RemoveContainer(ctx, &runtimeapiV1.RemoveContainerRequest{ + ContainerId: containerID, + }) + } else { + _, err = r.runtimeClientV1alpha2.RemoveContainer(ctx, &runtimeapiV1alpha2.RemoveContainerRequest{ + ContainerId: containerID, + }) + } if err != nil { klog.ErrorS(err, "RemoveContainer from runtime service failed", "containerID", containerID) return err @@ -302,13 +524,34 @@ func (r *remoteRuntimeService) RemoveContainer(containerID string) error { } // ListContainers lists containers by filters. 
-func (r *remoteRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) { +func (r *remoteRuntimeService) ListContainers(filter *internalapi.ContainerFilter) ([]*internalapi.Container, error) { klog.V(10).InfoS("[RemoteRuntimeService] ListContainers", "filter", filter, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.ListContainers(ctx, &runtimeapi.ListContainersRequest{ - Filter: filter, + if r.useV1API() { + return r.listContainersV1(ctx, filter) + } + + return r.listContainersV1alpha2(ctx, filter) +} + +func (r *remoteRuntimeService) listContainersV1alpha2(ctx context.Context, filter *internalapi.ContainerFilter) ([]*internalapi.Container, error) { + resp, err := r.runtimeClientV1alpha2.ListContainers(ctx, &runtimeapiV1alpha2.ListContainersRequest{ + Filter: internalapi.V1alpha2ContainerFilter(filter), + }) + if err != nil { + klog.ErrorS(err, "ListContainers with filter from runtime service failed", "filter", filter) + return nil, err + } + klog.V(10).InfoS("[RemoteRuntimeService] ListContainers Response", "filter", filter, "containers", resp.Containers) + + return internalapi.FromV1alpha2Containers(resp.Containers), nil +} + +func (r *remoteRuntimeService) listContainersV1(ctx context.Context, filter *internalapi.ContainerFilter) ([]*internalapi.Container, error) { + resp, err := r.runtimeClientV1.ListContainers(ctx, &runtimeapiV1.ListContainersRequest{ + Filter: internalapi.V1ContainerFilter(filter), }) if err != nil { klog.ErrorS(err, "ListContainers with filter from runtime service failed", "filter", filter) @@ -316,16 +559,49 @@ func (r *remoteRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter } klog.V(10).InfoS("[RemoteRuntimeService] ListContainers Response", "filter", filter, "containers", resp.Containers) - return resp.Containers, nil + return internalapi.FromV1Containers(resp.Containers), nil } // ContainerStatus returns the 
container status. -func (r *remoteRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) { +func (r *remoteRuntimeService) ContainerStatus(containerID string) (*internalapi.ContainerStatus, error) { klog.V(10).InfoS("[RemoteRuntimeService] ContainerStatus", "containerID", containerID, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.ContainerStatus(ctx, &runtimeapi.ContainerStatusRequest{ + if r.useV1API() { + return r.containerStatusV1(ctx, containerID) + } + + return r.containerStatusV1alpha2(ctx, containerID) +} + +func (r *remoteRuntimeService) containerStatusV1alpha2(ctx context.Context, containerID string) (*internalapi.ContainerStatus, error) { + resp, err := r.runtimeClientV1alpha2.ContainerStatus(ctx, &runtimeapiV1alpha2.ContainerStatusRequest{ + ContainerId: containerID, + }) + if err != nil { + // Don't spam the log with endless messages about the same failure. + if r.logReduction.ShouldMessageBePrinted(err.Error(), containerID) { + klog.ErrorS(err, "ContainerStatus from runtime service failed", "containerID", containerID) + } + return nil, err + } + r.logReduction.ClearID(containerID) + klog.V(10).InfoS("[RemoteRuntimeService] ContainerStatus Response", "containerID", containerID, "status", resp.Status) + + status := internalapi.FromV1alpha2ContainerStatus(resp.Status) + if resp.Status != nil { + if err := verifyContainerStatus(status); err != nil { + klog.ErrorS(err, "verify ContainerStatus failed", "containerID", containerID) + return nil, err + } + } + + return status, nil +} + +func (r *remoteRuntimeService) containerStatusV1(ctx context.Context, containerID string) (*internalapi.ContainerStatus, error) { + resp, err := r.runtimeClientV1.ContainerStatus(ctx, &runtimeapiV1.ContainerStatusRequest{ ContainerId: containerID, }) if err != nil { @@ -338,26 +614,34 @@ func (r *remoteRuntimeService) ContainerStatus(containerID string) (*runtimeapi. 
r.logReduction.ClearID(containerID) klog.V(10).InfoS("[RemoteRuntimeService] ContainerStatus Response", "containerID", containerID, "status", resp.Status) + status := internalapi.FromV1ContainerStatus(resp.Status) if resp.Status != nil { - if err := verifyContainerStatus(resp.Status); err != nil { + if err := verifyContainerStatus(status); err != nil { klog.ErrorS(err, "verify ContainerStatus failed", "containerID", containerID) return nil, err } } - return resp.Status, nil + return status, nil } // UpdateContainerResources updates a containers resource config -func (r *remoteRuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources) error { +func (r *remoteRuntimeService) UpdateContainerResources(containerID string, resources *internalapi.LinuxContainerResources) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] UpdateContainerResources", "containerID", containerID, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - _, err := r.runtimeClient.UpdateContainerResources(ctx, &runtimeapi.UpdateContainerResourcesRequest{ - ContainerId: containerID, - Linux: resources, - }) + if r.useV1API() { + _, err = r.runtimeClientV1.UpdateContainerResources(ctx, &runtimeapiV1.UpdateContainerResourcesRequest{ + ContainerId: containerID, + Linux: internalapi.V1ContainerResources(resources), + }) + } else { + _, err = r.runtimeClientV1alpha2.UpdateContainerResources(ctx, &runtimeapiV1alpha2.UpdateContainerResourcesRequest{ + ContainerId: containerID, + Linux: internalapi.V1alpha2ContainerResources(resources), + }) + } if err != nil { klog.ErrorS(err, "UpdateContainerResources from runtime service failed", "containerID", containerID) return err @@ -383,13 +667,52 @@ func (r *remoteRuntimeService) ExecSync(containerID string, cmd []string, timeou } defer cancel() + if r.useV1API() { + return r.execSyncV1(ctx, containerID, cmd, timeout) + } + + return r.execSyncV1alpha2(ctx, containerID, cmd, 
timeout) +} + +func (r *remoteRuntimeService) execSyncV1alpha2(ctx context.Context, containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) { timeoutSeconds := int64(timeout.Seconds()) - req := &runtimeapi.ExecSyncRequest{ + req := &runtimeapiV1alpha2.ExecSyncRequest{ ContainerId: containerID, Cmd: cmd, Timeout: timeoutSeconds, } - resp, err := r.runtimeClient.ExecSync(ctx, req) + resp, err := r.runtimeClientV1alpha2.ExecSync(ctx, req) + if err != nil { + klog.ErrorS(err, "ExecSync cmd from runtime service failed", "containerID", containerID, "cmd", cmd) + + // interpret DeadlineExceeded gRPC errors as timedout probes + if status.Code(err) == codes.DeadlineExceeded { + err = exec.NewTimeoutError(fmt.Errorf("command %q timed out", strings.Join(cmd, " ")), timeout) + } + + return nil, nil, err + } + + klog.V(10).InfoS("[RemoteRuntimeService] ExecSync Response", "containerID", containerID, "exitCode", resp.ExitCode) + err = nil + if resp.ExitCode != 0 { + err = utilexec.CodeExitError{ + Err: fmt.Errorf("command '%s' exited with %d: %s", strings.Join(cmd, " "), resp.ExitCode, resp.Stderr), + Code: int(resp.ExitCode), + } + } + + return resp.Stdout, resp.Stderr, err +} + +func (r *remoteRuntimeService) execSyncV1(ctx context.Context, containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) { + timeoutSeconds := int64(timeout.Seconds()) + req := &runtimeapiV1.ExecSyncRequest{ + ContainerId: containerID, + Cmd: cmd, + Timeout: timeoutSeconds, + } + resp, err := r.runtimeClientV1.ExecSync(ctx, req) if err != nil { klog.ErrorS(err, "ExecSync cmd from runtime service failed", "containerID", containerID, "cmd", cmd) @@ -414,12 +737,20 @@ func (r *remoteRuntimeService) ExecSync(containerID string, cmd []string, timeou } // Exec prepares a streaming endpoint to execute a command in the container, and returns the address. 
-func (r *remoteRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { +func (r *remoteRuntimeService) Exec(req *internalapi.ExecRequest) (*internalapi.ExecResponse, error) { klog.V(10).InfoS("[RemoteRuntimeService] Exec", "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.Exec(ctx, req) + if r.useV1API() { + return r.execV1(ctx, req) + } + + return r.execV1alpha2(ctx, req) +} + +func (r *remoteRuntimeService) execV1alpha2(ctx context.Context, req *internalapi.ExecRequest) (*internalapi.ExecResponse, error) { + resp, err := r.runtimeClientV1alpha2.Exec(ctx, internalapi.V1alpha2ExecRequest(req)) if err != nil { klog.ErrorS(err, "Exec cmd from runtime service failed", "containerID", req.ContainerId, "cmd", req.Cmd) return nil, err @@ -433,16 +764,59 @@ func (r *remoteRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.Ex return nil, err } - return resp, nil + return internalapi.FromV1alpha2ExecResponse(resp), nil +} + +func (r *remoteRuntimeService) execV1(ctx context.Context, req *internalapi.ExecRequest) (*internalapi.ExecResponse, error) { + resp, err := r.runtimeClientV1.Exec(ctx, internalapi.V1ExecRequest(req)) + if err != nil { + klog.ErrorS(err, "Exec cmd from runtime service failed", "containerID", req.ContainerId, "cmd", req.Cmd) + return nil, err + } + klog.V(10).InfoS("[RemoteRuntimeService] Exec Response") + + if resp.Url == "" { + errorMessage := "URL is not set" + err := errors.New(errorMessage) + klog.ErrorS(err, "Exec failed") + return nil, err + } + + return internalapi.FromV1ExecResponse(resp), nil } // Attach prepares a streaming endpoint to attach to a running container, and returns the address. 
-func (r *remoteRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { +func (r *remoteRuntimeService) Attach(req *internalapi.AttachRequest) (*internalapi.AttachResponse, error) { klog.V(10).InfoS("[RemoteRuntimeService] Attach", "containerID", req.ContainerId, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.Attach(ctx, req) + if r.useV1API() { + return r.attachV1(ctx, req) + } + + return r.attachV1alpha2(ctx, req) +} + +func (r *remoteRuntimeService) attachV1alpha2(ctx context.Context, req *internalapi.AttachRequest) (*internalapi.AttachResponse, error) { + resp, err := r.runtimeClientV1alpha2.Attach(ctx, internalapi.V1alpha2AttachRequest(req)) + if err != nil { + klog.ErrorS(err, "Attach container from runtime service failed", "containerID", req.ContainerId) + return nil, err + } + klog.V(10).InfoS("[RemoteRuntimeService] Attach Response", "containerID", req.ContainerId) + + if resp.Url == "" { + errorMessage := "URL is not set" + err := errors.New(errorMessage) + klog.ErrorS(err, "Attach failed") + return nil, err + } + return internalapi.FromV1alpha2AttachResponse(resp), nil +} + +func (r *remoteRuntimeService) attachV1(ctx context.Context, req *internalapi.AttachRequest) (*internalapi.AttachResponse, error) { + resp, err := r.runtimeClientV1.Attach(ctx, internalapi.V1AttachRequest(req)) if err != nil { klog.ErrorS(err, "Attach container from runtime service failed", "containerID", req.ContainerId) return nil, err @@ -455,16 +829,42 @@ func (r *remoteRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeap klog.ErrorS(err, "Attach failed") return nil, err } - return resp, nil + return internalapi.FromV1AttachResponse(resp), nil } // PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address. 
-func (r *remoteRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { +func (r *remoteRuntimeService) PortForward(req *internalapi.PortForwardRequest) (*internalapi.PortForwardResponse, error) { klog.V(10).InfoS("[RemoteRuntimeService] PortForward", "podSandboxID", req.PodSandboxId, "port", req.Port, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.PortForward(ctx, req) + if r.useV1API() { + return r.portForwardV1(ctx, req) + } + + return r.portForwardV1alpha2(ctx, req) +} + +func (r *remoteRuntimeService) portForwardV1alpha2(ctx context.Context, req *internalapi.PortForwardRequest) (*internalapi.PortForwardResponse, error) { + resp, err := r.runtimeClientV1alpha2.PortForward(ctx, internalapi.V1alpha2PortForwardRequest(req)) + if err != nil { + klog.ErrorS(err, "PortForward from runtime service failed", "podSandboxID", req.PodSandboxId) + return nil, err + } + klog.V(10).InfoS("[RemoteRuntimeService] PortForward Response", "podSandboxID", req.PodSandboxId) + + if resp.Url == "" { + errorMessage := "URL is not set" + err := errors.New(errorMessage) + klog.ErrorS(err, "PortForward failed") + return nil, err + } + + return internalapi.FromV1alpha2PortForwardResponse(resp), nil +} + +func (r *remoteRuntimeService) portForwardV1(ctx context.Context, req *internalapi.PortForwardRequest) (*internalapi.PortForwardResponse, error) { + resp, err := r.runtimeClientV1.PortForward(ctx, internalapi.V1PortForwardRequest(req)) if err != nil { klog.ErrorS(err, "PortForward from runtime service failed", "podSandboxID", req.PodSandboxId) return nil, err @@ -478,13 +878,13 @@ func (r *remoteRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) ( return nil, err } - return resp, nil + return internalapi.FromV1PortForwardResponse(resp), nil } // UpdateRuntimeConfig updates the config of a runtime service. 
The only // update payload currently supported is the pod CIDR assigned to a node, // and the runtime service just proxies it down to the network plugin. -func (r *remoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error { +func (r *remoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *internalapi.RuntimeConfig) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] UpdateRuntimeConfig", "runtimeConfig", runtimeConfig, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() @@ -492,9 +892,15 @@ func (r *remoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.Run // Response doesn't contain anything of interest. This translates to an // Event notification to the network plugin, which can't fail, so we're // really looking to surface destination unreachable. - _, err := r.runtimeClient.UpdateRuntimeConfig(ctx, &runtimeapi.UpdateRuntimeConfigRequest{ - RuntimeConfig: runtimeConfig, - }) + if r.useV1API() { + _, err = r.runtimeClientV1.UpdateRuntimeConfig(ctx, &runtimeapiV1.UpdateRuntimeConfigRequest{ + RuntimeConfig: internalapi.V1RuntimeConfig(runtimeConfig), + }) + } else { + _, err = r.runtimeClientV1alpha2.UpdateRuntimeConfig(ctx, &runtimeapiV1alpha2.UpdateRuntimeConfigRequest{ + RuntimeConfig: internalapi.V1alpha2RuntimeConfig(runtimeConfig), + }) + } if err != nil { return err @@ -505,12 +911,20 @@ func (r *remoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.Run } // Status returns the status of the runtime. 
-func (r *remoteRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) { +func (r *remoteRuntimeService) Status() (*internalapi.RuntimeStatus, error) { klog.V(10).InfoS("[RemoteRuntimeService] Status", "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.Status(ctx, &runtimeapi.StatusRequest{}) + if r.useV1API() { + return r.statusV1(ctx) + } + + return r.statusV1alpha2(ctx) +} + +func (r *remoteRuntimeService) statusV1alpha2(ctx context.Context) (*internalapi.RuntimeStatus, error) { + resp, err := r.runtimeClientV1alpha2.Status(ctx, &runtimeapiV1alpha2.StatusRequest{}) if err != nil { klog.ErrorS(err, "Status from runtime service failed") return nil, err @@ -525,16 +939,43 @@ func (r *remoteRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) { return nil, err } - return resp.Status, nil + return internalapi.FromV1alpha2RuntimeStatus(resp.Status), nil +} + +func (r *remoteRuntimeService) statusV1(ctx context.Context) (*internalapi.RuntimeStatus, error) { + resp, err := r.runtimeClientV1.Status(ctx, &runtimeapiV1.StatusRequest{}) + if err != nil { + klog.ErrorS(err, "Status from runtime service failed") + return nil, err + } + + klog.V(10).InfoS("[RemoteRuntimeService] Status Response", "status", resp.Status) + + if resp.Status == nil || len(resp.Status.Conditions) < 2 { + errorMessage := "RuntimeReady or NetworkReady condition are not set" + err := errors.New(errorMessage) + klog.ErrorS(err, "Status failed") + return nil, err + } + + return internalapi.FromV1RuntimeStatus(resp.Status), nil } // ContainerStats returns the stats of the container. 
-func (r *remoteRuntimeService) ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) { +func (r *remoteRuntimeService) ContainerStats(containerID string) (*internalapi.ContainerStats, error) { klog.V(10).InfoS("[RemoteRuntimeService] ContainerStats", "containerID", containerID, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.ContainerStats(ctx, &runtimeapi.ContainerStatsRequest{ + if r.useV1API() { + return r.containerStatsV1(ctx, containerID) + } + + return r.containerStatsV1alpha2(ctx, containerID) +} + +func (r *remoteRuntimeService) containerStatsV1alpha2(ctx context.Context, containerID string) (*internalapi.ContainerStats, error) { + resp, err := r.runtimeClientV1alpha2.ContainerStats(ctx, &runtimeapiV1alpha2.ContainerStatsRequest{ ContainerId: containerID, }) if err != nil { @@ -546,19 +987,43 @@ func (r *remoteRuntimeService) ContainerStats(containerID string) (*runtimeapi.C r.logReduction.ClearID(containerID) klog.V(10).InfoS("[RemoteRuntimeService] ContainerStats Response", "containerID", containerID, "stats", resp.GetStats()) - return resp.GetStats(), nil + return internalapi.FromV1alpha2ContainerStats(resp.GetStats()), nil +} + +func (r *remoteRuntimeService) containerStatsV1(ctx context.Context, containerID string) (*internalapi.ContainerStats, error) { + resp, err := r.runtimeClientV1.ContainerStats(ctx, &runtimeapiV1.ContainerStatsRequest{ + ContainerId: containerID, + }) + if err != nil { + if r.logReduction.ShouldMessageBePrinted(err.Error(), containerID) { + klog.ErrorS(err, "ContainerStats from runtime service failed", "containerID", containerID) + } + return nil, err + } + r.logReduction.ClearID(containerID) + klog.V(10).InfoS("[RemoteRuntimeService] ContainerStats Response", "containerID", containerID, "stats", resp.GetStats()) + + return internalapi.FromV1ContainerStats(resp.GetStats()), nil } // ListContainerStats returns the list of ContainerStats given the 
filter. -func (r *remoteRuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { +func (r *remoteRuntimeService) ListContainerStats(filter *internalapi.ContainerStatsFilter) ([]*internalapi.ContainerStats, error) { klog.V(10).InfoS("[RemoteRuntimeService] ListContainerStats", "filter", filter) // Do not set timeout, because writable layer stats collection takes time. // TODO(random-liu): Should we assume runtime should cache the result, and set timeout here? ctx, cancel := getContextWithCancel() defer cancel() - resp, err := r.runtimeClient.ListContainerStats(ctx, &runtimeapi.ListContainerStatsRequest{ - Filter: filter, + if r.useV1API() { + return r.listContainerStatsV1(ctx, filter) + } + + return r.listContainerStatsV1alpha2(ctx, filter) +} + +func (r *remoteRuntimeService) listContainerStatsV1alpha2(ctx context.Context, filter *internalapi.ContainerStatsFilter) ([]*internalapi.ContainerStats, error) { + resp, err := r.runtimeClientV1alpha2.ListContainerStats(ctx, &runtimeapiV1alpha2.ListContainerStatsRequest{ + Filter: internalapi.V1alpha2ContainerStatsFilter(filter), }) if err != nil { klog.ErrorS(err, "ListContainerStats with filter from runtime service failed", "filter", filter) @@ -566,16 +1031,53 @@ func (r *remoteRuntimeService) ListContainerStats(filter *runtimeapi.ContainerSt } klog.V(10).InfoS("[RemoteRuntimeService] ListContainerStats Response", "filter", filter, "stats", resp.GetStats()) - return resp.GetStats(), nil + return internalapi.FromV1alpha2ContainerStatsList(resp.GetStats()), nil +} + +func (r *remoteRuntimeService) listContainerStatsV1(ctx context.Context, filter *internalapi.ContainerStatsFilter) ([]*internalapi.ContainerStats, error) { + resp, err := r.runtimeClientV1.ListContainerStats(ctx, &runtimeapiV1.ListContainerStatsRequest{ + Filter: internalapi.V1ContainerStatsFilter(filter), + }) + if err != nil { + klog.ErrorS(err, "ListContainerStats with filter from runtime service 
failed", "filter", filter) + return nil, err + } + klog.V(10).InfoS("[RemoteRuntimeService] ListContainerStats Response", "filter", filter, "stats", resp.GetStats()) + + return internalapi.FromV1ContainerStatsList(resp.GetStats()), nil } // PodSandboxStats returns the stats of the pod. -func (r *remoteRuntimeService) PodSandboxStats(podSandboxID string) (*runtimeapi.PodSandboxStats, error) { +func (r *remoteRuntimeService) PodSandboxStats(podSandboxID string) (*internalapi.PodSandboxStats, error) { klog.V(10).InfoS("[RemoteRuntimeService] PodSandboxStats", "podSandboxID", podSandboxID, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.PodSandboxStats(ctx, &runtimeapi.PodSandboxStatsRequest{ + if r.useV1API() { + return r.podSandboxStatsV1(ctx, podSandboxID) + } + + return r.podSandboxStatsV1alpha2(ctx, podSandboxID) +} + +func (r *remoteRuntimeService) podSandboxStatsV1alpha2(ctx context.Context, podSandboxID string) (*internalapi.PodSandboxStats, error) { + resp, err := r.runtimeClientV1alpha2.PodSandboxStats(ctx, &runtimeapiV1alpha2.PodSandboxStatsRequest{ + PodSandboxId: podSandboxID, + }) + if err != nil { + if r.logReduction.ShouldMessageBePrinted(err.Error(), podSandboxID) { + klog.ErrorS(err, "PodSandbox from runtime service failed", "podSandboxID", podSandboxID) + } + return nil, err + } + r.logReduction.ClearID(podSandboxID) + klog.V(10).InfoS("[RemoteRuntimeService] PodSandbox Response", "podSandboxID", podSandboxID, "stats", resp.GetStats()) + + return internalapi.FromV1alpha2PodSandboxStats(resp.GetStats()), nil +} + +func (r *remoteRuntimeService) podSandboxStatsV1(ctx context.Context, podSandboxID string) (*internalapi.PodSandboxStats, error) { + resp, err := r.runtimeClientV1.PodSandboxStats(ctx, &runtimeapiV1.PodSandboxStatsRequest{ PodSandboxId: podSandboxID, }) if err != nil { @@ -587,18 +1089,26 @@ func (r *remoteRuntimeService) PodSandboxStats(podSandboxID string) (*runtimeapi 
r.logReduction.ClearID(podSandboxID) klog.V(10).InfoS("[RemoteRuntimeService] PodSandbox Response", "podSandboxID", podSandboxID, "stats", resp.GetStats()) - return resp.GetStats(), nil + return internalapi.FromV1PodSandboxStats(resp.GetStats()), nil } // ListPodSandboxStats returns the list of pod sandbox stats given the filter -func (r *remoteRuntimeService) ListPodSandboxStats(filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) { +func (r *remoteRuntimeService) ListPodSandboxStats(filter *internalapi.PodSandboxStatsFilter) ([]*internalapi.PodSandboxStats, error) { klog.V(10).InfoS("[RemoteRuntimeService] ListPodSandboxStats", "filter", filter) // Set timeout, because runtimes are able to cache disk stats results ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - resp, err := r.runtimeClient.ListPodSandboxStats(ctx, &runtimeapi.ListPodSandboxStatsRequest{ - Filter: filter, + if r.useV1API() { + return r.listPodSandboxStatsV1(ctx, filter) + } + + return r.listPodSandboxStatsV1alpha2(ctx, filter) +} + +func (r *remoteRuntimeService) listPodSandboxStatsV1alpha2(ctx context.Context, filter *internalapi.PodSandboxStatsFilter) ([]*internalapi.PodSandboxStats, error) { + resp, err := r.runtimeClientV1alpha2.ListPodSandboxStats(ctx, &runtimeapiV1alpha2.ListPodSandboxStatsRequest{ + Filter: internalapi.V1alpha2PodSandboxStatsFilter(filter), }) if err != nil { klog.ErrorS(err, "ListPodSandboxStats with filter from runtime service failed", "filter", filter) @@ -606,16 +1116,33 @@ func (r *remoteRuntimeService) ListPodSandboxStats(filter *runtimeapi.PodSandbox } klog.V(10).InfoS("[RemoteRuntimeService] ListPodSandboxStats Response", "filter", filter, "stats", resp.GetStats()) - return resp.GetStats(), nil + return internalapi.FromV1alpha2PodSandboxStatsList(resp.GetStats()), nil +} + +func (r *remoteRuntimeService) listPodSandboxStatsV1(ctx context.Context, filter *internalapi.PodSandboxStatsFilter) ([]*internalapi.PodSandboxStats, 
error) { + resp, err := r.runtimeClientV1.ListPodSandboxStats(ctx, &runtimeapiV1.ListPodSandboxStatsRequest{ + Filter: internalapi.V1PodSandboxStatsFilter(filter), + }) + if err != nil { + klog.ErrorS(err, "ListPodSandboxStats with filter from runtime service failed", "filter", filter) + return nil, err + } + klog.V(10).InfoS("[RemoteRuntimeService] ListPodSandboxStats Response", "filter", filter, "stats", resp.GetStats()) + + return internalapi.FromV1PodSandboxStatsList(resp.GetStats()), nil } // ReopenContainerLog reopens the container log file. -func (r *remoteRuntimeService) ReopenContainerLog(containerID string) error { +func (r *remoteRuntimeService) ReopenContainerLog(containerID string) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] ReopenContainerLog", "containerID", containerID, "timeout", r.timeout) ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() - _, err := r.runtimeClient.ReopenContainerLog(ctx, &runtimeapi.ReopenContainerLogRequest{ContainerId: containerID}) + if r.useV1API() { + _, err = r.runtimeClientV1.ReopenContainerLog(ctx, &runtimeapiV1.ReopenContainerLogRequest{ContainerId: containerID}) + } else { + _, err = r.runtimeClientV1alpha2.ReopenContainerLog(ctx, &runtimeapiV1alpha2.ReopenContainerLogRequest{ContainerId: containerID}) + } if err != nil { klog.ErrorS(err, "ReopenContainerLog from runtime service failed", "containerID", containerID) return err diff --git a/pkg/kubelet/cri/remote/remote_runtime_test.go b/pkg/kubelet/cri/remote/remote_runtime_test.go index 44ef6aa11cef..3a83bea99d72 100644 --- a/pkg/kubelet/cri/remote/remote_runtime_test.go +++ b/pkg/kubelet/cri/remote/remote_runtime_test.go @@ -23,8 +23,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - internalapi "k8s.io/cri-api/pkg/apis" - apitest "k8s.io/cri-api/pkg/apis/testing" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" + apitest 
"k8s.io/kubernetes/pkg/kubelet/apis/cri/testing" fakeremote "k8s.io/kubernetes/pkg/kubelet/cri/remote/fake" "k8s.io/kubernetes/pkg/kubelet/cri/remote/util" ) @@ -47,7 +49,7 @@ func createAndStartFakeRemoteRuntime(t *testing.T) (*fakeremote.RemoteRuntime, s } func createRemoteRuntimeService(endpoint string, t *testing.T) internalapi.RuntimeService { - runtimeService, err := NewRemoteRuntimeService(endpoint, defaultConnectionTimeout) + runtimeService, err := NewRemoteRuntimeService(endpoint, defaultConnectionTimeout, "") require.NoError(t, err) return runtimeService @@ -67,7 +69,7 @@ func TestVersion(t *testing.T) { r := createRemoteRuntimeService(endpoint, t) version, err := r.Version(apitest.FakeVersion) - assert.NoError(t, err) - assert.Equal(t, apitest.FakeVersion, version.Version) - assert.Equal(t, apitest.FakeRuntimeName, version.RuntimeName) + assert.Nil(t, version) + assert.NotNil(t, err) + assert.Equal(t, codes.Unimplemented, status.Code(err)) } diff --git a/pkg/kubelet/cri/remote/utils.go b/pkg/kubelet/cri/remote/utils.go index 02aefe585b4b..497aa4b898bd 100644 --- a/pkg/kubelet/cri/remote/utils.go +++ b/pkg/kubelet/cri/remote/utils.go @@ -21,7 +21,7 @@ import ( "fmt" "time" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" ) // maxMsgSize use 16MB as the default message size limit. @@ -39,7 +39,7 @@ func getContextWithCancel() (context.Context, context.CancelFunc) { } // verifySandboxStatus verified whether all required fields are set in PodSandboxStatus. -func verifySandboxStatus(status *runtimeapi.PodSandboxStatus) error { +func verifySandboxStatus(status *internalapi.PodSandboxStatus) error { if status.Id == "" { return fmt.Errorf("status.Id is not set") } @@ -61,7 +61,7 @@ func verifySandboxStatus(status *runtimeapi.PodSandboxStatus) error { } // verifyContainerStatus verified whether all required fields are set in ContainerStatus. 
-func verifyContainerStatus(status *runtimeapi.ContainerStatus) error { +func verifyContainerStatus(status *internalapi.ContainerStatus) error { if status.Id == "" { return fmt.Errorf("status.Id is not set") } diff --git a/pkg/kubelet/cri/remote/utils_test.go b/pkg/kubelet/cri/remote/utils_test.go index 0f23272426e8..d6467f98c128 100644 --- a/pkg/kubelet/cri/remote/utils_test.go +++ b/pkg/kubelet/cri/remote/utils_test.go @@ -19,12 +19,12 @@ package remote import ( "fmt" "github.com/stretchr/testify/assert" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "testing" ) -func makePodSandboxMetadata(name, namespace, uid string) *runtimeapi.PodSandboxMetadata { - return &runtimeapi.PodSandboxMetadata{ +func makePodSandboxMetadata(name, namespace, uid string) *internalapi.PodSandboxMetadata { + return &internalapi.PodSandboxMetadata{ Name: name, Namespace: namespace, Uid: uid, @@ -38,25 +38,25 @@ func TestVerifySandboxStatus(t *testing.T) { metaWithoutUid := makePodSandboxMetadata("foo", "bar", "") statuses := []struct { - input *runtimeapi.PodSandboxStatus + input *internalapi.PodSandboxStatus expected error }{ { - input: &runtimeapi.PodSandboxStatus{ + input: &internalapi.PodSandboxStatus{ CreatedAt: ct, Metadata: makePodSandboxMetadata("foo", "bar", "1"), }, expected: fmt.Errorf("status.Id is not set"), }, { - input: &runtimeapi.PodSandboxStatus{ + input: &internalapi.PodSandboxStatus{ Id: "1", CreatedAt: ct, }, expected: fmt.Errorf("status.Metadata is not set"), }, { - input: &runtimeapi.PodSandboxStatus{ + input: &internalapi.PodSandboxStatus{ Id: "2", CreatedAt: ct, Metadata: metaWithoutName, @@ -64,7 +64,7 @@ func TestVerifySandboxStatus(t *testing.T) { expected: fmt.Errorf("metadata.Name, metadata.Namespace or metadata.Uid is not in metadata %q", metaWithoutName), }, { - input: &runtimeapi.PodSandboxStatus{ + input: &internalapi.PodSandboxStatus{ Id: "3", CreatedAt: ct, Metadata: metaWithoutNamespace, 
@@ -72,7 +72,7 @@ func TestVerifySandboxStatus(t *testing.T) { expected: fmt.Errorf("metadata.Name, metadata.Namespace or metadata.Uid is not in metadata %q", metaWithoutNamespace), }, { - input: &runtimeapi.PodSandboxStatus{ + input: &internalapi.PodSandboxStatus{ Id: "4", CreatedAt: ct, Metadata: metaWithoutUid, @@ -80,14 +80,14 @@ func TestVerifySandboxStatus(t *testing.T) { expected: fmt.Errorf("metadata.Name, metadata.Namespace or metadata.Uid is not in metadata %q", metaWithoutUid), }, { - input: &runtimeapi.PodSandboxStatus{ + input: &internalapi.PodSandboxStatus{ Id: "5", Metadata: makePodSandboxMetadata("foo", "bar", "1"), }, expected: fmt.Errorf("status.CreatedAt is not set"), }, { - input: &runtimeapi.PodSandboxStatus{ + input: &internalapi.PodSandboxStatus{ Id: "6", CreatedAt: ct, Metadata: makePodSandboxMetadata("foo", "bar", "1"), @@ -107,41 +107,41 @@ func TestVerifySandboxStatus(t *testing.T) { } func TestVerifyContainerStatus(t *testing.T) { - meta := &runtimeapi.ContainerMetadata{Name: "cname", Attempt: 3} - metaWithoutName := &runtimeapi.ContainerMetadata{Attempt: 3} - imageSpec := &runtimeapi.ImageSpec{Image: "fimage"} - imageSpecWithoutImage := &runtimeapi.ImageSpec{} + meta := &internalapi.ContainerMetadata{Name: "cname", Attempt: 3} + metaWithoutName := &internalapi.ContainerMetadata{Attempt: 3} + imageSpec := &internalapi.ImageSpec{Image: "fimage"} + imageSpecWithoutImage := &internalapi.ImageSpec{} statuses := []struct { - input *runtimeapi.ContainerStatus + input *internalapi.ContainerStatus expected error }{ { - input: &runtimeapi.ContainerStatus{}, + input: &internalapi.ContainerStatus{}, expected: fmt.Errorf("status.Id is not set"), }, { - input: &runtimeapi.ContainerStatus{ + input: &internalapi.ContainerStatus{ Id: "1", }, expected: fmt.Errorf("status.Metadata is not set"), }, { - input: &runtimeapi.ContainerStatus{ + input: &internalapi.ContainerStatus{ Id: "2", Metadata: metaWithoutName, }, expected: fmt.Errorf("metadata.Name is not 
in metadata %q", metaWithoutName), }, { - input: &runtimeapi.ContainerStatus{ + input: &internalapi.ContainerStatus{ Id: "3", Metadata: meta, }, expected: fmt.Errorf("status.CreatedAt is not set"), }, { - input: &runtimeapi.ContainerStatus{ + input: &internalapi.ContainerStatus{ Id: "4", Metadata: meta, CreatedAt: 1, @@ -150,7 +150,7 @@ func TestVerifyContainerStatus(t *testing.T) { expected: fmt.Errorf("status.Image is not set"), }, { - input: &runtimeapi.ContainerStatus{ + input: &internalapi.ContainerStatus{ Id: "5", Metadata: meta, Image: imageSpec, @@ -159,7 +159,7 @@ func TestVerifyContainerStatus(t *testing.T) { expected: fmt.Errorf("status.ImageRef is not set"), }, { - input: &runtimeapi.ContainerStatus{ + input: &internalapi.ContainerStatus{ Id: "5", Metadata: meta, Image: imageSpec, diff --git a/pkg/kubelet/cri/streaming/.import-restrictions b/pkg/kubelet/cri/streaming/.import-restrictions index 10215ff92511..7fce5ac1ae7b 100644 --- a/pkg/kubelet/cri/streaming/.import-restrictions +++ b/pkg/kubelet/cri/streaming/.import-restrictions @@ -2,4 +2,5 @@ rules: # prevent exposing internal api in streaming packages - selectorRegexp: k8s[.]io/kubernetes allowedPrefixes: + - k8s.io/kubernetes/pkg/kubelet/apis/cri - k8s.io/kubernetes/pkg/kubelet/cri diff --git a/pkg/kubelet/cri/streaming/server.go b/pkg/kubelet/cri/streaming/server.go index d5cacbbf7eef..0415a568391f 100644 --- a/pkg/kubelet/cri/streaming/server.go +++ b/pkg/kubelet/cri/streaming/server.go @@ -34,7 +34,7 @@ import ( "k8s.io/apimachinery/pkg/types" remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" "k8s.io/client-go/tools/remotecommand" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward" remotecommandserver "k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand" ) @@ -45,9 +45,9 @@ type Server interface { // Get the serving URL for the requests. 
// Requests must not be nil. Responses may be nil iff an error is returned. - GetExec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) - GetAttach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) - GetPortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) + GetExec(*internalapi.ExecRequest) (*internalapi.ExecResponse, error) + GetAttach(req *internalapi.AttachRequest) (*internalapi.AttachResponse, error) + GetPortForward(*internalapi.PortForwardRequest) (*internalapi.PortForwardResponse, error) // Start the server. // addr is the address to serve on (address:port) stayUp indicates whether the server should @@ -161,7 +161,7 @@ type server struct { server *http.Server } -func validateExecRequest(req *runtimeapi.ExecRequest) error { +func validateExecRequest(req *internalapi.ExecRequest) error { if req.ContainerId == "" { return status.Errorf(codes.InvalidArgument, "missing required container_id") } @@ -176,7 +176,7 @@ func validateExecRequest(req *runtimeapi.ExecRequest) error { return nil } -func (s *server) GetExec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { +func (s *server) GetExec(req *internalapi.ExecRequest) (*internalapi.ExecResponse, error) { if err := validateExecRequest(req); err != nil { return nil, err } @@ -184,12 +184,12 @@ func (s *server) GetExec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, if err != nil { return nil, err } - return &runtimeapi.ExecResponse{ + return &internalapi.ExecResponse{ Url: s.buildURL("exec", token), }, nil } -func validateAttachRequest(req *runtimeapi.AttachRequest) error { +func validateAttachRequest(req *internalapi.AttachRequest) error { if req.ContainerId == "" { return status.Errorf(codes.InvalidArgument, "missing required container_id") } @@ -204,7 +204,7 @@ func validateAttachRequest(req *runtimeapi.AttachRequest) error { return nil } -func (s *server) GetAttach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { 
+func (s *server) GetAttach(req *internalapi.AttachRequest) (*internalapi.AttachResponse, error) { if err := validateAttachRequest(req); err != nil { return nil, err } @@ -212,12 +212,12 @@ func (s *server) GetAttach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachRes if err != nil { return nil, err } - return &runtimeapi.AttachResponse{ + return &internalapi.AttachResponse{ Url: s.buildURL("attach", token), }, nil } -func (s *server) GetPortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { +func (s *server) GetPortForward(req *internalapi.PortForwardRequest) (*internalapi.PortForwardResponse, error) { if req.PodSandboxId == "" { return nil, status.Errorf(codes.InvalidArgument, "missing required pod_sandbox_id") } @@ -225,7 +225,7 @@ func (s *server) GetPortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi if err != nil { return nil, err } - return &runtimeapi.PortForwardResponse{ + return &internalapi.PortForwardResponse{ Url: s.buildURL("portforward", token), }, nil } @@ -269,7 +269,7 @@ func (s *server) serveExec(req *restful.Request, resp *restful.Response) { http.NotFound(resp.ResponseWriter, req.Request) return } - exec, ok := cachedRequest.(*runtimeapi.ExecRequest) + exec, ok := cachedRequest.(*internalapi.ExecRequest) if !ok { http.NotFound(resp.ResponseWriter, req.Request) return @@ -303,7 +303,7 @@ func (s *server) serveAttach(req *restful.Request, resp *restful.Response) { http.NotFound(resp.ResponseWriter, req.Request) return } - attach, ok := cachedRequest.(*runtimeapi.AttachRequest) + attach, ok := cachedRequest.(*internalapi.AttachRequest) if !ok { http.NotFound(resp.ResponseWriter, req.Request) return @@ -335,7 +335,7 @@ func (s *server) servePortForward(req *restful.Request, resp *restful.Response) http.NotFound(resp.ResponseWriter, req.Request) return } - pf, ok := cachedRequest.(*runtimeapi.PortForwardRequest) + pf, ok := cachedRequest.(*internalapi.PortForwardRequest) if !ok { 
http.NotFound(resp.ResponseWriter, req.Request) return diff --git a/pkg/kubelet/cri/streaming/server_test.go b/pkg/kubelet/cri/streaming/server_test.go index 0ce3f50feb55..9e026a7621b8 100644 --- a/pkg/kubelet/cri/streaming/server_test.go +++ b/pkg/kubelet/cri/streaming/server_test.go @@ -34,7 +34,7 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" "k8s.io/client-go/transport/spdy" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubeletportforward "k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward" ) @@ -67,12 +67,12 @@ func TestGetExec(t *testing.T) { }, nil) assert.NoError(t, err) - assertRequestToken := func(expectedReq *runtimeapi.ExecRequest, cache *requestCache, token string) { + assertRequestToken := func(expectedReq *internalapi.ExecRequest, cache *requestCache, token string) { req, ok := cache.Consume(token) require.True(t, ok, "token %s not found!", token) assert.Equal(t, expectedReq, req) } - request := &runtimeapi.ExecRequest{ + request := &internalapi.ExecRequest{ ContainerId: testContainerID, Cmd: []string{"echo", "foo"}, Tty: true, @@ -155,7 +155,7 @@ func TestValidateExecAttachRequest(t *testing.T) { t.Run(tc.desc, func(t *testing.T) { for _, c := range tc.configs { // validate the exec request. - execReq := &runtimeapi.ExecRequest{ + execReq := &internalapi.ExecRequest{ ContainerId: testContainerID, Cmd: []string{"date"}, Tty: c.tty, @@ -167,7 +167,7 @@ func TestValidateExecAttachRequest(t *testing.T) { assert.Equal(t, tc.expectErr, err != nil, "config: %v, err: %v", c, err) // validate the attach request. 
- attachReq := &runtimeapi.AttachRequest{ + attachReq := &internalapi.AttachRequest{ ContainerId: testContainerID, Tty: c.tty, Stdin: c.stdin, @@ -193,13 +193,13 @@ func TestGetAttach(t *testing.T) { }, nil) require.NoError(t, err) - assertRequestToken := func(expectedReq *runtimeapi.AttachRequest, cache *requestCache, token string) { + assertRequestToken := func(expectedReq *internalapi.AttachRequest, cache *requestCache, token string) { req, ok := cache.Consume(token) require.True(t, ok, "token %s not found!", token) assert.Equal(t, expectedReq, req) } - request := &runtimeapi.AttachRequest{ + request := &internalapi.AttachRequest{ ContainerId: testContainerID, Stdin: true, Tty: true, @@ -225,7 +225,7 @@ func TestGetAttach(t *testing.T) { func TestGetPortForward(t *testing.T) { podSandboxID := testPodSandboxID - request := &runtimeapi.PortForwardRequest{ + request := &internalapi.PortForwardRequest{ PodSandboxId: podSandboxID, Port: []int32{1, 2, 3, 4}, } @@ -242,7 +242,7 @@ func TestGetPortForward(t *testing.T) { token := strings.TrimPrefix(resp.Url, expectedURL) req, ok := serv.(*server).cache.Consume(token) require.True(t, ok, "token %s not found!", token) - assert.Equal(t, testPodSandboxID, req.(*runtimeapi.PortForwardRequest).PodSandboxId) + assert.Equal(t, testPodSandboxID, req.(*internalapi.PortForwardRequest).PodSandboxId) } { // TLS @@ -258,7 +258,7 @@ func TestGetPortForward(t *testing.T) { token := strings.TrimPrefix(resp.Url, expectedURL) req, ok := tlsServer.(*server).cache.Consume(token) require.True(t, ok, "token %s not found!", token) - assert.Equal(t, testPodSandboxID, req.(*runtimeapi.PortForwardRequest).PodSandboxId) + assert.Equal(t, testPodSandboxID, req.(*internalapi.PortForwardRequest).PodSandboxId) } } @@ -274,7 +274,7 @@ func TestServePortForward(t *testing.T) { s, testServer := startTestServer(t) defer testServer.Close() - resp, err := s.GetPortForward(&runtimeapi.PortForwardRequest{ + resp, err := 
s.GetPortForward(&internalapi.PortForwardRequest{ PodSandboxId: testPodSandboxID, }) require.NoError(t, err) @@ -316,7 +316,7 @@ func runRemoteCommandTest(t *testing.T, commandType string) { containerID := testContainerID switch commandType { case "exec": - resp, err := s.GetExec(&runtimeapi.ExecRequest{ + resp, err := s.GetExec(&internalapi.ExecRequest{ ContainerId: containerID, Cmd: []string{"echo"}, Stdin: stdin, @@ -327,7 +327,7 @@ func runRemoteCommandTest(t *testing.T, commandType string) { reqURL, err = url.Parse(resp.Url) require.NoError(t, err) case "attach": - resp, err := s.GetAttach(&runtimeapi.AttachRequest{ + resp, err := s.GetAttach(&internalapi.AttachRequest{ ContainerId: containerID, Stdin: stdin, Stdout: stdout, diff --git a/pkg/kubelet/dockershim/docker_service.go b/pkg/kubelet/dockershim/docker_service.go index b364a19152c5..7059de1508a8 100644 --- a/pkg/kubelet/dockershim/docker_service.go +++ b/pkg/kubelet/dockershim/docker_service.go @@ -40,12 +40,12 @@ import ( "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/cri/streaming" "k8s.io/kubernetes/pkg/kubelet/dockershim/cm" "k8s.io/kubernetes/pkg/kubelet/dockershim/network" "k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni" "k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport" "k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet" + "k8s.io/kubernetes/pkg/kubelet/dockershim/streaming" "k8s.io/kubernetes/pkg/kubelet/legacy" "k8s.io/kubernetes/pkg/kubelet/util/cache" diff --git a/pkg/kubelet/dockershim/docker_streaming.go b/pkg/kubelet/dockershim/docker_streaming.go index 0320fa8977ef..2def35c5b3e1 100644 --- a/pkg/kubelet/dockershim/docker_streaming.go +++ b/pkg/kubelet/dockershim/docker_streaming.go @@ -35,8 +35,8 @@ import ( "k8s.io/client-go/tools/remotecommand" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" kubecontainer 
"k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/cri/streaming" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" + "k8s.io/kubernetes/pkg/kubelet/dockershim/streaming" "k8s.io/kubernetes/pkg/kubelet/util/ioutils" utilexec "k8s.io/utils/exec" ) diff --git a/pkg/kubelet/dockershim/streaming/errors.go b/pkg/kubelet/dockershim/streaming/errors.go new file mode 100644 index 000000000000..2504f63df461 --- /dev/null +++ b/pkg/kubelet/dockershim/streaming/errors.go @@ -0,0 +1,59 @@ +//go:build !dockerless +// +build !dockerless + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package streaming + +import ( + "net/http" + "strconv" + + "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" +) + +// NewErrorStreamingDisabled creates an error for disabled streaming method. +func NewErrorStreamingDisabled(method string) error { + return grpcstatus.Errorf(codes.NotFound, "streaming method %s disabled", method) +} + +// NewErrorTooManyInFlight creates an error for exceeding the maximum number of in-flight requests. +func NewErrorTooManyInFlight() error { + return grpcstatus.Error(codes.ResourceExhausted, "maximum number of in-flight requests exceeded") +} + +// WriteError translates a CRI streaming error into an appropriate HTTP response. 
+func WriteError(err error, w http.ResponseWriter) error { + s, _ := grpcstatus.FromError(err) + var status int + switch s.Code() { + case codes.NotFound: + status = http.StatusNotFound + case codes.ResourceExhausted: + // We only expect to hit this if there is a DoS, so we just wait the full TTL. + // If this is ever hit in steady-state operations, consider increasing the maxInFlight requests, + // or plumbing through the time to next expiration. + w.Header().Set("Retry-After", strconv.Itoa(int(cacheTTL.Seconds()))) + status = http.StatusTooManyRequests + default: + status = http.StatusInternalServerError + } + w.WriteHeader(status) + _, writeErr := w.Write([]byte(err.Error())) + return writeErr +} diff --git a/pkg/kubelet/dockershim/streaming/request_cache.go b/pkg/kubelet/dockershim/streaming/request_cache.go new file mode 100644 index 000000000000..07b7406d4131 --- /dev/null +++ b/pkg/kubelet/dockershim/streaming/request_cache.go @@ -0,0 +1,149 @@ +//go:build !dockerless +// +build !dockerless + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package streaming + +import ( + "container/list" + "crypto/rand" + "encoding/base64" + "fmt" + "math" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/clock" +) + +var ( + // cacheTTL is the timeout after which tokens become invalid. + cacheTTL = 1 * time.Minute + // maxInFlight is the maximum number of in-flight requests to allow. 
+ maxInFlight = 1000 + // tokenLen is the length of the random base64 encoded token identifying the request. + tokenLen = 8 +) + +// requestCache caches streaming (exec/attach/port-forward) requests and generates a single-use +// random token for their retrieval. The requestCache is used for building streaming URLs without +// the need to encode every request parameter in the URL. +type requestCache struct { + // clock is used to obtain the current time + clock clock.Clock + + // tokens maps the generate token to the request for fast retrieval. + tokens map[string]*list.Element + // ll maintains an age-ordered request list for faster garbage collection of expired requests. + ll *list.List + + lock sync.Mutex +} + +// Type representing an *ExecRequest, *AttachRequest, or *PortForwardRequest. +type request interface{} + +type cacheEntry struct { + token string + req request + expireTime time.Time +} + +func newRequestCache() *requestCache { + return &requestCache{ + clock: clock.RealClock{}, + ll: list.New(), + tokens: make(map[string]*list.Element), + } +} + +// Insert the given request into the cache and returns the token used for fetching it out. +func (c *requestCache) Insert(req request) (token string, err error) { + c.lock.Lock() + defer c.lock.Unlock() + + // Remove expired entries. + c.gc() + // If the cache is full, reject the request. + if c.ll.Len() == maxInFlight { + return "", NewErrorTooManyInFlight() + } + token, err = c.uniqueToken() + if err != nil { + return "", err + } + ele := c.ll.PushFront(&cacheEntry{token, req, c.clock.Now().Add(cacheTTL)}) + + c.tokens[token] = ele + return token, nil +} + +// Consume the token (remove it from the cache) and return the cached request, if found. 
+func (c *requestCache) Consume(token string) (req request, found bool) { + c.lock.Lock() + defer c.lock.Unlock() + ele, ok := c.tokens[token] + if !ok { + return nil, false + } + c.ll.Remove(ele) + delete(c.tokens, token) + + entry := ele.Value.(*cacheEntry) + if c.clock.Now().After(entry.expireTime) { + // Entry already expired. + return nil, false + } + return entry.req, true +} + +// uniqueToken generates a random URL-safe token and ensures uniqueness. +func (c *requestCache) uniqueToken() (string, error) { + const maxTries = 10 + // Number of bytes to be tokenLen when base64 encoded. + tokenSize := math.Ceil(float64(tokenLen) * 6 / 8) + rawToken := make([]byte, int(tokenSize)) + for i := 0; i < maxTries; i++ { + if _, err := rand.Read(rawToken); err != nil { + return "", err + } + encoded := base64.RawURLEncoding.EncodeToString(rawToken) + token := encoded[:tokenLen] + // If it's unique, return it. Otherwise retry. + if _, exists := c.tokens[encoded]; !exists { + return token, nil + } + } + return "", fmt.Errorf("failed to generate unique token") +} + +// Must be write-locked prior to calling. +func (c *requestCache) gc() { + now := c.clock.Now() + for c.ll.Len() > 0 { + oldest := c.ll.Back() + entry := oldest.Value.(*cacheEntry) + if !now.After(entry.expireTime) { + return + } + + // Oldest value is expired; remove it. + c.ll.Remove(oldest) + delete(c.tokens, entry.token) + } +} diff --git a/pkg/kubelet/dockershim/streaming/server.go b/pkg/kubelet/dockershim/streaming/server.go new file mode 100644 index 000000000000..452d29d8436c --- /dev/null +++ b/pkg/kubelet/dockershim/streaming/server.go @@ -0,0 +1,385 @@ +//go:build !dockerless +// +build !dockerless + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package streaming + +import ( + "crypto/tls" + "errors" + "io" + "net" + "net/http" + "net/url" + "path" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + restful "github.com/emicklei/go-restful" + + "k8s.io/apimachinery/pkg/types" + remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" + "k8s.io/client-go/tools/remotecommand" + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + "k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward" + remotecommandserver "k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand" +) + +// Server is the library interface to serve the stream requests. +type Server interface { + http.Handler + + // Get the serving URL for the requests. + // Requests must not be nil. Responses may be nil iff an error is returned. + GetExec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) + GetAttach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) + GetPortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) + + // Start the server. + // addr is the address to serve on (address:port) stayUp indicates whether the server should + // listen until Stop() is called, or automatically stop after all expected connections are + // closed. Calling Get{Exec,Attach,PortForward} increments the expected connection count. + // Function does not return until the server is stopped. + Start(stayUp bool) error + // Stop the server, and terminate any open connections. + Stop() error +} + +// Runtime is the interface to execute the commands and provide the streams. 
+type Runtime interface { + Exec(containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error + Attach(containerID string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error + PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error +} + +// Config defines the options used for running the stream server. +type Config struct { + // The host:port address the server will listen on. + Addr string + // The optional base URL for constructing streaming URLs. If empty, the baseURL will be + // constructed from the serve address. + // Note that for port "0", the URL port will be set to actual port in use. + BaseURL *url.URL + + // How long to leave idle connections open for. + StreamIdleTimeout time.Duration + // How long to wait for clients to create streams. Only used for SPDY streaming. + StreamCreationTimeout time.Duration + + // The streaming protocols the server supports (understands and permits). See + // k8s.io/kubernetes/pkg/kubelet/server/remotecommand/constants.go for available protocols. + // Only used for SPDY streaming. + SupportedRemoteCommandProtocols []string + + // The streaming protocols the server supports (understands and permits). See + // k8s.io/kubernetes/pkg/kubelet/server/portforward/constants.go for available protocols. + // Only used for SPDY streaming. + SupportedPortForwardProtocols []string + + // The config for serving over TLS. If nil, TLS will not be used. + TLSConfig *tls.Config +} + +// DefaultConfig provides default values for server Config. The DefaultConfig is partial, so +// some fields like Addr must still be provided. 
+var DefaultConfig = Config{ + StreamIdleTimeout: 4 * time.Hour, + StreamCreationTimeout: remotecommandconsts.DefaultStreamCreationTimeout, + SupportedRemoteCommandProtocols: remotecommandconsts.SupportedStreamingProtocols, + SupportedPortForwardProtocols: portforward.SupportedProtocols, +} + +// NewServer creates a new Server for stream requests. +// TODO(tallclair): Add auth(n/z) interface & handling. +func NewServer(config Config, runtime Runtime) (Server, error) { + s := &server{ + config: config, + runtime: &criAdapter{runtime}, + cache: newRequestCache(), + } + + if s.config.BaseURL == nil { + s.config.BaseURL = &url.URL{ + Scheme: "http", + Host: s.config.Addr, + } + if s.config.TLSConfig != nil { + s.config.BaseURL.Scheme = "https" + } + } + + ws := &restful.WebService{} + endpoints := []struct { + path string + handler restful.RouteFunction + }{ + {"/exec/{token}", s.serveExec}, + {"/attach/{token}", s.serveAttach}, + {"/portforward/{token}", s.servePortForward}, + } + // If serving relative to a base path, set that here. + pathPrefix := path.Dir(s.config.BaseURL.Path) + for _, e := range endpoints { + for _, method := range []string{"GET", "POST"} { + ws.Route(ws. + Method(method). + Path(path.Join(pathPrefix, e.path)). + To(e.handler)) + } + } + handler := restful.NewContainer() + handler.Add(ws) + s.handler = handler + s.server = &http.Server{ + Addr: s.config.Addr, + Handler: s.handler, + TLSConfig: s.config.TLSConfig, + } + + return s, nil +} + +type server struct { + config Config + runtime *criAdapter + handler http.Handler + cache *requestCache + server *http.Server +} + +func validateExecRequest(req *runtimeapi.ExecRequest) error { + if req.ContainerId == "" { + return status.Errorf(codes.InvalidArgument, "missing required container_id") + } + if req.Tty && req.Stderr { + // If TTY is set, stderr cannot be true because multiplexing is not + // supported. 
+ return status.Errorf(codes.InvalidArgument, "tty and stderr cannot both be true") + } + if !req.Stdin && !req.Stdout && !req.Stderr { + return status.Errorf(codes.InvalidArgument, "one of stdin, stdout, or stderr must be set") + } + return nil +} + +func (s *server) GetExec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { + if err := validateExecRequest(req); err != nil { + return nil, err + } + token, err := s.cache.Insert(req) + if err != nil { + return nil, err + } + return &runtimeapi.ExecResponse{ + Url: s.buildURL("exec", token), + }, nil +} + +func validateAttachRequest(req *runtimeapi.AttachRequest) error { + if req.ContainerId == "" { + return status.Errorf(codes.InvalidArgument, "missing required container_id") + } + if req.Tty && req.Stderr { + // If TTY is set, stderr cannot be true because multiplexing is not + // supported. + return status.Errorf(codes.InvalidArgument, "tty and stderr cannot both be true") + } + if !req.Stdin && !req.Stdout && !req.Stderr { + return status.Errorf(codes.InvalidArgument, "one of stdin, stdout, and stderr must be set") + } + return nil +} + +func (s *server) GetAttach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { + if err := validateAttachRequest(req); err != nil { + return nil, err + } + token, err := s.cache.Insert(req) + if err != nil { + return nil, err + } + return &runtimeapi.AttachResponse{ + Url: s.buildURL("attach", token), + }, nil +} + +func (s *server) GetPortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { + if req.PodSandboxId == "" { + return nil, status.Errorf(codes.InvalidArgument, "missing required pod_sandbox_id") + } + token, err := s.cache.Insert(req) + if err != nil { + return nil, err + } + return &runtimeapi.PortForwardResponse{ + Url: s.buildURL("portforward", token), + }, nil +} + +func (s *server) Start(stayUp bool) error { + if !stayUp { + // TODO(tallclair): Implement this. 
+ return errors.New("stayUp=false is not yet implemented") + } + + listener, err := net.Listen("tcp", s.config.Addr) + if err != nil { + return err + } + // Use the actual address as baseURL host. This handles the "0" port case. + s.config.BaseURL.Host = listener.Addr().String() + if s.config.TLSConfig != nil { + return s.server.ServeTLS(listener, "", "") // Use certs from TLSConfig. + } + return s.server.Serve(listener) +} + +func (s *server) Stop() error { + return s.server.Close() +} + +func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + s.handler.ServeHTTP(w, r) +} + +func (s *server) buildURL(method, token string) string { + return s.config.BaseURL.ResolveReference(&url.URL{ + Path: path.Join(method, token), + }).String() +} + +func (s *server) serveExec(req *restful.Request, resp *restful.Response) { + token := req.PathParameter("token") + cachedRequest, ok := s.cache.Consume(token) + if !ok { + http.NotFound(resp.ResponseWriter, req.Request) + return + } + exec, ok := cachedRequest.(*runtimeapi.ExecRequest) + if !ok { + http.NotFound(resp.ResponseWriter, req.Request) + return + } + + streamOpts := &remotecommandserver.Options{ + Stdin: exec.Stdin, + Stdout: exec.Stdout, + Stderr: exec.Stderr, + TTY: exec.Tty, + } + + remotecommandserver.ServeExec( + resp.ResponseWriter, + req.Request, + s.runtime, + "", // unused: podName + "", // unusued: podUID + exec.ContainerId, + exec.Cmd, + streamOpts, + s.config.StreamIdleTimeout, + s.config.StreamCreationTimeout, + s.config.SupportedRemoteCommandProtocols) +} + +func (s *server) serveAttach(req *restful.Request, resp *restful.Response) { + token := req.PathParameter("token") + cachedRequest, ok := s.cache.Consume(token) + if !ok { + http.NotFound(resp.ResponseWriter, req.Request) + return + } + attach, ok := cachedRequest.(*runtimeapi.AttachRequest) + if !ok { + http.NotFound(resp.ResponseWriter, req.Request) + return + } + + streamOpts := &remotecommandserver.Options{ + Stdin: attach.Stdin, + 
Stdout: attach.Stdout, + Stderr: attach.Stderr, + TTY: attach.Tty, + } + remotecommandserver.ServeAttach( + resp.ResponseWriter, + req.Request, + s.runtime, + "", // unused: podName + "", // unusued: podUID + attach.ContainerId, + streamOpts, + s.config.StreamIdleTimeout, + s.config.StreamCreationTimeout, + s.config.SupportedRemoteCommandProtocols) +} + +func (s *server) servePortForward(req *restful.Request, resp *restful.Response) { + token := req.PathParameter("token") + cachedRequest, ok := s.cache.Consume(token) + if !ok { + http.NotFound(resp.ResponseWriter, req.Request) + return + } + pf, ok := cachedRequest.(*runtimeapi.PortForwardRequest) + if !ok { + http.NotFound(resp.ResponseWriter, req.Request) + return + } + + portForwardOptions, err := portforward.BuildV4Options(pf.Port) + if err != nil { + resp.WriteError(http.StatusBadRequest, err) + return + } + + portforward.ServePortForward( + resp.ResponseWriter, + req.Request, + s.runtime, + pf.PodSandboxId, + "", // unused: podUID + portForwardOptions, + s.config.StreamIdleTimeout, + s.config.StreamCreationTimeout, + s.config.SupportedPortForwardProtocols) +} + +// criAdapter wraps the Runtime functions to conform to the remotecommand interfaces. +// The adapter binds the container ID to the container name argument, and the pod sandbox ID to the pod name. 
+type criAdapter struct { + Runtime +} + +var _ remotecommandserver.Executor = &criAdapter{} +var _ remotecommandserver.Attacher = &criAdapter{} +var _ portforward.PortForwarder = &criAdapter{} + +func (a *criAdapter) ExecInContainer(podName string, podUID types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { + return a.Runtime.Exec(container, cmd, in, out, err, tty, resize) +} + +func (a *criAdapter) AttachContainer(podName string, podUID types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { + return a.Runtime.Attach(container, in, out, err, tty, resize) +} + +func (a *criAdapter) PortForward(podName string, podUID types.UID, port int32, stream io.ReadWriteCloser) error { + return a.Runtime.PortForward(podName, port, stream) +} diff --git a/pkg/kubelet/images/helpers.go b/pkg/kubelet/images/helpers.go index ad6f560d78b6..aab231ffa21e 100644 --- a/pkg/kubelet/images/helpers.go +++ b/pkg/kubelet/images/helpers.go @@ -21,7 +21,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/client-go/util/flowcontrol" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) @@ -43,7 +43,7 @@ type throttledImageService struct { limiter flowcontrol.RateLimiter } -func (ts throttledImageService) PullImage(image kubecontainer.ImageSpec, secrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (ts throttledImageService) PullImage(image kubecontainer.ImageSpec, secrets []v1.Secret, podSandboxConfig *internalapi.PodSandboxConfig) (string, error) { if ts.limiter.TryAccept() { return ts.ImageService.PullImage(image, secrets, podSandboxConfig) } diff --git a/pkg/kubelet/images/image_manager.go b/pkg/kubelet/images/image_manager.go index 6d8b5f334294..64a9efffd825 100644 
--- a/pkg/kubelet/images/image_manager.go +++ b/pkg/kubelet/images/image_manager.go @@ -26,7 +26,7 @@ import ( "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" ) @@ -86,7 +86,7 @@ func (m *imageManager) logIt(ref *v1.ObjectReference, eventtype, event, prefix, // EnsureImageExists pulls the image for the specified pod and container, and returns // (imageRef, error message, error). -func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, string, error) { +func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *internalapi.PodSandboxConfig) (string, string, error) { logPrefix := fmt.Sprintf("%s/%s/%s", pod.Namespace, pod.Name, container.Image) ref, err := kubecontainer.GenerateContainerRef(pod, container) if err != nil { diff --git a/pkg/kubelet/images/puller.go b/pkg/kubelet/images/puller.go index 5ff842411a76..fbbf414ac904 100644 --- a/pkg/kubelet/images/puller.go +++ b/pkg/kubelet/images/puller.go @@ -21,7 +21,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) @@ -31,7 +31,7 @@ type pullResult struct { } type imagePuller interface { - pullImage(kubecontainer.ImageSpec, []v1.Secret, chan<- pullResult, *runtimeapi.PodSandboxConfig) + pullImage(kubecontainer.ImageSpec, []v1.Secret, chan<- pullResult, *internalapi.PodSandboxConfig) } var _, _ imagePuller = ¶llelImagePuller{}, &serialImagePuller{} @@ -44,7 +44,7 @@ func newParallelImagePuller(imageService kubecontainer.ImageService) imagePuller return 
¶llelImagePuller{imageService} } -func (pip *parallelImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) { +func (pip *parallelImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *internalapi.PodSandboxConfig) { go func() { imageRef, err := pip.imageService.PullImage(spec, pullSecrets, podSandboxConfig) pullChan <- pullResult{ @@ -72,10 +72,10 @@ type imagePullRequest struct { spec kubecontainer.ImageSpec pullSecrets []v1.Secret pullChan chan<- pullResult - podSandboxConfig *runtimeapi.PodSandboxConfig + podSandboxConfig *internalapi.PodSandboxConfig } -func (sip *serialImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) { +func (sip *serialImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *internalapi.PodSandboxConfig) { sip.pullRequests <- &imagePullRequest{ spec: spec, pullSecrets: pullSecrets, diff --git a/pkg/kubelet/images/types.go b/pkg/kubelet/images/types.go index 8af7bfddcfbe..d2162f467b04 100644 --- a/pkg/kubelet/images/types.go +++ b/pkg/kubelet/images/types.go @@ -20,7 +20,7 @@ import ( "errors" "k8s.io/api/core/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" ) var ( @@ -50,7 +50,7 @@ var ( // Implementations are expected to be thread safe. type ImageManager interface { // EnsureImageExists ensures that image specified in `container` exists. 
- EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, string, error) + EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *internalapi.PodSandboxConfig) (string, string, error) // TODO(ronl): consolidating image managing and deleting operation in this interface } diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index f174235d262b..46d82f7f436e 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -58,13 +58,13 @@ import ( "k8s.io/client-go/util/flowcontrol" cloudprovider "k8s.io/cloud-provider" "k8s.io/component-helpers/apimachinery/lease" - internalapi "k8s.io/cri-api/pkg/apis" "k8s.io/klog/v2" pluginwatcherapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1" statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/features" kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/apis/podresources" "k8s.io/kubernetes/pkg/kubelet/cadvisor" kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate" @@ -74,7 +74,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/configmap" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/cri/remote" - "k8s.io/kubernetes/pkg/kubelet/cri/streaming" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/eviction" "k8s.io/kubernetes/pkg/kubelet/images" @@ -331,10 +330,10 @@ func PreInitRuntimeService(kubeCfg *kubeletconfiginternal.KubeletConfiguration, } var err error - if kubeDeps.RemoteRuntimeService, err = remote.NewRemoteRuntimeService(remoteRuntimeEndpoint, kubeCfg.RuntimeRequestTimeout.Duration); err != nil { + if kubeDeps.RemoteRuntimeService, err = remote.NewRemoteRuntimeService(remoteRuntimeEndpoint, kubeCfg.RuntimeRequestTimeout.Duration, 
internalapi.APIVersion(crOptions.CRIVersion)); err != nil { return err } - if kubeDeps.RemoteImageService, err = remote.NewRemoteImageService(remoteImageEndpoint, kubeCfg.RuntimeRequestTimeout.Duration); err != nil { + if kubeDeps.RemoteImageService, err = remote.NewRemoteImageService(remoteImageEndpoint, kubeCfg.RuntimeRequestTimeout.Duration, kubeDeps.RemoteRuntimeService.APIVersion()); err != nil { return err } @@ -2439,15 +2438,3 @@ func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool { // ContainerRemoved doesn't affect pod state return event.Type != pleg.ContainerRemoved } - -// Gets the streaming server configuration to use with in-process CRI shims. -func getStreamingConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, crOptions *config.ContainerRuntimeOptions) *streaming.Config { - config := &streaming.Config{ - StreamIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration, - StreamCreationTimeout: streaming.DefaultConfig.StreamCreationTimeout, - SupportedRemoteCommandProtocols: streaming.DefaultConfig.SupportedRemoteCommandProtocols, - SupportedPortForwardProtocols: streaming.DefaultConfig.SupportedPortForwardProtocols, - } - config.Addr = net.JoinHostPort("localhost", "0") - return config -} diff --git a/pkg/kubelet/kubelet_dockershim.go b/pkg/kubelet/kubelet_dockershim.go index edc486949e84..b9287701de84 100644 --- a/pkg/kubelet/kubelet_dockershim.go +++ b/pkg/kubelet/kubelet_dockershim.go @@ -20,12 +20,16 @@ limitations under the License. 
package kubelet import ( + "net" + "k8s.io/klog/v2" kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config" "k8s.io/kubernetes/pkg/kubelet/config" + "k8s.io/kubernetes/pkg/kubelet/cri/streaming" "k8s.io/kubernetes/pkg/kubelet/dockershim" dockerremote "k8s.io/kubernetes/pkg/kubelet/dockershim/remote" + dockerstream "k8s.io/kubernetes/pkg/kubelet/dockershim/streaming" ) func runDockershim(kubeCfg *kubeletconfiginternal.KubeletConfiguration, @@ -78,3 +82,15 @@ func runDockershim(kubeCfg *kubeletconfiginternal.KubeletConfiguration, return nil } + +// Gets the streaming server configuration to use with in-process CRI shims. +func getStreamingConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, crOptions *config.ContainerRuntimeOptions) *dockerstream.Config { + config := &dockerstream.Config{ + StreamIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration, + StreamCreationTimeout: streaming.DefaultConfig.StreamCreationTimeout, + SupportedRemoteCommandProtocols: streaming.DefaultConfig.SupportedRemoteCommandProtocols, + SupportedPortForwardProtocols: streaming.DefaultConfig.SupportedPortForwardProtocols, + } + config.Addr = net.JoinHostPort("localhost", "0") + return config +} diff --git a/pkg/kubelet/kubelet_network.go b/pkg/kubelet/kubelet_network.go index ae030a48dbb6..41fdb41e0928 100644 --- a/pkg/kubelet/kubelet_network.go +++ b/pkg/kubelet/kubelet_network.go @@ -20,8 +20,8 @@ import ( "fmt" "k8s.io/api/core/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog/v2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" utiliptables "k8s.io/kubernetes/pkg/util/iptables" ) @@ -81,6 +81,6 @@ func (kl *Kubelet) updatePodCIDR(cidr string) (bool, error) { // GetPodDNS returns DNS settings for the pod. // This function is defined in kubecontainer.RuntimeHelper interface so we // have to implement it. 
-func (kl *Kubelet) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) { +func (kl *Kubelet) GetPodDNS(pod *v1.Pod) (*internalapi.DNSConfig, error) { return kl.dnsConfigurer.GetPodDNS(pod) } diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 0917f2bf8730..57641f513510 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -40,7 +40,6 @@ import ( utilvalidation "k8s.io/apimachinery/pkg/util/validation" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-helpers/storage/ephemeral" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog/v2" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/api/v1/resource" @@ -49,6 +48,7 @@ import ( v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/fieldpath" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward" @@ -279,24 +279,24 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h } // translateMountPropagation transforms v1.MountPropagationMode to -// runtimeapi.MountPropagation. -func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.MountPropagation, error) { +// internalapi.MountPropagation. +func translateMountPropagation(mountMode *v1.MountPropagationMode) (internalapi.MountPropagation, error) { if runtime.GOOS == "windows" { // Windows containers doesn't support mount propagation, use private for it. // Refer https://docs.docker.com/storage/bind-mounts/#configure-bind-propagation. 
- return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil + return internalapi.MountPropagation_PROPAGATION_PRIVATE, nil } switch { case mountMode == nil: // PRIVATE is the default - return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil + return internalapi.MountPropagation_PROPAGATION_PRIVATE, nil case *mountMode == v1.MountPropagationHostToContainer: - return runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER, nil + return internalapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER, nil case *mountMode == v1.MountPropagationBidirectional: - return runtimeapi.MountPropagation_PROPAGATION_BIDIRECTIONAL, nil + return internalapi.MountPropagation_PROPAGATION_BIDIRECTIONAL, nil case *mountMode == v1.MountPropagationNone: - return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil + return internalapi.MountPropagation_PROPAGATION_PRIVATE, nil default: return 0, fmt.Errorf("invalid MountPropagation mode: %q", *mountMode) } diff --git a/pkg/kubelet/kubelet_pods_linux_test.go b/pkg/kubelet/kubelet_pods_linux_test.go index 4d4d69fb6e0e..f3b4d56e4c07 100644 --- a/pkg/kubelet/kubelet_pods_linux_test.go +++ b/pkg/kubelet/kubelet_pods_linux_test.go @@ -25,8 +25,8 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/api/core/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" _ "k8s.io/kubernetes/pkg/apis/core/install" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" volumetest "k8s.io/kubernetes/pkg/volume/testing" "k8s.io/kubernetes/pkg/volume/util/hostutil" @@ -86,7 +86,7 @@ func TestMakeMounts(t *testing.T) { HostPath: "/mnt/disk", ReadOnly: false, SELinuxRelabel: false, - Propagation: runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER, + Propagation: internalapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER, }, { Name: "disk", @@ -94,7 +94,7 @@ func TestMakeMounts(t *testing.T) { HostPath: "/mnt/disk", ReadOnly: true, SELinuxRelabel: false, - Propagation: 
runtimeapi.MountPropagation_PROPAGATION_PRIVATE, + Propagation: internalapi.MountPropagation_PROPAGATION_PRIVATE, }, { Name: "disk4", @@ -102,7 +102,7 @@ func TestMakeMounts(t *testing.T) { HostPath: "/mnt/host", ReadOnly: false, SELinuxRelabel: false, - Propagation: runtimeapi.MountPropagation_PROPAGATION_PRIVATE, + Propagation: internalapi.MountPropagation_PROPAGATION_PRIVATE, }, { Name: "disk5", @@ -110,7 +110,7 @@ func TestMakeMounts(t *testing.T) { HostPath: "/var/lib/kubelet/podID/volumes/empty/disk5", ReadOnly: false, SELinuxRelabel: false, - Propagation: runtimeapi.MountPropagation_PROPAGATION_PRIVATE, + Propagation: internalapi.MountPropagation_PROPAGATION_PRIVATE, }, }, expectErr: false, @@ -153,7 +153,7 @@ func TestMakeMounts(t *testing.T) { HostPath: "/mnt/disk", ReadOnly: false, SELinuxRelabel: false, - Propagation: runtimeapi.MountPropagation_PROPAGATION_BIDIRECTIONAL, + Propagation: internalapi.MountPropagation_PROPAGATION_BIDIRECTIONAL, }, { Name: "disk", @@ -161,7 +161,7 @@ func TestMakeMounts(t *testing.T) { HostPath: "/mnt/disk", ReadOnly: true, SELinuxRelabel: false, - Propagation: runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER, + Propagation: internalapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER, }, { Name: "disk4", @@ -169,7 +169,7 @@ func TestMakeMounts(t *testing.T) { HostPath: "/mnt/host", ReadOnly: false, SELinuxRelabel: false, - Propagation: runtimeapi.MountPropagation_PROPAGATION_PRIVATE, + Propagation: internalapi.MountPropagation_PROPAGATION_PRIVATE, }, }, expectErr: false, diff --git a/pkg/kubelet/kuberuntime/convert.go b/pkg/kubelet/kuberuntime/convert.go index 6b80477ce481..59038e1af2ce 100644 --- a/pkg/kubelet/kuberuntime/convert.go +++ b/pkg/kubelet/kuberuntime/convert.go @@ -19,13 +19,13 @@ package kuberuntime import ( "sort" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) // This file contains 
help function to kuberuntime types to CRI runtime API types, or vice versa. -func toKubeContainerImageSpec(image *runtimeapi.Image) kubecontainer.ImageSpec { +func toKubeContainerImageSpec(image *internalapi.Image) kubecontainer.ImageSpec { var annotations []kubecontainer.Annotation if image.Spec != nil && len(image.Spec.Annotations) > 0 { @@ -48,14 +48,14 @@ func toKubeContainerImageSpec(image *runtimeapi.Image) kubecontainer.ImageSpec { } } -func toRuntimeAPIImageSpec(imageSpec kubecontainer.ImageSpec) *runtimeapi.ImageSpec { +func toRuntimeAPIImageSpec(imageSpec kubecontainer.ImageSpec) *internalapi.ImageSpec { var annotations = make(map[string]string) if imageSpec.Annotations != nil { for _, a := range imageSpec.Annotations { annotations[a.Name] = a.Value } } - return &runtimeapi.ImageSpec{ + return &internalapi.ImageSpec{ Image: imageSpec.Image, Annotations: annotations, } diff --git a/pkg/kubelet/kuberuntime/convert_test.go b/pkg/kubelet/kuberuntime/convert_test.go index 7e2e95d2d85d..b5eba843f9a1 100644 --- a/pkg/kubelet/kuberuntime/convert_test.go +++ b/pkg/kubelet/kuberuntime/convert_test.go @@ -21,17 +21,17 @@ import ( "github.com/stretchr/testify/assert" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) func TestConvertToKubeContainerImageSpec(t *testing.T) { testCases := []struct { - input *runtimeapi.Image + input *internalapi.Image expected kubecontainer.ImageSpec }{ { - input: &runtimeapi.Image{ + input: &internalapi.Image{ Id: "test", Spec: nil, }, @@ -41,9 +41,9 @@ func TestConvertToKubeContainerImageSpec(t *testing.T) { }, }, { - input: &runtimeapi.Image{ + input: &internalapi.Image{ Id: "test", - Spec: &runtimeapi.ImageSpec{ + Spec: &internalapi.ImageSpec{ Annotations: nil, }, }, @@ -53,9 +53,9 @@ func TestConvertToKubeContainerImageSpec(t *testing.T) { }, }, { - input: &runtimeapi.Image{ + input: &internalapi.Image{ Id: "test", 
- Spec: &runtimeapi.ImageSpec{ + Spec: &internalapi.ImageSpec{ Annotations: map[string]string{}, }, }, @@ -65,9 +65,9 @@ func TestConvertToKubeContainerImageSpec(t *testing.T) { }, }, { - input: &runtimeapi.Image{ + input: &internalapi.Image{ Id: "test", - Spec: &runtimeapi.ImageSpec{ + Spec: &internalapi.ImageSpec{ Annotations: map[string]string{ "kubernetes.io/os": "linux", "kubernetes.io/runtimehandler": "handler", @@ -99,14 +99,14 @@ func TestConvertToKubeContainerImageSpec(t *testing.T) { func TestConvertToRuntimeAPIImageSpec(t *testing.T) { testCases := []struct { input kubecontainer.ImageSpec - expected *runtimeapi.ImageSpec + expected *internalapi.ImageSpec }{ { input: kubecontainer.ImageSpec{ Image: "test", Annotations: nil, }, - expected: &runtimeapi.ImageSpec{ + expected: &internalapi.ImageSpec{ Image: "test", Annotations: map[string]string{}, }, @@ -116,7 +116,7 @@ func TestConvertToRuntimeAPIImageSpec(t *testing.T) { Image: "test", Annotations: []kubecontainer.Annotation{}, }, - expected: &runtimeapi.ImageSpec{ + expected: &internalapi.ImageSpec{ Image: "test", Annotations: map[string]string{}, }, @@ -135,7 +135,7 @@ func TestConvertToRuntimeAPIImageSpec(t *testing.T) { }, }, }, - expected: &runtimeapi.ImageSpec{ + expected: &internalapi.ImageSpec{ Image: "test", Annotations: map[string]string{ "kubernetes.io/os": "linux", diff --git a/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go b/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go index e62866eb1b72..df2ebff2a62a 100644 --- a/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go @@ -28,8 +28,8 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" "k8s.io/component-base/logs/logreduction" - internalapi "k8s.io/cri-api/pkg/apis" "k8s.io/kubernetes/pkg/credentialprovider" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" 
"k8s.io/kubernetes/pkg/kubelet/images" diff --git a/pkg/kubelet/kuberuntime/helpers.go b/pkg/kubelet/kuberuntime/helpers.go index 8974888a15fa..1a8f99b5634e 100644 --- a/pkg/kubelet/kuberuntime/helpers.go +++ b/pkg/kubelet/kuberuntime/helpers.go @@ -24,8 +24,8 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog/v2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) @@ -42,7 +42,7 @@ func (b containersByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b containersByID) Less(i, j int) bool { return b[i].ID.ID < b[j].ID.ID } // Newest first. -type podSandboxByCreated []*runtimeapi.PodSandbox +type podSandboxByCreated []*internalapi.PodSandbox func (p podSandboxByCreated) Len() int { return len(p) } func (p podSandboxByCreated) Swap(i, j int) { p[i], p[j] = p[j], p[i] } @@ -54,39 +54,39 @@ func (c containerStatusByCreated) Len() int { return len(c) } func (c containerStatusByCreated) Swap(i, j int) { c[i], c[j] = c[j], c[i] } func (c containerStatusByCreated) Less(i, j int) bool { return c[i].CreatedAt.After(c[j].CreatedAt) } -// toKubeContainerState converts runtimeapi.ContainerState to kubecontainer.State. -func toKubeContainerState(state runtimeapi.ContainerState) kubecontainer.State { +// toKubeContainerState converts internalapi.ContainerState to kubecontainer.State. 
+func toKubeContainerState(state internalapi.ContainerState) kubecontainer.State { switch state { - case runtimeapi.ContainerState_CONTAINER_CREATED: + case internalapi.ContainerState_CONTAINER_CREATED: return kubecontainer.ContainerStateCreated - case runtimeapi.ContainerState_CONTAINER_RUNNING: + case internalapi.ContainerState_CONTAINER_RUNNING: return kubecontainer.ContainerStateRunning - case runtimeapi.ContainerState_CONTAINER_EXITED: + case internalapi.ContainerState_CONTAINER_EXITED: return kubecontainer.ContainerStateExited - case runtimeapi.ContainerState_CONTAINER_UNKNOWN: + case internalapi.ContainerState_CONTAINER_UNKNOWN: return kubecontainer.ContainerStateUnknown } return kubecontainer.ContainerStateUnknown } -// toRuntimeProtocol converts v1.Protocol to runtimeapi.Protocol. -func toRuntimeProtocol(protocol v1.Protocol) runtimeapi.Protocol { +// toRuntimeProtocol converts v1.Protocol to internalapi.Protocol. +func toRuntimeProtocol(protocol v1.Protocol) internalapi.Protocol { switch protocol { case v1.ProtocolTCP: - return runtimeapi.Protocol_TCP + return internalapi.Protocol_TCP case v1.ProtocolUDP: - return runtimeapi.Protocol_UDP + return internalapi.Protocol_UDP case v1.ProtocolSCTP: - return runtimeapi.Protocol_SCTP + return internalapi.Protocol_SCTP } klog.InfoS("Unknown protocol, defaulting to TCP", "protocol", protocol) - return runtimeapi.Protocol_TCP + return internalapi.Protocol_TCP } -// toKubeContainer converts runtimeapi.Container to kubecontainer.Container. -func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeapi.Container) (*kubecontainer.Container, error) { +// toKubeContainer converts internalapi.Container to kubecontainer.Container. 
+func (m *kubeGenericRuntimeManager) toKubeContainer(c *internalapi.Container) (*kubecontainer.Container, error) { if c == nil || c.Id == "" || c.Image == nil { return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container") } @@ -102,11 +102,11 @@ func (m *kubeGenericRuntimeManager) toKubeContainer(c *runtimeapi.Container) (*k }, nil } -// sandboxToKubeContainer converts runtimeapi.PodSandbox to kubecontainer.Container. +// sandboxToKubeContainer converts internalapi.PodSandbox to kubecontainer.Container. // This is only needed because we need to return sandboxes as if they were // kubecontainer.Containers to avoid substantial changes to PLEG. // TODO: Remove this once it becomes obsolete. -func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeapi.PodSandbox) (*kubecontainer.Container, error) { +func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *internalapi.PodSandbox) (*kubecontainer.Container, error) { if s == nil || s.Id == "" { return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container") } @@ -120,7 +120,7 @@ func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeapi.PodSand // getImageUser gets uid or user name that will run the command(s) from image. The function // guarantees that only one of them is set. func (m *kubeGenericRuntimeManager) getImageUser(image string) (*int64, string, error) { - imageStatus, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image}) + imageStatus, err := m.imageService.ImageStatus(&internalapi.ImageSpec{Image: image}) if err != nil { return nil, "", err } @@ -188,8 +188,8 @@ func parsePodUIDFromLogsDirectory(name string) types.UID { return types.UID(parts[len(parts)-1]) } -// toKubeRuntimeStatus converts the runtimeapi.RuntimeStatus to kubecontainer.RuntimeStatus. 
-func toKubeRuntimeStatus(status *runtimeapi.RuntimeStatus) *kubecontainer.RuntimeStatus { +// toKubeRuntimeStatus converts the internalapi.RuntimeStatus to kubecontainer.RuntimeStatus. +func toKubeRuntimeStatus(status *internalapi.RuntimeStatus) *kubecontainer.RuntimeStatus { conditions := []kubecontainer.RuntimeCondition{} for _, c := range status.GetConditions() { conditions = append(conditions, kubecontainer.RuntimeCondition{ @@ -266,36 +266,36 @@ func (m *kubeGenericRuntimeManager) getSeccompProfilePath(annotations map[string return "" } -func fieldSeccompProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) *runtimeapi.SecurityProfile { +func fieldSeccompProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) *internalapi.SecurityProfile { if scmp == nil { if fallbackToRuntimeDefault { - return &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_RuntimeDefault, + return &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_RuntimeDefault, } } - return &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_Unconfined, + return &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_Unconfined, } } if scmp.Type == v1.SeccompProfileTypeRuntimeDefault { - return &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_RuntimeDefault, + return &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_RuntimeDefault, } } if scmp.Type == v1.SeccompProfileTypeLocalhost && scmp.LocalhostProfile != nil && len(*scmp.LocalhostProfile) > 0 { fname := filepath.Join(profileRootPath, *scmp.LocalhostProfile) - return &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_Localhost, + return &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_Localhost, LocalhostRef: fname, } } - return &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_Unconfined, + return 
&internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_Unconfined, } } func (m *kubeGenericRuntimeManager) getSeccompProfile(annotations map[string]string, containerName string, - podSecContext *v1.PodSecurityContext, containerSecContext *v1.SecurityContext, fallbackToRuntimeDefault bool) *runtimeapi.SecurityProfile { + podSecContext *v1.PodSecurityContext, containerSecContext *v1.SecurityContext, fallbackToRuntimeDefault bool) *internalapi.SecurityProfile { // container fields are applied first if containerSecContext != nil && containerSecContext.SeccompProfile != nil { return fieldSeccompProfile(containerSecContext.SeccompProfile, m.seccompProfileRoot, fallbackToRuntimeDefault) @@ -307,47 +307,47 @@ func (m *kubeGenericRuntimeManager) getSeccompProfile(annotations map[string]str } if fallbackToRuntimeDefault { - return &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_RuntimeDefault, + return &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_RuntimeDefault, } } - return &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_Unconfined, + return &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_Unconfined, } } -func ipcNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode { +func ipcNamespaceForPod(pod *v1.Pod) internalapi.NamespaceMode { if pod != nil && pod.Spec.HostIPC { - return runtimeapi.NamespaceMode_NODE + return internalapi.NamespaceMode_NODE } - return runtimeapi.NamespaceMode_POD + return internalapi.NamespaceMode_POD } -func networkNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode { +func networkNamespaceForPod(pod *v1.Pod) internalapi.NamespaceMode { if pod != nil && pod.Spec.HostNetwork { - return runtimeapi.NamespaceMode_NODE + return internalapi.NamespaceMode_NODE } - return runtimeapi.NamespaceMode_POD + return internalapi.NamespaceMode_POD } -func pidNamespaceForPod(pod *v1.Pod) runtimeapi.NamespaceMode { +func pidNamespaceForPod(pod *v1.Pod) 
internalapi.NamespaceMode { if pod != nil { if pod.Spec.HostPID { - return runtimeapi.NamespaceMode_NODE + return internalapi.NamespaceMode_NODE } if pod.Spec.ShareProcessNamespace != nil && *pod.Spec.ShareProcessNamespace { - return runtimeapi.NamespaceMode_POD + return internalapi.NamespaceMode_POD } } // Note that PID does not default to the zero value for v1.Pod - return runtimeapi.NamespaceMode_CONTAINER + return internalapi.NamespaceMode_CONTAINER } -// namespacesForPod returns the runtimeapi.NamespaceOption for a given pod. +// namespacesForPod returns the internalapi.NamespaceOption for a given pod. // An empty or nil pod can be used to get the namespace defaults for v1.Pod. -func namespacesForPod(pod *v1.Pod) *runtimeapi.NamespaceOption { - return &runtimeapi.NamespaceOption{ +func namespacesForPod(pod *v1.Pod) *internalapi.NamespaceOption { + return &internalapi.NamespaceOption{ Ipc: ipcNamespaceForPod(pod), Network: networkNamespaceForPod(pod), Pid: pidNamespaceForPod(pod), diff --git a/pkg/kubelet/kuberuntime/helpers_test.go b/pkg/kubelet/kuberuntime/helpers_test.go index 7cc6298e6367..be89983a5d96 100644 --- a/pkg/kubelet/kuberuntime/helpers_test.go +++ b/pkg/kubelet/kuberuntime/helpers_test.go @@ -25,8 +25,8 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - runtimetesting "k8s.io/cri-api/pkg/apis/testing" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" + runtimetesting "k8s.io/kubernetes/pkg/kubelet/apis/cri/testing" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" utilpointer "k8s.io/utils/pointer" ) @@ -63,15 +63,15 @@ func TestStableKey(t *testing.T) { } func TestToKubeContainer(t *testing.T) { - c := &runtimeapi.Container{ + c := &internalapi.Container{ Id: "test-id", - Metadata: &runtimeapi.ContainerMetadata{ + Metadata: &internalapi.ContainerMetadata{ Name: "test-name", Attempt: 1, }, - Image: &runtimeapi.ImageSpec{Image: "test-image"}, + 
Image: &internalapi.ImageSpec{Image: "test-image"}, ImageRef: "test-image-ref", - State: runtimeapi.ContainerState_CONTAINER_RUNNING, + State: internalapi.ContainerState_CONTAINER_RUNNING, Annotations: map[string]string{ containerHashLabel: "1234", }, @@ -101,7 +101,7 @@ func TestGetImageUser(t *testing.T) { type image struct { name string - uid *runtimeapi.Int64Value + uid *internalapi.Int64Value username string } @@ -122,7 +122,7 @@ func TestGetImageUser(t *testing.T) { "image without username and uid should return (new(int64), \"\", nil)", image{ name: "test-image-ref1", - uid: (*runtimeapi.Int64Value)(nil), + uid: (*internalapi.Int64Value)(nil), username: "", }, imageUserValues{ @@ -135,7 +135,7 @@ func TestGetImageUser(t *testing.T) { "image with username and no uid should return ((*int64)nil, imageStatus.Username, nil)", image{ name: "test-image-ref2", - uid: (*runtimeapi.Int64Value)(nil), + uid: (*internalapi.Int64Value)(nil), username: "testUser", }, imageUserValues{ @@ -148,7 +148,7 @@ func TestGetImageUser(t *testing.T) { "image with uid should return (*int64, \"\", nil)", image{ name: "test-image-ref3", - uid: &runtimeapi.Int64Value{ + uid: &internalapi.Int64Value{ Value: 2, }, username: "whatever", @@ -674,12 +674,12 @@ func TestGetSeccompProfile(t *testing.T) { _, _, m, err := createTestRuntimeManager() require.NoError(t, err) - unconfinedProfile := &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_Unconfined, + unconfinedProfile := &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_Unconfined, } - runtimeDefaultProfile := &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_RuntimeDefault, + runtimeDefaultProfile := &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_RuntimeDefault, } tests := []struct { @@ -688,7 +688,7 @@ func TestGetSeccompProfile(t *testing.T) { podSc *v1.PodSecurityContext containerSc *v1.SecurityContext containerName string - expectedProfile 
*runtimeapi.SecurityProfile + expectedProfile *internalapi.SecurityProfile }{ { description: "no seccomp should return unconfined", @@ -717,8 +717,8 @@ func TestGetSeccompProfile(t *testing.T) { { description: "pod seccomp profile set to SeccompProfileTypeLocalhost returns 'localhost/' + LocalhostProfile", podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("filename")}}, - expectedProfile: &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_Localhost, + expectedProfile: &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_Localhost, LocalhostRef: seccompLocalhostRef("filename"), }, }, @@ -735,8 +735,8 @@ func TestGetSeccompProfile(t *testing.T) { { description: "container seccomp profile set to SeccompProfileTypeLocalhost returns 'localhost/' + LocalhostProfile", containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("filename2")}}, - expectedProfile: &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_Localhost, + expectedProfile: &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_Localhost, LocalhostRef: seccompLocalhostRef("filename2"), }, }, @@ -751,8 +751,8 @@ func TestGetSeccompProfile(t *testing.T) { podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("field-pod-profile.json")}}, containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("field-cont-profile.json")}}, containerName: "container1", - expectedProfile: &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_Localhost, + expectedProfile: &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_Localhost, LocalhostRef: seccompLocalhostRef("field-cont-profile.json"), }, }, @@ -768,12 
+768,12 @@ func TestGetSeccompProfileDefaultSeccomp(t *testing.T) { _, _, m, err := createTestRuntimeManager() require.NoError(t, err) - unconfinedProfile := &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_Unconfined, + unconfinedProfile := &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_Unconfined, } - runtimeDefaultProfile := &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_RuntimeDefault, + runtimeDefaultProfile := &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_RuntimeDefault, } tests := []struct { @@ -782,7 +782,7 @@ func TestGetSeccompProfileDefaultSeccomp(t *testing.T) { podSc *v1.PodSecurityContext containerSc *v1.SecurityContext containerName string - expectedProfile *runtimeapi.SecurityProfile + expectedProfile *internalapi.SecurityProfile }{ { description: "no seccomp should return RuntimeDefault", @@ -811,8 +811,8 @@ func TestGetSeccompProfileDefaultSeccomp(t *testing.T) { { description: "pod seccomp profile set to SeccompProfileTypeLocalhost returns 'localhost/' + LocalhostProfile", podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("filename")}}, - expectedProfile: &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_Localhost, + expectedProfile: &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_Localhost, LocalhostRef: seccompLocalhostRef("filename"), }, }, @@ -829,8 +829,8 @@ func TestGetSeccompProfileDefaultSeccomp(t *testing.T) { { description: "container seccomp profile set to SeccompProfileTypeLocalhost returns 'localhost/' + LocalhostProfile", containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("filename2")}}, - expectedProfile: &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_Localhost, + expectedProfile: 
&internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_Localhost, LocalhostRef: seccompLocalhostRef("filename2"), }, }, @@ -845,8 +845,8 @@ func TestGetSeccompProfileDefaultSeccomp(t *testing.T) { podSc: &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("field-pod-profile.json")}}, containerSc: &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeLocalhost, LocalhostProfile: getLocal("field-cont-profile.json")}}, containerName: "container1", - expectedProfile: &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_Localhost, + expectedProfile: &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_Localhost, LocalhostRef: seccompLocalhostRef("field-cont-profile.json"), }, }, @@ -865,22 +865,22 @@ func getLocal(v string) *string { func TestNamespacesForPod(t *testing.T) { for desc, test := range map[string]struct { input *v1.Pod - expected *runtimeapi.NamespaceOption + expected *internalapi.NamespaceOption }{ "nil pod -> default v1 namespaces": { nil, - &runtimeapi.NamespaceOption{ - Ipc: runtimeapi.NamespaceMode_POD, - Network: runtimeapi.NamespaceMode_POD, - Pid: runtimeapi.NamespaceMode_CONTAINER, + &internalapi.NamespaceOption{ + Ipc: internalapi.NamespaceMode_POD, + Network: internalapi.NamespaceMode_POD, + Pid: internalapi.NamespaceMode_CONTAINER, }, }, "v1.Pod default namespaces": { &v1.Pod{}, - &runtimeapi.NamespaceOption{ - Ipc: runtimeapi.NamespaceMode_POD, - Network: runtimeapi.NamespaceMode_POD, - Pid: runtimeapi.NamespaceMode_CONTAINER, + &internalapi.NamespaceOption{ + Ipc: internalapi.NamespaceMode_POD, + Network: internalapi.NamespaceMode_POD, + Pid: internalapi.NamespaceMode_CONTAINER, }, }, "Host Namespaces": { @@ -891,10 +891,10 @@ func TestNamespacesForPod(t *testing.T) { HostPID: true, }, }, - &runtimeapi.NamespaceOption{ - Ipc: runtimeapi.NamespaceMode_NODE, - Network: 
runtimeapi.NamespaceMode_NODE, - Pid: runtimeapi.NamespaceMode_NODE, + &internalapi.NamespaceOption{ + Ipc: internalapi.NamespaceMode_NODE, + Network: internalapi.NamespaceMode_NODE, + Pid: internalapi.NamespaceMode_NODE, }, }, "Shared Process Namespace (feature enabled)": { @@ -903,10 +903,10 @@ func TestNamespacesForPod(t *testing.T) { ShareProcessNamespace: &[]bool{true}[0], }, }, - &runtimeapi.NamespaceOption{ - Ipc: runtimeapi.NamespaceMode_POD, - Network: runtimeapi.NamespaceMode_POD, - Pid: runtimeapi.NamespaceMode_POD, + &internalapi.NamespaceOption{ + Ipc: internalapi.NamespaceMode_POD, + Network: internalapi.NamespaceMode_POD, + Pid: internalapi.NamespaceMode_POD, }, }, "Shared Process Namespace, redundant flag (feature enabled)": { @@ -915,10 +915,10 @@ func TestNamespacesForPod(t *testing.T) { ShareProcessNamespace: &[]bool{false}[0], }, }, - &runtimeapi.NamespaceOption{ - Ipc: runtimeapi.NamespaceMode_POD, - Network: runtimeapi.NamespaceMode_POD, - Pid: runtimeapi.NamespaceMode_CONTAINER, + &internalapi.NamespaceOption{ + Ipc: internalapi.NamespaceMode_POD, + Network: internalapi.NamespaceMode_POD, + Pid: internalapi.NamespaceMode_CONTAINER, }, }, } { diff --git a/pkg/kubelet/kuberuntime/instrumented_services.go b/pkg/kubelet/kuberuntime/instrumented_services.go index 91d57b097f96..b61835a3073b 100644 --- a/pkg/kubelet/kuberuntime/instrumented_services.go +++ b/pkg/kubelet/kuberuntime/instrumented_services.go @@ -19,8 +19,7 @@ package kuberuntime import ( "time" - internalapi "k8s.io/cri-api/pkg/apis" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/metrics" ) @@ -59,7 +58,11 @@ func recordError(operation string, err error) { } } -func (in instrumentedRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) { +func (in instrumentedRuntimeService) APIVersion() internalapi.APIVersion { + return in.service.APIVersion() +} + +func (in 
instrumentedRuntimeService) Version(apiVersion string) (*internalapi.VersionResponse, error) { const operation = "version" defer recordOperation(operation, time.Now()) @@ -68,7 +71,7 @@ func (in instrumentedRuntimeService) Version(apiVersion string) (*runtimeapi.Ver return out, err } -func (in instrumentedRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) { +func (in instrumentedRuntimeService) Status() (*internalapi.RuntimeStatus, error) { const operation = "status" defer recordOperation(operation, time.Now()) @@ -77,7 +80,7 @@ func (in instrumentedRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) return out, err } -func (in instrumentedRuntimeService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (in instrumentedRuntimeService) CreateContainer(podSandboxID string, config *internalapi.ContainerConfig, sandboxConfig *internalapi.PodSandboxConfig) (string, error) { const operation = "create_container" defer recordOperation(operation, time.Now()) @@ -113,7 +116,7 @@ func (in instrumentedRuntimeService) RemoveContainer(containerID string) error { return err } -func (in instrumentedRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) { +func (in instrumentedRuntimeService) ListContainers(filter *internalapi.ContainerFilter) ([]*internalapi.Container, error) { const operation = "list_containers" defer recordOperation(operation, time.Now()) @@ -122,7 +125,7 @@ func (in instrumentedRuntimeService) ListContainers(filter *runtimeapi.Container return out, err } -func (in instrumentedRuntimeService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) { +func (in instrumentedRuntimeService) ContainerStatus(containerID string) (*internalapi.ContainerStatus, error) { const operation = "container_status" defer recordOperation(operation, time.Now()) @@ -131,7 +134,7 @@ func (in 
instrumentedRuntimeService) ContainerStatus(containerID string) (*runti return out, err } -func (in instrumentedRuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources) error { +func (in instrumentedRuntimeService) UpdateContainerResources(containerID string, resources *internalapi.LinuxContainerResources) error { const operation = "update_container" defer recordOperation(operation, time.Now()) @@ -158,7 +161,7 @@ func (in instrumentedRuntimeService) ExecSync(containerID string, cmd []string, return stdout, stderr, err } -func (in instrumentedRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { +func (in instrumentedRuntimeService) Exec(req *internalapi.ExecRequest) (*internalapi.ExecResponse, error) { const operation = "exec" defer recordOperation(operation, time.Now()) @@ -167,7 +170,7 @@ func (in instrumentedRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtime return resp, err } -func (in instrumentedRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { +func (in instrumentedRuntimeService) Attach(req *internalapi.AttachRequest) (*internalapi.AttachResponse, error) { const operation = "attach" defer recordOperation(operation, time.Now()) @@ -176,7 +179,7 @@ func (in instrumentedRuntimeService) Attach(req *runtimeapi.AttachRequest) (*run return resp, err } -func (in instrumentedRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) { +func (in instrumentedRuntimeService) RunPodSandbox(config *internalapi.PodSandboxConfig, runtimeHandler string) (string, error) { const operation = "run_podsandbox" startTime := time.Now() defer recordOperation(operation, startTime) @@ -208,7 +211,7 @@ func (in instrumentedRuntimeService) RemovePodSandbox(podSandboxID string) error return err } -func (in instrumentedRuntimeService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) 
{ +func (in instrumentedRuntimeService) PodSandboxStatus(podSandboxID string) (*internalapi.PodSandboxStatus, error) { const operation = "podsandbox_status" defer recordOperation(operation, time.Now()) @@ -217,7 +220,7 @@ func (in instrumentedRuntimeService) PodSandboxStatus(podSandboxID string) (*run return out, err } -func (in instrumentedRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) { +func (in instrumentedRuntimeService) ListPodSandbox(filter *internalapi.PodSandboxFilter) ([]*internalapi.PodSandbox, error) { const operation = "list_podsandbox" defer recordOperation(operation, time.Now()) @@ -226,7 +229,7 @@ func (in instrumentedRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandbo return out, err } -func (in instrumentedRuntimeService) ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) { +func (in instrumentedRuntimeService) ContainerStats(containerID string) (*internalapi.ContainerStats, error) { const operation = "container_stats" defer recordOperation(operation, time.Now()) @@ -235,7 +238,7 @@ func (in instrumentedRuntimeService) ContainerStats(containerID string) (*runtim return out, err } -func (in instrumentedRuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { +func (in instrumentedRuntimeService) ListContainerStats(filter *internalapi.ContainerStatsFilter) ([]*internalapi.ContainerStats, error) { const operation = "list_container_stats" defer recordOperation(operation, time.Now()) @@ -244,7 +247,7 @@ func (in instrumentedRuntimeService) ListContainerStats(filter *runtimeapi.Conta return out, err } -func (in instrumentedRuntimeService) PodSandboxStats(podSandboxID string) (*runtimeapi.PodSandboxStats, error) { +func (in instrumentedRuntimeService) PodSandboxStats(podSandboxID string) (*internalapi.PodSandboxStats, error) { const operation = "podsandbox_stats" defer recordOperation(operation, time.Now()) @@ 
-253,7 +256,7 @@ func (in instrumentedRuntimeService) PodSandboxStats(podSandboxID string) (*runt return out, err } -func (in instrumentedRuntimeService) ListPodSandboxStats(filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) { +func (in instrumentedRuntimeService) ListPodSandboxStats(filter *internalapi.PodSandboxStatsFilter) ([]*internalapi.PodSandboxStats, error) { const operation = "list_podsandbox_stats" defer recordOperation(operation, time.Now()) @@ -262,7 +265,7 @@ func (in instrumentedRuntimeService) ListPodSandboxStats(filter *runtimeapi.PodS return out, err } -func (in instrumentedRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { +func (in instrumentedRuntimeService) PortForward(req *internalapi.PortForwardRequest) (*internalapi.PortForwardResponse, error) { const operation = "port_forward" defer recordOperation(operation, time.Now()) @@ -271,7 +274,7 @@ func (in instrumentedRuntimeService) PortForward(req *runtimeapi.PortForwardRequ return resp, err } -func (in instrumentedRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error { +func (in instrumentedRuntimeService) UpdateRuntimeConfig(runtimeConfig *internalapi.RuntimeConfig) error { const operation = "update_runtime_config" defer recordOperation(operation, time.Now()) @@ -280,7 +283,7 @@ func (in instrumentedRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimea return err } -func (in instrumentedImageManagerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) { +func (in instrumentedImageManagerService) ListImages(filter *internalapi.ImageFilter) ([]*internalapi.Image, error) { const operation = "list_images" defer recordOperation(operation, time.Now()) @@ -289,7 +292,7 @@ func (in instrumentedImageManagerService) ListImages(filter *runtimeapi.ImageFil return out, err } -func (in instrumentedImageManagerService) ImageStatus(image *runtimeapi.ImageSpec) 
(*runtimeapi.Image, error) { +func (in instrumentedImageManagerService) ImageStatus(image *internalapi.ImageSpec) (*internalapi.Image, error) { const operation = "image_status" defer recordOperation(operation, time.Now()) @@ -298,7 +301,7 @@ func (in instrumentedImageManagerService) ImageStatus(image *runtimeapi.ImageSpe return out, err } -func (in instrumentedImageManagerService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (in instrumentedImageManagerService) PullImage(image *internalapi.ImageSpec, auth *internalapi.AuthConfig, podSandboxConfig *internalapi.PodSandboxConfig) (string, error) { const operation = "pull_image" defer recordOperation(operation, time.Now()) @@ -307,7 +310,7 @@ func (in instrumentedImageManagerService) PullImage(image *runtimeapi.ImageSpec, return imageRef, err } -func (in instrumentedImageManagerService) RemoveImage(image *runtimeapi.ImageSpec) error { +func (in instrumentedImageManagerService) RemoveImage(image *internalapi.ImageSpec) error { const operation = "remove_image" defer recordOperation(operation, time.Now()) @@ -316,7 +319,7 @@ func (in instrumentedImageManagerService) RemoveImage(image *runtimeapi.ImageSpe return err } -func (in instrumentedImageManagerService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) { +func (in instrumentedImageManagerService) ImageFsInfo() ([]*internalapi.FilesystemUsage, error) { const operation = "image_fs_info" defer recordOperation(operation, time.Now()) diff --git a/pkg/kubelet/kuberuntime/instrumented_services_test.go b/pkg/kubelet/kuberuntime/instrumented_services_test.go index d027414b6503..25c4b2a0e1ed 100644 --- a/pkg/kubelet/kuberuntime/instrumented_services_test.go +++ b/pkg/kubelet/kuberuntime/instrumented_services_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/component-base/metrics/legacyregistry" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" 
+ internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/metrics" ) @@ -73,19 +73,19 @@ func TestInstrumentedVersion(t *testing.T) { func TestStatus(t *testing.T) { fakeRuntime, _, _, _ := createTestRuntimeManager() - fakeRuntime.FakeStatus = &runtimeapi.RuntimeStatus{ - Conditions: []*runtimeapi.RuntimeCondition{ - {Type: runtimeapi.RuntimeReady, Status: false}, - {Type: runtimeapi.NetworkReady, Status: true}, + fakeRuntime.FakeStatus = &internalapi.RuntimeStatus{ + Conditions: []*internalapi.RuntimeCondition{ + {Type: internalapi.RuntimeReady, Status: false}, + {Type: internalapi.NetworkReady, Status: true}, }, } irs := newInstrumentedRuntimeService(fakeRuntime) actural, err := irs.Status() assert.NoError(t, err) - expected := &runtimeapi.RuntimeStatus{ - Conditions: []*runtimeapi.RuntimeCondition{ - {Type: runtimeapi.RuntimeReady, Status: false}, - {Type: runtimeapi.NetworkReady, Status: true}, + expected := &internalapi.RuntimeStatus{ + Conditions: []*internalapi.RuntimeCondition{ + {Type: internalapi.RuntimeReady, Status: false}, + {Type: internalapi.NetworkReady, Status: true}, }, } assert.Equal(t, expected, actural) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go index 44fbfee46720..53a3debaff09 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -45,8 +45,8 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/types" @@ -170,7 +170,7 @@ func calcRestartCountByLogDir(path string) (int, error) { // * create the container // * start the 
container // * run the post start lifecycle hooks (if applicable) -func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, spec *startSpec, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string) (string, error) { +func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *internalapi.PodSandboxConfig, spec *startSpec, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string) (string, error) { container := spec.container // Step 1: pull the image. @@ -294,7 +294,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb } // generateContainerConfig generates container config for kubelet runtime v1. -func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, podIPs []string, nsTarget *kubecontainer.ContainerID) (*runtimeapi.ContainerConfig, func(), error) { +func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, podIPs []string, nsTarget *kubecontainer.ContainerID) (*internalapi.ContainerConfig, func(), error) { opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, podIPs) if err != nil { return nil, nil, err @@ -318,12 +318,12 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai } containerLogsPath := buildContainerLogsPath(container.Name, restartCount) restartCountUint32 := uint32(restartCount) - config := &runtimeapi.ContainerConfig{ - Metadata: &runtimeapi.ContainerMetadata{ + config := &internalapi.ContainerConfig{ + Metadata: &internalapi.ContainerMetadata{ Name: container.Name, Attempt: restartCountUint32, }, - Image: &runtimeapi.ImageSpec{Image: imageRef}, + Image: &internalapi.ImageSpec{Image: imageRef}, 
Command: command, Args: args, WorkingDir: container.WorkingDir, @@ -343,10 +343,10 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai } // set environment variables - envs := make([]*runtimeapi.KeyValue, len(opts.Envs)) + envs := make([]*internalapi.KeyValue, len(opts.Envs)) for idx := range opts.Envs { e := opts.Envs[idx] - envs[idx] = &runtimeapi.KeyValue{ + envs[idx] = &internalapi.KeyValue{ Key: e.Name, Value: e.Value, } @@ -357,12 +357,12 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai } // makeDevices generates container devices for kubelet runtime v1. -func makeDevices(opts *kubecontainer.RunContainerOptions) []*runtimeapi.Device { - devices := make([]*runtimeapi.Device, len(opts.Devices)) +func makeDevices(opts *kubecontainer.RunContainerOptions) []*internalapi.Device { + devices := make([]*internalapi.Device, len(opts.Devices)) for idx := range opts.Devices { device := opts.Devices[idx] - devices[idx] = &runtimeapi.Device{ + devices[idx] = &internalapi.Device{ HostPath: device.PathOnHost, ContainerPath: device.PathInContainer, Permissions: device.Permissions, @@ -373,13 +373,13 @@ func makeDevices(opts *kubecontainer.RunContainerOptions) []*runtimeapi.Device { } // makeMounts generates container volume mounts for kubelet runtime v1. 
-func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerOptions, container *v1.Container) []*runtimeapi.Mount { - volumeMounts := []*runtimeapi.Mount{} +func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerOptions, container *v1.Container) []*internalapi.Mount { + volumeMounts := []*internalapi.Mount{} for idx := range opts.Mounts { v := opts.Mounts[idx] selinuxRelabel := v.SELinuxRelabel && selinux.SELinuxEnabled() - mount := &runtimeapi.Mount{ + mount := &internalapi.Mount{ HostPath: v.HostPath, ContainerPath: v.ContainerPath, Readonly: v.ReadOnly, @@ -419,7 +419,7 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO containerLogPath = volumeutil.MakeAbsolutePath(goruntime.GOOS, containerLogPath) terminationMessagePath := volumeutil.MakeAbsolutePath(goruntime.GOOS, container.TerminationMessagePath) selinuxRelabel := selinux.SELinuxEnabled() - volumeMounts = append(volumeMounts, &runtimeapi.Mount{ + volumeMounts = append(volumeMounts, &internalapi.Mount{ HostPath: containerLogPath, ContainerPath: terminationMessagePath, SelinuxRelabel: selinuxRelabel, @@ -433,11 +433,11 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO // getKubeletContainers lists containers managed by kubelet. // The boolean parameter specifies whether returns all containers including // those already exited and dead containers (used for garbage collection). 
-func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]*runtimeapi.Container, error) { - filter := &runtimeapi.ContainerFilter{} +func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]*internalapi.Container, error) { + filter := &internalapi.ContainerFilter{} if !allContainers { - filter.State = &runtimeapi.ContainerStateValue{ - State: runtimeapi.ContainerState_CONTAINER_RUNNING, + filter.State = &internalapi.ContainerStateValue{ + State: internalapi.ContainerState_CONTAINER_RUNNING, } } @@ -457,7 +457,7 @@ func makeUID() string { // getTerminationMessage looks on the filesystem for the provided termination message path, returning a limited // amount of those bytes, or returns true if the logs should be checked. -func getTerminationMessage(status *runtimeapi.ContainerStatus, terminationMessagePath string, fallbackToLogs bool) (string, bool) { +func getTerminationMessage(status *internalapi.ContainerStatus, terminationMessagePath string, fallbackToLogs bool) (string, bool) { if len(terminationMessagePath) == 0 { return "", fallbackToLogs } @@ -494,7 +494,7 @@ func (m *kubeGenericRuntimeManager) readLastStringFromContainerLogs(path string) // getPodContainerStatuses gets all containers' statuses for the pod. func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, name, namespace string) ([]*kubecontainer.Status, error) { // Select all containers of the given pod. 
- containers, err := m.runtimeService.ListContainers(&runtimeapi.ContainerFilter{ + containers, err := m.runtimeService.ListContainers(&internalapi.ContainerFilter{ LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(uid)}, }) if err != nil { @@ -512,7 +512,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n return nil, err } cStatus := toKubeContainerStatus(status, m.runtimeName) - if status.State == runtimeapi.ContainerState_CONTAINER_EXITED { + if status.State == internalapi.ContainerState_CONTAINER_EXITED { // Populate the termination message if needed. annotatedInfo := getContainerInfoFromAnnotations(status.Annotations) // If a container cannot even be started, it certainly does not have logs, so no need to fallbackToLogs. @@ -545,7 +545,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n return statuses, nil } -func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName string) *kubecontainer.Status { +func toKubeContainerStatus(status *internalapi.ContainerStatus, runtimeName string) *kubecontainer.Status { annotatedInfo := getContainerInfoFromAnnotations(status.Annotations) labeledInfo := getContainerInfoFromLabels(status.Labels) cStatus := &kubecontainer.Status{ @@ -562,12 +562,12 @@ func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName strin CreatedAt: time.Unix(0, status.CreatedAt), } - if status.State != runtimeapi.ContainerState_CONTAINER_CREATED { + if status.State != internalapi.ContainerState_CONTAINER_CREATED { // If container is not in the created state, we have tried and // started the container. Set the StartedAt time. 
cStatus.StartedAt = time.Unix(0, status.StartedAt) } - if status.State == runtimeapi.ContainerState_CONTAINER_EXITED { + if status.State == internalapi.ContainerState_CONTAINER_EXITED { cStatus.Reason = status.Reason cStatus.Message = status.Message cStatus.ExitCode = int(status.ExitCode) @@ -887,7 +887,7 @@ func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v // GetExec gets the endpoint the runtime will serve the exec request from. func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) { - req := &runtimeapi.ExecRequest{ + req := &internalapi.ExecRequest{ ContainerId: id.ID, Cmd: cmd, Tty: tty, @@ -905,7 +905,7 @@ func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd [] // GetAttach gets the endpoint the runtime will serve the attach request from. func (m *kubeGenericRuntimeManager) GetAttach(id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) { - req := &runtimeapi.AttachRequest{ + req := &internalapi.AttachRequest{ ContainerId: id.ID, Stdin: stdin, Stdout: stdout, diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go b/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go index f8956acf7285..cd9ab965f59e 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go @@ -28,18 +28,18 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" utilfeature "k8s.io/apiserver/pkg/util/feature" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog/v2" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" kubefeatures "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/qos" kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" ) -// 
applyPlatformSpecificContainerConfig applies platform specific configurations to runtimeapi.ContainerConfig. -func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID) error { +// applyPlatformSpecificContainerConfig applies platform specific configurations to internalapi.ContainerConfig. +func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *internalapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID) error { enforceMemoryQoS := false // Set memory.min and memory.high if MemoryQoS enabled with cgroups v2 if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) && @@ -51,14 +51,14 @@ func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config } // generateLinuxContainerConfig generates linux container config for kubelet runtime v1. 
-func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID, enforceMemoryQoS bool) *runtimeapi.LinuxContainerConfig { - lc := &runtimeapi.LinuxContainerConfig{ - Resources: &runtimeapi.LinuxContainerResources{}, +func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID, enforceMemoryQoS bool) *internalapi.LinuxContainerConfig { + lc := &internalapi.LinuxContainerConfig{ + Resources: &internalapi.LinuxContainerResources{}, SecurityContext: m.determineEffectiveSecurityContext(pod, container, uid, username), } - if nsTarget != nil && lc.SecurityContext.NamespaceOptions.Pid == runtimeapi.NamespaceMode_CONTAINER { - lc.SecurityContext.NamespaceOptions.Pid = runtimeapi.NamespaceMode_TARGET + if nsTarget != nil && lc.SecurityContext.NamespaceOptions.Pid == internalapi.NamespaceMode_CONTAINER { + lc.SecurityContext.NamespaceOptions.Pid = internalapi.NamespaceMode_TARGET lc.SecurityContext.NamespaceOptions.TargetId = nsTarget.ID } @@ -129,8 +129,8 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C } // calculateLinuxResources will create the linuxContainerResources type based on the provided CPU and memory resource requests, limits -func (m *kubeGenericRuntimeManager) calculateLinuxResources(cpuRequest, cpuLimit, memoryLimit *resource.Quantity) *runtimeapi.LinuxContainerResources { - resources := runtimeapi.LinuxContainerResources{} +func (m *kubeGenericRuntimeManager) calculateLinuxResources(cpuRequest, cpuLimit, memoryLimit *resource.Quantity) *internalapi.LinuxContainerResources { + resources := internalapi.LinuxContainerResources{} var cpuShares int64 memLimit := memoryLimit.Value() @@ -166,12 +166,12 @@ func (m *kubeGenericRuntimeManager) calculateLinuxResources(cpuRequest, cpuLimit } // GetHugepageLimitsFromResources 
returns limits of each hugepages from resources. -func GetHugepageLimitsFromResources(resources v1.ResourceRequirements) []*runtimeapi.HugepageLimit { - var hugepageLimits []*runtimeapi.HugepageLimit +func GetHugepageLimitsFromResources(resources v1.ResourceRequirements) []*internalapi.HugepageLimit { + var hugepageLimits []*internalapi.HugepageLimit // For each page size, limit to 0. for _, pageSize := range cgroupfs.HugePageSizes { - hugepageLimits = append(hugepageLimits, &runtimeapi.HugepageLimit{ + hugepageLimits = append(hugepageLimits, &internalapi.HugepageLimit{ PageSize: pageSize, Limit: uint64(0), }) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go index 3ef2f98fde04..0e652f073b08 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go @@ -32,27 +32,27 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" featuregatetesting "k8s.io/component-base/featuregate/testing" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" ) -func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerIndex int, enforceMemoryQoS bool) *runtimeapi.ContainerConfig { +func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerIndex int, enforceMemoryQoS bool) *internalapi.ContainerConfig { container := &pod.Spec.Containers[containerIndex] podIP := "" restartCount := 0 opts, _, _ := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, []string{podIP}) containerLogsPath := buildContainerLogsPath(container.Name, restartCount) restartCountUint32 := uint32(restartCount) - envs := make([]*runtimeapi.KeyValue, len(opts.Envs)) + 
envs := make([]*internalapi.KeyValue, len(opts.Envs)) - expectedConfig := &runtimeapi.ContainerConfig{ - Metadata: &runtimeapi.ContainerMetadata{ + expectedConfig := &internalapi.ContainerConfig{ + Metadata: &internalapi.ContainerMetadata{ Name: container.Name, Attempt: restartCountUint32, }, - Image: &runtimeapi.ImageSpec{Image: container.Image}, + Image: &internalapi.ImageSpec{Image: container.Image}, Command: container.Command, Args: []string(nil), WorkingDir: container.WorkingDir, @@ -134,8 +134,8 @@ func TestGenerateContainerConfig(t *testing.T) { _, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil) assert.Error(t, err) - imageID, _ := imageService.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil, nil) - image, _ := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imageID}) + imageID, _ := imageService.PullImage(&internalapi.ImageSpec{Image: "busybox"}, nil, nil) + image, _ := imageService.ImageStatus(&internalapi.ImageSpec{Image: imageID}) image.Uid = nil image.Username = "test" @@ -156,7 +156,7 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) { tests := []struct { name string podResources v1.ResourceRequirements - expected *runtimeapi.LinuxContainerResources + expected *internalapi.LinuxContainerResources }{ { name: "Request 128M/1C, Limit 256M/3C", @@ -170,7 +170,7 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) { v1.ResourceCPU: resource.MustParse("3"), }, }, - expected: &runtimeapi.LinuxContainerResources{ + expected: &internalapi.LinuxContainerResources{ CpuPeriod: 100000, CpuQuota: 300000, CpuShares: 1024, @@ -185,7 +185,7 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) { v1.ResourceCPU: resource.MustParse("2"), }, }, - expected: &runtimeapi.LinuxContainerResources{ + expected: &internalapi.LinuxContainerResources{ CpuPeriod: 100000, CpuQuota: 0, 
CpuShares: 2048, @@ -234,14 +234,14 @@ func TestCalculateLinuxResources(t *testing.T) { cpuReq resource.Quantity cpuLim resource.Quantity memLim resource.Quantity - expected *runtimeapi.LinuxContainerResources + expected *internalapi.LinuxContainerResources }{ { name: "Request128MBLimit256MB", cpuReq: resource.MustParse("1"), cpuLim: resource.MustParse("2"), memLim: resource.MustParse("128Mi"), - expected: &runtimeapi.LinuxContainerResources{ + expected: &internalapi.LinuxContainerResources{ CpuPeriod: 100000, CpuQuota: 200000, CpuShares: 1024, @@ -253,7 +253,7 @@ func TestCalculateLinuxResources(t *testing.T) { cpuReq: resource.MustParse("2"), cpuLim: resource.MustParse("8"), memLim: resource.MustParse("0"), - expected: &runtimeapi.LinuxContainerResources{ + expected: &internalapi.LinuxContainerResources{ CpuPeriod: 100000, CpuQuota: 800000, CpuShares: 2048, @@ -325,7 +325,7 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) { pod2MemoryHigh := float64(memoryNodeAllocatable.Value()) * m.memoryThrottlingFactor type expectedResult struct { - containerConfig *runtimeapi.LinuxContainerConfig + containerConfig *internalapi.LinuxContainerConfig memoryLow int64 memoryHigh int64 } @@ -363,11 +363,11 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) { } func TestGetHugepageLimitsFromResources(t *testing.T) { - var baseHugepage []*runtimeapi.HugepageLimit + var baseHugepage []*internalapi.HugepageLimit // For each page size, limit to 0. 
for _, pageSize := range cgroupfs.HugePageSizes { - baseHugepage = append(baseHugepage, &runtimeapi.HugepageLimit{ + baseHugepage = append(baseHugepage, &internalapi.HugepageLimit{ PageSize: pageSize, Limit: uint64(0), }) @@ -376,7 +376,7 @@ func TestGetHugepageLimitsFromResources(t *testing.T) { tests := []struct { name string resources v1.ResourceRequirements - expected []*runtimeapi.HugepageLimit + expected []*internalapi.HugepageLimit }{ { name: "Success2MB", @@ -385,7 +385,7 @@ func TestGetHugepageLimitsFromResources(t *testing.T) { "hugepages-2Mi": resource.MustParse("2Mi"), }, }, - expected: []*runtimeapi.HugepageLimit{ + expected: []*internalapi.HugepageLimit{ { PageSize: "2MB", Limit: 2097152, @@ -399,7 +399,7 @@ func TestGetHugepageLimitsFromResources(t *testing.T) { "hugepages-1Gi": resource.MustParse("2Gi"), }, }, - expected: []*runtimeapi.HugepageLimit{ + expected: []*internalapi.HugepageLimit{ { PageSize: "1GB", Limit: 2147483648, @@ -413,7 +413,7 @@ func TestGetHugepageLimitsFromResources(t *testing.T) { "hugepages-2MB": resource.MustParse("2Mi"), }, }, - expected: []*runtimeapi.HugepageLimit{ + expected: []*internalapi.HugepageLimit{ { PageSize: "2MB", Limit: 0, @@ -427,7 +427,7 @@ func TestGetHugepageLimitsFromResources(t *testing.T) { "hugepages-1GB": resource.MustParse("2Gi"), }, }, - expected: []*runtimeapi.HugepageLimit{ + expected: []*internalapi.HugepageLimit{ { PageSize: "1GB", Limit: 0, @@ -443,7 +443,7 @@ func TestGetHugepageLimitsFromResources(t *testing.T) { "hugepages-1Gi": resource.MustParse("2Gi"), }, }, - expected: []*runtimeapi.HugepageLimit{ + expected: []*internalapi.HugepageLimit{ { PageSize: "2MB", Limit: 2097152, @@ -463,7 +463,7 @@ func TestGetHugepageLimitsFromResources(t *testing.T) { "hugepages-1GB": resource.MustParse("2Gi"), }, }, - expected: []*runtimeapi.HugepageLimit{ + expected: []*internalapi.HugepageLimit{ { PageSize: "2MB", Limit: 0, @@ -530,7 +530,7 @@ func TestGenerateLinuxContainerConfigNamespaces(t *testing.T) 
{ name string pod *v1.Pod target *kubecontainer.ContainerID - want *runtimeapi.NamespaceOption + want *internalapi.NamespaceOption }{ { "Default namespaces", @@ -542,8 +542,8 @@ func TestGenerateLinuxContainerConfigNamespaces(t *testing.T) { }, }, nil, - &runtimeapi.NamespaceOption{ - Pid: runtimeapi.NamespaceMode_CONTAINER, + &internalapi.NamespaceOption{ + Pid: internalapi.NamespaceMode_CONTAINER, }, }, { @@ -557,8 +557,8 @@ func TestGenerateLinuxContainerConfigNamespaces(t *testing.T) { }, }, nil, - &runtimeapi.NamespaceOption{ - Pid: runtimeapi.NamespaceMode_POD, + &internalapi.NamespaceOption{ + Pid: internalapi.NamespaceMode_POD, }, }, { @@ -571,8 +571,8 @@ func TestGenerateLinuxContainerConfigNamespaces(t *testing.T) { }, }, &kubecontainer.ContainerID{Type: "docker", ID: "really-long-id-string"}, - &runtimeapi.NamespaceOption{ - Pid: runtimeapi.NamespaceMode_TARGET, + &internalapi.NamespaceOption{ + Pid: internalapi.NamespaceMode_TARGET, TargetId: "really-long-id-string", }, }, diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_test.go b/pkg/kubelet/kuberuntime/kuberuntime_container_test.go index 1a1ae2b65e4a..487a03919cf0 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container_test.go @@ -34,8 +34,8 @@ import ( v1 "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" featuregatetesting "k8s.io/component-base/featuregate/testing" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" "k8s.io/kubernetes/pkg/kubelet/lifecycle" @@ -89,7 +89,7 @@ func TestRemoveContainer(t *testing.T) { fakeOS.Removes) // Verify container is removed assert.Contains(t, fakeRuntime.Called, "RemoveContainer") - containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: 
containerID}) + containers, err := fakeRuntime.ListContainers(&internalapi.ContainerFilter{Id: containerID}) assert.NoError(t, err) assert.Empty(t, containers) } @@ -134,8 +134,8 @@ func TestKillContainer(t *testing.T) { // different states. func TestToKubeContainerStatus(t *testing.T) { cid := &kubecontainer.ContainerID{Type: "testRuntime", ID: "dummyid"} - meta := &runtimeapi.ContainerMetadata{Name: "cname", Attempt: 3} - imageSpec := &runtimeapi.ImageSpec{Image: "fimage"} + meta := &internalapi.ContainerMetadata{Name: "cname", Attempt: 3} + imageSpec := &internalapi.ImageSpec{Image: "fimage"} var ( createdAt int64 = 327 startedAt int64 = 999 @@ -143,15 +143,15 @@ func TestToKubeContainerStatus(t *testing.T) { ) for desc, test := range map[string]struct { - input *runtimeapi.ContainerStatus + input *internalapi.ContainerStatus expected *kubecontainer.Status }{ "created container": { - input: &runtimeapi.ContainerStatus{ + input: &internalapi.ContainerStatus{ Id: cid.ID, Metadata: meta, Image: imageSpec, - State: runtimeapi.ContainerState_CONTAINER_CREATED, + State: internalapi.ContainerState_CONTAINER_CREATED, CreatedAt: createdAt, }, expected: &kubecontainer.Status{ @@ -162,11 +162,11 @@ func TestToKubeContainerStatus(t *testing.T) { }, }, "running container": { - input: &runtimeapi.ContainerStatus{ + input: &internalapi.ContainerStatus{ Id: cid.ID, Metadata: meta, Image: imageSpec, - State: runtimeapi.ContainerState_CONTAINER_RUNNING, + State: internalapi.ContainerState_CONTAINER_RUNNING, CreatedAt: createdAt, StartedAt: startedAt, }, @@ -179,11 +179,11 @@ func TestToKubeContainerStatus(t *testing.T) { }, }, "exited container": { - input: &runtimeapi.ContainerStatus{ + input: &internalapi.ContainerStatus{ Id: cid.ID, Metadata: meta, Image: imageSpec, - State: runtimeapi.ContainerState_CONTAINER_EXITED, + State: internalapi.ContainerState_CONTAINER_EXITED, CreatedAt: createdAt, StartedAt: startedAt, FinishedAt: finishedAt, @@ -204,11 +204,11 @@ func 
TestToKubeContainerStatus(t *testing.T) { }, }, "unknown container": { - input: &runtimeapi.ContainerStatus{ + input: &internalapi.ContainerStatus{ Id: cid.ID, Metadata: meta, Image: imageSpec, - State: runtimeapi.ContainerState_CONTAINER_UNKNOWN, + State: internalapi.ContainerState_CONTAINER_UNKNOWN, CreatedAt: createdAt, StartedAt: startedAt, }, diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_unsupported.go b/pkg/kubelet/kuberuntime/kuberuntime_container_unsupported.go index 730b8437666f..b047d49ebb76 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container_unsupported.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container_unsupported.go @@ -21,11 +21,11 @@ package kuberuntime import ( "k8s.io/api/core/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) -// applyPlatformSpecificContainerConfig applies platform specific configurations to runtimeapi.ContainerConfig. -func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID) error { +// applyPlatformSpecificContainerConfig applies platform specific configurations to internalapi.ContainerConfig. 
+func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *internalapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, nsTarget *kubecontainer.ContainerID) error { return nil } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_windows.go b/pkg/kubelet/kuberuntime/kuberuntime_container_windows.go index 51604dbfa7c2..efcd497d96b9 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container_windows.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container_windows.go @@ -25,15 +25,15 @@ import ( v1 "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/securitycontext" ) -// applyPlatformSpecificContainerConfig applies platform specific configurations to runtimeapi.ContainerConfig. -func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, _ *kubecontainer.ContainerID) error { +// applyPlatformSpecificContainerConfig applies platform specific configurations to internalapi.ContainerConfig. +func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *internalapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string, _ *kubecontainer.ContainerID) error { windowsConfig, err := m.generateWindowsContainerConfig(container, pod, uid, username) if err != nil { return err @@ -45,10 +45,10 @@ func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config // generateWindowsContainerConfig generates windows container config for kubelet runtime v1. // Refer https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/cri-windows.md. 
-func (m *kubeGenericRuntimeManager) generateWindowsContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string) (*runtimeapi.WindowsContainerConfig, error) { - wc := &runtimeapi.WindowsContainerConfig{ - Resources: &runtimeapi.WindowsContainerResources{}, - SecurityContext: &runtimeapi.WindowsContainerSecurityContext{}, +func (m *kubeGenericRuntimeManager) generateWindowsContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string) (*internalapi.WindowsContainerConfig, error) { + wc := &internalapi.WindowsContainerConfig{ + Resources: &internalapi.WindowsContainerResources{}, + SecurityContext: &internalapi.WindowsContainerSecurityContext{}, } cpuLimit := container.Resources.Limits.Cpu() diff --git a/pkg/kubelet/kuberuntime/kuberuntime_gc.go b/pkg/kubelet/kuberuntime/kuberuntime_gc.go index 1d522a2faa26..20e6eb1f7b9c 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_gc.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_gc.go @@ -26,9 +26,8 @@ import ( "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" - internalapi "k8s.io/cri-api/pkg/apis" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog/v2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) @@ -193,7 +192,7 @@ func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByE newestGCTime := time.Now().Add(-minAge) for _, container := range containers { // Prune out running containers. 
- if container.State == runtimeapi.ContainerState_CONTAINER_RUNNING { + if container.State == internalapi.ContainerState_CONTAINER_RUNNING { continue } @@ -207,7 +206,7 @@ func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByE id: container.Id, name: container.Metadata.Name, createTime: createdAt, - unknown: container.State == runtimeapi.ContainerState_CONTAINER_UNKNOWN, + unknown: container.State == internalapi.ContainerState_CONTAINER_UNKNOWN, } key := evictUnit{ uid: labeledInfo.PodUID, @@ -298,7 +297,7 @@ func (cgc *containerGC) evictSandboxes(evictNonDeletedPods bool) error { } // Set ready sandboxes to be active. - if sandbox.State == runtimeapi.PodSandboxState_SANDBOX_READY { + if sandbox.State == internalapi.PodSandboxState_SANDBOX_READY { sandboxInfo.active = true } @@ -359,7 +358,7 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error { // TODO: we should handle container not found (i.e. container was deleted) case differently // once https://github.com/kubernetes/kubernetes/issues/63336 is resolved klog.InfoS("Error getting ContainerStatus for containerID", "containerID", containerID, "err", err) - } else if status.State != runtimeapi.ContainerState_CONTAINER_EXITED { + } else if status.State != internalapi.ContainerState_CONTAINER_EXITED { // Here is how container log rotation works (see containerLogManager#rotateLatestLog): // // 1. 
rename current log to rotated log file whose filename contains current timestamp (fmt.Sprintf("%s.%s", log, timestamp)) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go b/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go index a88db2d442d8..c12166cb3302 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go @@ -26,7 +26,7 @@ import ( "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" ) @@ -36,7 +36,7 @@ func TestSandboxGC(t *testing.T) { assert.NoError(t, err) podStateProvider := m.containerGC.podStateProvider.(*fakePodStateProvider) - makeGCSandbox := func(pod *v1.Pod, attempt uint32, state runtimeapi.PodSandboxState, hasRunningContainers, isTerminating bool, createdAt int64) sandboxTemplate { + makeGCSandbox := func(pod *v1.Pod, attempt uint32, state internalapi.PodSandboxState, hasRunningContainers, isTerminating bool, createdAt int64) sandboxTemplate { return sandboxTemplate{ pod: pod, state: state, @@ -70,7 +70,7 @@ func TestSandboxGC(t *testing.T) { { description: "notready sandboxes without containers for deleted pods should be garbage collected.", sandboxes: []sandboxTemplate{ - makeGCSandbox(pods[2], 0, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, false, false, 0), + makeGCSandbox(pods[2], 0, internalapi.PodSandboxState_SANDBOX_NOTREADY, false, false, 0), }, containers: []containerTemplate{}, remain: []int{}, @@ -79,7 +79,7 @@ func TestSandboxGC(t *testing.T) { { description: "ready sandboxes without containers for deleted pods should not be garbage collected.", sandboxes: []sandboxTemplate{ - makeGCSandbox(pods[2], 0, runtimeapi.PodSandboxState_SANDBOX_READY, false, false, 0), + makeGCSandbox(pods[2], 0, 
internalapi.PodSandboxState_SANDBOX_READY, false, false, 0), }, containers: []containerTemplate{}, remain: []int{0}, @@ -88,8 +88,8 @@ func TestSandboxGC(t *testing.T) { { description: "sandboxes for existing pods should not be garbage collected.", sandboxes: []sandboxTemplate{ - makeGCSandbox(pods[0], 0, runtimeapi.PodSandboxState_SANDBOX_READY, true, false, 0), - makeGCSandbox(pods[1], 0, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 0), + makeGCSandbox(pods[0], 0, internalapi.PodSandboxState_SANDBOX_READY, true, false, 0), + makeGCSandbox(pods[1], 0, internalapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 0), }, containers: []containerTemplate{}, remain: []int{0, 1}, @@ -98,8 +98,8 @@ func TestSandboxGC(t *testing.T) { { description: "older exited sandboxes without containers for existing pods should be garbage collected if there are more than one exited sandboxes.", sandboxes: []sandboxTemplate{ - makeGCSandbox(pods[0], 1, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 1), - makeGCSandbox(pods[0], 0, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 0), + makeGCSandbox(pods[0], 1, internalapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 1), + makeGCSandbox(pods[0], 0, internalapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 0), }, containers: []containerTemplate{}, remain: []int{0}, @@ -108,11 +108,11 @@ func TestSandboxGC(t *testing.T) { { description: "older exited sandboxes with containers for existing pods should not be garbage collected even if there are more than one exited sandboxes.", sandboxes: []sandboxTemplate{ - makeGCSandbox(pods[0], 1, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 1), - makeGCSandbox(pods[0], 0, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 0), + makeGCSandbox(pods[0], 1, internalapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 1), + makeGCSandbox(pods[0], 0, internalapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 0), }, containers: []containerTemplate{ - {pod: 
pods[0], container: &pods[0].Spec.Containers[0], sandboxAttempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED}, + {pod: pods[0], container: &pods[0].Spec.Containers[0], sandboxAttempt: 0, state: internalapi.ContainerState_CONTAINER_EXITED}, }, remain: []int{0, 1}, evictTerminatingPods: false, @@ -120,8 +120,8 @@ func TestSandboxGC(t *testing.T) { { description: "non-running sandboxes for existing pods should be garbage collected if evictTerminatingPods is set.", sandboxes: []sandboxTemplate{ - makeGCSandbox(pods[0], 0, runtimeapi.PodSandboxState_SANDBOX_READY, true, true, 0), - makeGCSandbox(pods[1], 0, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, true, true, 0), + makeGCSandbox(pods[0], 0, internalapi.PodSandboxState_SANDBOX_READY, true, true, 0), + makeGCSandbox(pods[1], 0, internalapi.PodSandboxState_SANDBOX_NOTREADY, true, true, 0), }, containers: []containerTemplate{}, remain: []int{0}, @@ -130,10 +130,10 @@ func TestSandboxGC(t *testing.T) { { description: "sandbox with containers should not be garbage collected.", sandboxes: []sandboxTemplate{ - makeGCSandbox(pods[0], 0, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, false, false, 0), + makeGCSandbox(pods[0], 0, internalapi.PodSandboxState_SANDBOX_NOTREADY, false, false, 0), }, containers: []containerTemplate{ - {pod: pods[0], container: &pods[0].Spec.Containers[0], state: runtimeapi.ContainerState_CONTAINER_EXITED}, + {pod: pods[0], container: &pods[0].Spec.Containers[0], state: internalapi.ContainerState_CONTAINER_EXITED}, }, remain: []int{0}, evictTerminatingPods: false, @@ -142,18 +142,18 @@ func TestSandboxGC(t *testing.T) { description: "multiple sandboxes should be handled properly.", sandboxes: []sandboxTemplate{ // running sandbox. - makeGCSandbox(pods[0], 1, runtimeapi.PodSandboxState_SANDBOX_READY, true, false, 1), + makeGCSandbox(pods[0], 1, internalapi.PodSandboxState_SANDBOX_READY, true, false, 1), // exited sandbox without containers. 
- makeGCSandbox(pods[0], 0, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 0), + makeGCSandbox(pods[0], 0, internalapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 0), // exited sandbox with containers. - makeGCSandbox(pods[1], 1, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 1), + makeGCSandbox(pods[1], 1, internalapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 1), // exited sandbox without containers. - makeGCSandbox(pods[1], 0, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 0), + makeGCSandbox(pods[1], 0, internalapi.PodSandboxState_SANDBOX_NOTREADY, true, false, 0), // exited sandbox without containers for deleted pods. - makeGCSandbox(pods[2], 0, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, false, true, 0), + makeGCSandbox(pods[2], 0, internalapi.PodSandboxState_SANDBOX_NOTREADY, false, true, 0), }, containers: []containerTemplate{ - {pod: pods[1], container: &pods[1].Spec.Containers[0], sandboxAttempt: 1, state: runtimeapi.ContainerState_CONTAINER_EXITED}, + {pod: pods[1], container: &pods[1].Spec.Containers[0], sandboxAttempt: 1, state: internalapi.ContainerState_CONTAINER_EXITED}, }, remain: []int{0, 2}, evictTerminatingPods: false, @@ -189,7 +189,7 @@ func TestSandboxGC(t *testing.T) { } } -func makeGCContainer(podName, containerName string, attempt int, createdAt int64, state runtimeapi.ContainerState) containerTemplate { +func makeGCContainer(podName, containerName string, attempt int, createdAt int64, state internalapi.ContainerState) containerTemplate { container := makeTestContainer(containerName, "test-image") pod := makeTestPod(podName, "test-ns", podName, []v1.Container{container}) return containerTemplate{ @@ -219,7 +219,7 @@ func TestContainerGC(t *testing.T) { { description: "all containers should be removed when max container limit is 0", containers: []containerTemplate{ - makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 0, 0, 
internalapi.ContainerState_CONTAINER_EXITED), }, policy: &kubecontainer.GCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: 0}, remain: []int{}, @@ -229,11 +229,11 @@ func TestContainerGC(t *testing.T) { { description: "max containers should be complied when no max per pod container limit is set", containers: []containerTemplate{ - makeGCContainer("foo", "bar", 4, 4, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 3, 3, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 4, 4, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 3, 3, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 2, 2, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), }, policy: &kubecontainer.GCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: 4}, remain: []int{0, 1, 2, 3}, @@ -243,9 +243,9 @@ func TestContainerGC(t *testing.T) { { description: "no containers should be removed if both max container and per pod container limits are not set", containers: []containerTemplate{ - makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 2, 2, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 0, 0, 
internalapi.ContainerState_CONTAINER_EXITED), }, policy: &kubecontainer.GCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: -1}, remain: []int{0, 1, 2}, @@ -255,9 +255,9 @@ func TestContainerGC(t *testing.T) { { description: "recently started containers should not be removed", containers: []containerTemplate{ - makeGCContainer("foo", "bar", 2, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 1, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 0, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 2, time.Now().UnixNano(), internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 1, time.Now().UnixNano(), internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 0, time.Now().UnixNano(), internalapi.ContainerState_CONTAINER_EXITED), }, remain: []int{0, 1, 2}, evictTerminatingPods: false, @@ -266,9 +266,9 @@ func TestContainerGC(t *testing.T) { { description: "oldest containers should be removed when per pod container limit exceeded", containers: []containerTemplate{ - makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 2, 2, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), }, remain: []int{0, 1}, evictTerminatingPods: false, @@ -277,9 +277,9 @@ func TestContainerGC(t *testing.T) { { description: "running containers should not be removed", containers: []containerTemplate{ - makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", 
"bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_RUNNING), + makeGCContainer("foo", "bar", 2, 2, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 0, 0, internalapi.ContainerState_CONTAINER_RUNNING), }, remain: []int{0, 1, 2}, evictTerminatingPods: false, @@ -288,8 +288,8 @@ func TestContainerGC(t *testing.T) { { description: "no containers should be removed when limits are not exceeded", containers: []containerTemplate{ - makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), }, remain: []int{0, 1}, evictTerminatingPods: false, @@ -298,15 +298,15 @@ func TestContainerGC(t *testing.T) { { description: "max container count should apply per (UID, container) pair", containers: []containerTemplate{ - makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo1", "baz", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo1", "baz", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo1", "baz", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo2", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo2", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo2", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 2, 2, 
internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo1", "baz", 2, 2, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo1", "baz", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo1", "baz", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo2", "bar", 2, 2, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo2", "bar", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo2", "bar", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), }, remain: []int{0, 1, 3, 4, 6, 7}, evictTerminatingPods: false, @@ -315,16 +315,16 @@ func TestContainerGC(t *testing.T) { { description: "max limit should apply and try to keep from every pod", containers: []containerTemplate{ - makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo1", "bar1", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo1", "bar1", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo2", "bar2", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo2", "bar2", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo3", "bar3", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo3", "bar3", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo4", "bar4", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo4", "bar4", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo1", 
"bar1", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo1", "bar1", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo2", "bar2", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo2", "bar2", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo3", "bar3", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo3", "bar3", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo4", "bar4", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo4", "bar4", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), }, remain: []int{0, 2, 4, 6, 8}, evictTerminatingPods: false, @@ -333,16 +333,16 @@ func TestContainerGC(t *testing.T) { { description: "oldest pods should be removed if limit exceeded", containers: []containerTemplate{ - makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo1", "bar1", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo1", "bar1", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo2", "bar2", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo3", "bar3", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo4", "bar4", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo5", "bar5", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo6", "bar6", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo7", "bar7", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 2, 2, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo1", "bar1", 2, 2, internalapi.ContainerState_CONTAINER_EXITED), + 
makeGCContainer("foo1", "bar1", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo2", "bar2", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo3", "bar3", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo4", "bar4", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo5", "bar5", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo6", "bar6", 2, 2, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo7", "bar7", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), }, remain: []int{0, 2, 4, 6, 8, 9}, evictTerminatingPods: false, @@ -351,12 +351,12 @@ func TestContainerGC(t *testing.T) { { description: "all non-running containers should be removed when evictTerminatingPods is set", containers: []containerTemplate{ - makeGCContainer("foo", "bar", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo1", "bar1", 2, 2, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo1", "bar1", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("running", "bar2", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo3", "bar3", 0, 0, runtimeapi.ContainerState_CONTAINER_RUNNING), + makeGCContainer("foo", "bar", 2, 2, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo1", "bar1", 2, 2, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo1", "bar1", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("running", "bar2", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo3", "bar3", 0, 0, internalapi.ContainerState_CONTAINER_RUNNING), }, remain: []int{4, 5}, evictTerminatingPods: true, @@ -365,12 +365,12 @@ func TestContainerGC(t *testing.T) { { 
description: "containers for deleted pods should be removed", containers: []containerTemplate{ - makeGCContainer("foo", "bar", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("foo", "bar", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), // deleted pods still respect MinAge. - makeGCContainer("deleted", "bar1", 2, time.Now().UnixNano(), runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("deleted", "bar1", 1, 1, runtimeapi.ContainerState_CONTAINER_EXITED), - makeGCContainer("deleted", "bar1", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("deleted", "bar1", 2, time.Now().UnixNano(), internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("deleted", "bar1", 1, 1, internalapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("deleted", "bar1", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), }, remain: []int{0, 1, 2}, evictTerminatingPods: false, @@ -379,7 +379,7 @@ func TestContainerGC(t *testing.T) { { description: "containers for deleted pods may not be removed if allSourcesReady is set false ", containers: []containerTemplate{ - makeGCContainer("deleted", "bar1", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED), + makeGCContainer("deleted", "bar1", 0, 0, internalapi.ContainerState_CONTAINER_EXITED), }, remain: []int{0}, evictTerminatingPods: true, @@ -468,7 +468,7 @@ func TestUnknownStateContainerGC(t *testing.T) { defaultGCPolicy := kubecontainer.GCPolicy{MinAge: time.Hour, MaxPerPodContainer: 0, MaxContainers: 0} fakeContainers := makeFakeContainers(t, m, []containerTemplate{ - makeGCContainer("foo", "bar", 0, 0, runtimeapi.ContainerState_CONTAINER_UNKNOWN), + makeGCContainer("foo", "bar", 0, 0, internalapi.ContainerState_CONTAINER_UNKNOWN), }) fakeRuntime.SetFakeContainers(fakeContainers) diff --git 
a/pkg/kubelet/kuberuntime/kuberuntime_image.go b/pkg/kubelet/kuberuntime/kuberuntime_image.go index 1255335bd357..66136a590e79 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_image.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_image.go @@ -19,16 +19,16 @@ package kuberuntime import ( v1 "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog/v2" credentialprovidersecrets "k8s.io/kubernetes/pkg/credentialprovider/secrets" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/util/parsers" ) // PullImage pulls an image from the network to local storage using the supplied // secrets if necessary. -func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *internalapi.PodSandboxConfig) (string, error) { img := image.Image repoToPull, _, _, err := parsers.ParseImageName(img) if err != nil { @@ -57,7 +57,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul var pullErrs []error for _, currentCreds := range creds { - auth := &runtimeapi.AuthConfig{ + auth := &internalapi.AuthConfig{ Username: currentCreds.Username, Password: currentCreds.Password, Auth: currentCreds.Auth, @@ -117,7 +117,7 @@ func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error) // RemoveImage removes the specified image. 
func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) error { - err := m.imageService.RemoveImage(&runtimeapi.ImageSpec{Image: image.Image}) + err := m.imageService.RemoveImage(&internalapi.ImageSpec{Image: image.Image}) if err != nil { klog.ErrorS(err, "Failed to remove image", "image", image.Image) return err diff --git a/pkg/kubelet/kuberuntime/kuberuntime_image_test.go b/pkg/kubelet/kuberuntime/kuberuntime_image_test.go index a67f3ba8b854..d714bd74bffb 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_image_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_image_test.go @@ -26,8 +26,8 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/kubernetes/pkg/credentialprovider" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) @@ -205,7 +205,7 @@ func TestPullWithSecrets(t *testing.T) { imageName string passedSecrets []v1.Secret builtInDockerConfig credentialprovider.DockerConfig - expectedAuth *runtimeapi.AuthConfig + expectedAuth *internalapi.AuthConfig }{ "no matching secrets": { "ubuntu", @@ -219,7 +219,7 @@ func TestPullWithSecrets(t *testing.T) { credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{ "index.docker.io/v1/": {Username: "built-in", Password: "password", Provider: nil}, }), - &runtimeapi.AuthConfig{Username: "built-in", Password: "password"}, + &internalapi.AuthConfig{Username: "built-in", Password: "password"}, }, "default keyring secrets unused": { "ubuntu", @@ -235,7 +235,7 @@ func TestPullWithSecrets(t *testing.T) { credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{ "index.docker.io/v1/": {Username: "built-in", Password: "password", Provider: nil}, }), - &runtimeapi.AuthConfig{Username: "passed-user", Password: "passed-password"}, + &internalapi.AuthConfig{Username: "passed-user", Password: "passed-password"}, }, "builtin 
keyring secrets, but use passed with new docker config": { "ubuntu", @@ -243,7 +243,7 @@ func TestPullWithSecrets(t *testing.T) { credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{ "index.docker.io/v1/": {Username: "built-in", Password: "password", Provider: nil}, }), - &runtimeapi.AuthConfig{Username: "passed-user", Password: "passed-password"}, + &internalapi.AuthConfig{Username: "passed-user", Password: "passed-password"}, }, } for description, test := range tests { @@ -254,7 +254,7 @@ func TestPullWithSecrets(t *testing.T) { _, err = fakeManager.PullImage(kubecontainer.ImageSpec{Image: test.imageName}, test.passedSecrets, nil) require.NoError(t, err) - fakeImageService.AssertImagePulledWithAuth(t, &runtimeapi.ImageSpec{Image: test.imageName, Annotations: make(map[string]string)}, test.expectedAuth, description) + fakeImageService.AssertImagePulledWithAuth(t, &internalapi.ImageSpec{Image: test.imageName, Annotations: make(map[string]string)}, test.expectedAuth, description) } } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 70ebdfaba322..777f88b7ae46 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -37,12 +37,11 @@ import ( ref "k8s.io/client-go/tools/reference" "k8s.io/client-go/util/flowcontrol" "k8s.io/component-base/logs/logreduction" - internalapi "k8s.io/cri-api/pkg/apis" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/credentialprovider" "k8s.io/kubernetes/pkg/credentialprovider/plugin" "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" @@ -314,7 +313,7 @@ func newRuntimeVersion(version string) (*utilversion.Version, error) { return 
utilversion.ParseGeneric(version) } -func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionResponse, error) { +func (m *kubeGenericRuntimeManager) getTypedVersion() (*internalapi.VersionResponse, error) { typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion) if err != nil { return nil, fmt.Errorf("get remote runtime typed version failed: %v", err) @@ -340,7 +339,7 @@ func (m *kubeGenericRuntimeManager) APIVersion() (kubecontainer.Version, error) if err != nil { return nil, err } - typedVersion := versionObject.(*runtimeapi.VersionResponse) + typedVersion := versionObject.(*internalapi.VersionResponse) return newRuntimeVersion(typedVersion.RuntimeApiVersion) } @@ -487,7 +486,7 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *ku readySandboxCount := 0 for _, s := range podStatus.SandboxStatuses { - if s.State == runtimeapi.PodSandboxState_SANDBOX_READY { + if s.State == internalapi.PodSandboxState_SANDBOX_READY { readySandboxCount++ } } @@ -499,7 +498,7 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *ku return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id } - if sandboxStatus.State != runtimeapi.PodSandboxState_SANDBOX_READY { + if sandboxStatus.State != internalapi.PodSandboxState_SANDBOX_READY { klog.V(2).InfoS("No ready sandbox for pod can be found. 
Need to start a new one", "pod", klog.KObj(pod)) return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id } @@ -1030,7 +1029,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp klog.V(4).InfoS("getSandboxIDByPodUID got sandbox IDs for pod", "podSandboxID", podSandboxIDs, "pod", klog.KObj(pod)) - sandboxStatuses := make([]*runtimeapi.PodSandboxStatus, len(podSandboxIDs)) + sandboxStatuses := make([]*internalapi.PodSandboxStatus, len(podSandboxIDs)) podIPs := []string{} for idx, podSandboxID := range podSandboxIDs { podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID) @@ -1041,7 +1040,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp sandboxStatuses[idx] = podSandboxStatus // Only get pod IP from latest sandbox - if idx == 0 && podSandboxStatus.State == runtimeapi.PodSandboxState_SANDBOX_READY { + if idx == 0 && podSandboxStatus.State == internalapi.PodSandboxState_SANDBOX_READY { podIPs = m.determinePodSandboxIPs(namespace, name, podSandboxStatus) } } @@ -1078,8 +1077,8 @@ func (m *kubeGenericRuntimeManager) UpdatePodCIDR(podCIDR string) error { // field of the config? 
klog.InfoS("Updating runtime config through cri with podcidr", "CIDR", podCIDR) return m.runtimeService.UpdateRuntimeConfig( - &runtimeapi.RuntimeConfig{ - NetworkConfig: &runtimeapi.NetworkConfig{ + &internalapi.RuntimeConfig{ + NetworkConfig: &internalapi.NetworkConfig{ PodCidr: podCIDR, }, }) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go index cbeea102cda0..8ddda747e0b2 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go @@ -35,11 +35,11 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/util/flowcontrol" featuregatetesting "k8s.io/component-base/featuregate/testing" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - apitest "k8s.io/cri-api/pkg/apis/testing" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/credentialprovider" "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" + apitest "k8s.io/kubernetes/pkg/kubelet/apis/cri/testing" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results" @@ -73,7 +73,7 @@ type sandboxTemplate struct { pod *v1.Pod attempt uint32 createdAt int64 - state runtimeapi.PodSandboxState + state internalapi.PodSandboxState running bool terminating bool } @@ -85,7 +85,7 @@ type containerTemplate struct { sandboxAttempt uint32 attempt int createdAt int64 - state runtimeapi.ContainerState + state internalapi.ContainerState } // makeAndSetFakePod is a helper function to create and set one fake sandbox for a pod and @@ -95,7 +95,7 @@ func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime * sandbox := makeFakePodSandbox(t, m, sandboxTemplate{ pod: pod, createdAt: fakeCreatedAt, - state: runtimeapi.PodSandboxState_SANDBOX_READY, + state: 
internalapi.PodSandboxState_SANDBOX_READY, }) var containers []*apitest.FakeContainer @@ -104,7 +104,7 @@ func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime * pod: pod, container: c, createdAt: fakeCreatedAt, - state: runtimeapi.ContainerState_CONTAINER_RUNNING, + state: internalapi.ContainerState_CONTAINER_RUNNING, } } podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(c *v1.Container, containerType podutil.ContainerType) bool { @@ -124,12 +124,12 @@ func makeFakePodSandbox(t *testing.T, m *kubeGenericRuntimeManager, template san podSandboxID := apitest.BuildSandboxName(config.Metadata) podSandBoxStatus := &apitest.FakePodSandbox{ - PodSandboxStatus: runtimeapi.PodSandboxStatus{ + PodSandboxStatus: internalapi.PodSandboxStatus{ Id: podSandboxID, Metadata: config.Metadata, State: template.state, CreatedAt: template.createdAt, - Network: &runtimeapi.PodSandboxNetworkStatus{ + Network: &internalapi.PodSandboxNetworkStatus{ Ip: apitest.FakePodSandboxIPs[0], }, Labels: config.Labels, @@ -137,9 +137,9 @@ func makeFakePodSandbox(t *testing.T, m *kubeGenericRuntimeManager, template san } // assign additional IPs additionalIPs := apitest.FakePodSandboxIPs[1:] - additionalPodIPs := make([]*runtimeapi.PodIP, 0, len(additionalIPs)) + additionalPodIPs := make([]*internalapi.PodIP, 0, len(additionalIPs)) for _, ip := range additionalIPs { - additionalPodIPs = append(additionalPodIPs, &runtimeapi.PodIP{ + additionalPodIPs = append(additionalPodIPs, &internalapi.PodIP{ Ip: ip, }) } @@ -170,7 +170,7 @@ func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template cont containerID := apitest.BuildContainerName(containerConfig.Metadata, podSandboxID) imageRef := containerConfig.Image.Image return &apitest.FakeContainer{ - ContainerStatus: runtimeapi.ContainerStatus{ + ContainerStatus: internalapi.ContainerStatus{ Id: containerID, Metadata: containerConfig.Metadata, Image: containerConfig.Image, @@ -250,7 +250,7 @@ 
func verifyFakeContainerList(fakeRuntime *apitest.FakeRuntimeService, expected s type cRecord struct { name string attempt uint32 - state runtimeapi.ContainerState + state internalapi.ContainerState } type cRecordList []*cRecord @@ -365,7 +365,7 @@ func TestGetPods(t *testing.T) { containers := make([]*kubecontainer.Container, len(fakeContainers)) for i := range containers { fakeContainer := fakeContainers[i] - c, err := m.toKubeContainer(&runtimeapi.Container{ + c, err := m.toKubeContainer(&internalapi.Container{ Id: fakeContainer.Id, Metadata: fakeContainer.Metadata, State: fakeContainer.State, @@ -380,7 +380,7 @@ func TestGetPods(t *testing.T) { containers[i] = c } // Convert fakeSandbox to kubecontainer.Container - sandbox, err := m.sandboxToKubeContainer(&runtimeapi.PodSandbox{ + sandbox, err := m.sandboxToKubeContainer(&internalapi.PodSandbox{ Id: fakeSandbox.Id, Metadata: fakeSandbox.Metadata, State: fakeSandbox.State, @@ -452,7 +452,7 @@ func TestKillPod(t *testing.T) { containers := make([]*kubecontainer.Container, len(fakeContainers)) for i := range containers { fakeContainer := fakeContainers[i] - c, err := m.toKubeContainer(&runtimeapi.Container{ + c, err := m.toKubeContainer(&internalapi.Container{ Id: fakeContainer.Id, Metadata: fakeContainer.Metadata, State: fakeContainer.State, @@ -485,10 +485,10 @@ func TestKillPod(t *testing.T) { assert.Equal(t, 3, len(fakeRuntime.Containers)) assert.Equal(t, 1, len(fakeRuntime.Sandboxes)) for _, sandbox := range fakeRuntime.Sandboxes { - assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.State) + assert.Equal(t, internalapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.State) } for _, c := range fakeRuntime.Containers { - assert.Equal(t, runtimeapi.ContainerState_CONTAINER_EXITED, c.State) + assert.Equal(t, internalapi.ContainerState_CONTAINER_EXITED, c.State) } } @@ -526,10 +526,10 @@ func TestSyncPod(t *testing.T) { assert.Equal(t, 2, len(fakeImage.Images)) assert.Equal(t, 1, 
len(fakeRuntime.Sandboxes)) for _, sandbox := range fakeRuntime.Sandboxes { - assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.State) + assert.Equal(t, internalapi.PodSandboxState_SANDBOX_READY, sandbox.State) } for _, c := range fakeRuntime.Containers { - assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.State) + assert.Equal(t, internalapi.ContainerState_CONTAINER_RUNNING, c.State) } } @@ -551,12 +551,12 @@ func TestPruneInitContainers(t *testing.T) { } templates := []containerTemplate{ - {pod: pod, container: &init1, attempt: 3, createdAt: 3, state: runtimeapi.ContainerState_CONTAINER_EXITED}, - {pod: pod, container: &init1, attempt: 2, createdAt: 2, state: runtimeapi.ContainerState_CONTAINER_EXITED}, - {pod: pod, container: &init2, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_EXITED}, - {pod: pod, container: &init1, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_UNKNOWN}, - {pod: pod, container: &init2, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED}, - {pod: pod, container: &init1, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED}, + {pod: pod, container: &init1, attempt: 3, createdAt: 3, state: internalapi.ContainerState_CONTAINER_EXITED}, + {pod: pod, container: &init1, attempt: 2, createdAt: 2, state: internalapi.ContainerState_CONTAINER_EXITED}, + {pod: pod, container: &init2, attempt: 1, createdAt: 1, state: internalapi.ContainerState_CONTAINER_EXITED}, + {pod: pod, container: &init1, attempt: 1, createdAt: 1, state: internalapi.ContainerState_CONTAINER_UNKNOWN}, + {pod: pod, container: &init2, attempt: 0, createdAt: 0, state: internalapi.ContainerState_CONTAINER_EXITED}, + {pod: pod, container: &init1, attempt: 0, createdAt: 0, state: internalapi.ContainerState_CONTAINER_EXITED}, } fakes := makeFakeContainers(t, m, templates) fakeRuntime.SetFakeContainers(fakes) @@ -613,7 +613,7 @@ func TestSyncPodWithInitContainers(t 
*testing.T) { result := m.SyncPod(pod, podStatus, []v1.Secret{}, backOff) assert.NoError(t, result.Error()) expected := []*cRecord{ - {name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING}, + {name: initContainers[0].Name, attempt: 0, state: internalapi.ContainerState_CONTAINER_RUNNING}, } verifyContainerStatuses(t, fakeRuntime, expected, "start only the init container") @@ -638,9 +638,9 @@ func TestSyncPodWithInitContainers(t *testing.T) { result = m.SyncPod(pod, podStatus, []v1.Secret{}, backOff) assert.NoError(t, result.Error()) expected = []*cRecord{ - {name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED}, - {name: containers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING}, - {name: containers[1].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING}, + {name: initContainers[0].Name, attempt: 0, state: internalapi.ContainerState_CONTAINER_EXITED}, + {name: containers[0].Name, attempt: 0, state: internalapi.ContainerState_CONTAINER_RUNNING}, + {name: containers[1].Name, attempt: 0, state: internalapi.ContainerState_CONTAINER_RUNNING}, } verifyContainerStatuses(t, fakeRuntime, expected, "init container completed; all app containers should be running") @@ -655,10 +655,10 @@ func TestSyncPodWithInitContainers(t *testing.T) { expected = []*cRecord{ // The first init container instance is purged and no longer visible. // The second (attempt == 1) instance has been started and is running. - {name: initContainers[0].Name, attempt: 1, state: runtimeapi.ContainerState_CONTAINER_RUNNING}, + {name: initContainers[0].Name, attempt: 1, state: internalapi.ContainerState_CONTAINER_RUNNING}, // All containers are killed. 
- {name: containers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED}, - {name: containers[1].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED}, + {name: containers[0].Name, attempt: 0, state: internalapi.ContainerState_CONTAINER_EXITED}, + {name: containers[1].Name, attempt: 0, state: internalapi.ContainerState_CONTAINER_EXITED}, } verifyContainerStatuses(t, fakeRuntime, expected, "kill all app containers, purge the existing init container, and restart a new one") } @@ -693,12 +693,12 @@ func makeBasePodAndStatus() (*v1.Pod, *kubecontainer.PodStatus) { ID: pod.UID, Name: pod.Name, Namespace: pod.Namespace, - SandboxStatuses: []*runtimeapi.PodSandboxStatus{ + SandboxStatuses: []*internalapi.PodSandboxStatus{ { Id: "sandboxID", - State: runtimeapi.PodSandboxState_SANDBOX_READY, - Metadata: &runtimeapi.PodSandboxMetadata{Name: pod.Name, Namespace: pod.Namespace, Uid: "sandboxuid", Attempt: uint32(0)}, - Network: &runtimeapi.PodSandboxNetworkStatus{Ip: "10.0.0.1"}, + State: internalapi.PodSandboxState_SANDBOX_READY, + Metadata: &internalapi.PodSandboxMetadata{Name: pod.Name, Namespace: pod.Namespace, Uid: "sandboxuid", Attempt: uint32(0)}, + Network: &internalapi.PodSandboxNetworkStatus{Ip: "10.0.0.1"}, }, }, ContainerStatuses: []*kubecontainer.Status{ @@ -747,7 +747,7 @@ func TestComputePodActions(t *testing.T) { "start pod sandbox and all containers for a new pod": { mutateStatusFn: func(status *kubecontainer.PodStatus) { // No container or sandbox exists. 
- status.SandboxStatuses = []*runtimeapi.PodSandboxStatus{} + status.SandboxStatuses = []*internalapi.PodSandboxStatus{} status.ContainerStatuses = []*kubecontainer.Status{} }, actions: podActions{ @@ -806,7 +806,7 @@ func TestComputePodActions(t *testing.T) { "Kill pod and recreate everything if the pod sandbox is dead, and RestartPolicy == Always": { mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, mutateStatusFn: func(status *kubecontainer.PodStatus) { - status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY + status.SandboxStatuses[0].State = internalapi.PodSandboxState_SANDBOX_NOTREADY }, actions: podActions{ KillPod: true, @@ -820,7 +820,7 @@ func TestComputePodActions(t *testing.T) { "Kill pod and recreate all containers (except for the succeeded one) if the pod sandbox is dead, and RestartPolicy == OnFailure": { mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure }, mutateStatusFn: func(status *kubecontainer.PodStatus) { - status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY + status.SandboxStatuses[0].State = internalapi.PodSandboxState_SANDBOX_NOTREADY status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited status.ContainerStatuses[1].ExitCode = 0 }, @@ -897,7 +897,7 @@ func TestComputePodActions(t *testing.T) { }, mutateStatusFn: func(status *kubecontainer.PodStatus) { // no ready sandbox - status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY + status.SandboxStatuses[0].State = internalapi.PodSandboxState_SANDBOX_NOTREADY status.SandboxStatuses[0].Metadata.Attempt = uint32(1) // all containers exited for i := range status.ContainerStatuses { @@ -920,7 +920,7 @@ func TestComputePodActions(t *testing.T) { }, mutateStatusFn: func(status *kubecontainer.PodStatus) { // no ready sandbox - status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY + status.SandboxStatuses[0].State = 
internalapi.PodSandboxState_SANDBOX_NOTREADY status.SandboxStatuses[0].Metadata.Attempt = uint32(1) // all containers succeeded for i := range status.ContainerStatuses { @@ -943,7 +943,7 @@ func TestComputePodActions(t *testing.T) { }, mutateStatusFn: func(status *kubecontainer.PodStatus) { // no ready sandbox - status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY + status.SandboxStatuses[0].State = internalapi.PodSandboxState_SANDBOX_NOTREADY status.SandboxStatuses[0].Metadata.Attempt = uint32(2) // no visible containers status.ContainerStatuses = []*kubecontainer.Status{} @@ -1053,7 +1053,7 @@ func TestComputePodActionsWithInitContainers(t *testing.T) { "Kill pod and restart the first init container if the pod sandbox is dead": { mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, mutateStatusFn: func(status *kubecontainer.PodStatus) { - status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY + status.SandboxStatuses[0].State = internalapi.PodSandboxState_SANDBOX_NOTREADY }, actions: podActions{ KillPod: true, @@ -1140,7 +1140,7 @@ func TestComputePodActionsWithInitContainers(t *testing.T) { "Pod sandbox not ready, init container failed, but RestartPolicy == Never; kill pod only": { mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }, mutateStatusFn: func(status *kubecontainer.PodStatus) { - status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY + status.SandboxStatuses[0].State = internalapi.PodSandboxState_SANDBOX_NOTREADY }, actions: podActions{ KillPod: true, @@ -1154,7 +1154,7 @@ func TestComputePodActionsWithInitContainers(t *testing.T) { "Pod sandbox not ready, and RestartPolicy == Never, but no visible init containers; create a new pod sandbox": { mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }, mutateStatusFn: func(status *kubecontainer.PodStatus) { - status.SandboxStatuses[0].State = 
runtimeapi.PodSandboxState_SANDBOX_NOTREADY + status.SandboxStatuses[0].State = internalapi.PodSandboxState_SANDBOX_NOTREADY status.ContainerStatuses = []*kubecontainer.Status{} }, actions: podActions{ @@ -1170,7 +1170,7 @@ func TestComputePodActionsWithInitContainers(t *testing.T) { "Pod sandbox not ready, init container failed, and RestartPolicy == OnFailure; create a new pod sandbox": { mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure }, mutateStatusFn: func(status *kubecontainer.PodStatus) { - status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY + status.SandboxStatuses[0].State = internalapi.PodSandboxState_SANDBOX_NOTREADY status.ContainerStatuses[2].ExitCode = 137 }, actions: podActions{ @@ -1311,7 +1311,7 @@ func TestComputePodActionsWithInitAndEphemeralContainers(t *testing.T) { "Create a new pod sandbox if the pod sandbox is dead, init container failed and RestartPolicy == OnFailure": { mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure }, mutateStatusFn: func(status *kubecontainer.PodStatus) { - status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY + status.SandboxStatuses[0].State = internalapi.PodSandboxState_SANDBOX_NOTREADY status.ContainerStatuses = status.ContainerStatuses[3:] status.ContainerStatuses[0].ExitCode = 137 }, @@ -1328,7 +1328,7 @@ func TestComputePodActionsWithInitAndEphemeralContainers(t *testing.T) { "Kill pod and do not restart ephemeral container if the pod sandbox is dead": { mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, mutateStatusFn: func(status *kubecontainer.PodStatus) { - status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY + status.SandboxStatuses[0].State = internalapi.PodSandboxState_SANDBOX_NOTREADY }, actions: podActions{ KillPod: true, diff --git a/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go b/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go 
index dbdeb9450dd5..841b3087f5bc 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go @@ -25,9 +25,9 @@ import ( v1 "k8s.io/api/core/v1" kubetypes "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util" @@ -75,12 +75,12 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32 } // generatePodSandboxConfig generates pod sandbox config from v1.Pod. -func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeapi.PodSandboxConfig, error) { +func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*internalapi.PodSandboxConfig, error) { // TODO: deprecating podsandbox resource requirements in favor of the pod level cgroup // Refer https://github.com/kubernetes/kubernetes/issues/29871 podUID := string(pod.UID) - podSandboxConfig := &runtimeapi.PodSandboxConfig{ - Metadata: &runtimeapi.PodSandboxMetadata{ + podSandboxConfig := &internalapi.PodSandboxConfig{ + Metadata: &internalapi.PodSandboxMetadata{ Name: pod.Name, Namespace: pod.Namespace, Uid: podUID, @@ -112,7 +112,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp logDir := BuildPodLogsDirectory(pod.Namespace, pod.Name, pod.UID) podSandboxConfig.LogDirectory = logDir - portMappings := []*runtimeapi.PortMapping{} + portMappings := []*internalapi.PortMapping{} for _, c := range pod.Spec.Containers { containerPortMappings := kubecontainer.MakePortMappings(&c) @@ -121,7 +121,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp hostPort := int32(port.HostPort) containerPort := 
int32(port.ContainerPort) protocol := toRuntimeProtocol(port.Protocol) - portMappings = append(portMappings, &runtimeapi.PortMapping{ + portMappings = append(portMappings, &internalapi.PortMapping{ HostIp: port.HostIP, HostPort: hostPort, ContainerPort: containerPort, @@ -159,11 +159,11 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp // We've to call PodSandboxLinuxConfig always irrespective of the underlying OS as securityContext is not part of // podSandboxConfig. It is currently part of LinuxPodSandboxConfig. In future, if we have securityContext pulled out // in podSandboxConfig we should be able to use it. -func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (*runtimeapi.LinuxPodSandboxConfig, error) { +func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (*internalapi.LinuxPodSandboxConfig, error) { cgroupParent := m.runtimeHelper.GetPodCgroupParent(pod) - lc := &runtimeapi.LinuxPodSandboxConfig{ + lc := &internalapi.LinuxPodSandboxConfig{ CgroupParent: cgroupParent, - SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{ + SecurityContext: &internalapi.LinuxSandboxSecurityContext{ Privileged: kubecontainer.HasPrivilegedContainer(pod), // TODO: Deprecated, remove after we switch to Seccomp field @@ -171,8 +171,8 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) ( // use least privileged seccomp profiles at pod level. 
Issue #84623 SeccompProfilePath: v1.SeccompProfileRuntimeDefault, - Seccomp: &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_RuntimeDefault, + Seccomp: &internalapi.SecurityProfile{ + ProfileType: internalapi.SecurityProfile_RuntimeDefault, }, }, } @@ -189,10 +189,10 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) ( if pod.Spec.SecurityContext != nil { sc := pod.Spec.SecurityContext if sc.RunAsUser != nil && runtime.GOOS != "windows" { - lc.SecurityContext.RunAsUser = &runtimeapi.Int64Value{Value: int64(*sc.RunAsUser)} + lc.SecurityContext.RunAsUser = &internalapi.Int64Value{Value: int64(*sc.RunAsUser)} } if sc.RunAsGroup != nil && runtime.GOOS != "windows" { - lc.SecurityContext.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*sc.RunAsGroup)} + lc.SecurityContext.RunAsGroup = &internalapi.Int64Value{Value: int64(*sc.RunAsGroup)} } lc.SecurityContext.NamespaceOptions = namespacesForPod(pod) @@ -208,7 +208,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) ( } } if sc.SELinuxOptions != nil && runtime.GOOS != "windows" { - lc.SecurityContext.SelinuxOptions = &runtimeapi.SELinuxOption{ + lc.SecurityContext.SelinuxOptions = &internalapi.SELinuxOption{ User: sc.SELinuxOptions.User, Role: sc.SELinuxOptions.Role, Type: sc.SELinuxOptions.Type, @@ -223,9 +223,9 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) ( // generatePodSandboxWindowsConfig generates WindowsPodSandboxConfig from v1.Pod. // On Windows this will get called in addition to LinuxPodSandboxConfig because not all relevant fields have been added to // WindowsPodSandboxConfig at this time. 
-func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod) (*runtimeapi.WindowsPodSandboxConfig, error) { - wc := &runtimeapi.WindowsPodSandboxConfig{ - SecurityContext: &runtimeapi.WindowsSandboxSecurityContext{}, +func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod) (*internalapi.WindowsPodSandboxConfig, error) { + wc := &internalapi.WindowsPodSandboxConfig{ + SecurityContext: &internalapi.WindowsSandboxSecurityContext{}, } sc := pod.Spec.SecurityContext @@ -269,12 +269,12 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod) } // getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet. -func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi.PodSandbox, error) { - var filter *runtimeapi.PodSandboxFilter +func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*internalapi.PodSandbox, error) { + var filter *internalapi.PodSandboxFilter if !all { - readyState := runtimeapi.PodSandboxState_SANDBOX_READY - filter = &runtimeapi.PodSandboxFilter{ - State: &runtimeapi.PodSandboxStateValue{ + readyState := internalapi.PodSandboxState_SANDBOX_READY + filter = &internalapi.PodSandboxFilter{ + State: &internalapi.PodSandboxStateValue{ State: readyState, }, } @@ -290,7 +290,7 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi } // determinePodSandboxIP determines the IP addresses of the given pod sandbox. 
-func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName string, podSandbox *runtimeapi.PodSandboxStatus) []string { +func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName string, podSandbox *internalapi.PodSandboxStatus) []string { podIPs := make([]string, 0) if podSandbox.Network == nil { klog.InfoS("Pod Sandbox status doesn't have network information, cannot report IPs", "pod", klog.KRef(podNamespace, podName)) @@ -323,12 +323,12 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName // getPodSandboxID gets the sandbox id by podUID and returns ([]sandboxID, error). // Param state could be nil in order to get all sandboxes belonging to same pod. -func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) { - filter := &runtimeapi.PodSandboxFilter{ +func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, state *internalapi.PodSandboxState) ([]string, error) { + filter := &internalapi.PodSandboxFilter{ LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)}, } if state != nil { - filter.State = &runtimeapi.PodSandboxStateValue{ + filter.State = &internalapi.PodSandboxStateValue{ State: *state, } } @@ -361,7 +361,7 @@ func (m *kubeGenericRuntimeManager) GetPortForward(podName, podNamespace string, if len(sandboxIDs) == 0 { return nil, fmt.Errorf("failed to find sandboxID for pod %s", format.PodDesc(podName, podNamespace, podUID)) } - req := &runtimeapi.PortForwardRequest{ + req := &internalapi.PortForwardRequest{ PodSandboxId: sandboxIDs[0], Port: ports, } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux.go b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux.go index 77dc010acd33..d2705840c01e 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux.go @@ -22,13 +22,13 @@ package kuberuntime 
import ( v1 "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource" "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" ) -func (m *kubeGenericRuntimeManager) convertOverheadToLinuxResources(pod *v1.Pod) *runtimeapi.LinuxContainerResources { - resources := &runtimeapi.LinuxContainerResources{} +func (m *kubeGenericRuntimeManager) convertOverheadToLinuxResources(pod *v1.Pod) *internalapi.LinuxContainerResources { + resources := &internalapi.LinuxContainerResources{} if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) { cpu := pod.Spec.Overhead.Cpu() memory := pod.Spec.Overhead.Memory() @@ -41,12 +41,12 @@ func (m *kubeGenericRuntimeManager) convertOverheadToLinuxResources(pod *v1.Pod) return resources } -func (m *kubeGenericRuntimeManager) calculateSandboxResources(pod *v1.Pod) *runtimeapi.LinuxContainerResources { +func (m *kubeGenericRuntimeManager) calculateSandboxResources(pod *v1.Pod) *internalapi.LinuxContainerResources { req, lim := resourcehelper.PodRequestsAndLimitsWithoutOverhead(pod) return m.calculateLinuxResources(req.Cpu(), lim.Cpu(), lim.Memory()) } -func (m *kubeGenericRuntimeManager) applySandboxResources(pod *v1.Pod, config *runtimeapi.PodSandboxConfig) error { +func (m *kubeGenericRuntimeManager) applySandboxResources(pod *v1.Pod, config *internalapi.PodSandboxConfig) error { if config.Linux == nil { return nil diff --git a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux_test.go b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux_test.go index 19e4b0efd567..4d578fecb456 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux_test.go @@ -27,15 +27,15 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - 
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" ) func TestApplySandboxResources(t *testing.T) { _, _, m, err := createTestRuntimeManager() m.cpuCFSQuota = true - config := &runtimeapi.PodSandboxConfig{ - Linux: &runtimeapi.LinuxPodSandboxConfig{}, + config := &internalapi.PodSandboxConfig{ + Linux: &internalapi.LinuxPodSandboxConfig{}, } require.NoError(t, err) @@ -43,8 +43,8 @@ func TestApplySandboxResources(t *testing.T) { tests := []struct { description string pod *v1.Pod - expectedResource *runtimeapi.LinuxContainerResources - expectedOverhead *runtimeapi.LinuxContainerResources + expectedResource *internalapi.LinuxContainerResources + expectedOverhead *internalapi.LinuxContainerResources }{ { description: "pod with overhead defined", @@ -75,13 +75,13 @@ func TestApplySandboxResources(t *testing.T) { }, }, }, - expectedResource: &runtimeapi.LinuxContainerResources{ + expectedResource: &internalapi.LinuxContainerResources{ MemoryLimitInBytes: 268435456, CpuPeriod: 100000, CpuQuota: 400000, CpuShares: 2048, }, - expectedOverhead: &runtimeapi.LinuxContainerResources{ + expectedOverhead: &internalapi.LinuxContainerResources{ MemoryLimitInBytes: 134217728, CpuPeriod: 100000, CpuQuota: 100000, @@ -111,13 +111,13 @@ func TestApplySandboxResources(t *testing.T) { }, }, }, - expectedResource: &runtimeapi.LinuxContainerResources{ + expectedResource: &internalapi.LinuxContainerResources{ MemoryLimitInBytes: 268435456, CpuPeriod: 100000, CpuQuota: 0, CpuShares: 2, }, - expectedOverhead: &runtimeapi.LinuxContainerResources{}, + expectedOverhead: &internalapi.LinuxContainerResources{}, }, } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_test.go b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_test.go index c7441d55b99c..67c72aef310d 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_test.go @@ -28,8 +28,8 @@ import ( metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" featuregatetesting "k8s.io/component-base/featuregate/testing" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" "k8s.io/kubernetes/pkg/kubelet/runtimeclass" rctest "k8s.io/kubernetes/pkg/kubelet/runtimeclass/testing" @@ -52,7 +52,7 @@ func TestCreatePodSandbox(t *testing.T) { id, _, err := m.createPodSandbox(pod, 1) assert.NoError(t, err) assert.Contains(t, fakeRuntime.Called, "RunPodSandbox") - sandboxes, err := fakeRuntime.ListPodSandbox(&runtimeapi.PodSandboxFilter{Id: id}) + sandboxes, err := fakeRuntime.ListPodSandbox(&internalapi.PodSandboxFilter{Id: id}) assert.NoError(t, err) assert.Equal(t, len(sandboxes), 1) // TODO Check pod sandbox configuration @@ -191,7 +191,7 @@ func TestGeneratePodSandboxWindowsConfig(t *testing.T) { name string hostProcessFeatureEnabled bool podSpec *v1.PodSpec - expectedWindowsConfig *runtimeapi.WindowsPodSandboxConfig + expectedWindowsConfig *internalapi.WindowsPodSandboxConfig expectedError error }{ { @@ -202,8 +202,8 @@ func TestGeneratePodSandboxWindowsConfig(t *testing.T) { Name: containerName, }}, }, - expectedWindowsConfig: &runtimeapi.WindowsPodSandboxConfig{ - SecurityContext: &runtimeapi.WindowsSandboxSecurityContext{}, + expectedWindowsConfig: &internalapi.WindowsPodSandboxConfig{ + SecurityContext: &internalapi.WindowsSandboxSecurityContext{}, }, expectedError: nil, }, @@ -220,8 +220,8 @@ func TestGeneratePodSandboxWindowsConfig(t *testing.T) { Name: containerName, }}, }, - expectedWindowsConfig: &runtimeapi.WindowsPodSandboxConfig{ - SecurityContext: &runtimeapi.WindowsSandboxSecurityContext{ + expectedWindowsConfig: &internalapi.WindowsPodSandboxConfig{ + SecurityContext: &internalapi.WindowsSandboxSecurityContext{ CredentialSpec: "gmsa-creds", }, }, @@ -240,8 +240,8 @@ func 
TestGeneratePodSandboxWindowsConfig(t *testing.T) { Name: containerName, }}, }, - expectedWindowsConfig: &runtimeapi.WindowsPodSandboxConfig{ - SecurityContext: &runtimeapi.WindowsSandboxSecurityContext{ + expectedWindowsConfig: &internalapi.WindowsPodSandboxConfig{ + SecurityContext: &internalapi.WindowsSandboxSecurityContext{ RunAsUsername: "SYSTEM", }, }, @@ -316,8 +316,8 @@ func TestGeneratePodSandboxWindowsConfig(t *testing.T) { Name: containerName, }}, }, - expectedWindowsConfig: &runtimeapi.WindowsPodSandboxConfig{ - SecurityContext: &runtimeapi.WindowsSandboxSecurityContext{ + expectedWindowsConfig: &internalapi.WindowsPodSandboxConfig{ + SecurityContext: &internalapi.WindowsSandboxSecurityContext{ HostProcess: true, }, }, diff --git a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_unsupported.go b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_unsupported.go index 005f15f976a5..b2d32750b755 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_unsupported.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_unsupported.go @@ -21,9 +21,9 @@ package kuberuntime import ( v1 "k8s.io/api/core/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" ) -func (m *kubeGenericRuntimeManager) applySandboxResources(pod *v1.Pod, config *runtimeapi.PodSandboxConfig) error { +func (m *kubeGenericRuntimeManager) applySandboxResources(pod *v1.Pod, config *internalapi.PodSandboxConfig) error { return nil } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_windows.go b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_windows.go index 4a9a0af95943..367e453e0637 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_windows.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_windows.go @@ -21,9 +21,9 @@ package kuberuntime import ( v1 "k8s.io/api/core/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" ) -func (m *kubeGenericRuntimeManager) 
applySandboxResources(pod *v1.Pod, config *runtimeapi.PodSandboxConfig) error { +func (m *kubeGenericRuntimeManager) applySandboxResources(pod *v1.Pod, config *internalapi.PodSandboxConfig) error { return nil } diff --git a/pkg/kubelet/kuberuntime/logs/logs.go b/pkg/kubelet/kuberuntime/logs/logs.go index 428cc2eb65bb..b0184bf23f8b 100644 --- a/pkg/kubelet/kuberuntime/logs/logs.go +++ b/pkg/kubelet/kuberuntime/logs/logs.go @@ -33,8 +33,7 @@ import ( "k8s.io/klog/v2" v1 "k8s.io/api/core/v1" - internalapi "k8s.io/cri-api/pkg/apis" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/util/tail" ) @@ -63,13 +62,13 @@ var ( // delimiter is the delimiter for timestamp and stream type in log line. delimiter = []byte{' '} // tagDelimiter is the delimiter for log tags. - tagDelimiter = []byte(runtimeapi.LogTagDelimiter) + tagDelimiter = []byte(internalapi.LogTagDelimiter) ) // logMessage is the CRI internal log type. type logMessage struct { timestamp time.Time - stream runtimeapi.LogStreamType + stream internalapi.LogStreamType log []byte } @@ -142,8 +141,8 @@ func parseCRILog(log []byte, msg *logMessage) error { if idx < 0 { return fmt.Errorf("stream type is not found") } - msg.stream = runtimeapi.LogStreamType(log[:idx]) - if msg.stream != runtimeapi.Stdout && msg.stream != runtimeapi.Stderr { + msg.stream = internalapi.LogStreamType(log[:idx]) + if msg.stream != internalapi.Stdout && msg.stream != internalapi.Stderr { return fmt.Errorf("unexpected stream type %q", msg.stream) } @@ -155,7 +154,7 @@ func parseCRILog(log []byte, msg *logMessage) error { } // Keep this forward compatible. tags := bytes.Split(log[:idx], tagDelimiter) - partial := runtimeapi.LogTag(tags[0]) == runtimeapi.LogTagPartial + partial := internalapi.LogTag(tags[0]) == internalapi.LogTagPartial // Trim the tailing new line if this is a partial line. 
if partial && len(log) > 0 && log[len(log)-1] == '\n' { log = log[:len(log)-1] @@ -191,7 +190,7 @@ func parseDockerJSONLog(log []byte, msg *logMessage) error { return fmt.Errorf("failed with %v to unmarshal log %q", err, l) } msg.timestamp = l.Created - msg.stream = runtimeapi.LogStreamType(l.Stream) + msg.stream = internalapi.LogStreamType(l.Stream) msg.log = []byte(l.Log) return nil } @@ -251,9 +250,9 @@ func (w *logWriter) write(msg *logMessage) error { // Get the proper stream to write to. var stream io.Writer switch msg.stream { - case runtimeapi.Stdout: + case internalapi.Stdout: stream = w.stdout - case runtimeapi.Stderr: + case internalapi.Stderr: stream = w.stderr default: return fmt.Errorf("unexpected stream type %q", msg.stream) @@ -416,7 +415,7 @@ func isContainerRunning(id string, r internalapi.RuntimeService) (bool, error) { return false, err } // Only keep following container log when it is running. - if s.State != runtimeapi.ContainerState_CONTAINER_RUNNING { + if s.State != internalapi.ContainerState_CONTAINER_RUNNING { klog.V(5).InfoS("Container is not running", "containerId", id, "state", s.State) // Do not return error because it's normal that the container stops // during waiting. 
diff --git a/pkg/kubelet/kuberuntime/logs/logs_test.go b/pkg/kubelet/kuberuntime/logs/logs_test.go index 37c23947d4e9..7d0a380979c1 100644 --- a/pkg/kubelet/kuberuntime/logs/logs_test.go +++ b/pkg/kubelet/kuberuntime/logs/logs_test.go @@ -20,17 +20,17 @@ import ( "bytes" "context" "io/ioutil" - apitesting "k8s.io/cri-api/pkg/apis/testing" - "k8s.io/utils/pointer" "os" "testing" "time" "github.com/stretchr/testify/assert" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" + apitesting "k8s.io/kubernetes/pkg/kubelet/apis/cri/testing" + "k8s.io/utils/pointer" ) func TestLogOptions(t *testing.T) { @@ -177,15 +177,15 @@ func TestReadLogs(t *testing.T) { fakeRuntimeService := &apitesting.FakeRuntimeService{ Containers: map[string]*apitesting.FakeContainer{ containerID: { - ContainerStatus: runtimeapi.ContainerStatus{ - State: runtimeapi.ContainerState_CONTAINER_RUNNING, + ContainerStatus: internalapi.ContainerStatus{ + State: internalapi.ContainerState_CONTAINER_RUNNING, }, }, }, } // If follow is specified, mark the container as exited or else ReadLogs will run indefinitely if tc.podLogOptions.Follow { - fakeRuntimeService.Containers[containerID].State = runtimeapi.ContainerState_CONTAINER_EXITED + fakeRuntimeService.Containers[containerID].State = internalapi.ContainerState_CONTAINER_EXITED } opts := NewLogOptions(&tc.podLogOptions, time.Now()) @@ -219,7 +219,7 @@ func TestParseLog(t *testing.T) { line: `{"log":"docker stdout test log","stream":"stdout","time":"2016-10-20T18:39:20.57606443Z"}` + "\n", msg: &logMessage{ timestamp: timestamp, - stream: runtimeapi.Stdout, + stream: internalapi.Stdout, log: []byte("docker stdout test log"), }, }, @@ -227,7 +227,7 @@ func TestParseLog(t *testing.T) { line: `{"log":"docker stderr test log","stream":"stderr","time":"2016-10-20T18:39:20.57606443Z"}` + "\n", msg: &logMessage{ 
timestamp: timestamp, - stream: runtimeapi.Stderr, + stream: internalapi.Stderr, log: []byte("docker stderr test log"), }, }, @@ -235,7 +235,7 @@ func TestParseLog(t *testing.T) { line: "2016-10-20T18:39:20.57606443Z stdout F cri stdout test log\n", msg: &logMessage{ timestamp: timestamp, - stream: runtimeapi.Stdout, + stream: internalapi.Stdout, log: []byte("cri stdout test log\n"), }, }, @@ -243,7 +243,7 @@ func TestParseLog(t *testing.T) { line: "2016-10-20T18:39:20.57606443Z stderr F cri stderr test log\n", msg: &logMessage{ timestamp: timestamp, - stream: runtimeapi.Stderr, + stream: internalapi.Stderr, log: []byte("cri stderr test log\n"), }, }, @@ -256,7 +256,7 @@ func TestParseLog(t *testing.T) { line: "2016-10-20T18:39:20.57606443Z stdout P cri stdout partial test log\n", msg: &logMessage{ timestamp: timestamp, - stream: runtimeapi.Stdout, + stream: internalapi.Stdout, log: []byte("cri stdout partial test log"), }, }, @@ -264,7 +264,7 @@ func TestParseLog(t *testing.T) { line: "2016-10-20T18:39:20.57606443Z stdout P:TAG1:TAG2 cri stdout partial test log\n", msg: &logMessage{ timestamp: timestamp, - stream: runtimeapi.Stdout, + stream: internalapi.Stdout, log: []byte("cri stdout partial test log"), }, }, @@ -287,26 +287,26 @@ func TestWriteLogs(t *testing.T) { log := "abcdefg\n" for c, test := range []struct { - stream runtimeapi.LogStreamType + stream internalapi.LogStreamType since time.Time timestamp bool expectStdout string expectStderr string }{ { // stderr log - stream: runtimeapi.Stderr, + stream: internalapi.Stderr, expectStderr: log, }, { // stdout log - stream: runtimeapi.Stdout, + stream: internalapi.Stdout, expectStdout: log, }, { // since is after timestamp - stream: runtimeapi.Stdout, + stream: internalapi.Stdout, since: timestamp.Add(1 * time.Second), }, { // timestamp enabled - stream: runtimeapi.Stderr, + stream: internalapi.Stderr, timestamp: true, expectStderr: timestamp.Format(timeFormatOut) + " " + log, }, @@ -383,13 +383,13 @@ func 
TestWriteLogsWithBytesLimit(t *testing.T) { stderrBuf := bytes.NewBuffer(nil) w := newLogWriter(stdoutBuf, stderrBuf, &LogOptions{timestamp: test.timestamp, bytes: int64(test.bytes)}) for i := 0; i < test.stdoutLines; i++ { - msg.stream = runtimeapi.Stdout + msg.stream = internalapi.Stdout if err := w.write(msg); err != nil { assert.EqualError(t, err, errMaximumWrite.Error()) } } for i := 0; i < test.stderrLines; i++ { - msg.stream = runtimeapi.Stderr + msg.stream = internalapi.Stderr if err := w.write(msg); err != nil { assert.EqualError(t, err, errMaximumWrite.Error()) } diff --git a/pkg/kubelet/kuberuntime/security_context.go b/pkg/kubelet/kuberuntime/security_context.go index 7a43ebf157da..484d4a1db698 100644 --- a/pkg/kubelet/kuberuntime/security_context.go +++ b/pkg/kubelet/kuberuntime/security_context.go @@ -18,17 +18,17 @@ package kuberuntime import ( v1 "k8s.io/api/core/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/security/apparmor" "k8s.io/kubernetes/pkg/securitycontext" ) // determineEffectiveSecurityContext gets container's security context from v1.Pod and v1.Container. 
-func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Pod, container *v1.Container, uid *int64, username string) *runtimeapi.LinuxContainerSecurityContext { +func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Pod, container *v1.Container, uid *int64, username string) *internalapi.LinuxContainerSecurityContext { effectiveSc := securitycontext.DetermineEffectiveSecurityContext(pod, container) synthesized := convertToRuntimeSecurityContext(effectiveSc) if synthesized == nil { - synthesized = &runtimeapi.LinuxContainerSecurityContext{ + synthesized = &internalapi.LinuxContainerSecurityContext{ MaskedPaths: securitycontext.ConvertToRuntimeMaskedPaths(effectiveSc.ProcMount), ReadonlyPaths: securitycontext.ConvertToRuntimeReadonlyPaths(effectiveSc.ProcMount), } @@ -46,7 +46,7 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po // set RunAsUser. if synthesized.RunAsUser == nil { if uid != nil { - synthesized.RunAsUser = &runtimeapi.Int64Value{Value: *uid} + synthesized.RunAsUser = &internalapi.Int64Value{Value: *uid} } synthesized.RunAsUsername = username } @@ -77,21 +77,21 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po return synthesized } -// convertToRuntimeSecurityContext converts v1.SecurityContext to runtimeapi.SecurityContext. -func convertToRuntimeSecurityContext(securityContext *v1.SecurityContext) *runtimeapi.LinuxContainerSecurityContext { +// convertToRuntimeSecurityContext converts v1.SecurityContext to internalapi.SecurityContext. 
+func convertToRuntimeSecurityContext(securityContext *v1.SecurityContext) *internalapi.LinuxContainerSecurityContext { if securityContext == nil { return nil } - sc := &runtimeapi.LinuxContainerSecurityContext{ + sc := &internalapi.LinuxContainerSecurityContext{ Capabilities: convertToRuntimeCapabilities(securityContext.Capabilities), SelinuxOptions: convertToRuntimeSELinuxOption(securityContext.SELinuxOptions), } if securityContext.RunAsUser != nil { - sc.RunAsUser = &runtimeapi.Int64Value{Value: int64(*securityContext.RunAsUser)} + sc.RunAsUser = &internalapi.Int64Value{Value: int64(*securityContext.RunAsUser)} } if securityContext.RunAsGroup != nil { - sc.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*securityContext.RunAsGroup)} + sc.RunAsGroup = &internalapi.Int64Value{Value: int64(*securityContext.RunAsGroup)} } if securityContext.Privileged != nil { sc.Privileged = *securityContext.Privileged @@ -103,13 +103,13 @@ func convertToRuntimeSecurityContext(securityContext *v1.SecurityContext) *runti return sc } -// convertToRuntimeSELinuxOption converts v1.SELinuxOptions to runtimeapi.SELinuxOption. -func convertToRuntimeSELinuxOption(opts *v1.SELinuxOptions) *runtimeapi.SELinuxOption { +// convertToRuntimeSELinuxOption converts v1.SELinuxOptions to internalapi.SELinuxOption. +func convertToRuntimeSELinuxOption(opts *v1.SELinuxOptions) *internalapi.SELinuxOption { if opts == nil { return nil } - return &runtimeapi.SELinuxOption{ + return &internalapi.SELinuxOption{ User: opts.User, Role: opts.Role, Type: opts.Type, @@ -117,13 +117,13 @@ func convertToRuntimeSELinuxOption(opts *v1.SELinuxOptions) *runtimeapi.SELinuxO } } -// convertToRuntimeCapabilities converts v1.Capabilities to runtimeapi.Capability. -func convertToRuntimeCapabilities(opts *v1.Capabilities) *runtimeapi.Capability { +// convertToRuntimeCapabilities converts v1.Capabilities to internalapi.Capability. 
+func convertToRuntimeCapabilities(opts *v1.Capabilities) *internalapi.Capability { if opts == nil { return nil } - capabilities := &runtimeapi.Capability{ + capabilities := &internalapi.Capability{ AddCapabilities: make([]string, len(opts.Add)), DropCapabilities: make([]string, len(opts.Drop)), } diff --git a/pkg/kubelet/logs/container_log_manager.go b/pkg/kubelet/logs/container_log_manager.go index a59fe054e19e..edb31ef0a05f 100644 --- a/pkg/kubelet/logs/container_log_manager.go +++ b/pkg/kubelet/logs/container_log_manager.go @@ -31,8 +31,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/wait" - internalapi "k8s.io/cri-api/pkg/apis" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/utils/clock" ) @@ -212,7 +211,7 @@ func (c *containerLogManager) rotateLogs() error { c.mutex.Lock() defer c.mutex.Unlock() // TODO(#59998): Use kubelet pod cache. - containers, err := c.runtimeService.ListContainers(&runtimeapi.ContainerFilter{}) + containers, err := c.runtimeService.ListContainers(&internalapi.ContainerFilter{}) if err != nil { return fmt.Errorf("failed to list containers: %v", err) } @@ -220,7 +219,7 @@ func (c *containerLogManager) rotateLogs() error { for _, container := range containers { // Only rotate logs for running containers. Non-running containers won't // generate new output, it doesn't make sense to keep an empty latest log. 
- if container.GetState() != runtimeapi.ContainerState_CONTAINER_RUNNING { + if container.GetState() != internalapi.ContainerState_CONTAINER_RUNNING { continue } id := container.GetId() diff --git a/pkg/kubelet/logs/container_log_manager_test.go b/pkg/kubelet/logs/container_log_manager_test.go index 580e4d1eec50..6ad8627c15ae 100644 --- a/pkg/kubelet/logs/container_log_manager_test.go +++ b/pkg/kubelet/logs/container_log_manager_test.go @@ -30,8 +30,8 @@ import ( "github.com/stretchr/testify/require" "k8s.io/kubernetes/pkg/kubelet/container" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - critest "k8s.io/cri-api/pkg/apis/testing" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" + critest "k8s.io/kubernetes/pkg/kubelet/apis/cri/testing" testingclock "k8s.io/utils/clock/testing" ) @@ -119,30 +119,30 @@ func TestRotateLogs(t *testing.T) { } testContainers := []*critest.FakeContainer{ { - ContainerStatus: runtimeapi.ContainerStatus{ + ContainerStatus: internalapi.ContainerStatus{ Id: "container-not-need-rotate", - State: runtimeapi.ContainerState_CONTAINER_RUNNING, + State: internalapi.ContainerState_CONTAINER_RUNNING, LogPath: filepath.Join(dir, testLogs[0]), }, }, { - ContainerStatus: runtimeapi.ContainerStatus{ + ContainerStatus: internalapi.ContainerStatus{ Id: "container-need-rotate", - State: runtimeapi.ContainerState_CONTAINER_RUNNING, + State: internalapi.ContainerState_CONTAINER_RUNNING, LogPath: filepath.Join(dir, testLogs[1]), }, }, { - ContainerStatus: runtimeapi.ContainerStatus{ + ContainerStatus: internalapi.ContainerStatus{ Id: "container-has-excess-log", - State: runtimeapi.ContainerState_CONTAINER_RUNNING, + State: internalapi.ContainerState_CONTAINER_RUNNING, LogPath: filepath.Join(dir, testLogs[2]), }, }, { - ContainerStatus: runtimeapi.ContainerStatus{ + ContainerStatus: internalapi.ContainerStatus{ Id: "container-is-not-running", - State: runtimeapi.ContainerState_CONTAINER_EXITED, + State: 
internalapi.ContainerState_CONTAINER_EXITED, LogPath: filepath.Join(dir, testLogs[3]), }, }, @@ -197,23 +197,23 @@ func TestClean(t *testing.T) { } testContainers := []*critest.FakeContainer{ { - ContainerStatus: runtimeapi.ContainerStatus{ + ContainerStatus: internalapi.ContainerStatus{ Id: "container-1", - State: runtimeapi.ContainerState_CONTAINER_RUNNING, + State: internalapi.ContainerState_CONTAINER_RUNNING, LogPath: filepath.Join(dir, testLogs[0]), }, }, { - ContainerStatus: runtimeapi.ContainerStatus{ + ContainerStatus: internalapi.ContainerStatus{ Id: "container-2", - State: runtimeapi.ContainerState_CONTAINER_RUNNING, + State: internalapi.ContainerState_CONTAINER_RUNNING, LogPath: filepath.Join(dir, testLogs[1]), }, }, { - ContainerStatus: runtimeapi.ContainerStatus{ + ContainerStatus: internalapi.ContainerStatus{ Id: "container-3", - State: runtimeapi.ContainerState_CONTAINER_EXITED, + State: internalapi.ContainerState_CONTAINER_EXITED, LogPath: filepath.Join(dir, testLogs[2]), }, }, diff --git a/pkg/kubelet/network/dns/dns.go b/pkg/kubelet/network/dns/dns.go index 02a2289efcd6..22b4d2401411 100644 --- a/pkg/kubelet/network/dns/dns.go +++ b/pkg/kubelet/network/dns/dns.go @@ -30,9 +30,9 @@ import ( utilvalidation "k8s.io/apimachinery/pkg/util/validation" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/util/format" @@ -159,7 +159,7 @@ func (c *Configurer) formDNSNameserversFitsLimits(nameservers []string, pod *v1. 
return nameservers } -func (c *Configurer) formDNSConfigFitsLimits(dnsConfig *runtimeapi.DNSConfig, pod *v1.Pod) *runtimeapi.DNSConfig { +func (c *Configurer) formDNSConfigFitsLimits(dnsConfig *internalapi.DNSConfig, pod *v1.Pod) *internalapi.DNSConfig { dnsConfig.Servers = c.formDNSNameserversFitsLimits(dnsConfig.Servers, pod) dnsConfig.Searches = c.formDNSSearchFitsLimits(dnsConfig.Searches, pod) return dnsConfig @@ -278,7 +278,7 @@ func parseResolvConf(reader io.Reader) (nameservers []string, searches []string, return nameservers, searches, options, utilerrors.NewAggregate(allErrors) } -func (c *Configurer) getHostDNSConfig() (*runtimeapi.DNSConfig, error) { +func (c *Configurer) getHostDNSConfig() (*internalapi.DNSConfig, error) { var hostDNS, hostSearch, hostOptions []string // Get host DNS settings if c.ResolverConfig != "" { @@ -293,7 +293,7 @@ func (c *Configurer) getHostDNSConfig() (*runtimeapi.DNSConfig, error) { return nil, err } } - return &runtimeapi.DNSConfig{ + return &internalapi.DNSConfig{ Servers: hostDNS, Searches: hostSearch, Options: hostOptions, @@ -354,7 +354,7 @@ func mergeDNSOptions(existingDNSConfigOptions []string, dnsConfigOptions []v1.Po // appendDNSConfig appends DNS servers, search paths and options given by // PodDNSConfig to the existing DNS config. Duplicated entries will be merged. // This assumes existingDNSConfig and dnsConfig are not nil. 
-func appendDNSConfig(existingDNSConfig *runtimeapi.DNSConfig, dnsConfig *v1.PodDNSConfig) *runtimeapi.DNSConfig { +func appendDNSConfig(existingDNSConfig *internalapi.DNSConfig, dnsConfig *v1.PodDNSConfig) *internalapi.DNSConfig { existingDNSConfig.Servers = omitDuplicates(append(existingDNSConfig.Servers, dnsConfig.Nameservers...)) existingDNSConfig.Searches = omitDuplicates(append(existingDNSConfig.Searches, dnsConfig.Searches...)) existingDNSConfig.Options = mergeDNSOptions(existingDNSConfig.Options, dnsConfig.Options) @@ -362,7 +362,7 @@ func appendDNSConfig(existingDNSConfig *runtimeapi.DNSConfig, dnsConfig *v1.PodD } // GetPodDNS returns DNS settings for the pod. -func (c *Configurer) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) { +func (c *Configurer) GetPodDNS(pod *v1.Pod) (*internalapi.DNSConfig, error) { dnsConfig, err := c.getHostDNSConfig() if err != nil { return nil, err @@ -376,7 +376,7 @@ func (c *Configurer) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) { switch dnsType { case podDNSNone: // DNSNone should use empty DNS settings as the base. 
- dnsConfig = &runtimeapi.DNSConfig{} + dnsConfig = &internalapi.DNSConfig{} case podDNSCluster: if len(c.clusterDNS) != 0 { // For a pod with DNSClusterFirst policy, the cluster DNS server is diff --git a/pkg/kubelet/network/dns/dns_test.go b/pkg/kubelet/network/dns/dns_test.go index 8d781b8deb7a..a77982cbfd71 100644 --- a/pkg/kubelet/network/dns/dns_test.go +++ b/pkg/kubelet/network/dns/dns_test.go @@ -32,9 +32,9 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" featuregatetesting "k8s.io/component-base/featuregate/testing" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/features" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" netutils "k8s.io/utils/net" "github.com/stretchr/testify/assert" @@ -614,12 +614,12 @@ func TestGetPodDNSCustom(t *testing.T) { hostnetwork bool dnsPolicy v1.DNSPolicy dnsConfig *v1.PodDNSConfig - expectedDNSConfig *runtimeapi.DNSConfig + expectedDNSConfig *internalapi.DNSConfig }{ { desc: "DNSNone without DNSConfig should have empty DNS settings", dnsPolicy: v1.DNSNone, - expectedDNSConfig: &runtimeapi.DNSConfig{}, + expectedDNSConfig: &internalapi.DNSConfig{}, }, { desc: "DNSNone with DNSConfig should have a merged DNS settings", @@ -632,7 +632,7 @@ func TestGetPodDNSCustom(t *testing.T) { {Name: "debug"}, }, }, - expectedDNSConfig: &runtimeapi.DNSConfig{ + expectedDNSConfig: &internalapi.DNSConfig{ Servers: []string{"203.0.113.1"}, Searches: []string{"my.domain", "second.domain"}, Options: []string{"ndots:3", "debug"}, @@ -649,7 +649,7 @@ func TestGetPodDNSCustom(t *testing.T) { {Name: "debug"}, }, }, - expectedDNSConfig: &runtimeapi.DNSConfig{ + expectedDNSConfig: &internalapi.DNSConfig{ Servers: []string{testClusterNameserver, "10.0.0.11"}, Searches: []string{testNsSvcDomain, testSvcDomain, testClusterDNSDomain, testHostDomain, "my.domain"}, Options: []string{"ndots:3", "debug"}, @@ -667,7 +667,7 @@ func 
TestGetPodDNSCustom(t *testing.T) { {Name: "debug"}, }, }, - expectedDNSConfig: &runtimeapi.DNSConfig{ + expectedDNSConfig: &internalapi.DNSConfig{ Servers: []string{testClusterNameserver, "10.0.0.11"}, Searches: []string{testNsSvcDomain, testSvcDomain, testClusterDNSDomain, testHostDomain, "my.domain"}, Options: []string{"ndots:3", "debug"}, @@ -684,7 +684,7 @@ func TestGetPodDNSCustom(t *testing.T) { {Name: "debug"}, }, }, - expectedDNSConfig: &runtimeapi.DNSConfig{ + expectedDNSConfig: &internalapi.DNSConfig{ Servers: []string{testHostNameserver, "10.0.0.11"}, Searches: []string{testHostDomain, "my.domain"}, Options: []string{"ndots:3", "debug"}, @@ -709,7 +709,7 @@ func TestGetPodDNSCustom(t *testing.T) { } } -func dnsConfigsAreEqual(resConfig, expectedConfig *runtimeapi.DNSConfig) bool { +func dnsConfigsAreEqual(resConfig, expectedConfig *internalapi.DNSConfig) bool { if len(resConfig.Servers) != len(expectedConfig.Servers) || len(resConfig.Searches) != len(expectedConfig.Searches) || len(resConfig.Options) != len(expectedConfig.Options) { diff --git a/pkg/kubelet/pleg/generic.go b/pkg/kubelet/pleg/generic.go index edbc770ea364..b03bec02c6a5 100644 --- a/pkg/kubelet/pleg/generic.go +++ b/pkg/kubelet/pleg/generic.go @@ -24,8 +24,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog/v2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/metrics" "k8s.io/utils/clock" @@ -382,7 +382,7 @@ func (g *GenericPLEG) getPodIPs(pid types.UID, status *kubecontainer.PodStatus) for _, sandboxStatus := range status.SandboxStatuses { // If at least one sandbox is ready, then use this status update's pod IP - if sandboxStatus.State == runtimeapi.PodSandboxState_SANDBOX_READY { + if sandboxStatus.State == internalapi.PodSandboxState_SANDBOX_READY { return 
status.IPs } } diff --git a/pkg/kubelet/pod_workers.go b/pkg/kubelet/pod_workers.go index 48a9542aeac7..4ac01e007f46 100644 --- a/pkg/kubelet/pod_workers.go +++ b/pkg/kubelet/pod_workers.go @@ -28,8 +28,8 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog/v2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/eviction" @@ -510,7 +510,7 @@ func isPodStatusCacheTerminal(status *kubecontainer.PodStatus) bool { } } for _, sb := range status.SandboxStatuses { - if sb.State == runtimeapi.PodSandboxState_SANDBOX_READY { + if sb.State == internalapi.PodSandboxState_SANDBOX_READY { runningSandboxes++ } } diff --git a/pkg/kubelet/server/server_test.go b/pkg/kubelet/server/server_test.go index 8a53f39584b1..ca43bc62cfc4 100644 --- a/pkg/kubelet/server/server_test.go +++ b/pkg/kubelet/server/server_test.go @@ -47,9 +47,9 @@ import ( "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/client-go/tools/record" "k8s.io/client-go/tools/remotecommand" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1" api "k8s.io/kubernetes/pkg/apis/core" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/utils/pointer" // Do some initialization to decode the query parameters correctly. 
@@ -204,7 +204,7 @@ func (fk *fakeKubelet) GetExec(podFullName string, podUID types.UID, containerNa fk.getExecCheck(podFullName, podUID, containerName, cmd, streamOpts) } // Always use testContainerID - resp, err := fk.streamingRuntime.GetExec(&runtimeapi.ExecRequest{ + resp, err := fk.streamingRuntime.GetExec(&internalapi.ExecRequest{ ContainerId: testContainerID, Cmd: cmd, Tty: streamOpts.TTY, @@ -223,7 +223,7 @@ func (fk *fakeKubelet) GetAttach(podFullName string, podUID types.UID, container fk.getAttachCheck(podFullName, podUID, containerName, streamOpts) } // Always use testContainerID - resp, err := fk.streamingRuntime.GetAttach(&runtimeapi.AttachRequest{ + resp, err := fk.streamingRuntime.GetAttach(&internalapi.AttachRequest{ ContainerId: testContainerID, Tty: streamOpts.TTY, Stdin: streamOpts.Stdin, @@ -241,7 +241,7 @@ func (fk *fakeKubelet) GetPortForward(podName, podNamespace string, podUID types fk.getPortForwardCheck(podName, podNamespace, podUID, portForwardOpts) } // Always use testPodSandboxID - resp, err := fk.streamingRuntime.GetPortForward(&runtimeapi.PortForwardRequest{ + resp, err := fk.streamingRuntime.GetPortForward(&internalapi.PortForwardRequest{ PodSandboxId: testPodSandboxID, Port: portForwardOpts.Ports, }) diff --git a/pkg/kubelet/stats/cri_stats_provider.go b/pkg/kubelet/stats/cri_stats_provider.go index e68aa7c42ac3..241496cf5b84 100644 --- a/pkg/kubelet/stats/cri_stats_provider.go +++ b/pkg/kubelet/stats/cri_stats_provider.go @@ -29,10 +29,9 @@ import ( cadvisorapiv2 "github.com/google/cadvisor/info/v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - internalapi "k8s.io/cri-api/pkg/apis" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog/v2" statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/server/stats" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" @@ -45,7 
+44,7 @@ var ( // cpuUsageRecord holds the cpu usage stats and the calculated usageNanoCores. type cpuUsageRecord struct { - stats *runtimeapi.CpuUsage + stats *internalapi.CpuUsage usageNanoCores *uint64 } @@ -122,14 +121,14 @@ func (p *criStatsProvider) listPodStats(updateCPUNanoCoreUsage bool) ([]statsapi return nil, fmt.Errorf("failed to get rootFs info: %v", err) } - containers, err := p.runtimeService.ListContainers(&runtimeapi.ContainerFilter{}) + containers, err := p.runtimeService.ListContainers(&internalapi.ContainerFilter{}) if err != nil { return nil, fmt.Errorf("failed to list all containers: %v", err) } // Creates pod sandbox map. - podSandboxMap := make(map[string]*runtimeapi.PodSandbox) - podSandboxes, err := p.runtimeService.ListPodSandbox(&runtimeapi.PodSandboxFilter{}) + podSandboxMap := make(map[string]*internalapi.PodSandbox) + podSandboxes, err := p.runtimeService.ListPodSandbox(&internalapi.PodSandboxFilter{}) if err != nil { return nil, fmt.Errorf("failed to list all pod sandboxes: %v", err) } @@ -140,19 +139,19 @@ func (p *criStatsProvider) listPodStats(updateCPUNanoCoreUsage bool) ([]statsapi // fsIDtoInfo is a map from filesystem id to its stats. This will be used // as a cache to avoid querying cAdvisor for the filesystem stats with the // same filesystem id many times. - fsIDtoInfo := make(map[runtimeapi.FilesystemIdentifier]*cadvisorapiv2.FsInfo) + fsIDtoInfo := make(map[internalapi.FilesystemIdentifier]*cadvisorapiv2.FsInfo) // sandboxIDToPodStats is a temporary map from sandbox ID to its pod stats. sandboxIDToPodStats := make(map[string]*statsapi.PodStats) - resp, err := p.runtimeService.ListContainerStats(&runtimeapi.ContainerStatsFilter{}) + resp, err := p.runtimeService.ListContainerStats(&internalapi.ContainerStatsFilter{}) if err != nil { return nil, fmt.Errorf("failed to list all container stats: %v", err) } containers = removeTerminatedContainers(containers) // Creates container map. 
- containerMap := make(map[string]*runtimeapi.Container) + containerMap := make(map[string]*internalapi.Container) for _, c := range containers { containerMap[c.Id] = c } @@ -220,14 +219,14 @@ func (p *criStatsProvider) listPodStats(updateCPUNanoCoreUsage bool) ([]statsapi // ListPodCPUAndMemoryStats returns the CPU and Memory stats of all the pod-managed containers. func (p *criStatsProvider) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) { - containers, err := p.runtimeService.ListContainers(&runtimeapi.ContainerFilter{}) + containers, err := p.runtimeService.ListContainers(&internalapi.ContainerFilter{}) if err != nil { return nil, fmt.Errorf("failed to list all containers: %v", err) } // Creates pod sandbox map. - podSandboxMap := make(map[string]*runtimeapi.PodSandbox) - podSandboxes, err := p.runtimeService.ListPodSandbox(&runtimeapi.PodSandboxFilter{}) + podSandboxMap := make(map[string]*internalapi.PodSandbox) + podSandboxes, err := p.runtimeService.ListPodSandbox(&internalapi.PodSandboxFilter{}) if err != nil { return nil, fmt.Errorf("failed to list all pod sandboxes: %v", err) } @@ -239,14 +238,14 @@ func (p *criStatsProvider) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, erro // sandboxIDToPodStats is a temporary map from sandbox ID to its pod stats. sandboxIDToPodStats := make(map[string]*statsapi.PodStats) - resp, err := p.runtimeService.ListContainerStats(&runtimeapi.ContainerStatsFilter{}) + resp, err := p.runtimeService.ListContainerStats(&internalapi.ContainerStatsFilter{}) if err != nil { return nil, fmt.Errorf("failed to list all container stats: %v", err) } containers = removeTerminatedContainers(containers) // Creates container map. 
- containerMap := make(map[string]*runtimeapi.Container) + containerMap := make(map[string]*internalapi.Container) for _, c := range containers { containerMap[c.Id] = c } @@ -357,7 +356,7 @@ func (p *criStatsProvider) ImageFsDevice() (string, error) { // getFsInfo returns the information of the filesystem with the specified // fsID. If any error occurs, this function logs the error and returns // nil. -func (p *criStatsProvider) getFsInfo(fsID *runtimeapi.FilesystemIdentifier) *cadvisorapiv2.FsInfo { +func (p *criStatsProvider) getFsInfo(fsID *internalapi.FilesystemIdentifier) *cadvisorapiv2.FsInfo { if fsID == nil { klog.V(2).InfoS("Failed to get filesystem info: fsID is nil") return nil @@ -377,7 +376,7 @@ func (p *criStatsProvider) getFsInfo(fsID *runtimeapi.FilesystemIdentifier) *cad } // buildPodStats returns a PodStats that identifies the Pod managing cinfo -func buildPodStats(podSandbox *runtimeapi.PodSandbox) *statsapi.PodStats { +func buildPodStats(podSandbox *internalapi.PodSandbox) *statsapi.PodStats { return &statsapi.PodStats{ PodRef: statsapi.PodReference{ Name: podSandbox.Metadata.Name, @@ -506,11 +505,11 @@ func (p *criStatsProvider) addProcessStats( } func (p *criStatsProvider) makeContainerStats( - stats *runtimeapi.ContainerStats, - container *runtimeapi.Container, + stats *internalapi.ContainerStats, + container *internalapi.Container, rootFsInfo *cadvisorapiv2.FsInfo, - fsIDtoInfo map[runtimeapi.FilesystemIdentifier]*cadvisorapiv2.FsInfo, - meta *runtimeapi.PodSandboxMetadata, + fsIDtoInfo map[internalapi.FilesystemIdentifier]*cadvisorapiv2.FsInfo, + meta *internalapi.PodSandboxMetadata, updateCPUNanoCoreUsage bool, ) *statsapi.ContainerStats { result := &statsapi.ContainerStats{ @@ -589,8 +588,8 @@ func (p *criStatsProvider) makeContainerStats( } func (p *criStatsProvider) makeContainerCPUAndMemoryStats( - stats *runtimeapi.ContainerStats, - container *runtimeapi.Container, + stats *internalapi.ContainerStats, + container 
*internalapi.Container, ) *statsapi.ContainerStats { result := &statsapi.ContainerStats{ Name: stats.Attributes.Metadata.Name, @@ -629,7 +628,7 @@ func (p *criStatsProvider) makeContainerCPUAndMemoryStats( } // getContainerUsageNanoCores gets the cached usageNanoCores. -func (p *criStatsProvider) getContainerUsageNanoCores(stats *runtimeapi.ContainerStats) *uint64 { +func (p *criStatsProvider) getContainerUsageNanoCores(stats *internalapi.ContainerStats) *uint64 { if stats == nil || stats.Attributes == nil { return nil } @@ -649,7 +648,7 @@ func (p *criStatsProvider) getContainerUsageNanoCores(stats *runtimeapi.Containe // getContainerUsageNanoCores computes usageNanoCores based on the given and // the cached usageCoreNanoSeconds, updates the cache with the computed // usageNanoCores, and returns the usageNanoCores. -func (p *criStatsProvider) getAndUpdateContainerUsageNanoCores(stats *runtimeapi.ContainerStats) *uint64 { +func (p *criStatsProvider) getAndUpdateContainerUsageNanoCores(stats *internalapi.ContainerStats) *uint64 { if stats == nil || stats.Attributes == nil || stats.Cpu == nil || stats.Cpu.UsageCoreNanoSeconds == nil { return nil } @@ -710,8 +709,8 @@ func (p *criStatsProvider) cleanupOutdatedCaches() { // This is needed because: // 1) PodSandbox may be recreated; // 2) Pod may be recreated with the same name and namespace. 
-func removeTerminatedPods(pods []*runtimeapi.PodSandbox) []*runtimeapi.PodSandbox { - podMap := make(map[statsapi.PodReference][]*runtimeapi.PodSandbox) +func removeTerminatedPods(pods []*internalapi.PodSandbox) []*internalapi.PodSandbox { + podMap := make(map[statsapi.PodReference][]*internalapi.PodSandbox) // Sort order by create time sort.Slice(pods, func(i, j int) bool { return pods[i].CreatedAt < pods[j].CreatedAt @@ -725,7 +724,7 @@ func removeTerminatedPods(pods []*runtimeapi.PodSandbox) []*runtimeapi.PodSandbo podMap[refID] = append(podMap[refID], pod) } - result := make([]*runtimeapi.PodSandbox, 0) + result := make([]*internalapi.PodSandbox, 0) for _, refs := range podMap { if len(refs) == 1 { result = append(result, refs[0]) @@ -733,7 +732,7 @@ func removeTerminatedPods(pods []*runtimeapi.PodSandbox) []*runtimeapi.PodSandbo } found := false for i := 0; i < len(refs); i++ { - if refs[i].State == runtimeapi.PodSandboxState_SANDBOX_READY { + if refs[i].State == internalapi.PodSandboxState_SANDBOX_READY { found = true result = append(result, refs[i]) } @@ -747,8 +746,8 @@ func removeTerminatedPods(pods []*runtimeapi.PodSandbox) []*runtimeapi.PodSandbo // removeTerminatedContainers removes all terminated containers since they should // not be used for usage calculations. 
-func removeTerminatedContainers(containers []*runtimeapi.Container) []*runtimeapi.Container { - containerMap := make(map[containerID][]*runtimeapi.Container) +func removeTerminatedContainers(containers []*internalapi.Container) []*internalapi.Container { + containerMap := make(map[containerID][]*internalapi.Container) // Sort order by create time sort.Slice(containers, func(i, j int) bool { return containers[i].CreatedAt < containers[j].CreatedAt @@ -761,10 +760,10 @@ func removeTerminatedContainers(containers []*runtimeapi.Container) []*runtimeap containerMap[refID] = append(containerMap[refID], container) } - result := make([]*runtimeapi.Container, 0) + result := make([]*internalapi.Container, 0) for _, refs := range containerMap { for i := 0; i < len(refs); i++ { - if refs[i].State == runtimeapi.ContainerState_CONTAINER_RUNNING { + if refs[i].State == internalapi.ContainerState_CONTAINER_RUNNING { result = append(result, refs[i]) } } diff --git a/pkg/kubelet/stats/cri_stats_provider_test.go b/pkg/kubelet/stats/cri_stats_provider_test.go index a5857c1a0a9e..24a04df1e30e 100644 --- a/pkg/kubelet/stats/cri_stats_provider_test.go +++ b/pkg/kubelet/stats/cri_stats_provider_test.go @@ -33,9 +33,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" - critest "k8s.io/cri-api/pkg/apis/testing" statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" + critest "k8s.io/kubernetes/pkg/kubelet/apis/cri/testing" cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing" "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainertest "k8s.io/kubernetes/pkg/kubelet/container/testing" @@ -185,7 +185,7 @@ func TestCRIListPodStats(t *testing.T) { fakeRuntimeService.SetFakeContainers([]*critest.FakeContainer{ container0, container1, container2, container3, container4, container5, container6, container7, 
container8, }) - fakeRuntimeService.SetFakeContainerStats([]*runtimeapi.ContainerStats{ + fakeRuntimeService.SetFakeContainerStats([]*internalapi.ContainerStats{ containerStats0, containerStats1, containerStats2, containerStats3, containerStats4, containerStats5, containerStats6, containerStats7, containerStats8, }) @@ -252,7 +252,7 @@ func TestCRIListPodStats(t *testing.T) { assert.Equal(sandbox0.CreatedAt, p0.StartTime.UnixNano()) assert.Equal(2, len(p0.Containers)) - checkEphemeralStorageStats(assert, p0, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats0, containerStats1}, + checkEphemeralStorageStats(assert, p0, ephemeralVolumes, []*internalapi.ContainerStats{containerStats0, containerStats1}, []*volume.Metrics{containerLogStats0, containerLogStats1}, podLogStats0) containerStatsMap := make(map[string]statsapi.ContainerStats) @@ -280,7 +280,7 @@ func TestCRIListPodStats(t *testing.T) { assert.Equal(sandbox1.CreatedAt, p1.StartTime.UnixNano()) assert.Equal(1, len(p1.Containers)) - checkEphemeralStorageStats(assert, p1, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats2}, + checkEphemeralStorageStats(assert, p1, ephemeralVolumes, []*internalapi.ContainerStats{containerStats2}, []*volume.Metrics{containerLogStats2}, podLogStats1) c2 := p1.Containers[0] assert.Equal(cName2, c2.Name) @@ -296,7 +296,7 @@ func TestCRIListPodStats(t *testing.T) { assert.Equal(sandbox2.CreatedAt, p2.StartTime.UnixNano()) assert.Equal(1, len(p2.Containers)) - checkEphemeralStorageStats(assert, p2, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats4}, + checkEphemeralStorageStats(assert, p2, ephemeralVolumes, []*internalapi.ContainerStats{containerStats4}, []*volume.Metrics{containerLogStats4}, nil) c3 := p2.Containers[0] @@ -376,7 +376,7 @@ func TestAcceleratorUsageStatsCanBeDisabled(t *testing.T) { fakeRuntimeService.SetFakeContainers([]*critest.FakeContainer{ container0, container1, }) - 
fakeRuntimeService.SetFakeContainerStats([]*runtimeapi.ContainerStats{ + fakeRuntimeService.SetFakeContainerStats([]*internalapi.ContainerStats{ containerStats0, containerStats1, }) @@ -521,7 +521,7 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) { fakeRuntimeService.SetFakeContainers([]*critest.FakeContainer{ container0, container1, container2, container3, container4, container5, container6, container7, container8, container9, }) - fakeRuntimeService.SetFakeContainerStats([]*runtimeapi.ContainerStats{ + fakeRuntimeService.SetFakeContainerStats([]*internalapi.ContainerStats{ containerStats0, containerStats1, containerStats2, containerStats3, containerStats4, containerStats5, containerStats6, containerStats7, containerStats8, containerStats9, }) @@ -658,7 +658,7 @@ func TestCRIImagesFsStats(t *testing.T) { fakeImageService = critest.NewFakeImageService() ) mockCadvisor.EXPECT().GetDirFsInfo(imageFsMountpoint).Return(imageFsInfo, nil) - fakeImageService.SetFakeFilesystemUsage([]*runtimeapi.FilesystemUsage{ + fakeImageService.SetFakeFilesystemUsage([]*internalapi.FilesystemUsage{ imageFsUsage, }) @@ -688,18 +688,18 @@ func TestCRIImagesFsStats(t *testing.T) { func makeFakePodSandbox(name, uid, namespace string, terminated bool) *critest.FakePodSandbox { p := &critest.FakePodSandbox{ - PodSandboxStatus: runtimeapi.PodSandboxStatus{ - Metadata: &runtimeapi.PodSandboxMetadata{ + PodSandboxStatus: internalapi.PodSandboxStatus{ + Metadata: &internalapi.PodSandboxMetadata{ Name: name, Uid: uid, Namespace: namespace, }, - State: runtimeapi.PodSandboxState_SANDBOX_READY, + State: internalapi.PodSandboxState_SANDBOX_READY, CreatedAt: time.Now().UnixNano(), }, } if terminated { - p.PodSandboxStatus.State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY + p.PodSandboxStatus.State = internalapi.PodSandboxState_SANDBOX_NOTREADY } p.PodSandboxStatus.Id = strings.ReplaceAll(string(uuid.NewUUID()), "-", "") return p @@ -709,9 +709,9 @@ func makeFakeContainer(sandbox 
*critest.FakePodSandbox, name string, attempt uin sandboxID := sandbox.PodSandboxStatus.Id c := &critest.FakeContainer{ SandboxID: sandboxID, - ContainerStatus: runtimeapi.ContainerStatus{ - Metadata: &runtimeapi.ContainerMetadata{Name: name, Attempt: attempt}, - Image: &runtimeapi.ImageSpec{}, + ContainerStatus: internalapi.ContainerStatus{ + Metadata: &internalapi.ContainerMetadata{Name: name, Attempt: attempt}, + Image: &internalapi.ImageSpec{}, ImageRef: "fake-image-ref", CreatedAt: time.Now().UnixNano(), }, @@ -723,49 +723,49 @@ func makeFakeContainer(sandbox *critest.FakePodSandbox, name string, attempt uin "io.kubernetes.container.name": name, } if terminated { - c.ContainerStatus.State = runtimeapi.ContainerState_CONTAINER_EXITED + c.ContainerStatus.State = internalapi.ContainerState_CONTAINER_EXITED } else { - c.ContainerStatus.State = runtimeapi.ContainerState_CONTAINER_RUNNING + c.ContainerStatus.State = internalapi.ContainerState_CONTAINER_RUNNING } c.ContainerStatus.Id = strings.ReplaceAll(string(uuid.NewUUID()), "-", "") return c } -func makeFakeContainerStats(container *critest.FakeContainer, imageFsMountpoint string) *runtimeapi.ContainerStats { - containerStats := &runtimeapi.ContainerStats{ - Attributes: &runtimeapi.ContainerAttributes{ +func makeFakeContainerStats(container *critest.FakeContainer, imageFsMountpoint string) *internalapi.ContainerStats { + containerStats := &internalapi.ContainerStats{ + Attributes: &internalapi.ContainerAttributes{ Id: container.ContainerStatus.Id, Metadata: container.ContainerStatus.Metadata, }, - WritableLayer: &runtimeapi.FilesystemUsage{ + WritableLayer: &internalapi.FilesystemUsage{ Timestamp: time.Now().UnixNano(), - FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: imageFsMountpoint}, - UsedBytes: &runtimeapi.UInt64Value{Value: rand.Uint64() / 100}, - InodesUsed: &runtimeapi.UInt64Value{Value: rand.Uint64() / 100}, + FsId: &internalapi.FilesystemIdentifier{Mountpoint: imageFsMountpoint}, + UsedBytes: 
&internalapi.UInt64Value{Value: rand.Uint64() / 100}, + InodesUsed: &internalapi.UInt64Value{Value: rand.Uint64() / 100}, }, } - if container.State == runtimeapi.ContainerState_CONTAINER_EXITED { + if container.State == internalapi.ContainerState_CONTAINER_EXITED { containerStats.Cpu = nil containerStats.Memory = nil } else { - containerStats.Cpu = &runtimeapi.CpuUsage{ + containerStats.Cpu = &internalapi.CpuUsage{ Timestamp: time.Now().UnixNano(), - UsageCoreNanoSeconds: &runtimeapi.UInt64Value{Value: rand.Uint64()}, + UsageCoreNanoSeconds: &internalapi.UInt64Value{Value: rand.Uint64()}, } - containerStats.Memory = &runtimeapi.MemoryUsage{ + containerStats.Memory = &internalapi.MemoryUsage{ Timestamp: time.Now().UnixNano(), - WorkingSetBytes: &runtimeapi.UInt64Value{Value: rand.Uint64()}, + WorkingSetBytes: &internalapi.UInt64Value{Value: rand.Uint64()}, } } return containerStats } -func makeFakeImageFsUsage(fsMountpoint string) *runtimeapi.FilesystemUsage { - return &runtimeapi.FilesystemUsage{ +func makeFakeImageFsUsage(fsMountpoint string) *internalapi.FilesystemUsage { + return &internalapi.FilesystemUsage{ Timestamp: time.Now().UnixNano(), - FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: fsMountpoint}, - UsedBytes: &runtimeapi.UInt64Value{Value: rand.Uint64()}, - InodesUsed: &runtimeapi.UInt64Value{Value: rand.Uint64()}, + FsId: &internalapi.FilesystemIdentifier{Mountpoint: fsMountpoint}, + UsedBytes: &internalapi.UInt64Value{Value: rand.Uint64()}, + InodesUsed: &internalapi.UInt64Value{Value: rand.Uint64()}, } } @@ -819,7 +819,7 @@ func checkCRIAcceleratorStats(assert *assert.Assertions, actual statsapi.Contain } } -func checkCRIRootfsStats(assert *assert.Assertions, actual statsapi.ContainerStats, cs *runtimeapi.ContainerStats, imageFsInfo *cadvisorapiv2.FsInfo) { +func checkCRIRootfsStats(assert *assert.Assertions, actual statsapi.ContainerStats, cs *internalapi.ContainerStats, imageFsInfo *cadvisorapiv2.FsInfo) { 
assert.Equal(cs.WritableLayer.Timestamp, actual.Rootfs.Time.UnixNano()) if imageFsInfo != nil { assert.Equal(imageFsInfo.Available, *actual.Rootfs.AvailableBytes) @@ -849,7 +849,7 @@ func checkCRILogsStats(assert *assert.Assertions, actual statsapi.ContainerStats func checkEphemeralStorageStats(assert *assert.Assertions, actual statsapi.PodStats, volumes []statsapi.VolumeStats, - containers []*runtimeapi.ContainerStats, + containers []*internalapi.ContainerStats, containerLogStats []*volume.Metrics, podLogStats *volume.Metrics) { var totalUsed, inodesUsed uint64 @@ -916,7 +916,7 @@ func TestGetContainerUsageNanoCores(t *testing.T) { tests := []struct { desc string cpuUsageCache map[string]*cpuUsageRecord - stats *runtimeapi.ContainerStats + stats *internalapi.ContainerStats expected *uint64 }{ { @@ -926,8 +926,8 @@ func TestGetContainerUsageNanoCores(t *testing.T) { { desc: "should return nil if cpu stats is nil", cpuUsageCache: map[string]*cpuUsageRecord{}, - stats: &runtimeapi.ContainerStats{ - Attributes: &runtimeapi.ContainerAttributes{ + stats: &internalapi.ContainerStats{ + Attributes: &internalapi.ContainerAttributes{ Id: "1", }, Cpu: nil, @@ -936,11 +936,11 @@ func TestGetContainerUsageNanoCores(t *testing.T) { { desc: "should return nil if usageCoreNanoSeconds is nil", cpuUsageCache: map[string]*cpuUsageRecord{}, - stats: &runtimeapi.ContainerStats{ - Attributes: &runtimeapi.ContainerAttributes{ + stats: &internalapi.ContainerStats{ + Attributes: &internalapi.ContainerAttributes{ Id: "1", }, - Cpu: &runtimeapi.CpuUsage{ + Cpu: &internalapi.CpuUsage{ Timestamp: 1, UsageCoreNanoSeconds: nil, }, @@ -949,13 +949,13 @@ func TestGetContainerUsageNanoCores(t *testing.T) { { desc: "should return nil if cpu stats is not cached yet", cpuUsageCache: map[string]*cpuUsageRecord{}, - stats: &runtimeapi.ContainerStats{ - Attributes: &runtimeapi.ContainerAttributes{ + stats: &internalapi.ContainerStats{ + Attributes: &internalapi.ContainerAttributes{ Id: "1", }, - Cpu: 
&runtimeapi.CpuUsage{ + Cpu: &internalapi.CpuUsage{ Timestamp: 1, - UsageCoreNanoSeconds: &runtimeapi.UInt64Value{ + UsageCoreNanoSeconds: &internalapi.UInt64Value{ Value: 10000000000, }, }, @@ -963,22 +963,22 @@ func TestGetContainerUsageNanoCores(t *testing.T) { }, { desc: "should return zero value if cached cpu stats is equal to current value", - stats: &runtimeapi.ContainerStats{ - Attributes: &runtimeapi.ContainerAttributes{ + stats: &internalapi.ContainerStats{ + Attributes: &internalapi.ContainerAttributes{ Id: "1", }, - Cpu: &runtimeapi.CpuUsage{ + Cpu: &internalapi.CpuUsage{ Timestamp: 1, - UsageCoreNanoSeconds: &runtimeapi.UInt64Value{ + UsageCoreNanoSeconds: &internalapi.UInt64Value{ Value: 10000000000, }, }, }, cpuUsageCache: map[string]*cpuUsageRecord{ "1": { - stats: &runtimeapi.CpuUsage{ + stats: &internalapi.CpuUsage{ Timestamp: 0, - UsageCoreNanoSeconds: &runtimeapi.UInt64Value{ + UsageCoreNanoSeconds: &internalapi.UInt64Value{ Value: 10000000000, }, }, @@ -988,22 +988,22 @@ func TestGetContainerUsageNanoCores(t *testing.T) { }, { desc: "should return correct value if cached cpu stats is not equal to current value", - stats: &runtimeapi.ContainerStats{ - Attributes: &runtimeapi.ContainerAttributes{ + stats: &internalapi.ContainerStats{ + Attributes: &internalapi.ContainerAttributes{ Id: "1", }, - Cpu: &runtimeapi.CpuUsage{ + Cpu: &internalapi.CpuUsage{ Timestamp: int64(time.Second / time.Nanosecond), - UsageCoreNanoSeconds: &runtimeapi.UInt64Value{ + UsageCoreNanoSeconds: &internalapi.UInt64Value{ Value: 20000000000, }, }, }, cpuUsageCache: map[string]*cpuUsageRecord{ "1": { - stats: &runtimeapi.CpuUsage{ + stats: &internalapi.CpuUsage{ Timestamp: 0, - UsageCoreNanoSeconds: &runtimeapi.UInt64Value{ + UsageCoreNanoSeconds: &internalapi.UInt64Value{ Value: 10000000000, }, }, @@ -1013,22 +1013,22 @@ func TestGetContainerUsageNanoCores(t *testing.T) { }, { desc: "should return correct value if elapsed UsageCoreNanoSeconds exceeds 18446744073", - stats: 
&runtimeapi.ContainerStats{ - Attributes: &runtimeapi.ContainerAttributes{ + stats: &internalapi.ContainerStats{ + Attributes: &internalapi.ContainerAttributes{ Id: "1", }, - Cpu: &runtimeapi.CpuUsage{ + Cpu: &internalapi.CpuUsage{ Timestamp: int64(time.Second / time.Nanosecond), - UsageCoreNanoSeconds: &runtimeapi.UInt64Value{ + UsageCoreNanoSeconds: &internalapi.UInt64Value{ Value: 68172016162105, }, }, }, cpuUsageCache: map[string]*cpuUsageRecord{ "1": { - stats: &runtimeapi.CpuUsage{ + stats: &internalapi.CpuUsage{ Timestamp: 0, - UsageCoreNanoSeconds: &runtimeapi.UInt64Value{ + UsageCoreNanoSeconds: &internalapi.UInt64Value{ Value: 67983588375722, }, }, @@ -1038,22 +1038,22 @@ func TestGetContainerUsageNanoCores(t *testing.T) { }, { desc: "should return nil if cpuacct is reset to 0 in a live container", - stats: &runtimeapi.ContainerStats{ - Attributes: &runtimeapi.ContainerAttributes{ + stats: &internalapi.ContainerStats{ + Attributes: &internalapi.ContainerAttributes{ Id: "1", }, - Cpu: &runtimeapi.CpuUsage{ + Cpu: &internalapi.CpuUsage{ Timestamp: 2, - UsageCoreNanoSeconds: &runtimeapi.UInt64Value{ + UsageCoreNanoSeconds: &internalapi.UInt64Value{ Value: 0, }, }, }, cpuUsageCache: map[string]*cpuUsageRecord{ "1": { - stats: &runtimeapi.CpuUsage{ + stats: &internalapi.CpuUsage{ Timestamp: 1, - UsageCoreNanoSeconds: &runtimeapi.UInt64Value{ + UsageCoreNanoSeconds: &internalapi.UInt64Value{ Value: 10000000000, }, }, diff --git a/pkg/kubelet/stats/provider.go b/pkg/kubelet/stats/provider.go index fd3c5dd82480..19dd88603a8e 100644 --- a/pkg/kubelet/stats/provider.go +++ b/pkg/kubelet/stats/provider.go @@ -22,8 +22,8 @@ import ( cadvisorapiv1 "github.com/google/cadvisor/info/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - internalapi "k8s.io/cri-api/pkg/apis" statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cadvisor" kubecontainer 
"k8s.io/kubernetes/pkg/kubelet/container" kubepod "k8s.io/kubernetes/pkg/kubelet/pod" diff --git a/pkg/kubemark/hollow_kubelet.go b/pkg/kubemark/hollow_kubelet.go index cd789e7c3e49..80d857a36a36 100644 --- a/pkg/kubemark/hollow_kubelet.go +++ b/pkg/kubemark/hollow_kubelet.go @@ -25,12 +25,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" - internalapi "k8s.io/cri-api/pkg/apis" kubeletapp "k8s.io/kubernetes/cmd/kubelet/app" "k8s.io/kubernetes/cmd/kubelet/app/options" "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubelet" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/cm" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" diff --git a/staging/src/k8s.io/cri-api/go.mod b/staging/src/k8s.io/cri-api/go.mod index 913ba302f708..fde14bf61fc0 100644 --- a/staging/src/k8s.io/cri-api/go.mod +++ b/staging/src/k8s.io/cri-api/go.mod @@ -5,17 +5,11 @@ module k8s.io/cri-api go 1.16 require ( - github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 - github.com/kr/text v0.2.0 // indirect - github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect - github.com/stretchr/testify v1.7.0 golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55 // indirect google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect google.golang.org/grpc v1.38.0 - gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) replace k8s.io/cri-api => ../cri-api diff --git a/staging/src/k8s.io/cri-api/go.sum b/staging/src/k8s.io/cri-api/go.sum index 9387cda667f1..99dab5d28de2 100644 --- a/staging/src/k8s.io/cri-api/go.sum +++ b/staging/src/k8s.io/cri-api/go.sum @@ -3,10 +3,7 @@ github.com/BurntSushi/toml 
v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= @@ -37,19 +34,10 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= 
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -140,11 +128,6 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/test/e2e_node/container_manager_test.go b/test/e2e_node/container_manager_test.go index 1f16ca8aed4d..6688a7ff311b 100644 --- a/test/e2e_node/container_manager_test.go +++ b/test/e2e_node/container_manager_test.go @@ -32,7 +32,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" @@ -157,9 +157,9 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { ginkgo.By("Dump all running containers") runtime, _, err := getCRIClient() framework.ExpectNoError(err) - containers, err := runtime.ListContainers(&runtimeapi.ContainerFilter{ - State: &runtimeapi.ContainerStateValue{ - State: runtimeapi.ContainerState_CONTAINER_RUNNING, + containers, err := runtime.ListContainers(&internalapi.ContainerFilter{ + State: &internalapi.ContainerStateValue{ + State: internalapi.ContainerState_CONTAINER_RUNNING, }, }) framework.ExpectNoError(err) diff --git a/test/e2e_node/cpu_manager_test.go b/test/e2e_node/cpu_manager_test.go index a23edd8631d2..f075aa753aa3 100644 --- a/test/e2e_node/cpu_manager_test.go +++ b/test/e2e_node/cpu_manager_test.go @@ -28,8 +28,8 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" cpumanagerstate "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" @@ -116,7 +116,7 @@ func 
waitForContainerRemoval(containerName, podName, podNS string) { rs, _, err := getCRIClient() framework.ExpectNoError(err) gomega.Eventually(func() bool { - containers, err := rs.ListContainers(&runtimeapi.ContainerFilter{ + containers, err := rs.ListContainers(&internalapi.ContainerFilter{ LabelSelector: map[string]string{ types.KubernetesPodNameLabel: podName, types.KubernetesPodNamespaceLabel: podNS, diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go index ff371416631b..464bc289e107 100644 --- a/test/e2e_node/garbage_collector_test.go +++ b/test/e2e_node/garbage_collector_test.go @@ -22,10 +22,9 @@ import ( "strconv" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - internalapi "k8s.io/cri-api/pkg/apis" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/test/e2e/framework" @@ -151,7 +150,7 @@ func containerGCTest(f *framework.Framework, test testRun) { // Initialize the getContainerNames function to use CRI runtime client. 
pod.getContainerNames = func() ([]string, error) { relevantContainers := []string{} - containers, err := runtime.ListContainers(&runtimeapi.ContainerFilter{ + containers, err := runtime.ListContainers(&internalapi.ContainerFilter{ LabelSelector: map[string]string{ types.KubernetesPodNameLabel: pod.podName, types.KubernetesPodNamespaceLabel: f.Namespace.Name, diff --git a/test/e2e_node/image_list.go b/test/e2e_node/image_list.go index 7c6aaa6002d9..90420a7a528b 100644 --- a/test/e2e_node/image_list.go +++ b/test/e2e_node/image_list.go @@ -29,8 +29,7 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" - internalapi "k8s.io/cri-api/pkg/apis" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" commontest "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu" @@ -132,11 +131,11 @@ func (rp *remotePuller) Name() string { } func (rp *remotePuller) Pull(image string) ([]byte, error) { - imageStatus, err := rp.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image}) + imageStatus, err := rp.imageService.ImageStatus(&internalapi.ImageSpec{Image: image}) if err == nil && imageStatus != nil { return nil, nil } - _, err = rp.imageService.PullImage(&runtimeapi.ImageSpec{Image: image}, nil, nil) + _, err = rp.imageService.PullImage(&internalapi.ImageSpec{Image: image}, nil, nil) return nil, err } diff --git a/test/e2e_node/topology_manager_test.go b/test/e2e_node/topology_manager_test.go index 6b43ffd62568..61f476aa2d5e 100644 --- a/test/e2e_node/topology_manager_test.go +++ b/test/e2e_node/topology_manager_test.go @@ -34,8 +34,8 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" 
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager" "k8s.io/kubernetes/pkg/kubelet/types" @@ -387,7 +387,7 @@ func waitForAllContainerRemoval(podName, podNS string) { rs, _, err := getCRIClient() framework.ExpectNoError(err) gomega.Eventually(func() bool { - containers, err := rs.ListContainers(&runtimeapi.ContainerFilter{ + containers, err := rs.ListContainers(&internalapi.ContainerFilter{ LabelSelector: map[string]string{ types.KubernetesPodNameLabel: podName, types.KubernetesPodNamespaceLabel: podNS, diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index a609011af520..a18cb8ca6649 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -36,7 +36,6 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" "k8s.io/component-base/featuregate" - internalapi "k8s.io/cri-api/pkg/apis" "k8s.io/klog/v2" kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" kubeletpodresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1" @@ -44,6 +43,7 @@ import ( stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1" "k8s.io/kubernetes/pkg/features" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/apis/podresources" "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/cri/remote" @@ -379,7 +379,7 @@ func getCRIClient() (internalapi.RuntimeService, internalapi.ImageManagerService // connection timeout for CRI service connection const connectionTimeout = 2 * time.Minute runtimeEndpoint := framework.TestContext.ContainerRuntimeEndpoint - r, err := remote.NewRemoteRuntimeService(runtimeEndpoint, connectionTimeout) + r, err := remote.NewRemoteRuntimeService(runtimeEndpoint, connectionTimeout, "") if err != nil { return nil, nil, err } @@ -389,7 +389,7 @@ func getCRIClient() (internalapi.RuntimeService, internalapi.ImageManagerService //explicitly specified imageManagerEndpoint = 
framework.TestContext.ImageServiceEndpoint } - i, err := remote.NewRemoteImageService(imageManagerEndpoint, connectionTimeout) + i, err := remote.NewRemoteImageService(imageManagerEndpoint, connectionTimeout, r.APIVersion()) if err != nil { return nil, nil, err } diff --git a/vendor/modules.txt b/vendor/modules.txt index 7cb836a4fb67..8e98b9887e18 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1397,6 +1397,7 @@ k8s.io/apimachinery/pkg/runtime/serializer/yaml k8s.io/apimachinery/pkg/selection k8s.io/apimachinery/pkg/types k8s.io/apimachinery/pkg/util/cache +k8s.io/apimachinery/pkg/util/clock k8s.io/apimachinery/pkg/util/diff k8s.io/apimachinery/pkg/util/duration k8s.io/apimachinery/pkg/util/errors @@ -1999,9 +2000,8 @@ k8s.io/controller-manager/pkg/leadermigration/config k8s.io/controller-manager/pkg/leadermigration/options # k8s.io/cri-api v0.0.0 => ./staging/src/k8s.io/cri-api ## explicit -k8s.io/cri-api/pkg/apis +k8s.io/cri-api/pkg/apis/runtime/v1 k8s.io/cri-api/pkg/apis/runtime/v1alpha2 -k8s.io/cri-api/pkg/apis/testing # k8s.io/csi-translation-lib v0.0.0 => ./staging/src/k8s.io/csi-translation-lib ## explicit k8s.io/csi-translation-lib