shimv2: fix the issues brought in by updating the containerd vendor
Fix the mismatches brought in by upgrading the vendored containerd,
cgroups, and runtime-spec packages.

Fixes: #1441

Signed-off-by: fupan.lfp <fupan.lfp@antgroup.com>
fupan.lfp committed Jun 30, 2021
1 parent 79e632b commit f607641
Showing 91 changed files with 14,488 additions and 799 deletions.
6 changes: 3 additions & 3 deletions src/runtime/cli/kata-env.go
@@ -265,9 +265,9 @@ func getMemoryInfo() MemoryInfo {
 	}
 
 	return MemoryInfo{
-		Total:     mi.MemTotal,
-		Free:      mi.MemFree,
-		Available: mi.MemAvailable,
+		Total:     *mi.MemTotal,
+		Free:      *mi.MemFree,
+		Available: *mi.MemAvailable,
 	}
 }
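Note on the change above: the updated vendored /proc/meminfo parser reports values as pointer fields, so a value the kernel did not report can be told apart from a genuine zero; the fix simply dereferences them. A minimal sketch of the pattern, assuming a *uint64-shaped struct like the vendor's (the type and helper below are hypothetical):

```go
package main

import "fmt"

// meminfo mirrors the assumed shape of the updated vendor type:
// pointer fields distinguish "not reported" from a real zero.
type meminfo struct {
	MemTotal     *uint64
	MemFree      *uint64
	MemAvailable *uint64
}

// deref unwraps a pointer field, falling back to 0 when unset.
func deref(v *uint64) uint64 {
	if v == nil {
		return 0
	}
	return *v
}

func main() {
	total := uint64(16 << 20)
	mi := meminfo{MemTotal: &total} // MemFree, MemAvailable left nil
	fmt.Println(deref(mi.MemTotal), deref(mi.MemFree))
}
```

The committed code dereferences unconditionally, relying on the vendor to populate these fields whenever parsing succeeds.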

2 changes: 1 addition & 1 deletion src/runtime/containerd-shim-v2/create.go
@@ -22,9 +22,9 @@ import (
 	otelTrace "go.opentelemetry.io/otel/trace"
 
 	// only register the proto type
+	crioption "github.com/containerd/containerd/pkg/runtimeoptions/v1"
 	_ "github.com/containerd/containerd/runtime/linux/runctypes"
 	_ "github.com/containerd/containerd/runtime/v2/runc/options"
-	crioption "github.com/containerd/cri-containerd/pkg/api/runtimeoptions/v1"
 
 	"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils"
 	vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
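Context for the import swap above: the runtime-options proto type consumed by the shim now ships in containerd itself rather than in the archived cri-containerd repository. A small usage sketch, assuming containerd v1.5-era import paths (the option values below are made up):

```go
package main

import (
	"fmt"

	crioption "github.com/containerd/containerd/pkg/runtimeoptions/v1"
)

func main() {
	// Options carries a runtime type URL plus the path of its config
	// file, which is how CRI points the shim at a configuration.toml.
	opts := &crioption.Options{
		TypeUrl:    "io.containerd.kata.v2", // hypothetical value
		ConfigPath: "/etc/kata-containers/configuration.toml",
	}
	fmt.Println(opts.TypeUrl, opts.ConfigPath)
}
```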
54 changes: 27 additions & 27 deletions src/runtime/containerd-shim-v2/metrics.go
@@ -8,7 +8,7 @@ package containerdshim
 import (
 	"context"
 
-	"github.com/containerd/cgroups"
+	cgroupsv1 "github.com/containerd/cgroups/stats/v1"
 	"github.com/containerd/typeurl"
 
 	google_protobuf "github.com/gogo/protobuf/types"
@@ -31,11 +31,11 @@ func marshalMetrics(ctx context.Context, s *service, containerID string) (*google_protobuf.Any, error) {
 	return data, nil
 }
 
-func statsToMetrics(stats *vc.ContainerStats) *cgroups.Metrics {
-	metrics := &cgroups.Metrics{}
+func statsToMetrics(stats *vc.ContainerStats) *cgroupsv1.Metrics {
+	metrics := &cgroupsv1.Metrics{}
 
 	if stats.CgroupStats != nil {
-		metrics = &cgroups.Metrics{
+		metrics = &cgroupsv1.Metrics{
 			Hugetlb: setHugetlbStats(stats.CgroupStats.HugetlbStats),
 			Pids:    setPidsStats(stats.CgroupStats.PidsStats),
 			CPU:     setCPUStats(stats.CgroupStats.CPUStats),
@@ -49,12 +49,12 @@ func statsToMetrics(stats *vc.ContainerStats) *cgroups.Metrics {
 	return metrics
 }
 
-func setHugetlbStats(vcHugetlb map[string]vc.HugetlbStats) []*cgroups.HugetlbStat {
-	var hugetlbStats []*cgroups.HugetlbStat
+func setHugetlbStats(vcHugetlb map[string]vc.HugetlbStats) []*cgroupsv1.HugetlbStat {
+	var hugetlbStats []*cgroupsv1.HugetlbStat
 	for _, v := range vcHugetlb {
 		hugetlbStats = append(
 			hugetlbStats,
-			&cgroups.HugetlbStat{
+			&cgroupsv1.HugetlbStat{
 				Usage:   v.Usage,
 				Max:     v.MaxUsage,
 				Failcnt: v.Failcnt,
@@ -64,28 +64,28 @@ func setHugetlbStats(vcHugetlb map[string]vc.HugetlbStats) []*cgroups.HugetlbStat {
 	return hugetlbStats
 }
 
-func setPidsStats(vcPids vc.PidsStats) *cgroups.PidsStat {
-	pidsStats := &cgroups.PidsStat{
+func setPidsStats(vcPids vc.PidsStats) *cgroupsv1.PidsStat {
+	pidsStats := &cgroupsv1.PidsStat{
 		Current: vcPids.Current,
 		Limit:   vcPids.Limit,
 	}
 
 	return pidsStats
 }
 
-func setCPUStats(vcCPU vc.CPUStats) *cgroups.CPUStat {
+func setCPUStats(vcCPU vc.CPUStats) *cgroupsv1.CPUStat {
 
 	var perCPU []uint64
 	perCPU = append(perCPU, vcCPU.CPUUsage.PercpuUsage...)
 
-	cpuStats := &cgroups.CPUStat{
-		Usage: &cgroups.CPUUsage{
+	cpuStats := &cgroupsv1.CPUStat{
+		Usage: &cgroupsv1.CPUUsage{
 			Total:  vcCPU.CPUUsage.TotalUsage,
 			Kernel: vcCPU.CPUUsage.UsageInKernelmode,
 			User:   vcCPU.CPUUsage.UsageInUsermode,
 			PerCPU: perCPU,
 		},
-		Throttling: &cgroups.Throttle{
+		Throttling: &cgroupsv1.Throttle{
 			Periods:          vcCPU.ThrottlingData.Periods,
 			ThrottledPeriods: vcCPU.ThrottlingData.ThrottledPeriods,
 			ThrottledTime:    vcCPU.ThrottlingData.ThrottledTime,
@@ -95,27 +95,27 @@ func setCPUStats(vcCPU vc.CPUStats) *cgroups.CPUStat {
 	return cpuStats
 }
 
-func setMemoryStats(vcMemory vc.MemoryStats) *cgroups.MemoryStat {
-	memoryStats := &cgroups.MemoryStat{
-		Usage: &cgroups.MemoryEntry{
+func setMemoryStats(vcMemory vc.MemoryStats) *cgroupsv1.MemoryStat {
+	memoryStats := &cgroupsv1.MemoryStat{
+		Usage: &cgroupsv1.MemoryEntry{
 			Limit:   vcMemory.Usage.Limit,
 			Usage:   vcMemory.Usage.Usage,
 			Max:     vcMemory.Usage.MaxUsage,
 			Failcnt: vcMemory.Usage.Failcnt,
 		},
-		Swap: &cgroups.MemoryEntry{
+		Swap: &cgroupsv1.MemoryEntry{
 			Limit:   vcMemory.SwapUsage.Limit,
 			Usage:   vcMemory.SwapUsage.Usage,
 			Max:     vcMemory.SwapUsage.MaxUsage,
 			Failcnt: vcMemory.SwapUsage.Failcnt,
 		},
-		Kernel: &cgroups.MemoryEntry{
+		Kernel: &cgroupsv1.MemoryEntry{
 			Limit:   vcMemory.KernelUsage.Limit,
 			Usage:   vcMemory.KernelUsage.Usage,
 			Max:     vcMemory.KernelUsage.MaxUsage,
 			Failcnt: vcMemory.KernelUsage.Failcnt,
 		},
-		KernelTCP: &cgroups.MemoryEntry{
+		KernelTCP: &cgroupsv1.MemoryEntry{
 			Limit: vcMemory.KernelTCPUsage.Limit,
 			Usage: vcMemory.KernelTCPUsage.Usage,
 			Max:   vcMemory.KernelTCPUsage.MaxUsage,
@@ -145,8 +145,8 @@ func setMemoryStats(vcMemory vc.MemoryStats) *cgroups.MemoryStat {
 	return memoryStats
 }
 
-func setBlkioStats(vcBlkio vc.BlkioStats) *cgroups.BlkIOStat {
-	blkioStats := &cgroups.BlkIOStat{
+func setBlkioStats(vcBlkio vc.BlkioStats) *cgroupsv1.BlkIOStat {
+	blkioStats := &cgroupsv1.BlkIOStat{
 		IoServiceBytesRecursive: copyBlkio(vcBlkio.IoServiceBytesRecursive),
 		IoServicedRecursive:     copyBlkio(vcBlkio.IoServicedRecursive),
 		IoQueuedRecursive:       copyBlkio(vcBlkio.IoQueuedRecursive),
@@ -160,10 +160,10 @@ func setBlkioStats(vcBlkio vc.BlkioStats) *cgroups.BlkIOStat {
 	return blkioStats
 }
 
-func copyBlkio(s []vc.BlkioStatEntry) []*cgroups.BlkIOEntry {
-	ret := make([]*cgroups.BlkIOEntry, len(s))
+func copyBlkio(s []vc.BlkioStatEntry) []*cgroupsv1.BlkIOEntry {
+	ret := make([]*cgroupsv1.BlkIOEntry, len(s))
 	for i, v := range s {
-		ret[i] = &cgroups.BlkIOEntry{
+		ret[i] = &cgroupsv1.BlkIOEntry{
 			Op:    v.Op,
 			Major: v.Major,
 			Minor: v.Minor,
@@ -174,10 +174,10 @@ func copyBlkio(s []vc.BlkioStatEntry) []*cgroups.BlkIOEntry {
 	return ret
 }
 
-func setNetworkStats(vcNetwork []*vc.NetworkStats) []*cgroups.NetworkStat {
-	networkStats := make([]*cgroups.NetworkStat, len(vcNetwork))
+func setNetworkStats(vcNetwork []*vc.NetworkStats) []*cgroupsv1.NetworkStat {
+	networkStats := make([]*cgroupsv1.NetworkStat, len(vcNetwork))
 	for i, v := range vcNetwork {
-		networkStats[i] = &cgroups.NetworkStat{
+		networkStats[i] = &cgroupsv1.NetworkStat{
 			Name:      v.Name,
 			RxBytes:   v.RxBytes,
 			RxPackets: v.RxPackets,
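The mechanical rename above tracks an upstream reorganization: the cgroup v1 metrics types that used to live at the root of github.com/containerd/cgroups moved into its stats/v1 package. A minimal sketch of the aliased import, assuming a containerd/cgroups version that ships stats/v1:

```go
package main

import (
	"fmt"

	cgroupsv1 "github.com/containerd/cgroups/stats/v1"
)

func main() {
	// Same Metrics/PidsStat shapes as before the move; only the
	// import path (and hence the package qualifier) changed.
	m := &cgroupsv1.Metrics{
		Pids: &cgroupsv1.PidsStat{Current: 2, Limit: 64},
	}
	fmt.Println(m.Pids.Current, m.Pids.Limit)
}
```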
4 changes: 2 additions & 2 deletions src/runtime/containerd-shim-v2/metrics_test.go
@@ -10,7 +10,7 @@ import (
 	"context"
 	"testing"
 
-	"github.com/containerd/cgroups"
+	"github.com/containerd/cgroups/stats/v1"
 	vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/vcmock"
 	"github.com/stretchr/testify/assert"
@@ -29,7 +29,7 @@ func TestStatNetworkMetric(t *testing.T) {
 		},
 	}
 
-	expectedNetwork := []*cgroups.NetworkStat{
+	expectedNetwork := []*v1.NetworkStat{
 		{
 			Name:    "test-network",
 			RxBytes: 10,
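Note the unaliased import here: the stats/v1 directory declares package v1, so the test refers to the type as v1.NetworkStat. A tiny sketch of that resolution, under the same module-version assumption as above:

```go
package main

import (
	"fmt"

	// The directory is .../stats/v1 and its package clause is
	// "package v1", so the qualifier below is v1, not "stats".
	"github.com/containerd/cgroups/stats/v1"
)

func main() {
	n := v1.NetworkStat{Name: "test-network", RxBytes: 10}
	fmt.Println(n.Name, n.RxBytes)
}
```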
41 changes: 27 additions & 14 deletions src/runtime/containerd-shim-v2/service.go
@@ -68,7 +68,7 @@ var shimLog = logrus.WithFields(logrus.Fields{
 })
 
 // New returns a new shim service that can be used via GRPC
-func New(ctx context.Context, id string, publisher events.Publisher) (cdshim.Shim, error) {
+func New(ctx context.Context, id string, publisher cdshim.Publisher, shutdown func()) (cdshim.Shim, error) {
 	shimLog = shimLog.WithFields(logrus.Fields{
 		"sandbox": id,
 		"pid":     os.Getpid(),
@@ -84,16 +84,14 @@ func New(ctx context.Context, id string, publisher events.Publisher) (cdshim.Shim, error) {
 	vci.SetLogger(ctx, shimLog)
 	katautils.SetLogger(ctx, shimLog, shimLog.Logger.Level)
 
-	ctx, cancel := context.WithCancel(ctx)
-
 	s := &service{
 		id:         id,
 		pid:        uint32(os.Getpid()),
 		ctx:        ctx,
 		containers: make(map[string]*container),
 		events:     make(chan interface{}, chSize),
 		ec:         make(chan exit, bufferSize),
-		cancel:     cancel,
+		cancel:     shutdown,
 	}
 
 	go s.processExits()
@@ -138,7 +136,7 @@ type service struct {
 	id string
 }
 
-func newCommand(ctx context.Context, containerdBinary, id, containerdAddress string) (*sysexec.Cmd, error) {
+func newCommand(ctx context.Context, id, containerdBinary, containerdAddress string) (*sysexec.Cmd, error) {
 	ns, err := namespaces.NamespaceRequired(ctx)
 	if err != nil {
 		return nil, err
@@ -176,13 +174,13 @@ func newCommand(ctx context.Context, containerdBinary, id, containerdAddress string) (*sysexec.Cmd, error) {
 
 // StartShim willl start a kata shimv2 daemon which will implemented the
 // ShimV2 APIs such as create/start/update etc containers.
-func (s *service) StartShim(ctx context.Context, id, containerdBinary, containerdAddress string) (string, error) {
+func (s *service) StartShim(ctx context.Context, opts cdshim.StartOpts) (_ string, retErr error) {
 	bundlePath, err := os.Getwd()
 	if err != nil {
 		return "", err
 	}
 
-	address, err := getAddress(ctx, bundlePath, id)
+	address, err := getAddress(ctx, bundlePath, opts.Address, opts.ID)
 	if err != nil {
 		return "", err
 	}
@@ -193,34 +191,49 @@ func (s *service) StartShim(ctx context.Context, id, containerdBinary, containerdAddress string) (string, error) {
 		return address, nil
 	}
 
-	cmd, err := newCommand(ctx, containerdBinary, id, containerdAddress)
+	cmd, err := newCommand(ctx, opts.ID, opts.ContainerdBinary, opts.Address)
 	if err != nil {
 		return "", err
 	}
 
-	address, err = cdshim.SocketAddress(ctx, id)
+	address, err = cdshim.SocketAddress(ctx, opts.Address, opts.ID)
 	if err != nil {
 		return "", err
 	}
 
 	socket, err := cdshim.NewSocket(address)
 
 	if err != nil {
-		return "", err
+		if !cdshim.SocketEaddrinuse(err) {
+			return "", err
+		}
+		if err := cdshim.RemoveSocket(address); err != nil {
+			return "", errors.Wrap(err, "remove already used socket")
+		}
+		if socket, err = cdshim.NewSocket(address); err != nil {
+			return "", err
+		}
 	}
 	defer socket.Close()
 
+	defer func() {
+		if retErr != nil {
+			socket.Close()
+			_ = cdshim.RemoveSocket(address)
+		}
+	}()
+
 	f, err := socket.File()
 	if err != nil {
 		return "", err
 	}
 	defer f.Close()
 
 	cmd.ExtraFiles = append(cmd.ExtraFiles, f)
 
 	if err := cmd.Start(); err != nil {
 		return "", err
 	}
 	defer func() {
-		if err != nil {
+		if retErr != nil {
 			cmd.Process.Kill()
 		}
 	}()
@@ -290,7 +303,7 @@ func getTopic(e interface{}) string {
 
 func trace(ctx context.Context, name string) (otelTrace.Span, context.Context) {
 	if ctx == nil {
-		logrus.WithField("type", "bug").Error("trace called before context set")
+		logrus.WithFields(logrus.Fields{"type": "bug", "name": name}).Error("called before context set")
 		ctx = context.Background()
 	}
 	tracer := otel.Tracer("kata")
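These signature changes follow containerd's reworked shim v2 interface: StartShim now receives a single StartOpts struct instead of positional arguments, New is handed a Publisher plus a shutdown callback (so the shim no longer creates its own cancellable context), and the named retErr return lets the deferred socket and process cleanups fire only on failure. A compressed sketch of that surface, paraphrased from the diff rather than copied from containerd, with simplified types:

```go
package main

import (
	"context"
	"fmt"
)

// StartOpts mirrors the bundled arguments the new StartShim receives
// (field set inferred from the diff; containerd's real struct may
// carry more fields, e.g. a TTRPC address).
type StartOpts struct {
	ID               string
	ContainerdBinary string
	Address          string
}

type service struct {
	id     string
	cancel func() // now the shutdown callback handed in by containerd
}

// New no longer derives its own cancellable context; containerd owns
// the shim's lifetime through the shutdown callback.
func New(ctx context.Context, id string, shutdown func()) *service {
	return &service{id: id, cancel: shutdown}
}

// startShim shows the named-return pattern: the deferred cleanup runs
// only when retErr ends up non-nil.
func startShim(opts StartOpts) (_ string, retErr error) {
	defer func() {
		if retErr != nil {
			fmt.Println("cleaning up socket for", opts.ID)
		}
	}()
	if opts.Address == "" {
		return "", fmt.Errorf("no containerd address")
	}
	return "unix:///run/shim.sock", nil
}

func main() {
	s := New(context.Background(), "sandbox0", func() {})
	addr, err := startShim(StartOpts{ID: s.id})
	fmt.Println(addr, err)
}
```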
4 changes: 2 additions & 2 deletions src/runtime/containerd-shim-v2/utils.go
@@ -78,7 +78,7 @@ func validBundle(containerID, bundlePath string) (string, error) {
 	return resolved, nil
 }
 
-func getAddress(ctx context.Context, bundlePath, id string) (string, error) {
+func getAddress(ctx context.Context, bundlePath, address, id string) (string, error) {
 	var err error
 
 	// Checks the MUST and MUST NOT from OCI runtime specification
@@ -101,7 +101,7 @@ func getAddress(ctx context.Context, bundlePath, id string) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	address, err := cdshim.SocketAddress(ctx, sandboxID)
+	address, err := cdshim.SocketAddress(ctx, address, sandboxID)
 	if err != nil {
 		return "", err
 	}
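SocketAddress gained the containerd address parameter because the shim socket path is now derived from containerd's own address as well as the namespace and sandbox ID, so shims started by different containerd instances cannot collide. A hypothetical sketch of that derivation (paraphrased for illustration; not containerd's exact code or paths):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"path/filepath"
)

// socketAddress hashes (containerd address, namespace, id) into a
// fixed-length socket path, so every containerd instance gets its
// own shim socket even for identically named sandboxes.
func socketAddress(containerdAddress, namespace, id string) string {
	sum := sha256.Sum256([]byte(filepath.Join(containerdAddress, namespace, id)))
	return "unix://" + filepath.Join("/run/containerd/s", fmt.Sprintf("%x", sum))
}

func main() {
	a := socketAddress("/run/containerd/containerd.sock", "k8s.io", "sandbox0")
	b := socketAddress("/run/containerd-2/containerd.sock", "k8s.io", "sandbox0")
	fmt.Println(a != b) // true: different daemons, different sockets
}
```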
3 changes: 3 additions & 0 deletions src/runtime/go.sum
@@ -120,6 +120,7 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0 h1:WW2B2uxx9KWF6bGlHqhm8Okiafwwx7Y2kcpn8lCpjgo=
 github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
@@ -585,6 +586,7 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mrunalp/fileutils v0.5.0 h1:NKzVxiH7eSk+OQ4M+ZYW1K6h27RUV3MI6NUTsHhU6Z4=
 github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
 github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
@@ -743,6 +745,7 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=

Some generated files (the remaining vendored dependency updates) are not rendered by default.
