From e0d1e745549fe8507b60c777738295ca2599125a Mon Sep 17 00:00:00 2001
From: ChengyuZhu6
Date: Sun, 28 Apr 2024 15:41:23 +0800
Subject: [PATCH] snapshot: fix error on proxy driver when switching between snapshotters

During image pull, the containerd client calls the Prepare API with the
label "containerd.io/snapshot.ref". When an image has already been
pulled by another snapshotter, containerd does not send this label to
nydus snapshotter, so nydus snapshotter never prepares the ro layers
itself. As a result, while reading the labels of the parent snapshots
(ro layers) during preparation of the active snapshot (rw layer), nydus
snapshotter cannot find the label
"containerd.io/snapshot/nydus-proxy-mode", and the guest-pull (proxy)
logic is skipped. Therefore, when the snapshotter driver is configured
in proxy mode, nydus snapshotter must fall back to its proxy mount
logic (see the sketch appended after the diff).

Fixes: #592

Signed-off-by: ChengyuZhu6
---
 misc/snapshotter/Dockerfile | 10 +++++-
 snapshot/process.go         | 10 ++++++
 snapshot/snapshot.go        | 69 ++++++++++++++++++++++++++++++++++++-
 3 files changed, 87 insertions(+), 2 deletions(-)

diff --git a/misc/snapshotter/Dockerfile b/misc/snapshotter/Dockerfile
index 4517de8d09..47f2e75c7f 100644
--- a/misc/snapshotter/Dockerfile
+++ b/misc/snapshotter/Dockerfile
@@ -2,7 +2,9 @@ FROM alpine:3.17.0 AS base
 
 FROM base AS sourcer
 ARG NYDUS_VER=v2.2.4
-
+ENV https_proxy=http://proxy.cd.intel.com:912
+ENV http_proxy=http://proxy.cd.intel.com:912
+ENV no_proxy="10.239.0.0/16,172.168.0.0/16,127.0.0.0/8,localhost,10.0.0.0/8,192.168.0.0/16,192.168.14.0/24,.intel.com,100.64.0.0/10,172.16.0.0/12,10.112.0.0/16"
 RUN apk add --no-cache curl && \
     apk add --no-cache --upgrade grep && \
     curl -OL https://github.com/dragonflyoss/nydus/releases/download/$NYDUS_VER/nydus-static-$NYDUS_VER-linux-amd64.tgz && \
@@ -13,6 +15,9 @@ RUN apk add --no-cache curl && \
     && rm -rf /nydus-overlayfs
 
 FROM base AS kubectl-sourcer
+ENV https_proxy=http://proxy.cd.intel.com:912
+ENV http_proxy=http://proxy.cd.intel.com:912
+ENV no_proxy="10.239.0.0/16,172.168.0.0/16,127.0.0.0/8,localhost,10.0.0.0/8,192.168.0.0/16,192.168.14.0/24,.intel.com,100.64.0.0/10,172.16.0.0/12,10.112.0.0/16"
 RUN apk add --no-cache curl && \
     ARCH=$(uname -m) && \
     if [ "${ARCH}" = "x86_64" ]; then ARCH=amd64; fi && \
@@ -27,6 +32,9 @@ ARG BINARY_DESTINATION=${DESTINATION}/usr/local/bin
 ARG SCRIPT_DESTINATION=${DESTINATION}/opt/nydus
 
 WORKDIR /root/
+ENV https_proxy=http://proxy.cd.intel.com:912
+ENV http_proxy=http://proxy.cd.intel.com:912
+ENV no_proxy="10.239.0.0/16,172.168.0.0/16,127.0.0.0/8,localhost,10.0.0.0/8,192.168.0.0/16,192.168.14.0/24,.intel.com,100.64.0.0/10,172.16.0.0/12,10.112.0.0/16"
 RUN apk add --no-cache libc6-compat bash
 
 VOLUME /var/lib/containerd-nydus /run/containerd-nydus
diff --git a/snapshot/process.go b/snapshot/process.go
index 9f0da66f2d..22a9c64e76 100644
--- a/snapshot/process.go
+++ b/snapshot/process.go
@@ -57,6 +57,12 @@ func chooseProcessor(ctx context.Context, logger *logrus.Entry,
 		}
 	}
 
+	proxyHandler := func() (bool, []mount.Mount, error) {
+		logger.Infof("Prepare snapshot %s with the nydus proxy handler", key)
+		mounts, err := sn.mountProxy(ctx, s)
+		return false, mounts, err
+	}
+
 	// OCI image is also marked with "containerd.io/snapshot.ref" by Containerd
 	target, isRoLayer := labels[label.TargetSnapshotRef]
 
@@ -118,6 +124,10 @@ func chooseProcessor(ctx context.Context, logger *logrus.Entry,
 
 			// It should not be committed during this Prepare() operation.
 			pID, pInfo, _, pErr := snapshot.GetSnapshotInfo(ctx, sn.ms, parent)
+			if checkErr := checkLabelsWithDriver(pInfo.Labels); checkErr != nil {
+				logger.Infof("proxy label not found in parent snapshot labels %v, falling back to the proxy handler: %v", pInfo.Labels, checkErr)
+				handler = proxyHandler
+			}
 			if pErr == nil && label.IsNydusProxyMode(pInfo.Labels) {
 				logger.Infof("Prepare active snapshot %s in proxy mode", key)
 				handler = remoteHandler(pID, pInfo.Labels)
diff --git a/snapshot/snapshot.go b/snapshot/snapshot.go
index f829d3132c..7eb76f2e61 100644
--- a/snapshot/snapshot.go
+++ b/snapshot/snapshot.go
@@ -22,9 +22,9 @@ import (
 	"github.com/containerd/containerd/snapshots"
 	"github.com/containerd/containerd/snapshots/storage"
 	"github.com/containerd/continuity/fs"
-
 	"github.com/containerd/nydus-snapshotter/config"
 	"github.com/containerd/nydus-snapshotter/config/daemonconfig"
+	rafs "github.com/containerd/nydus-snapshotter/pkg/rafs"
 	"github.com/containerd/nydus-snapshotter/pkg/cache"
 	"github.com/containerd/nydus-snapshotter/pkg/cgroup"
 
@@ -432,6 +432,11 @@ func (o *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, er
 		return nil, errors.Wrapf(err, "get snapshot %s", key)
 	}
 
+	if checkErr := checkLabelsWithDriver(info.Labels); checkErr != nil {
+		log.L.Debug("[Mounts] proxy label not found, falling back to the proxy mount")
+		return o.mountProxy(ctx, *snap)
+	}
+
 	if needRemoteMounts {
 		return o.mountRemote(ctx, info.Labels, *snap, metaSnapshotID, key)
 	}
@@ -838,6 +843,53 @@ func overlayMount(options []string) []mount.Mount {
 	}
 }
 
+// mountProxy handles a proxy mount for a snapshot that was prepared by another snapshotter, mainly used for the pause image in containerd.
+func (o *snapshotter) mountProxy(ctx context.Context, s storage.Snapshot) ([]mount.Mount, error) {
+	var overlayOptions []string
+	if s.Kind == snapshots.KindActive {
+		overlayOptions = append(overlayOptions,
+			fmt.Sprintf("workdir=%s", o.workPath(s.ID)),
+			fmt.Sprintf("upperdir=%s", o.upperPath(s.ID)),
+		)
+	}
+
+	log.G(ctx).Debugf("len(s.ParentIDs) = %v", len(s.ParentIDs))
+	parentPaths := make([]string, 0, len(s.ParentIDs)+1)
+	if len(s.ParentIDs) == 0 {
+		parentPaths = append(parentPaths, config.GetSnapshotsRootDir())
+	} else {
+		for _, id := range s.ParentIDs {
+			parentPaths = append(parentPaths, o.upperPath(id))
+		}
+	}
+
+	lowerDirOption := fmt.Sprintf("lowerdir=%s", strings.Join(parentPaths, ":"))
+	overlayOptions = append(overlayOptions, lowerDirOption)
+	log.G(ctx).Infof("proxy mount options %v", overlayOptions)
+	options, err := o.mountWithProxyVolume(rafs.Rafs{
+		FsDriver:    config.GetFsDriver(),
+		ImageID:     "",
+		SnapshotID:  "",
+		SnapshotDir: "",
+		Annotations: make(map[string]string),
+	})
+	if err != nil {
+		return []mount.Mount{}, errors.Wrapf(err, "create kata volume for proxy")
+	}
+	if len(options) > 0 {
+		overlayOptions = append(overlayOptions, options...)
+	}
+	log.G(ctx).Debugf("fuse.nydus-overlayfs mount options %v", overlayOptions)
+	mounts := []mount.Mount{
+		{
+			Type:    "fuse.nydus-overlayfs",
+			Source:  "overlay",
+			Options: overlayOptions,
+		},
+	}
+	return mounts, nil
+}
+
 // `s` is the upmost snapshot and `id` refers to the nydus meta snapshot
 // `s` and `id` can represent a different layer, it's useful when View an image
 func (o *snapshotter) mountRemote(ctx context.Context, labels map[string]string, s storage.Snapshot, id, key string) ([]mount.Mount, error) {
@@ -1011,3 +1063,18 @@ func (o *snapshotter) snapshotRoot() string {
 func (o *snapshotter) snapshotDir(id string) string {
 	return filepath.Join(o.snapshotRoot(), id)
 }
+
+func checkLabelsWithDriver(labels map[string]string) error {
+	isProxyDriver := config.GetFsDriver() == config.FsDriverProxy
+	isProxyLabel := label.IsNydusProxyMode(labels)
+	_, isProxyImage := labels[label.CRIImageRef]
+	log.G(context.Background()).Debugf("isProxyDriver = %t, isProxyLabel = %t, isProxyImage = %t", isProxyDriver, isProxyLabel, isProxyImage)
+	switch {
+	case isProxyDriver && isProxyImage:
+		return nil
+	case isProxyDriver != isProxyLabel:
+		return errors.Errorf("snapshot labels %v do not match the configured fs driver %q", labels, config.GetFsDriver())
+	default:
+		return nil
+	}
+}
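
The sketch below is a stand-alone illustration of the decision that checkLabelsWithDriver implements and that both chooseProcessor and Mounts rely on for the fallback: switch to mountProxy only when the proxy fs driver is configured but the snapshot labels were written by another snapshotter. The constant values and the presence-based label checks are assumptions made for illustration; the real code reads them from the config and label packages of nydus-snapshotter.

// Hypothetical, stand-alone sketch of the label/driver check added by this
// patch. The constant values below are assumptions; the real code reads them
// from the config and label packages.
package main

import "fmt"

const (
	fsDriverProxy       = "proxy"                                   // assumed value of config.FsDriverProxy
	labelNydusProxyMode = "containerd.io/snapshot/nydus-proxy-mode" // label quoted in the commit message
	labelCRIImageRef    = "containerd.io/snapshot/cri.image-ref"    // assumed value of label.CRIImageRef
)

// needsProxyFallback mirrors checkLabelsWithDriver: it reports whether the
// snapshotter should fall back to mountProxy, i.e. the proxy fs driver is
// configured but the snapshot labels were written by another snapshotter and
// carry no proxy-mode marker.
func needsProxyFallback(fsDriver string, labels map[string]string) bool {
	isProxyDriver := fsDriver == fsDriverProxy
	_, isProxyLabel := labels[labelNydusProxyMode]
	_, isProxyImage := labels[labelCRIImageRef]

	// Labels already consistent with the proxy driver: nothing to do.
	if isProxyDriver && isProxyImage {
		return false
	}
	// Driver and labels disagree: chooseProcessor/Mounts switch to mountProxy.
	return isProxyDriver != isProxyLabel
}

func main() {
	// Layers prepared by another snapshotter (e.g. the pause image): fall back.
	fmt.Println(needsProxyFallback("proxy", map[string]string{}))                            // true
	// Layers prepared by nydus snapshotter itself in proxy mode: no fallback.
	fmt.Println(needsProxyFallback("proxy", map[string]string{labelNydusProxyMode: "true"})) // false
	// A non-proxy driver such as fusedev: the check is a no-op.
	fmt.Println(needsProxyFallback("fusedev", map[string]string{}))                          // false
}

Running this file prints true, false, false for the three cases, matching the branches of the switch in checkLabelsWithDriver.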
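
For reference, this second sketch shows roughly what the overlay options assembled by mountProxy look like for an active snapshot with a single parent. The snapshots root and the <id>/fs, <id>/work layout are assumptions mirroring the usual containerd snapshotter directory scheme, and the extra volume option appended from mountWithProxyVolume is omitted because its encoding is not part of this patch.

// Rough, hypothetical rendering of the overlay options mountProxy builds for
// an active snapshot "5" whose only parent is snapshot "4". The directory
// layout below is an assumption; the real paths come from o.workPath/o.upperPath.
package main

import (
	"fmt"
	"strings"
)

func main() {
	root := "/var/lib/containerd-nydus/snapshots" // assumed snapshots root

	upperPath := func(id string) string { return root + "/" + id + "/fs" }
	workPath := func(id string) string { return root + "/" + id + "/work" }

	// Active snapshot: writable upperdir/workdir plus the parent as lowerdir.
	overlayOptions := []string{
		fmt.Sprintf("workdir=%s", workPath("5")),
		fmt.Sprintf("upperdir=%s", upperPath("5")),
		fmt.Sprintf("lowerdir=%s", strings.Join([]string{upperPath("4")}, ":")),
	}

	// mountProxy wraps these into a mount of Type "fuse.nydus-overlayfs"
	// with Source "overlay" (plus the kata volume option, omitted here).
	fmt.Println(overlayOptions)
}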