diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 66b3ee57d..c8ee94a99 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -13,7 +13,6 @@ jobs: strategy: matrix: go-version: [1.16.x, 1.17.x] - storage-type: [btrfs, overlay] privilege-level: [priv, unpriv] steps: - uses: actions/checkout@v2 @@ -24,7 +23,7 @@ jobs: - name: install dependencies run: | sudo apt-get update - sudo apt-get install -yy lxc-utils lxc-dev libacl1-dev jq libcap-dev libbtrfs-dev libseccomp-dev libpam-dev bats parallel + sudo apt-get install -yy lxc-utils lxc-dev libacl1-dev jq libcap-dev libseccomp-dev libpam-dev bats parallel GO111MODULE=off go get github.com/opencontainers/umoci/cmd/umoci sudo cp ~/go/bin/umoci /usr/bin curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin @@ -32,7 +31,7 @@ jobs: (cd /tmp && git clone https://github.com/AgentD/squashfs-tools-ng && cd squashfs-tools-ng && ./autogen.sh && ./configure --prefix=/usr && make -j2 && sudo make -j2 install && sudo ldconfig -v) (cd /tmp && git clone https://github.com/anuvu/squashfs && cd squashfs && make && sudo cp squashtool/squashtool /usr/bin) - run: | - make check STORAGE_TYPE=${{ matrix.storage-type }} PRIVILEGE_LEVEL=${{ matrix.privilege-level }} + make check PRIVILEGE_LEVEL=${{ matrix.privilege-level }} - if: github.event_name == 'release' && github.event.action == 'published' name: Publish artifacts on releases uses: Roang-zero1/github-upload-release-artifacts-action@master diff --git a/Makefile b/Makefile index ec5c9cb4a..b8cef0127 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,7 @@ VERSION_FULL=$(if $(shell git status --porcelain --untracked-files=no),$(VERSION LXC_VERSION?=$(shell pkg-config --modversion lxc) -BUILD_TAGS = exclude_graphdriver_devicemapper containers_image_openpgp osusergo netgo static_build +BUILD_TAGS = exclude_graphdriver_btrfs exclude_graphdriver_devicemapper containers_image_openpgp osusergo netgo static_build STACKER_OPTS=--oci-dir=.build/oci --roots-dir=.build/roots --stacker-dir=.build/stacker --storage-type=overlay @@ -42,16 +42,13 @@ lint: cmd/lxc-wrapper/lxc-wrapper $(GO_SRC) TEST?=$(patsubst test/%.bats,%,$(wildcard test/*.bats)) PRIVILEGE_LEVEL?= -STORAGE_TYPE?= # make check TEST=basic will run only the basic test # make check PRIVILEGE_LEVEL=unpriv will run only unprivileged tests -# make check STORAGE_TYPE=btrfs will run only btrfs tests .PHONY: check check: stacker lint sudo -E PATH="$$PATH" LXC_BRANCH="$(LXC_BRANCH)" LXC_CLONE_URL="$(LXC_CLONE_URL)" ./test/main.py \ $(shell [ -z $(PRIVILEGE_LEVEL) ] || echo --privilege-level=$(PRIVILEGE_LEVEL)) \ - $(shell [ -z $(STORAGE_TYPE) ] || echo --storage-type=$(STORAGE_TYPE)) \ $(patsubst %,test/%.bats,$(TEST)) .PHONY: vendorup diff --git a/btrfs/btrfs.go b/btrfs/btrfs.go deleted file mode 100644 index d685c4ff4..000000000 --- a/btrfs/btrfs.go +++ /dev/null @@ -1,561 +0,0 @@ -package btrfs - -import ( - "fmt" - "io/ioutil" - "math/rand" - "os" - "os/exec" - "os/user" - "path" - "path/filepath" - "sort" - "strconv" - "strings" - "syscall" - "time" - - "github.com/freddierice/go-losetup" - "github.com/lxc/lxd/shared" - "github.com/opencontainers/umoci" - "github.com/opencontainers/umoci/oci/casext" - "github.com/pkg/errors" - "github.com/project-stacker/stacker/log" - "github.com/project-stacker/stacker/mount" - "github.com/project-stacker/stacker/types" - "golang.org/x/sys/unix" -) - -func DetectBtrfs(p string) (bool, error) { - fs := syscall.Statfs_t{} - - err 
:= syscall.Statfs(p, &fs) - if err != nil { - return false, errors.Wrapf(err, "couldn't stat to detect btrfs") - } - - /* btrfs superblock magic number */ - return fs.Type == 0x9123683E, nil -} - -func NewLoopback(c types.StackerConfig) (types.Storage, error) { - currentUser, err := user.Current() - if err != nil { - return nil, err - } - - if err := os.MkdirAll(c.StackerDir, 0755); err != nil { - return nil, err - } - - loopback := path.Join(c.StackerDir, "btrfs.loop") - size := 100 * 1024 * 1024 * 1024 - uid, err := strconv.Atoi(currentUser.Uid) - if err != nil { - return nil, err - } - - gid, err := strconv.Atoi(currentUser.Gid) - if err != nil { - return nil, err - } - - err = MakeLoopbackBtrfs(loopback, int64(size), uid, gid, c.RootFSDir) - if err != nil { - return nil, err - } - return &btrfs{c: c, needsUmount: true}, nil -} - -func NewExisting(c types.StackerConfig) types.Storage { - return &btrfs{c: c} -} - -type btrfs struct { - c types.StackerConfig - needsUmount bool -} - -func (b *btrfs) Name() string { - return "btrfs" -} - -func (b *btrfs) sync(subvol string) error { - p := path.Join(b.c.RootFSDir, subvol) - fd, err := unix.Open(p, unix.O_RDONLY, 0) - if err != nil { - return errors.Wrapf(err, "couldn't open %s to sync", p) - } - defer unix.Close(fd) - return errors.Wrapf(unix.Syncfs(fd), "couldn't sync fs at %s", subvol) -} - -func (b *btrfs) Create(source string) error { - output, err := exec.Command( - "btrfs", - "subvolume", - "create", - path.Join(b.c.RootFSDir, source)).CombinedOutput() - if err != nil { - return errors.Errorf("btrfs create: %s: %s", err, output) - } - - return nil -} - -func (b *btrfs) SetupEmptyRootfs(name string) error { - return errors.Wrapf(os.Mkdir(path.Join(b.c.RootFSDir, name, "rootfs"), 0755), "couldn't init empty rootfs") -} - -func (b *btrfs) Snapshot(source string, target string) error { - if err := b.sync(source); err != nil { - return err - } - - output, err := exec.Command( - "btrfs", - "subvolume", - "snapshot", - "-r", - path.Join(b.c.RootFSDir, source), - path.Join(b.c.RootFSDir, target)).CombinedOutput() - if err != nil { - return errors.Errorf("btrfs snapshot %s to %s: %s: %s", source, target, err, output) - } - - return nil -} - -func (b *btrfs) Restore(source string, target string) error { - output, err := exec.Command( - "btrfs", - "subvolume", - "snapshot", - path.Join(b.c.RootFSDir, source), - path.Join(b.c.RootFSDir, target)).CombinedOutput() - if err != nil { - return errors.Errorf("btrfs restore: %s: %s", err, output) - } - - // Since we create snapshots as readonly above, we must re-mark them - // writable here. 
- output, err = exec.Command( - "btrfs", - "property", - "set", - "-ts", - path.Join(b.c.RootFSDir, target), - "ro", - "false").CombinedOutput() - if err != nil { - return errors.Errorf("btrfs mark writable: %s: %s", err, output) - } - - return nil -} - -func (b *btrfs) UpdateFSMetadata(name string, newPath casext.DescriptorPath) error { - rootPath := path.Join(b.c.RootFSDir, name) - newName := strings.Replace(newPath.Descriptor().Digest.String(), ":", "_", 1) + ".mtree" - - infos, err := ioutil.ReadDir(rootPath) - if err != nil { - return err - } - - for _, fi := range infos { - if !strings.HasSuffix(fi.Name(), ".mtree") { - continue - } - - err = os.Rename(path.Join(rootPath, fi.Name()), path.Join(rootPath, newName)) - if err != nil { - return errors.Wrapf(err, "couldn't update mtree name") - } - } - - return umoci.WriteBundleMeta(rootPath, umoci.Meta{ - Version: umoci.MetaVersion, - From: newPath, - }) -} - -func (b *btrfs) Finalize(thing string) error { - if err := b.sync(thing); err != nil { - return err - } - - output, err := exec.Command( - "btrfs", - "property", - "set", - "-ts", - path.Join(b.c.RootFSDir, thing), - "ro", - "true").CombinedOutput() - if err != nil { - return errors.Errorf("btrfs mark readonly: %s: %s", err, output) - } - return nil -} - -// isBtrfsSubVolume returns true if the given Path is a btrfs subvolume else -// false. -func isBtrfsSubVolume(subvolPath string) (bool, error) { - fs := syscall.Stat_t{} - err := syscall.Lstat(subvolPath, &fs) - if err != nil { - return false, errors.Wrapf(err, "failed testing %s for subvol", subvolPath) - } - - // Check if BTRFS_FIRST_FREE_OBJECTID - if fs.Ino != 256 { - return false, nil - } - - // btrfs roots have the same inode as above, but they are not - // subvolumes (and we can't delete them) so exlcude the path if it is a - // mountpoint. - mountpoint, err := mount.IsMountpoint(subvolPath) - if err != nil { - return false, err - } - - if mountpoint { - return false, nil - } - - return true, nil -} - -func btrfsSubVolumesGet(path string) ([]string, error) { - result := []string{} - - if !strings.HasSuffix(path, "/") { - path = path + "/" - } - - // Unprivileged users can't get to fs internals - err := filepath.Walk(path, func(fpath string, fi os.FileInfo, err error) error { - // Skip walk errors - if err != nil { - return nil - } - - // Subvolumes can only be directories - if !fi.IsDir() { - return nil - } - - // Check if a btrfs subvolume - isSubvol, err := isBtrfsSubVolume(fpath) - if err != nil { - return err - } - - if isSubvol { - result = append(result, strings.TrimPrefix(fpath, path)) - } - - return nil - }) - if err != nil { - return nil, err - } - - return result, nil -} - -func btrfsSubVolumesDelete(root string) error { - subvols, err := btrfsSubVolumesGet(root) - if err != nil { - return err - } - - subvolsReversed := make([]string, len(subvols)) - copy(subvolsReversed, subvols) - - sort.Sort(sort.StringSlice(subvols)) - sort.Sort(sort.Reverse(sort.StringSlice(subvolsReversed))) - - for _, subvol := range subvols { - // Since we create snapshots as readonly above, we must re-mark them - // writable here before we can delete them. 
- output, err := exec.Command( - "btrfs", - "property", - "set", - "-ts", - path.Join(root, subvol), - "ro", - "false").CombinedOutput() - if err != nil { - return errors.Errorf("btrfs mark writable: %s: %s", err, output) - } - } - - for _, subvol := range subvolsReversed { - output, err := exec.Command( - "btrfs", - "subvolume", - "delete", - "-c", - path.Join(root, subvol)).CombinedOutput() - if err != nil { - return errors.Errorf("btrfs delete: %s: %s", err, output) - } - - err = os.RemoveAll(path.Join(root, subvol)) - if err != nil { - return errors.Wrapf(err, "failed to delete subvolume %s", subvol) - } - } - - return nil -} - -func (b *btrfs) Delete(source string) error { - return btrfsSubVolumesDelete(path.Join(b.c.RootFSDir, source)) -} - -func (b *btrfs) Detach() error { - if b.needsUmount { - // Need to use DETACH here because we still hold the rootfs .lock file. - err := syscall.Unmount(b.c.RootFSDir, syscall.MNT_DETACH) - err2 := os.RemoveAll(b.c.RootFSDir) - if err != nil { - return err - } - - if err2 != nil { - return err2 - } - } - - return nil -} - -func (b *btrfs) Exists(thing string) bool { - _, err := os.Stat(path.Join(b.c.RootFSDir, thing)) - return err == nil -} - -func (b *btrfs) TemporaryWritableSnapshot(source string) (string, func(), error) { - dir, err := ioutil.TempDir(b.c.RootFSDir, fmt.Sprintf("temp-snapshot-%s-", source)) - if err != nil { - return "", nil, errors.Wrapf(err, "couldn't create temporary snapshot dir for %s", source) - } - - err = os.RemoveAll(dir) - if err != nil { - return "", nil, errors.Wrapf(err, "couldn't remove tempdir for %s", source) - } - - dir = path.Base(dir) - - output, err := exec.Command( - "btrfs", - "subvolume", - "snapshot", - path.Join(b.c.RootFSDir, source), - path.Join(b.c.RootFSDir, dir)).CombinedOutput() - if err != nil { - return "", nil, errors.Errorf("temporary snapshot %s to %s: %s: %s", source, dir, err, string(output)) - } - - cleanup := func() { - err = b.Delete(dir) - if err != nil { - log.Infof("problem deleting temp subvolume %s: %v", dir, err) - return - } - err = os.RemoveAll(dir) - if err != nil { - log.Infof("problem deleting temp subvolume dir %s: %v", dir, err) - } - } - - return dir, cleanup, nil -} - -func (b *btrfs) Clean() error { - subvolErr := btrfsSubVolumesDelete(b.c.RootFSDir) - loopback := path.Join(b.c.StackerDir, "btrfs.loop") - - var umountErr error - _, err := os.Stat(loopback) - if err == nil { - // if we are inside a userns we can't unmount the loopback - // (probably because someone did `sudo stacker unpriv-setup`); - // they'll need to be root to unmount it as well. - if shared.RunningInUserNS() { - return errors.Errorf("can't fully clean btrfs from userns (try stacker clean ... as root)") - } - - // Need to use DETACH here because we still hold the rootfs .lock file. 
- umountErr = errors.Wrapf(syscall.Unmount(b.c.RootFSDir, syscall.MNT_DETACH), "unable to umount rootfs") - if err = os.RemoveAll(loopback); err != nil { - log.Infof("failed removing btrfs loopback file: %v", err) - } - } - if err = os.RemoveAll(b.c.RootFSDir); err != nil { - log.Infof("failed removing roots dir: %v", err) - } - if subvolErr != nil && umountErr != nil { - return errors.Errorf("both subvol delete and umount failed: %v, %v", subvolErr, umountErr) - } - - if subvolErr != nil { - return subvolErr - } - - return umountErr -} - -func (b *btrfs) GetLXCRootfsConfig(name string) (string, error) { - return fmt.Sprintf("dir:%s", path.Join(b.c.RootFSDir, name, "rootfs")), nil -} - -func (b *btrfs) TarExtractLocation(name string) string { - return path.Join(b.c.RootFSDir, name, "rootfs") -} - -// MakeLoopbackBtrfs creates a btrfs filesystem mounted at dest out of a loop -// device and allows the specified uid to delete subvolumes on it. -func MakeLoopbackBtrfs(loopback string, size int64, uid int, gid int, dest string) error { - mounted, err := mount.IsMountpoint(dest) - if err != nil { - return err - } - - /* if it's already mounted, don't do anything */ - if mounted { - return nil - } - - if err := setupLoopback(loopback, uid, gid, size); err != nil { - return err - } - - /* Now we know that file is a valid btrfs "file" and that it's - * not mounted, so let's mount it. - */ - dev, err := attachToLoop(loopback) - if err != nil { - return errors.Errorf("Failed to attach loop device: %v", err) - } - defer dev.Detach() - - err = syscall.Mount(dev.Path(), dest, "btrfs", 0, "user_subvol_rm_allowed") - if err != nil { - return errors.Errorf("Failed mount fs: %v", err) - } - - if err := os.Chown(dest, uid, gid); err != nil { - return errors.Errorf("couldn't chown %s: %v", dest, err) - } - - return nil -} - -// attachToLoop attaches the path to a loop device, retrying for a while if it -// gets -EBUSY. -func attachToLoop(path string) (dev losetup.Device, err error) { - // We can race between when we ask the kernel which loop device - // is free and when we actually attach to it. This window is - // pretty small, but still happens e.g. when we run the stacker - // test suite. So let's sleep for a random number of ms and - // retry the whole process again. 
- for i := 0; i < 10; i++ { - dev, err = losetup.Attach(path, 0, false) - if err == nil { - return dev, nil - } - - // time.Durations are nanoseconds - ms := rand.Int63n(100 * 1000 * 1000) - time.Sleep(time.Duration(ms)) - } - - return dev, errors.Wrapf(err, "couldn't attach btrfs loop, too many retries") -} - -func setupLoopback(path string, uid int, gid int, size int64) error { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0644) - if err != nil { - if !os.IsExist(err) { - return err - } - - return nil - } - defer f.Close() - - if err := f.Chown(uid, gid); err != nil { - os.RemoveAll(f.Name()) - return err - } - - /* TODO: make this configurable */ - if err := syscall.Ftruncate(int(f.Fd()), size); err != nil { - os.RemoveAll(f.Name()) - return err - } - - output, err := exec.Command("mkfs.btrfs", f.Name()).CombinedOutput() - if err != nil { - os.RemoveAll(f.Name()) - return errors.Errorf("mkfs.btrfs: %s: %s", err, output) - } - - return nil -} - -func (b *btrfs) SetOverlayDirs(name string, overlayDirs []types.OverlayDir, layerTypes []types.LayerType) error { - if len(overlayDirs) == 0 { - return nil - } - return errors.Errorf("Using overlay_dirs with btrfs storage is forbidden, use overlay storage instead") -} - -func Check(config types.StackerConfig) error { - isBtrfs, err := DetectBtrfs(config.RootFSDir) - if err != nil { - return err - } - - // it's already a mounted btrfs, nothing to worry about - if isBtrfs { - return nil - } - - source, err := ioutil.TempFile(config.RootFSDir, "source") - if err != nil { - return errors.Wrapf(err, "couldn't create source for btrfs test mount") - } - defer source.Close() - defer os.Remove(source.Name()) - - dest, err := ioutil.TempDir(config.RootFSDir, "dest") - if err != nil { - return errors.Wrapf(err, "couldn't create dest for btrfs test mount") - } - defer os.RemoveAll(dest) - - err = syscall.Mount(source.Name(), dest, "btrfs", 0, "") - switch err { - case syscall.ENOTBLK: - // it complained because source was not a block device; that - // means it found btrfs in the kernel and btrfs rejected the - // source - return nil - case syscall.EPERM: - return errors.Errorf("not enough perms to mount btrfs") - case syscall.ENODEV: - return errors.Errorf("btrfs missing from kernel") - default: - // we always expect one of the above failures, since we didn't - // pass a valid filesystem or block device to mount()... 
- return errors.Errorf("incomprehensible btrfs mount err: %#v", err) - } -} diff --git a/btrfs/gc.go b/btrfs/gc.go deleted file mode 100644 index 3375fc543..000000000 --- a/btrfs/gc.go +++ /dev/null @@ -1,82 +0,0 @@ -package btrfs - -import ( - "context" - "io/ioutil" - "path" - - "github.com/opencontainers/umoci" - stackeroci "github.com/project-stacker/stacker/oci" -) - -func gcForOCILayout(s *btrfs, layout string, thingsToKeep map[string]bool) error { - oci, err := umoci.OpenLayout(layout) - if err != nil { - return err - } - defer oci.Close() - - err = oci.GC(context.Background()) - if err != nil { - return err - } - - tags, err := oci.ListReferences(context.Background()) - if err != nil { - return err - } - - for _, t := range tags { - manifest, err := stackeroci.LookupManifest(oci, t) - if err != nil { - return err - } - - // keep both tags and hashes - thingsToKeep[t] = true - - for _, layer := range manifest.Layers { - hash, err := ComputeAggregateHash(manifest, layer) - if err != nil { - return err - } - - thingsToKeep[hash] = true - } - } - - return nil -} - -func (b *btrfs) GC() error { - thingsToKeep := map[string]bool{} - - err := gcForOCILayout(b, b.c.OCIDir, thingsToKeep) - if err != nil { - return err - } - - err = gcForOCILayout(b, path.Join(b.c.StackerDir, "layer-bases", "oci"), thingsToKeep) - if err != nil { - return err - } - - entries, err := ioutil.ReadDir(b.c.RootFSDir) - if err != nil { - return err - } - - for _, ent := range entries { - _, used := thingsToKeep[ent.Name()] - if used { - continue - } - - err = b.Delete(ent.Name()) - if err != nil { - return err - } - } - - return nil -} diff --git a/btrfs/repack.go b/btrfs/repack.go deleted file mode 100644 index 98e441113..000000000 --- a/btrfs/repack.go +++ /dev/null @@ -1,199 +0,0 @@ -package btrfs - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path" - "time" - - ispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/opencontainers/umoci" - "github.com/opencontainers/umoci/mutate" - "github.com/opencontainers/umoci/oci/casext" - "github.com/opencontainers/umoci/pkg/mtreefilter" - "github.com/pkg/errors" - "github.com/project-stacker/stacker/lib" - stackermtree "github.com/project-stacker/stacker/mtree" - stackeroci "github.com/project-stacker/stacker/oci" - "github.com/project-stacker/stacker/squashfs" - "github.com/project-stacker/stacker/storage" - "github.com/project-stacker/stacker/types" -) - -func (b *btrfs) initEmptyLayer(name string, layerType types.LayerType) error { - var oci casext.Engine - var err error - - tag := layerType.LayerName(name) - ociDir := b.c.OCIDir - bundlePath := path.Join(b.c.RootFSDir, name) - - if _, statErr := os.Stat(ociDir); statErr != nil { - oci, err = umoci.CreateLayout(ociDir) - } else { - oci, err = umoci.OpenLayout(ociDir) - } - if err != nil { - return errors.Wrapf(err, "Failed creating layout for %s", ociDir) - } - - err = umoci.NewImage(oci, tag) - if err != nil { - return err - } - - // kind of a hack, but the API won't let us init an empty image in a - // bundle with data already in it, which is probably reasonable. so - // what we do instead is: unpack the empty image above into a temp - // directory, then copy the mtree/umoci metadata over to our rootfs. 
- dir, err := ioutil.TempDir("", "umoci-init-empty") - if err != nil { - return errors.Wrapf(err, "couldn't create temp dir") - } - defer os.RemoveAll(dir) - - err = doUnpack(b.c, tag, ociDir, dir, "") - if err != nil { - return err - } - - ents, err := ioutil.ReadDir(dir) - if err != nil { - return errors.Wrapf(err, "couldn't read temp dir") - } - - for _, ent := range ents { - if ent.Name() == "rootfs" { - continue - } - - // copy all metadata to the real dir - err = lib.FileCopy(path.Join(bundlePath, ent.Name()), path.Join(dir, ent.Name())) - if err != nil { - return err - } - } - - return nil -} - -func determineLayerType(ociDir, tag string) (types.LayerType, error) { - oci, err := umoci.OpenLayout(ociDir) - if err != nil { - return types.LayerType(""), err - } - defer oci.Close() - - manifest, err := stackeroci.LookupManifest(oci, tag) - if err != nil { - return types.LayerType(""), err - } - - return types.NewLayerTypeManifest(manifest) -} - -func (b *btrfs) Repack(name string, layerTypes []types.LayerType, sfm types.StackerFiles) error { - if len(layerTypes) != 1 { - return errors.Errorf("btrfs backend does not support multiple layer types") - } - - layerType := layerTypes[0] - - // first, let's copy whatever we can from wherever we can, either - // import from the output if we already built a layer with this, or - // import from the cache if nothing was ever built based on this - baseTag, baseLayer, foundBase, err := storage.FindFirstBaseInOutput(name, sfm) - if err != nil { - return err - } - - initialized := false - if foundBase { - cacheDir := path.Join(b.c.StackerDir, "layer-bases", "oci") - // if it's from a containers image import and the layer types match, just copy it to the output - if types.IsContainersImageLayer(baseLayer.From.Type) { - cacheTag, err := baseLayer.From.ParseTag() - if err != nil { - return err - } - - sourceLayerType, err := determineLayerType(cacheDir, cacheTag) - if err != nil { - return err - } - if layerType == sourceLayerType { - err = lib.ImageCopy(lib.ImageCopyOpts{ - Src: fmt.Sprintf("oci:%s:%s", cacheDir, cacheTag), - Dest: fmt.Sprintf("oci:%s:%s", b.c.OCIDir, layerType.LayerName(name)), - }) - if err != nil { - return err - } - initialized = true - } - } else if !baseLayer.BuildOnly { - // otherwise if it's already been built and the base - // types match, import it from there - err = lib.ImageCopy(lib.ImageCopyOpts{ - Src: fmt.Sprintf("oci:%s:%s", b.c.OCIDir, layerType.LayerName(baseTag)), - Dest: fmt.Sprintf("oci:%s:%s", b.c.OCIDir, layerType.LayerName(name)), - }) - if err != nil { - return err - } - initialized = true - } - } - - if !initialized { - if err = b.initEmptyLayer(name, layerType); err != nil { - return err - } - } - - return doRepack(name, b.c.OCIDir, path.Join(b.c.RootFSDir, name), layerType) -} - -func doRepack(tag string, ociDir string, bundlePath string, layerType types.LayerType) error { - oci, err := umoci.OpenLayout(ociDir) - if err != nil { - return err - } - defer oci.Close() - - meta, err := umoci.ReadBundleMeta(bundlePath) - if err != nil { - return err - } - - mutator, err := mutate.New(oci, meta.From) - if err != nil { - return err - } - - imageMeta, err := mutator.Meta(context.Background()) - if err != nil { - return err - } - - layerName := layerType.LayerName(tag) - switch layerType { - case "tar": - now := time.Now() - history := &ispec.History{ - Author: imageMeta.Author, - Created: &now, - CreatedBy: "stacker umoci repack", - EmptyLayer: false, - } - - filters := 
[]mtreefilter.FilterFunc{stackermtree.LayerGenerationIgnoreRoot} - return umoci.Repack(oci, layerName, bundlePath, meta, history, filters, true, mutator) - case "squashfs": - return squashfs.GenerateSquashfsLayer(layerName, imageMeta.Author, bundlePath, ociDir, oci) - default: - return errors.Errorf("unknown layer type %s", layerType) - } -} diff --git a/btrfs/unpack.go b/btrfs/unpack.go deleted file mode 100644 index 12e26a53e..000000000 --- a/btrfs/unpack.go +++ /dev/null @@ -1,374 +0,0 @@ -package btrfs - -import ( - "context" - "io/ioutil" - "os" - "path" - "strings" - - ispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/opencontainers/umoci" - "github.com/opencontainers/umoci/oci/casext" - "github.com/opencontainers/umoci/oci/layer" - "github.com/opencontainers/umoci/pkg/fseval" - "github.com/pkg/errors" - "github.com/project-stacker/stacker/lib" - "github.com/project-stacker/stacker/log" - stackeroci "github.com/project-stacker/stacker/oci" - "github.com/project-stacker/stacker/squashfs" - "github.com/project-stacker/stacker/types" -) - -func (b *btrfs) Unpack(tag, name string) error { - oci, err := umoci.OpenLayout(b.c.OCIDir) - if err != nil { - return err - } - defer oci.Close() - - cacheDir := path.Join(b.c.StackerDir, "layer-bases", "oci") - cacheOCI, err := umoci.OpenLayout(cacheDir) - if err != nil { - return err - } - defer cacheOCI.Close() - - manifest, err := stackeroci.LookupManifest(cacheOCI, tag) - if err != nil { - return err - } - - bundlePath := path.Join(b.c.RootFSDir, name) - lastLayer, highestHash, err := b.findPreviousExtraction(cacheOCI, manifest) - if err != nil { - return err - } - - dps, err := cacheOCI.ResolveReference(context.Background(), tag) - if err != nil { - return err - } - - // restore whatever we already extracted - if highestHash != "" { - // Delete the previously created working snapshot; we're about - // to create a new one. - err = b.Delete(name) - if err != nil { - return err - } - - err = b.Restore(highestHash, name) - if err != nil { - return err - } - } - - // if we're done, just prepare the metadata - if lastLayer+1 == len(manifest.Layers) { - err = prepareUmociMetadata(b, name, bundlePath, dps[0], highestHash) - if err != nil { - return err - } - } else { - // otherwise, finish extracting - startFrom := manifest.Layers[lastLayer+1] - - // again, if we restored from something that already been unpacked but - // we're going to unpack stuff on top of it, we need to delete the old - // metadata. - err = cleanUmociMetadata(bundlePath) - if err != nil { - return err - } - - err = doUnpack(b.c, tag, cacheDir, bundlePath, startFrom.Digest.String()) - - if err != nil { - return err - } - - // Ok, now that we have extracted and computed the mtree, let's - // re-snapshot. The problem is that the snapshot in the callback won't - // contain an mtree file, because the final mtree is generated after - // the callback is called. 
- hash, err := ComputeAggregateHash(manifest, manifest.Layers[len(manifest.Layers)-1]) - if err != nil { - return err - } - err = b.Delete(hash) - if err != nil { - return err - } - - err = b.Snapshot(name, hash) - if err != nil { - return err - } - } - return nil -} - -func (b *btrfs) findPreviousExtraction(oci casext.Engine, manifest ispec.Manifest) (int, string, error) { - lastLayer := -1 - highestHash := "" - for i, layerDesc := range manifest.Layers { - hash, err := ComputeAggregateHash(manifest, layerDesc) - if err != nil { - return lastLayer, highestHash, err - } - - if b.Exists(hash) { - highestHash = hash - lastLayer = i - log.Debugf("found previous extraction of %s", layerDesc.Digest.String()) - } else { - break - } - } - - return lastLayer, highestHash, nil -} - -func doUnpack(config types.StackerConfig, tag, ociDir, bundlePath, startFromDigest string) error { - oci, err := umoci.OpenLayout(ociDir) - if err != nil { - return err - } - defer oci.Close() - - // Other unpack drivers will probably want to do something fancier for - // their unpacks and will exec a different code path, so we can/should - // assume this is btrfs for now. Additionally, we can assume its an - // existing btrfs, since the loopback device should have been mounted - // by the parent. - storage := NewExisting(config) - manifest, err := stackeroci.LookupManifest(oci, tag) - if err != nil { - return err - } - - startFrom := ispec.Descriptor{} - for _, desc := range manifest.Layers { - if desc.Digest.String() == startFromDigest { - startFrom = desc - break - } - } - - if startFromDigest != "" && startFrom.MediaType == "" { - return errors.Errorf("couldn't find starting hash %s", startFromDigest) - } - - var callback layer.AfterLayerUnpackCallback - if config.StorageType == "btrfs" { - // TODO: we could always share the empty layer, but that's more code - // and seems extreme... 
- callback = func(manifest ispec.Manifest, desc ispec.Descriptor) error { - hash, err := ComputeAggregateHash(manifest, desc) - if err != nil { - return err - } - - log.Debugf("creating intermediate snapshot %s", hash) - return storage.Snapshot(path.Base(bundlePath), hash) - } - } - - if len(manifest.Layers) != 0 && manifest.Layers[0].MediaType == stackeroci.MediaTypeLayerSquashfs { - log.Debugf("Unpack squashfs: %s", tag) - return squashfsUnpack(ociDir, oci, tag, bundlePath, callback, startFrom) - } - - return tarUnpack(config, oci, tag, bundlePath, callback, startFrom) -} - -func squashfsUnpack(ociDir string, oci casext.Engine, tag string, bundlePath string, callback layer.AfterLayerUnpackCallback, startFrom ispec.Descriptor) error { - manifest, err := stackeroci.LookupManifest(oci, tag) - if err != nil { - return err - } - - found := false - for _, layer := range manifest.Layers { - if !found && startFrom.MediaType != "" && layer.Digest.String() != startFrom.Digest.String() { - continue - } - found = true - - rootfs := path.Join(bundlePath, "rootfs") - squashfsFile := path.Join(ociDir, "blobs", "sha256", layer.Digest.Encoded()) - err = squashfs.ExtractSingleSquash(squashfsFile, rootfs, "btrfs") - if err != nil { - return err - } - err = callback(manifest, layer) - if err != nil { - return err - } - } - - dps, err := oci.ResolveReference(context.Background(), tag) - if err != nil { - return err - } - - mtreeName := strings.Replace(dps[0].Descriptor().Digest.String(), ":", "_", 1) - err = umoci.GenerateBundleManifest(mtreeName, bundlePath, fseval.Rootless) - if err != nil { - return err - } - - err = umoci.WriteBundleMeta(bundlePath, umoci.Meta{ - Version: umoci.MetaVersion, - From: casext.DescriptorPath{ - Walk: []ispec.Descriptor{dps[0].Descriptor()}, - }, - }) - - if err != nil { - return err - } - return nil -} - -func tarUnpack(config types.StackerConfig, oci casext.Engine, tag string, bundlePath string, callback layer.AfterLayerUnpackCallback, startFrom ispec.Descriptor) error { - whiteoutMode := layer.OCIStandardWhiteout - if config.StorageType == "overlay" { - whiteoutMode = layer.OverlayFSWhiteout - } - - opts := layer.UnpackOptions{ - KeepDirlinks: true, - AfterLayerUnpack: callback, - StartFrom: startFrom, - WhiteoutMode: whiteoutMode, - } - return umoci.Unpack(oci, tag, bundlePath, opts) -} - -func prepareUmociMetadata(storage *btrfs, name string, bundlePath string, dp casext.DescriptorPath, highestHash string) error { - // We need the mtree metadata to be present, but since these - // intermediate snapshots were created after each layer was - // extracted and the metadata wasn't, it won't necessarily - // exist. We could create it at extract time, but that would - // make everything really slow, since we'd have to walk the - // whole FS after every layer which would probably slow things - // way down. - // - // Instead, check to see if the metadata has been generated. If - // it hasn't, we generate it, and then re-snapshot back (since - // we can't write to the old snapshots) with the metadata. - // - // This means the first restore will be slower, but after that - // it will be very fast. - // - // A further complication is that umoci metadata is stored in terms of - // the manifest that corresponds to the layers. When a config changes - // (or e.g. a manifest is updated to reflect new layers), the old - // manifest will be unreferenced and eventually GC'd. 
However, the - // underlying layers were the same, since the hash here is the - // aggregate hash of only the bits in the layers, and not of anything - // related to the manifest. Then, when some "older" build comes along - // referencing these same layers but with a different manifest, we'll - // fail. - // - // Since the manifest doesn't actually affect the bits on disk, we can - // essentially just copy the old manifest over to whatever the new - // manifest will be if the hashes don't match. We re-snapshot since - // snapshotting is generally cheap and we assume that the "new" - // manifest will be the default. However, this code will still be - // triggered if we go back to the old manifest. - mtreeName := strings.Replace(dp.Descriptor().Digest.String(), ":", "_", 1) - _, err := os.Stat(path.Join(bundlePath, "umoci.json")) - if err == nil { - mtreePath := path.Join(bundlePath, mtreeName+".mtree") - _, err := os.Stat(mtreePath) - if err == nil { - // The best case: this layer's mtree and metadata match - // what we're currently trying to extract. Do nothing. - return nil - } - - // The mtree file didn't match. Find the other mtree (it must - // exist) in this directory (since any are necessarily correct - // per above) and move it to this mtree name, then regenerate - // umoci's metadata. - entries, err := ioutil.ReadDir(bundlePath) - if err != nil { - return err - } - - generated := false - for _, ent := range entries { - if !strings.HasSuffix(ent.Name(), ".mtree") { - continue - } - - generated = true - oldMtreePath := path.Join(bundlePath, ent.Name()) - err = lib.FileCopy(mtreePath, oldMtreePath) - if err != nil { - return err - } - - os.RemoveAll(oldMtreePath) - break - } - - if !generated { - return errors.Errorf("couldn't find old umoci metadata in %s", bundlePath) - } - } else { - // Umoci's metadata wasn't present. Let's generate it. 
- log.Infof("generating mtree metadata for snapshot (this may take a bit)...") - err = umoci.GenerateBundleManifest(mtreeName, bundlePath, fseval.Default) - if err != nil { - return err - } - - } - - meta := umoci.Meta{ - Version: umoci.MetaVersion, - MapOptions: layer.MapOptions{}, - From: dp, - } - - err = umoci.WriteBundleMeta(bundlePath, meta) - if err != nil { - return err - } - - err = storage.Delete(highestHash) - if err != nil { - return err - } - - err = storage.Snapshot(name, highestHash) - if err != nil { - return err - } - - return nil -} - -// clean all the umoci metadata (config.json for the OCI runtime, umoci.json -// for its metadata, anything named *.mtree) -func cleanUmociMetadata(bundlePath string) error { - ents, err := ioutil.ReadDir(bundlePath) - if err != nil { - return err - } - - for _, ent := range ents { - if ent.Name() == "rootfs" { - continue - } - - os.Remove(path.Join(bundlePath, ent.Name())) - } - - return nil -} diff --git a/btrfs/unpriv-setup.go b/btrfs/unpriv-setup.go deleted file mode 100644 index 12faa89b5..000000000 --- a/btrfs/unpriv-setup.go +++ /dev/null @@ -1,12 +0,0 @@ -package btrfs - -import ( - "path" - - "github.com/project-stacker/stacker/types" -) - -func UnprivSetup(config types.StackerConfig, uid, gid int) error { - size := int64(100 * 1024 * 1024 * 1024) - return MakeLoopbackBtrfs(path.Join(config.StackerDir, "btrfs.loop"), size, uid, gid, config.RootFSDir) -} diff --git a/btrfs/utils.go b/btrfs/utils.go deleted file mode 100644 index a8c8dc5e1..000000000 --- a/btrfs/utils.go +++ /dev/null @@ -1,32 +0,0 @@ -package btrfs - -import ( - "fmt" - - "github.com/minio/sha256-simd" - ispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func ComputeAggregateHash(manifest ispec.Manifest, descriptor ispec.Descriptor) (string, error) { - h := sha256.New() - found := false - - for _, l := range manifest.Layers { - _, err := h.Write([]byte(l.Digest.String())) - if err != nil { - return "", err - } - - if l.Digest.String() == descriptor.Digest.String() { - found = true - break - } - } - - if !found { - return "", errors.Errorf("couldn't find descriptor %s in manifest %s", descriptor.Digest.String(), manifest.Annotations["org.opencontainers.image.ref.name"]) - } - - return fmt.Sprintf("%x", h.Sum(nil)), nil -} diff --git a/build.go b/build.go index 552fb3085..e569fe972 100644 --- a/build.go +++ b/build.go @@ -251,11 +251,6 @@ func (b *Builder) updateOCIConfigForOutput(sf *types.Stackerfile, s types.Storag return err } - err = s.UpdateFSMetadata(name, newPath) - if err != nil { - return err - } - return nil } @@ -416,9 +411,6 @@ func (b *Builder) build(s types.Storage, file string) error { return errors.Wrapf(err, "error saving config file for %s", name) } - if err := s.Finalize(name); err != nil { - return err - } log.Infof("setup for %s complete", name) continue } @@ -449,10 +441,6 @@ func (b *Builder) build(s types.Storage, file string) error { // imported into future images. Let's just snapshot it and add // a bogus entry to our cache. 
if l.BuildOnly { - if err := s.Finalize(name); err != nil { - return err - } - log.Debugf("build only layer, skipping OCI diff generation") // A small hack: for build only layers, we keep track @@ -491,10 +479,6 @@ func (b *Builder) build(s types.Storage, file string) error { return err } - if err := s.Finalize(name); err != nil { - return err - } - log.Infof("filesystem %s built successfully", name) } @@ -510,9 +494,6 @@ func (b *Builder) BuildMultiple(paths []string) error { if err != nil { return err } - if !opts.LeaveUnladen { - defer s.Detach() - } defer locks.Unlock() // Read all the stacker recipes diff --git a/cmd/build.go b/cmd/build.go index b5857fdfb..7aa251bc7 100644 --- a/cmd/build.go +++ b/cmd/build.go @@ -1,7 +1,6 @@ package main import ( - "github.com/pkg/errors" "github.com/project-stacker/stacker" "github.com/project-stacker/stacker/types" "github.com/urfave/cli" @@ -27,10 +26,6 @@ func initBuildFlags() []cli.Flag { func initCommonBuildFlags() []cli.Flag { return []cli.Flag{ - cli.BoolFlag{ - Name: "leave-unladen", - Usage: "leave the built rootfs mount after image building", - }, cli.BoolFlag{ Name: "no-cache", Usage: "don't use the previous build cache", @@ -64,10 +59,6 @@ func initCommonBuildFlags() []cli.Flag { } func beforeBuild(ctx *cli.Context) error { - if config.StorageType == "overlay" && ctx.Bool("leave-unladen") { - return errors.Errorf("cannot use --storage-type=overlay and --leave-unladen together") - } - // Validate build failure arguments err := validateBuildFailureFlags(ctx) if err != nil { @@ -85,7 +76,6 @@ func beforeBuild(ctx *cli.Context) error { func newBuildArgs(ctx *cli.Context) (stacker.BuildArgs, error) { args := stacker.BuildArgs{ Config: config, - LeaveUnladen: ctx.Bool("leave-unladen"), NoCache: ctx.Bool("no-cache"), Substitute: ctx.StringSlice("substitute"), OnRunFailure: ctx.String("on-run-failure"), diff --git a/cmd/check.go b/cmd/check.go index f8f9b37cd..045c18e81 100644 --- a/cmd/check.go +++ b/cmd/check.go @@ -4,7 +4,6 @@ import ( "os" "github.com/pkg/errors" - "github.com/project-stacker/stacker/btrfs" "github.com/project-stacker/stacker/overlay" "github.com/urfave/cli" ) @@ -23,8 +22,6 @@ func doCheck(ctx *cli.Context) error { switch config.StorageType { case "overlay": return overlay.Check(config) - case "btrfs": - return btrfs.Check(config) default: return errors.Errorf("invalid storage type %v", config.StorageType) } diff --git a/cmd/chroot.go b/cmd/chroot.go index ec424490d..d068cbaff 100644 --- a/cmd/chroot.go +++ b/cmd/chroot.go @@ -41,7 +41,6 @@ func doChroot(ctx *cli.Context) error { if err != nil { return err } - defer s.Detach() defer locks.Unlock() tag := "" diff --git a/cmd/clean.go b/cmd/clean.go index fefa356aa..a8e39c115 100644 --- a/cmd/clean.go +++ b/cmd/clean.go @@ -29,7 +29,6 @@ func doClean(ctx *cli.Context) error { if err != nil { return err } - s.Detach() err = s.Clean() if err != nil { log.Infof("problem cleaning roots %v", err) diff --git a/cmd/gc.go b/cmd/gc.go index b29ac9a5e..3a24b0a79 100644 --- a/cmd/gc.go +++ b/cmd/gc.go @@ -7,7 +7,7 @@ import ( var gcCmd = cli.Command{ Name: "gc", - Usage: "gc unused OCI imports/outputs and btrfs snapshots", + Usage: "gc unused OCI imports/outputs and snapshots", Action: doGC, } @@ -16,7 +16,6 @@ func doGC(ctx *cli.Context) error { if err != nil { return err } - defer s.Detach() defer locks.Unlock() return s.GC() } diff --git a/cmd/grab.go b/cmd/grab.go index 3a6491969..06f920dcc 100644 --- a/cmd/grab.go +++ b/cmd/grab.go @@ -25,7 +25,6 @@ func doGrab(ctx *cli.Context) error 
{ if err != nil { return err } - defer s.Detach() defer locks.Unlock() parts := strings.SplitN(ctx.Args().First(), ":", 2) diff --git a/cmd/main.go b/cmd/main.go index feadca6ef..2c0264422 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -144,7 +144,7 @@ func main() { }, cli.StringFlag{ Name: "storage-type", - Usage: "storage type (one of \"overlay\" or \"btrfs\", defaults to overlay)", + Usage: "storage type (must be \"overlay\", left for compatibility)", Value: "overlay", }, cli.BoolFlag{ diff --git a/doc/running.md b/doc/running.md index 690acd568..2531a6d7a 100644 --- a/doc/running.md +++ b/doc/running.md @@ -5,10 +5,6 @@ Stacker execs various tools in order to accomplish its goals. For example, in order to generate squashfs images, the `mksquashfs` binary needs to be present in `$PATH`. -stacker has two storage backends: an overlayfs based backend and an older (and -slower) btrfs backend. By default, stacker uses the overlay backend though the -overlayfs backend requires a very new kernel (see below for discussion). - `stacker` builds things in the host's network namespace, re-exports any of `HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY` and their lowercase counterparts inside the environment, and bind mounts in the host's /etc/resolv.conf. This means @@ -34,16 +30,11 @@ Note that unlike other container tools, stacker generally assumes what's inside the container is a "sane" rootfs, i.e. it can exec `sh` to implement the `run:` section. -### The overlay backend - -The overlayfs backend is considerably faster than the btrfs version, because it -skips all the mtree metadata generation steps. It also extracts things in -parallel, so filesystems with many layers will be imported faster than in the -btrfs backend. +### The overlay filesystem -The overlay backend cannot be itself backed by an underlying overlayfs, since -stacker needs to create whiteout files, and the kernel (rightfully) forbids -manual creation of whiteout files on overlay filesystems. +Stacker cannot itself be backed by an underlying overlayfs, since stacker needs +to create whiteout files, and the kernel (rightfully) forbids manual creation +of whiteout files on overlay filesystems. Additionally, here are no additional userspace dependencies required to use the overlayfs backend. @@ -65,36 +56,6 @@ Stacker has checks to ensure that it can run with all these environment requirements, and will fail fast if it can't do something it should be able to do. -### The btrfs backend - -First, there is a runtime dependency as well, namely, the btrfs tools. These -can be installed on ubuntu with: - - apt install btrfs-progs - -#### Kernel version - -To use unprivileged stacker, you will need a kernel with user namespaces -enabled (>= 3.10). However, many features related to user namespaces have -landed since then, so it is best to use the most up to date kernel. For example -user namespaced file capabilities were introduced in kernel commit 8db6c34f1db, -which landed in 4.14-rc1. Stock rhel/centos images use file capabilities to -avoid making executables like ping setuid, and so unprivileged stacker will -need a >= 4.14 kernel to work with these images. Fortunately, the Ubuntu -kernels have these patches backported, so any ubuntu >= 16.04 will work. - -#### Underlying filesystem - -If you are running in a btrfs filesystem, nothing needs to be done. - -If you are running in a non-btrfs filesystem, but as root, then stacker -will automatically create and mount a loopback btrfs to use. 
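The automatic loopback setup described in the deleted paragraph above boiled down to roughly the following shell steps (a sketch based on the removed MakeLoopbackBtrfs/setupLoopback code and the deleted clean.bats tests; the backing-file and mount paths shown here are illustrative placeholders, only the 100G size and mount options come from the removed code):

    truncate -s 100G .stacker/btrfs.loop     # create the backing file, as setupLoopback did
    mkfs.btrfs .stacker/btrfs.loop           # format the backing file as btrfs
    mount -o loop,user_subvol_rm_allowed .stacker/btrfs.loop roots   # attach a loop device and mount it at the roots dir
    chown <uid>:<gid> roots                  # hand the mountpoint to the calling user, as MakeLoopbackBtrfs did
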
- -If you are running as non-root in a non-btrfs filesystem, then you need -to prepare by running `sudo stacker unpriv-setup`. Note that you'll need to -mount this filesystem on every reboot, either by running `unpriv-setup` again, -or setting up the mount in systemd or fstab or something. - #### Importing squashfs images In order to correctly import squashfs-based images using the btrfs backend, diff --git a/go.mod b/go.mod index b4ee4a14c..fe5832955 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,6 @@ require ( github.com/docker/distribution v2.8.0+incompatible // indirect github.com/docker/docker v20.10.11+incompatible // indirect github.com/dustin/go-humanize v1.0.0 - github.com/freddierice/go-losetup v0.0.0-20210416171645-f09b6c574057 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/justincormack/go-memfd v0.0.0-20170219213707-6e4af0518993 diff --git a/go.sum b/go.sum index a4c13fa0a..e65339e49 100644 --- a/go.sum +++ b/go.sum @@ -458,8 +458,6 @@ github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03D github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.13.1/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= -github.com/freddierice/go-losetup v0.0.0-20210416171645-f09b6c574057 h1:OT9ORws9rTVs4YdjKwAWz2asWNngUyogcK0zZ/iKhdE= -github.com/freddierice/go-losetup v0.0.0-20210416171645-f09b6c574057/go.mod h1:zAk7fcFx45euzK9Az14j6Hd9n8Cwhnjp/NBfhSIAmFg= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= diff --git a/oci/oci.go b/oci/oci.go index 8d143b395..25ebfd5c7 100644 --- a/oci/oci.go +++ b/oci/oci.go @@ -2,7 +2,6 @@ package oci import ( "context" - "io" ispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/umoci/oci/casext" @@ -55,41 +54,6 @@ func LookupConfig(oci casext.Engine, desc ispec.Descriptor) (ispec.Image, error) } -// AddBlobNoCompression adds a blob to an OCI tag without compressing it (i.e. -// not through umoci.mutator). 
-func AddBlobNoCompression(oci casext.Engine, name string, content io.Reader) (ispec.Descriptor, error) { - blobDigest, blobSize, err := oci.PutBlob(context.Background(), content) - if err != nil { - return ispec.Descriptor{}, err - } - - desc := ispec.Descriptor{ - MediaType: MediaTypeLayerSquashfs, - Digest: blobDigest, - Size: blobSize, - } - - return AddBlobByDescriptor(oci, name, desc) -} - -// AddBlobByDescriptor adds a layer to an OCI tag based on layer's Descriptor -func AddBlobByDescriptor(oci casext.Engine, name string, desc ispec.Descriptor) (ispec.Descriptor, error) { - manifest, err := LookupManifest(oci, name) - if err != nil { - return ispec.Descriptor{}, err - } - - config, err := LookupConfig(oci, manifest.Config) - if err != nil { - return ispec.Descriptor{}, err - } - - manifest.Layers = append(manifest.Layers, desc) - config.RootFS.DiffIDs = append(config.RootFS.DiffIDs, desc.Digest) - - return UpdateImageConfig(oci, name, config, manifest) -} - // UpdateImageConfig updates an oci tag with new config and new manifest func UpdateImageConfig(oci casext.Engine, name string, newConfig ispec.Image, newManifest ispec.Manifest) (ispec.Descriptor, error) { configDigest, configSize, err := oci.PutBlobJSON(context.Background(), newConfig) diff --git a/overlay/overlay.go b/overlay/overlay.go index 4c17c0885..e675c859d 100644 --- a/overlay/overlay.go +++ b/overlay/overlay.go @@ -13,7 +13,6 @@ import ( "path" "syscall" - "github.com/opencontainers/umoci/oci/casext" "github.com/pkg/errors" "github.com/project-stacker/stacker/types" "golang.org/x/sys/unix" @@ -178,20 +177,6 @@ func (o *overlay) Exists(thing string) bool { return err == nil } -func (o *overlay) Detach() error { - return nil -} - -func (o *overlay) UpdateFSMetadata(name string, path casext.DescriptorPath) error { - // no-op; we get our layer contents by just looking at the contents of - // the upperdir - return nil -} - -func (o *overlay) Finalize(thing string) error { - return nil -} - func (o *overlay) TemporaryWritableSnapshot(source string) (string, func(), error) { // should use create maybe? 
dir, err := ioutil.TempDir(o.config.RootFSDir, fmt.Sprintf("temp-snapshot-%s-", source)) @@ -214,10 +199,6 @@ func (o *overlay) TemporaryWritableSnapshot(source string) (string, func(), erro } func (o *overlay) Clean() error { - err := o.Detach() - if err != nil { - return errors.Wrapf(err, "problem unmounting overlays") - } return errors.Wrapf(os.RemoveAll(o.config.RootFSDir), "couldn't clean rootfs dir") } diff --git a/publisher.go b/publisher.go index 3c24add9c..8b162783e 100644 --- a/publisher.go +++ b/publisher.go @@ -65,13 +65,6 @@ func (p *Publisher) Publish(file string) error { } defer oci.Close() - s, locks, err := NewStorage(opts.Config) - if err != nil { - return err - } - defer s.Detach() - defer locks.Unlock() - buildCache, err := OpenCache(opts.Config, oci, p.stackerfiles) if err != nil { return err diff --git a/squashfs/squashfs.go b/squashfs/squashfs.go index c71a61661..b22af6a98 100644 --- a/squashfs/squashfs.go +++ b/squashfs/squashfs.go @@ -4,7 +4,6 @@ package squashfs import ( "bytes" - "fmt" "io" "io/ioutil" "os" @@ -12,15 +11,7 @@ import ( "path" "strings" - ispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/opencontainers/umoci" - "github.com/opencontainers/umoci/oci/casext" - "github.com/opencontainers/umoci/pkg/fseval" - "github.com/opencontainers/umoci/pkg/mtreefilter" "github.com/pkg/errors" - stackermtree "github.com/project-stacker/stacker/mtree" - stackeroci "github.com/project-stacker/stacker/oci" - "github.com/vbatts/go-mtree" "golang.org/x/sys/unix" ) @@ -148,125 +139,6 @@ func MakeSquashfs(tempdir string, rootfs string, eps *ExcludePaths) (io.ReadClos return os.Open(tmpSquashfs.Name()) } -func GenerateSquashfsLayer(name, author, bundlepath, ocidir string, oci casext.Engine) error { - meta, err := umoci.ReadBundleMeta(bundlepath) - if err != nil { - return err - } - - mtreeName := strings.Replace(meta.From.Descriptor().Digest.String(), ":", "_", 1) - mtreePath := path.Join(bundlepath, mtreeName+".mtree") - - mfh, err := os.Open(mtreePath) - if err != nil { - return errors.Wrapf(err, "opening mtree") - } - - spec, err := mtree.ParseSpec(mfh) - if err != nil { - return err - } - - fsEval := fseval.Rootless - rootfsPath := path.Join(bundlepath, "rootfs") - newDH, err := mtree.Walk(rootfsPath, nil, umoci.MtreeKeywords, fsEval) - if err != nil { - return errors.Wrapf(err, "couldn't mtree walk %s", rootfsPath) - } - - diffs, err := mtree.CompareSame(spec, newDH, umoci.MtreeKeywords) - if err != nil { - return err - } - - diffs = mtreefilter.FilterDeltas(diffs, - stackermtree.LayerGenerationIgnoreRoot, - mtreefilter.SimplifyFilter(diffs)) - - // This is a pretty massive hack, because there's no library for - // generating squashfs images. However, mksquashfs does take a list of - // files to exclude from the image. So we go through and accumulate a - // list of these files. - // - // For missing files, since we're going to use overlayfs with - // squashfs, we use overlayfs' mechanism for whiteouts, which is a - // character device with device numbers 0/0. But since there's no - // library for generating squashfs images, we have to write these to - // the actual filesystem, and then remember what they are so we can - // delete them later. - missing := []string{} - defer func() { - for _, f := range missing { - os.Remove(f) - } - }() - - // we only need to generate a layer if anything was added, modified, or - // deleted; if everything is the same this should be a no-op. 
- needsLayer := false - paths := NewExcludePaths() - for _, diff := range diffs { - switch diff.Type() { - case mtree.Modified, mtree.Extra: - needsLayer = true - p := path.Join(rootfsPath, diff.Path()) - paths.AddInclude(p, diff.New().IsDir()) - case mtree.Missing: - needsLayer = true - p := path.Join(rootfsPath, diff.Path()) - missing = append(missing, p) - paths.AddInclude(p, diff.Old().IsDir()) - if err := unix.Mknod(p, unix.S_IFCHR, int(unix.Mkdev(0, 0))); err != nil { - if !os.IsNotExist(err) && err != unix.ENOTDIR { - // No privilege to create device nodes. Create a .wh.$filename instead. - dirname := path.Dir(diff.Path()) - fname := fmt.Sprintf(".wh.%s", path.Base(diff.Path())) - whPath := path.Join(rootfsPath, dirname, fname) - fd, err := os.Create(whPath) - if err != nil { - return errors.Wrapf(err, "couldn't mknod whiteout for %s", diff.Path()) - } - fd.Close() - } - } - case mtree.Same: - paths.AddExclude(path.Join(rootfsPath, diff.Path())) - } - } - - if !needsLayer { - return nil - } - - tmpSquashfs, err := MakeSquashfs(ocidir, rootfsPath, paths) - if err != nil { - return err - } - defer tmpSquashfs.Close() - - desc, err := stackeroci.AddBlobNoCompression(oci, name, tmpSquashfs) - if err != nil { - return err - } - - newName := strings.Replace(desc.Digest.String(), ":", "_", 1) + ".mtree" - err = umoci.GenerateBundleManifest(newName, bundlepath, fsEval) - if err != nil { - return err - } - - os.Remove(mtreePath) - meta.From = casext.DescriptorPath{ - Walk: []ispec.Descriptor{desc}, - } - err = umoci.WriteBundleMeta(bundlepath, meta) - if err != nil { - return err - } - - return nil -} - func ExtractSingleSquash(squashFile string, extractDir string, storageType string) error { err := os.MkdirAll(extractDir, 0755) if err != nil { diff --git a/storage.go b/storage.go index 38e50c02c..45540d45a 100644 --- a/storage.go +++ b/storage.go @@ -6,7 +6,6 @@ import ( "path" "github.com/pkg/errors" - "github.com/project-stacker/stacker/btrfs" "github.com/project-stacker/stacker/log" "github.com/project-stacker/stacker/overlay" "github.com/project-stacker/stacker/storage" @@ -26,24 +25,6 @@ func openStorage(c types.StackerConfig, storageType string) (types.Storage, erro } return overlay.NewOverlay(c) - case "btrfs": - err := btrfs.Check(c) - if err != nil { - return nil, err - } - - isBtrfs, err := btrfs.DetectBtrfs(c.RootFSDir) - if err != nil { - log.Infof("error from DetectBtrfs %v", err) - return nil, err - } - - if !isBtrfs { - log.Debugf("no btrfs detected, creating a loopback device") - return btrfs.NewLoopback(c) - } - - return btrfs.NewExisting(c), nil default: return nil, errors.Errorf("unknown storage type %s", storageType) } @@ -160,7 +141,6 @@ func NewStorage(c types.StackerConfig) (types.Storage, *StackerLocks, error) { // there is no attachment for overlay), so that's safe. 
 	locks, err := lock(c)
 	if err != nil {
-		s.Detach()
 		return nil, nil, err
 	}
@@ -176,8 +156,6 @@ func UnprivSetup(c types.StackerConfig, username string, uid, gid int) error {
 	switch c.StorageType {
 	case "overlay":
 		return overlay.UnprivSetup(c, uid, gid)
-	case "btrfs":
-		return btrfs.UnprivSetup(c, uid, gid)
 	default:
 		return errors.Errorf("unknown storage type %s", c.StorageType)
 	}
 }
diff --git a/test/build-only.bats b/test/build-only.bats
index 77ca83c51..874ac86c2 100644
--- a/test/build-only.bats
+++ b/test/build-only.bats
@@ -128,7 +128,6 @@ EOF
 }
 
 @test "build only + unpriv + overlay clears state" {
-    require_storage overlay
     cat > stacker.yaml <<"EOF"
 first:
     from:
@@ -150,7 +149,6 @@ EOF
 }
 
 @test "multiple build onlys in final chain rebuild OK" {
-    require_storage overlay
     cat > stacker.yaml <<"EOF"
 one:
     from:
diff --git a/test/caching.bats b/test/caching.bats
index 1e7bccc75..52953e6d2 100644
--- a/test/caching.bats
+++ b/test/caching.bats
@@ -203,10 +203,10 @@ test:
 EOF
 
     if [ "$PRIVILEGE_LEVEL" = "priv" ]; then
-        ./stacker/stacker --storage-type=$STORAGE_TYPE --debug build
+        ./stacker/stacker --debug build
     else
         skip_if_no_unpriv_overlay
-        sudo -u $SUDO_USER ./stacker/stacker --storage-type=$STORAGE_TYPE --debug build
+        sudo -u $SUDO_USER ./stacker/stacker --debug build
     fi
 
     stacker build
diff --git a/test/check.bats b/test/check.bats
index 509d2775e..238c1a1b7 100644
--- a/test/check.bats
+++ b/test/check.bats
@@ -1,32 +1,12 @@
 load helpers
 
-@test "stacker check is reasonable priv btrfs" {
-    require_privilege priv
-    require_storage btrfs
-    stacker check
-}
-
-@test "stacker check is reasonable unpriv btrfs" {
-    require_privilege unpriv
-    require_storage btrfs
-
-    # assuming we're not on btrfs currently, this will fail...
-    bad_stacker check
-
-    # but after setup, it will succeed
-    stacker_setup
-    stacker check
-}
-
 @test "stacker check is reasonable priv overlay" {
     require_privilege priv
-    require_storage overlay
     stacker check
 }
 
 @test "stacker check is reasonable unpriv overlay" {
     require_privilege unpriv
-    require_storage overlay
 
     # if we don't have overlay support, stacker check should fail, otherwise it
     # should succeed
diff --git a/test/clean.bats b/test/clean.bats
index 4dfaed318..f0102df43 100644
--- a/test/clean.bats
+++ b/test/clean.bats
@@ -8,105 +8,7 @@ function teardown() {
     cleanup
 }
 
-@test "clean on a non-loopback btrfs works" {
-    require_storage btrfs
-
-    truncate -s 10G btrfs.loop
-    mkfs.btrfs btrfs.loop
-    mkdir -p parent
-    mount -o loop,user_subvol_rm_allowed btrfs.loop parent
-    mkdir -p parent/roots
-    chmod -R 777 parent/roots
-
-    stacker --stacker-dir .otherstacker --roots-dir=parent/roots clean
-}
-
-@test "clean in the face of subvolumes works" {
-    require_storage btrfs
-
-    truncate -s 10G btrfs.loop
-    mkfs.btrfs btrfs.loop
-    run_as mkdir -p parent
-    mount -o loop,user_subvol_rm_allowed btrfs.loop parent
-    chmod 777 parent
-    run_as mkdir -p parent/roots
-
-    # create some subvolumes and make them all readonly
-    run_as btrfs subvol create parent/roots/a
-    run_as btrfs property set -ts parent/roots/a ro true
-    run_as btrfs subvol create parent/roots/b
-    run_as btrfs property set -ts parent/roots/b ro true
-    run_as btrfs subvol create parent/roots/c
-    run_as btrfs property set -ts parent/roots/c ro true
-
-    # stacker clean with a roots dir that is already on btrfs should succeed
-    stacker --stacker-dir .otherstacker --roots-dir=parent/roots clean
-
-    [ -d parent ]
-    tree parent
-    [ "$PRIVILEGE_LEVEL" == "unpriv" ] || [ ! -d parent/roots ]
-}
-
-@test "unpriv subvol clean works" {
-    require_storage btrfs
-
-    truncate -s 10G btrfs.loop
-    mkfs.btrfs btrfs.loop
-    mkdir -p parent
-    mount -o loop,user_subvol_rm_allowed btrfs.loop parent
-    chmod 777 parent
-    run_as mkdir -p parent/roots
-
-    # create some subvolumes and make them all readonly
-    btrfs subvol create parent/roots/a
-    btrfs subvol create parent/roots/a/b
-    sudo chown -R $SUDO_USER:$SUDO_USER .
-    btrfs property set -ts parent/roots/a/b ro true
-    btrfs property set -ts parent/roots/a ro true
-
-    stacker --stacker-dir .otherstacker --roots-dir=parent/roots clean
-    [ ! -d parent/roots/a ]
-    [ ! -d parent/roots/a/b ]
-}
-
-@test "extra dirs don't get cleaned" {
-    require_storage btrfs
-
-    truncate -s 10G btrfs.loop
-    mkfs.btrfs btrfs.loop
-    run_as mkdir -p parent
-    mount -o loop,user_subvol_rm_allowed btrfs.loop parent
-    chmod 777 parent
-    run_as mkdir -p parent/roots
-
-    run_as btrfs subvol create parent/roots/a
-    # we had a bad bug one time where we forgot to join the root path with the
-    # subvolume we were deleting, so these got deleted.
-    mkdir a
-    stacker --stacker-dir .otherstacker --roots-dir=parent/roots clean
-    [ ! -d parent/roots/a ]
-    [ -d a ]
-}
-
-@test "clean in loopback mode works" {
-    require_storage btrfs
-    require_privilege priv
-
-    cat > stacker.yaml < stacker.yaml < stacker.yaml < stacker.yaml < stacker.yaml < stacker.yaml < stacker.yaml < stacker.yaml < stacker.yaml < stacker.yaml < stacker.yaml < stacker.yaml <<'EOF'
-child-child:
-    from:
-        type: oci
-        url: oci-import:child
-    run: |
-        ls /
-        stat --format="%a" /000
-        [ "$(stat --format="%a" /000)" = "0" ]
-        [ -f /child ]
-parent-child:
-    from:
-        type: oci
-        url: oci-import:parent
-    run: |
-        ls /
-        [ "$(stat --format="%a" /000)" = "0" ]
-        [ ! -f /child ]
-        touch /foo
-EOF
-    stacker build
-
-    manifest=$(cat oci/index.json | jq -r .manifests[1].digest | cut -f2 -d:)
-    n_layers=$(cat oci/blobs/sha256/$manifest | jq -r '.layers | length')
-    last_layer=$(cat oci/blobs/sha256/$manifest | jq -r ".layers[$(($n_layers-1))].digest" | cut -f2 -d:)
-
-    mkdir foo
-    tar -C foo -xf oci/blobs/sha256/$last_layer
-    ls -1 foo
-    [ "$(ls -1 foo | wc -l)" = "1" ]
-    # little bunny
-    [ -f foo/foo ]
-}
diff --git a/test/main.py b/test/main.py
index e7a4c3a12..144e28e1a 100755
--- a/test/main.py
+++ b/test/main.py
@@ -7,37 +7,30 @@ import subprocess
 import sys
 
-storage_types=("btrfs", "overlay")
 priv_levels=("priv", "unpriv")
 
 parser = argparse.ArgumentParser()
-parser.add_argument("--storage-type", choices=storage_types)
 parser.add_argument("--privilege-level", choices=priv_levels)
 parser.add_argument("--jobs", type=int, default=multiprocessing.cpu_count())
 parser.add_argument("tests", nargs="*", default=glob.glob("./test/*.bats"))
 options = parser.parse_args()
 
-storage_to_test=storage_types
 priv_to_test=priv_levels
 
-if options.storage_type is not None:
-    storage_to_test = [options.storage_type]
 if options.privilege_level is not None:
     priv_to_test = [options.privilege_level]
 
-for st in storage_to_test:
-    for priv in priv_to_test:
-        cmd = ["bats", "--jobs", str(options.jobs), "-t"]
-        cmd.extend(options.tests)
-
-        env = os.environ.copy()
-        env["STORAGE_TYPE"] = st
-        env["PRIVILEGE_LEVEL"] = priv
-
-        print("running tests in modes:", st, priv)
-        try:
-            subprocess.check_call(cmd, env=env)
-        except subprocess.CalledProcessError:
-            print("tests in modes:", st, priv, "failed")
-            sys.exit(1)
+for priv in priv_to_test:
+    cmd = ["bats", "--jobs", str(options.jobs), "-t"]
+    cmd.extend(options.tests)
+
+    env = os.environ.copy()
+    env["PRIVILEGE_LEVEL"] = priv
+
+    print("running tests in modes:", priv)
+    try:
+        subprocess.check_call(cmd, env=env)
+    except subprocess.CalledProcessError:
+        print("tests in modes:", priv, "failed")
+        sys.exit(1)
diff --git a/test/multiple-output-types.bats b/test/multiple-output-types.bats
index 81ea74dbe..1896312c6 100644
--- a/test/multiple-output-types.bats
+++ b/test/multiple-output-types.bats
@@ -9,8 +9,6 @@ function teardown() {
 }
 
 @test "multiple layer type outputs work" {
-    require_storage overlay
-
     cat > stacker.yaml < stacker.yaml < stacker.yaml < stacker.yaml < stacker.yaml < stacker.yaml < stacker.yaml << EOF
@@ -56,7 +54,6 @@ EOF
 }
 
 @test "overlay_dirs dest works" {
-    require_storage overlay
     mkdir dir_to_overlay
     touch dir_to_overlay/file
     cat > stacker.yaml << EOF
@@ -74,7 +71,6 @@ EOF
 }
 
 @test "overlay_dirs cache works" {
-    require_storage overlay
     mkdir dir_to_overlay
     touch dir_to_overlay/file
     cat > stacker.yaml << EOF
@@ -100,7 +96,6 @@ EOF
 }
 
 @test "overlay_dirs don't preserve ownership" {
-    require_storage overlay
     mkdir dir_to_overlay
     touch dir_to_overlay/file
     touch dir_to_overlay/file2
diff --git a/test/publish.bats b/test/publish.bats
index 4441338bb..e444bc65c 100644
--- a/test/publish.bats
+++ b/test/publish.bats
@@ -133,8 +133,6 @@ function teardown() {
 }
 
 @test "publish multiple layer types" {
-    require_storage overlay
-
     stacker --storage-type overlay build -f ocibuilds/sub4/stacker.yaml --layer-type tar --layer-type squashfs
     stacker --storage-type overlay publish -f ocibuilds/sub4/stacker.yaml --layer-type tar --layer-type squashfs --url oci:oci_publish --tag test1
diff --git a/test/squashfs.bats b/test/squashfs.bats
index e7bf7e864..7cbfb3698 100644
--- a/test/squashfs.bats
+++ b/test/squashfs.bats
@@ -75,33 +75,7 @@ EOF
     stacker build --layer-type=squashfs
 }
 
-# the way we generate the underlying squashfs layer is different between btrfs
-# and overlay, in that we glob the run: delta in with the base layer in btrfs,
-# but not in overlay. so these two tests look different.
-@test "squashfs layer support (btrfs)" {
-    require_storage btrfs
-    cat > stacker.yaml < stacker.yaml < stacker.yaml < stacker.yaml < /message
-    build_only: true
-
-myroot:
-    from:
-        type: built
-        tag: base
-    run: |
-        echo "foo bar" > /message
-EOF
-    stacker build --layer-type=squashfs
-
-    manifest=$(cat oci/index.json | jq -r .manifests[0].digest | cut -f2 -d:)
-    layer0=$(cat oci/blobs/sha256/$manifest | jq -r .layers[0].digest | cut -f2 -d:)
-
-    cat oci/blobs/sha256/$manifest | jq -r .layers
-
-    mkdir layer0
-    mount -t squashfs oci/blobs/sha256/$layer0 layer0
-    cat layer0/message
-    [ "$(cat layer0/message)" == "foo bar" ]
-}
-
 @test "built type with squashfs build-only base works (overlay)" {
-    require_storage overlay
     mkdir -p .stacker/layer-bases
     chmod 777 .stacker/layer-bases
     image_copy oci:$CENTOS_OCI oci:.stacker/layer-bases/oci:centos
diff --git a/test/tmpfs.bats b/test/tmpfs.bats
index 0c8b60f9d..4bca70595 100644
--- a/test/tmpfs.bats
+++ b/test/tmpfs.bats
@@ -9,8 +9,6 @@ function teardown() {
 }
 
 @test "stacker works in a tmpfs" {
-    require_storage overlay
-
     cat > stacker.yaml < test
     # always run as privileged...
-    run "${ROOT_DIR}/stacker" --storage-type=$STORAGE_TYPE --debug build
+    run "${ROOT_DIR}/stacker" --debug build
     echo $output
     [ "$status" -ne 0 ]
 }
 
 @test "underlying layer output conversion happens in a user namespace" {
-    require_storage overlay
-
     cat > stacker.yaml <
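For illustration only, not part of the patch: with the storage-type dimension removed, test/main.py iterates just the privilege levels, so a local run might look like the sketch below. Flag names and defaults are taken from the test/main.py hunk above; whether the runner needs to be invoked as root depends on your environment and is not prescribed here.

    # run every test/*.bats file, one pass per privilege level, default job count
    ./test/main.py

    # or narrow the run: one privilege level, two bats jobs, selected files
    ./test/main.py --privilege-level=unpriv --jobs 2 test/clean.bats test/squashfs.bats

    # each pass exports PRIVILEGE_LEVEL for the bats helpers and then runs:
    #   bats --jobs <N> -t <files...>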