Add preliminary hardlink support for review.

Thanks to Tim Riemenschneider <git@tim-riemenschneider.de> for
pointing out a compatibility problem (with older versions of Python)
in an earlier version of this patch.

Signed-off-by: Rob Browning <rlb@defaultvalue.org>
Reviewed-by: Zoran Zaric <zz@zoranzaric.de>
commit 7ccf0a9f15b5fc4bc0e257cda7658b72bd04b8a1 (parent f7830d6)
Authored by Rob Browning (rlbdv)
17 DESIGN
@@ -387,7 +387,8 @@ Each .bupm entry contains a variable length sequence of records
containing the metadata for the corresponding path. Each record
records one type of metadata. Current types include a common record
type (containing the normal stat information), a symlink target type,
-a POSIX1e ACL type, etc. See metadata.py for the complete list.
+a hardlink target type, a POSIX1e ACL type, etc. See metadata.py for
+the complete list.
The .bupm file is optional, and when it's missing, bup will behave as
it did before the addition of metadata, and restore files using the
@@ -397,6 +398,20 @@ The nice thing about this design is that you can walk through each
file in a tree just by opening the tree and the .bupmeta contents, and
iterating through both at the same time.
+Bup supports recording and restoring hardlinks, and it does so by
+tracking sets of paths that correspond to the same dev/inode pair when
+indexing. This information is stored in an optional file with the
+same name as the index, but ending with ".hlink".
+
+If there are multiple index runs, and the hardlinks change, bup will
+notice this (within whatever subtree it is asked to reindex) and
+update the .hlink information accordingly.
+
+The current hardlink implementation will refuse to link to any file
+that resides outside the restore tree, and if the restore tree spans a
+different set of filesystems than the save tree, complete sets of
+hardlinks may not be restored.
+
Filesystem Interaction
======================
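
To make the design notes above concrete: the .hlink file boils down to a
pickled mapping from a "dev:ino" key to the list of indexed paths that
share that inode. A minimal sketch of the grouping idea (a hypothetical
helper for illustration, not code from this commit):

    import os, stat

    def group_hardlinks(paths):
        # Group paths by (st_dev, st_ino); any list with more than one
        # entry is a hardlink set that bup needs to track.
        sets = {}
        for path in paths:
            st = os.lstat(path)
            if st.st_nlink > 1 and not stat.S_ISDIR(st.st_mode):
                key = '%s:%s' % (st.st_dev, st.st_ino)
                sets.setdefault(key, []).append(path)
        return sets
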
11 Documentation/bup-restore.md
@@ -55,6 +55,17 @@ current system. The use of user and group names can be disabled via
example), and as a special case, a uid or gid of 0 will never be
remapped by name.
+Hardlinks will also be restored when possible, but at least currently,
+no links will be made to targets outside the restore tree, and if the
+restore tree spans a different arrangement of filesystems from the
+save tree, some hardlink sets may not be completely restored.
+
+Also note that changing hardlink sets on disk between index and save
+may produce unexpected results. With the current implementation, bup
+will attempt to recreate any given hardlink set as it existed at index
+time, even if all of the files in the set weren't still hardlinked
+(but were otherwise identical) at save time.
+
Note that during the restoration process, access to data within the
restore tree may be more permissive than it was in the original
source. Unless security is irrelevant, you must restore to a private
17 cmd/index-cmd.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
import sys, stat, time, os
-from bup import options, git, index, drecurse
+from bup import options, git, index, drecurse, hlinkdb
from bup.helpers import *
from bup.hashsplit import GIT_MODE_TREE, GIT_MODE_FILE
@@ -55,6 +55,8 @@ def update_index(top, excluded_paths):
rig = IterHelper(ri.iter(name=top))
tstart = int(time.time())
+ hlinks = hlinkdb.HLinkDB(indexfile + '.hlink')
+
hashgen = None
if opt.fake_valid:
def hashgen(name):
@@ -76,8 +78,14 @@ def hashgen(name):
if rig.cur.exists():
rig.cur.set_deleted()
rig.cur.repack()
+ if rig.cur.nlink > 1 and not stat.S_ISDIR(rig.cur.mode):
+ hlinks.del_path(rig.cur.name)
rig.next()
if rig.cur and rig.cur.name == path: # paths that already existed
+ if not stat.S_ISDIR(rig.cur.mode) and rig.cur.nlink > 1:
+ hlinks.del_path(rig.cur.name)
+ if not stat.S_ISDIR(pst.st_mode) and pst.st_nlink > 1:
+ hlinks.add_path(path, pst.st_dev, pst.st_ino)
rig.cur.from_stat(pst, tstart)
if not (rig.cur.flags & index.IX_HASHVALID):
if hashgen:
@@ -89,8 +97,13 @@ def hashgen(name):
rig.next()
else: # new paths
wi.add(path, pst, hashgen = hashgen)
+ if not stat.S_ISDIR(pst.st_mode) and pst.st_nlink > 1:
+ hlinks.add_path(path, pst.st_dev, pst.st_ino)
+
progress('Indexing: %d, done.\n' % total)
+ hlinks.prepare_save()
+
if ri.exists():
ri.save()
wi.flush()
@@ -114,6 +127,8 @@ def hashgen(name):
else:
wi.close()
+ hlinks.commit_save()
+
optspec = """
bup index <-p|m|s|u> [options...] <filenames...>
89 cmd/restore-cmd.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-import sys, stat
+import errno, sys, stat
from bup import options, git, metadata, vfs
from bup.helpers import *
@@ -54,6 +54,67 @@ def create_path(n, fullname, meta):
elif stat.S_ISLNK(n.mode):
os.symlink(n.readlink(), fullname)
+# Track a list of (restore_path, vfs_path, meta) triples for each path
+# we've written for a given hardlink_target. This allows us to handle
+# the case where we restore a set of hardlinks out of order (with
+# respect to the original save call(s)) -- i.e. when we don't restore
+# the hardlink_target path first. This data also allows us to attempt
+# to handle other situations like hardlink sets that change on disk
+# during a save, or between index and save.
+targets_written = {}
+
+def hardlink_compatible(target_path, target_vfs_path, target_meta,
+ src_node, src_meta):
+ global top
+ if not os.path.exists(target_path):
+ return False
+ target_node = top.lresolve(target_vfs_path)
+ if src_node.mode != target_node.mode \
+ or src_node.atime != target_node.atime \
+ or src_node.mtime != target_node.mtime \
+ or src_node.ctime != target_node.ctime \
+ or src_node.hash != target_node.hash:
+ return False
+ if not src_meta.same_file(target_meta):
+ return False
+ return True
+
+
+def hardlink_if_possible(fullname, node, meta):
+ """Find a suitable hardlink target, link to it, and return true,
+ otherwise return false."""
+ # Expect the caller to handle restoring the metadata if
+ # hardlinking isn't possible.
+ global targets_written
+ target = meta.hardlink_target
+ target_versions = targets_written.get(target)
+ if target_versions:
+ # Check every path in the set that we've written so far for a match.
+ for (target_path, target_vfs_path, target_meta) in target_versions:
+ if hardlink_compatible(target_path, target_vfs_path, target_meta,
+ node, meta):
+ try:
+ os.link(target_path, fullname)
+ return True
+ except OSError, e:
+ if e.errno != errno.EXDEV:
+ raise
+ else:
+ target_versions = []
+ targets_written[target] = target_versions
+ full_vfs_path = node.fullname()
+ target_versions.append((fullname, full_vfs_path, meta))
+ return False
+
+
+def write_file_content(fullname, n):
+ outf = open(fullname, 'wb')
+ try:
+ for b in chunkyreader(n.open()):
+ outf.write(b)
+ finally:
+ outf.close()
+
def do_node(top, n, meta=None):
# meta will be None for dirs, and when there is no .bupm (i.e. no metadata)
@@ -69,22 +130,18 @@ def do_node(top, n, meta=None):
meta_stream = mfile.open()
meta = metadata.Metadata.read(meta_stream)
print_info(n, fullname)
- create_path(n, fullname, meta)
- # Write content if appropriate (only regular files have content).
- plain_file = False
- if meta:
- plain_file = stat.S_ISREG(meta.mode)
- else:
- plain_file = stat.S_ISREG(n.mode)
+ created_hardlink = False
+ if meta and meta.hardlink_target:
+ created_hardlink = hardlink_if_possible(fullname, n, meta)
- if plain_file:
- outf = open(fullname, 'wb')
- try:
- for b in chunkyreader(n.open()):
- outf.write(b)
- finally:
- outf.close()
+ if not created_hardlink:
+ create_path(n, fullname, meta)
+ if meta:
+ if stat.S_ISREG(meta.mode):
+ write_file_content(fullname, n)
+ elif stat.S_ISREG(n.mode):
+ write_file_content(fullname, n)
total_restored += 1
plog('Restoring: %d\r' % total_restored)
@@ -94,7 +151,7 @@ def do_node(top, n, meta=None):
if meta_stream and not stat.S_ISDIR(sub.mode):
m = metadata.Metadata.read(meta_stream)
do_node(top, sub, m)
- if meta:
+ if meta and not created_hardlink:
meta.apply_to_path(fullname,
restore_numeric_ids=opt.numeric_ids)
finally:
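
The targets_written bookkeeping above is what lets restore handle hardlink
sets out of order: each hardlink_target maps to every version of that file
restored so far, and a later member of the set links to the first
compatible one. Stripped of the metadata checks (same_file and the
node-attribute comparison), the core loop reduces to roughly this sketch
(entries and create_fn are hypothetical stand-ins):

    import errno, os

    def restore_with_hardlinks(entries):
        # entries: (dest_path, target_key, create_fn) triples, where
        # target_key identifies the original hardlink set.
        written = {}  # target_key -> paths already restored for that set
        for dest, key, create in entries:
            for candidate in written.get(key, []):
                try:
                    os.link(candidate, dest)
                    break
                except OSError, e:
                    # EXDEV (cross-device link) just rules out this
                    # candidate; any other error is fatal.
                    if e.errno != errno.EXDEV:
                        raise
            else:
                create(dest)  # no usable target yet; write a fresh copy
            written.setdefault(key, []).append(dest)
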
18 cmd/save-cmd.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
import sys, stat, time, math
-from bup import hashsplit, git, options, index, client, metadata
+from bup import hashsplit, git, options, index, client, metadata, hlinkdb
from bup.helpers import *
from bup.hashsplit import GIT_MODE_TREE, GIT_MODE_FILE, GIT_MODE_SYMLINK
@@ -173,6 +173,7 @@ def progress_report(n):
indexfile = opt.indexfile or git.repo('bupindex')
r = index.Reader(indexfile)
+hlink_db = hlinkdb.HLinkDB(indexfile + '.hlink')
def already_saved(ent):
return ent.is_valid() and w.exists(ent.sha) and ent.sha
@@ -183,6 +184,11 @@ def wantrecurse_pre(ent):
def wantrecurse_during(ent):
return not already_saved(ent) or ent.sha_missing()
+def find_hardlink_target(hlink_db, ent):
+ if hlink_db and not stat.S_ISDIR(ent.mode) and ent.nlink > 1:
+ link_paths = hlink_db.node_paths(ent.dev, ent.ino)
+ if link_paths:
+ return link_paths[0]
total = ftotal = 0
if opt.progress:
@@ -283,7 +289,10 @@ def wantrecurse_during(ent):
git_info = (ent.gitmode, git_name, id)
shalists[-1].append(git_info)
sort_key = git.shalist_item_sort_key((ent.mode, file, id))
- metalists[-1].append((sort_key, metadata.from_path(ent.name)))
+ hlink = find_hardlink_target(hlink_db, ent)
+ metalists[-1].append((sort_key,
+ metadata.from_path(ent.name,
+ hardlink_target=hlink)))
else:
if stat.S_ISREG(ent.mode):
try:
@@ -323,7 +332,10 @@ def wantrecurse_during(ent):
git_info = (mode, git_name, id)
shalists[-1].append(git_info)
sort_key = git.shalist_item_sort_key((ent.mode, file, id))
- metalists[-1].append((sort_key, metadata.from_path(ent.name)))
+ hlink = find_hardlink_target(hlink_db, ent)
+ metalists[-1].append((sort_key,
+ metadata.from_path(ent.name,
+ hardlink_target=hlink)))
if exists and wasmissing:
count += oldsize
subcount = 0
115 lib/bup/hlinkdb.py
@@ -0,0 +1,115 @@
+import cPickle, errno, os, tempfile
+
+class Error(Exception):
+ pass
+
+class HLinkDB:
+ def __init__(self, filename):
+ # Map a "dev:ino" node to a list of paths associated with that node.
+ self._node_paths = {}
+ # Map a path to a "dev:ino" node.
+ self._path_node = {}
+ self._filename = filename
+ self._save_prepared = None
+ self._tmpname = None
+ f = None
+ try:
+ f = open(filename, 'r')
+ except IOError, e:
+ if e.errno == errno.ENOENT:
+ pass
+ else:
+ raise
+ if f:
+ try:
+ self._node_paths = cPickle.load(f)
+ finally:
+ f.close()
+ f = None
+ # Set up the reverse hard link index.
+ for node, paths in self._node_paths.iteritems():
+ for path in paths:
+ self._path_node[path] = node
+
+ def prepare_save(self):
+ """ Commit all of the relevant data to disk. Do as much work
+ as possible without actually making the changes visible."""
+ if self._save_prepared:
+ raise Error('save of %r already in progress' % self._filename)
+ if self._node_paths:
+ (dir, name) = os.path.split(self._filename)
+ (ffd, self._tmpname) = tempfile.mkstemp('.tmp', name, dir)
+ try:
+ f = os.fdopen(ffd, 'wb', 65536)
+ except:
+ os.close(ffd)
+ raise
+ try:
+ cPickle.dump(self._node_paths, f, 2)
+ except:
+ f.close()
+ os.unlink(self._tmpname)
+ self._tmpname = None
+ raise
+ else:
+ f.close()
+ f = None
+ self._save_prepared = True
+
+ def commit_save(self):
+ if not self._save_prepared:
+ raise Error('cannot commit save of %r; no save prepared'
+ % self._filename)
+ if self._tmpname:
+ os.rename(self._tmpname, self._filename)
+ self._tmpname = None
+ else: # No data -- delete _filename if it exists.
+ try:
+ os.unlink(self._filename)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ pass
+ else:
+ raise
+ self._save_prepared = None
+
+ def abort_save(self):
+ if self._tmpname:
+ os.unlink(self._tmpname)
+ self._tmpname = None
+
+ def __del__(self):
+ self.abort_save()
+
+ def add_path(self, path, dev, ino):
+ # Assume path is new.
+ node = '%s:%s' % (dev, ino)
+ self._path_node[path] = node
+ link_paths = self._node_paths.get(node)
+ if link_paths and path not in link_paths:
+ link_paths.append(path)
+ else:
+ self._node_paths[node] = [path]
+
+ def _del_node_path(self, node, path):
+ link_paths = self._node_paths[node]
+ link_paths.remove(path)
+ if not link_paths:
+ del self._node_paths[node]
+
+ def change_path(self, path, new_dev, new_ino):
+ prev_node = self._path_node.get(path)
+ if prev_node:
+ self._del_node_path(prev_node, path)
+ self.add_path(path, new_dev, new_ino)
+
+ def del_path(self, path):
+ # Path may not be in db (if updating a pre-hardlink support index).
+ node = self._path_node.get(path)
+ if node:
+ self._del_node_path(node, path)
+ del self._path_node[path]
+
+ def node_paths(self, dev, ino):
+ node = '%s:%s' % (dev, ino)
+ # May be None if the node isn't known (e.g. a pre-hardlink index).
+ return self._node_paths.get(node)
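
The prepare_save/commit_save split above gives callers a two-phase save:
the pickled data is first written to a tempfile in the same directory,
and only renamed over the real .hlink file once the caller is ready, so
an interrupted run leaves the previous file intact. The intended
lifecycle, as exercised by cmd/index-cmd.py (paths illustrative):

    import os, stat
    from bup import hlinkdb

    db = hlinkdb.HLinkDB('/repo/bupindex.hlink')  # loads existing data, if any
    st = os.lstat('/src/hardlink-1')
    if st.st_nlink > 1 and not stat.S_ISDIR(st.st_mode):
        db.add_path('/src/hardlink-1', st.st_dev, st.st_ino)
    db.prepare_save()   # pickle everything to a tempfile beside the db file
    # ... save the index itself here ...
    db.commit_save()    # rename the tempfile into place (or unlink if no data)
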
76 lib/bup/metadata.py
@@ -4,13 +4,12 @@
#
# This code is covered under the terms of the GNU Library General
# Public License as described in the bup LICENSE file.
-import errno, os, sys, stat, time, pwd, grp, struct, re
+import errno, os, sys, stat, time, pwd, grp
from cStringIO import StringIO
from bup import vint, xstat
from bup.drecurse import recursive_dirlist
from bup.helpers import add_error, mkdirp, log, is_superuser
-from bup.xstat import utime, lutime, lstat
-import bup._helpers as _helpers
+from bup.xstat import utime, lutime
try:
import xattr
@@ -165,6 +164,7 @@ def _clean_up_extract_path(p):
_rec_tag_nfsv4_acl = 5 # intended to supplant posix1e acls?
_rec_tag_linux_attr = 6 # lsattr(1) chattr(1)
_rec_tag_linux_xattr = 7 # getfattr(1) setfattr(1)
+_rec_tag_hardlink_target = 8 # hard link target path
class ApplyError(Exception):
@@ -183,6 +183,9 @@ class Metadata:
# "bup save", for example, as a placeholder in cases where
# from_path() fails.
+ # NOTE: if any relevant fields are added or removed, be sure to
+ # update same_file() below.
+
## Common records
# Timestamps are (sec, ns), relative to 1970-01-01 00:00:00, ns
@@ -208,6 +211,17 @@ def _add_common(self, path, st):
pass
self.mode = st.st_mode
+ def _same_common(self, other):
+ """Return true or false to indicate similarity in the hardlink sense."""
+ return self.uid == other.uid \
+ and self.gid == other.gid \
+ and self.rdev == other.rdev \
+ and self.atime == other.atime \
+ and self.mtime == other.mtime \
+ and self.ctime == other.ctime \
+ and self.user == other.user \
+ and self.group == other.group
+
def _encode_common(self):
if not self.mode:
return None
@@ -411,6 +425,22 @@ def _load_symlink_target_rec(self, port):
self.symlink_target = vint.read_bvec(port)
+ ## Hardlink targets
+
+ def _add_hardlink_target(self, target):
+ self.hardlink_target = target
+
+ def _same_hardlink_target(self, other):
+ """Return true or false to indicate similarity in the hardlink sense."""
+ return self.hardlink_target == other.hardlink_target
+
+ def _encode_hardlink_target(self):
+ return self.hardlink_target
+
+ def _load_hardlink_target_rec(self, port):
+ self.hardlink_target = vint.read_bvec(port)
+
+
## POSIX1e ACL records
# Recorded as a list:
@@ -433,6 +463,10 @@ def _add_posix1e_acl(self, path, st):
if e.errno != errno.EOPNOTSUPP:
raise
+ def _same_posix1e_acl(self, other):
+ """Return true or false to indicate similarity in the hardlink sense."""
+ return self.posix1e_acl == other.posix1e_acl
+
def _encode_posix1e_acl(self):
# Encode as two strings (w/default ACL string possibly empty).
if self.posix1e_acl:
@@ -506,6 +540,10 @@ def _add_linux_attr(self, path, st):
else:
raise
+ def _same_linux_attr(self, other):
+ """Return true or false to indicate similarity in the hardlink sense."""
+ return self.linux_attr == other.linux_attr
+
def _encode_linux_attr(self):
if self.linux_attr:
return vint.pack('V', self.linux_attr)
@@ -541,6 +579,10 @@ def _add_linux_xattr(self, path, st):
if e.errno != errno.EOPNOTSUPP:
raise
+ def _same_linux_xattr(self, other):
+ """Return true or false to indicate similarity in the hardlink sense."""
+ return self.linux_xattr == other.linux_xattr
+
def _encode_linux_xattr(self):
if self.linux_xattr:
result = vint.pack('V', len(self.linux_xattr))
@@ -595,6 +637,7 @@ def __init__(self):
self.path = None
self.size = None
self.symlink_target = None
+ self.hardlink_target = None
self.linux_attr = None
self.linux_xattr = None
self.posix1e_acl = None
@@ -603,7 +646,10 @@ def __init__(self):
def write(self, port, include_path=True):
records = include_path and [(_rec_tag_path, self._encode_path())] or []
records.extend([(_rec_tag_common, self._encode_common()),
- (_rec_tag_symlink_target, self._encode_symlink_target()),
+ (_rec_tag_symlink_target,
+ self._encode_symlink_target()),
+ (_rec_tag_hardlink_target,
+ self._encode_hardlink_target()),
(_rec_tag_posix1e_acl, self._encode_posix1e_acl()),
(_rec_tag_linux_attr, self._encode_linux_attr()),
(_rec_tag_linux_xattr, self._encode_linux_xattr())])
@@ -637,6 +683,8 @@ def read(port):
result._load_common_rec(port)
elif tag == _rec_tag_symlink_target:
result._load_symlink_target_rec(port)
+ elif tag == _rec_tag_hardlink_target:
+ result._load_hardlink_target_rec(port)
elif tag == _rec_tag_posix1e_acl:
result._load_posix1e_acl_rec(port)
elif tag ==_rec_tag_nfsv4_acl:
@@ -664,7 +712,7 @@ def apply_to_path(self, path=None, restore_numeric_ids=False):
if not path:
path = self.path
if not path:
- raise Exception('Metadata.apply_to_path() called with no path');
+ raise Exception('Metadata.apply_to_path() called with no path')
if not self._recognized_file_type():
add_error('not applying metadata to "%s"' % path
+ ' with unrecognized mode "0x%x"\n' % self.mode)
@@ -678,8 +726,20 @@ def apply_to_path(self, path=None, restore_numeric_ids=False):
except ApplyError, e:
add_error(e)
+ def same_file(self, other):
+ """Compare this to other for equivalency. Return true if
+ their information implies they could represent the same file
+ on disk, in the hardlink sense. Assume they're both regular
+ files."""
+ return self._same_common(other) \
+ and self._same_hardlink_target(other) \
+ and self._same_posix1e_acl(other) \
+ and self._same_linux_attr(other) \
+ and self._same_linux_xattr(other)
+
-def from_path(path, statinfo=None, archive_path=None, save_symlinks=True):
+def from_path(path, statinfo=None, archive_path=None,
+ save_symlinks=True, hardlink_target=None):
result = Metadata()
result.path = archive_path
st = statinfo or xstat.lstat(path)
@@ -687,6 +747,7 @@ def from_path(path, statinfo=None, archive_path=None, save_symlinks=True):
result._add_common(path, st)
if save_symlinks:
result._add_symlink_target(path, st)
+ result._add_hardlink_target(hardlink_target)
result._add_posix1e_acl(path, st)
result._add_linux_attr(path, st)
result._add_linux_xattr(path, st)
@@ -870,7 +931,8 @@ def display_archive(file):
for meta in _ArchiveIterator(file):
if not meta.path:
print >> sys.stderr, \
- 'bup: no metadata path, but asked to only display path (increase verbosity?)'
+ 'bup: no metadata path, but asked to only display path', \
+ '(increase verbosity?)'
sys.exit(1)
print meta.path
48 t/hardlink-sets
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+import os, stat, sys
+
+# Print the full paths of all the files in each hardlink set
+# underneath one of the paths. Separate sets with a blank line, sort
+# the paths within each set, and sort the sets by their first path.
+
+def usage():
+ print >> sys.stderr, "Usage: hardlink-sets <paths ...>"
+
+if len(sys.argv) < 2:
+ usage()
+ sys.exit(1)
+
+def on_walk_error(e):
+ raise e
+
+hardlink_set = {}
+
+for p in sys.argv[1:]:
+ for root, dirs, files in os.walk(p, onerror = on_walk_error):
+ for filename in files:
+ full_path = os.path.join(root, filename)
+ st = os.lstat(full_path)
+ if not stat.S_ISDIR(st.st_mode):
+ node = '%s:%s' % (st.st_dev, st.st_ino)
+ link_paths = hardlink_set.get(node)
+ if link_paths:
+ link_paths.append(full_path)
+ else:
+ hardlink_set[node] = [full_path]
+
+# Sort the link sets.
+for node, link_paths in hardlink_set.items():
+ link_paths.sort()
+
+first_set = True
+for link_paths in sorted(hardlink_set.values(), key = lambda x : x[0]):
+ if len(link_paths) > 1:
+ if first_set:
+ first_set = False
+ else:
+ print
+ for p in sorted(link_paths):
+ print p
+
+sys.exit(0)
133 t/test-meta.sh
@@ -9,6 +9,11 @@ bup()
"$TOP/bup" "$@"
}
+hardlink-sets()
+{
+ "$TOP/t/hardlink-sets" "$@"
+}
+
# Very simple metadata tests -- create a test tree then check that bup
# meta can reproduce the metadata correctly (according to bup xstat)
# via create, extract, start-extract, and finish-extract. The current
@@ -129,6 +134,15 @@ setup-test-tree()
mkdir -p "$TOP/bupmeta.tmp/src"
cp -pPR Documentation cmd lib t "$TOP/bupmeta.tmp"/src
+ # Add some hard links for the general tests.
+ (
+ cd "$TOP/bupmeta.tmp"/src
+ touch hardlink-target
+ ln hardlink-target hardlink-1
+ ln hardlink-target hardlink-2
+ ln hardlink-target hardlink-3
+ )
+
# Regression test for metadata sort order. Previously, these two
# entries would sort in the wrong order because the metadata
# entries were being sorted by mangled name, but the index isn't.
@@ -162,6 +176,125 @@ WVSTART 'metadata save/restore (general)'
test-src-save-restore
)
+setup-hardlink-test()
+{
+ (
+ cd "$TOP/bupmeta.tmp"
+ rm -rf src src.bup
+ mkdir src src.bup
+ WVPASS bup init
+ )
+}
+
+hardlink-test-run-restore()
+{
+ force-delete src-restore
+ mkdir src-restore
+ WVPASS bup restore -C src-restore "/src/latest$(pwd)/"
+ WVPASS test -d src-restore/src
+}
+
+# Test hardlinks more carefully.
+WVSTART 'metadata save/restore (hardlinks)'
+(
+ set -e
+ set -x
+ export BUP_DIR="$TOP/bupmeta.tmp/src.bup"
+ force-delete "$TOP/bupmeta.tmp"
+ mkdir -p "$TOP/bupmeta.tmp"
+
+ cd "$TOP/bupmeta.tmp"
+
+ # Test trivial case - single hardlink.
+ setup-hardlink-test
+ (
+ cd "$TOP/bupmeta.tmp"/src
+ touch hardlink-target
+ ln hardlink-target hardlink-1
+ )
+ WVPASS bup index src
+ WVPASS bup save -t -n src src
+ hardlink-test-run-restore
+ WVPASS compare-trees src/ src-restore/src/
+
+ # Test hardlink changes between index runs.
+ #
+ setup-hardlink-test
+ cd "$TOP/bupmeta.tmp"/src
+ touch hardlink-target-a
+ touch hardlink-target-b
+ # Note: hardlink-b-1 starts out linked to hardlink-target-a and is
+ # re-linked to hardlink-target-b between the two index runs below.
+ ln hardlink-target-a hardlink-b-1
+ ln hardlink-target-a hardlink-a-1
+ cd ..
+ WVPASS bup index -vv src
+ rm src/hardlink-b-1
+ ln src/hardlink-target-b src/hardlink-b-1
+ WVPASS bup index -vv src
+ WVPASS bup save -t -n src src
+ hardlink-test-run-restore
+ echo ./src/hardlink-a-1 > hardlink-sets.expected
+ echo ./src/hardlink-target-a >> hardlink-sets.expected
+ echo >> hardlink-sets.expected
+ echo ./src/hardlink-b-1 >> hardlink-sets.expected
+ echo ./src/hardlink-target-b >> hardlink-sets.expected
+ (cd src-restore && hardlink-sets .) > hardlink-sets.restored
+ WVPASS diff -u hardlink-sets.expected hardlink-sets.restored
+
+ # Test hardlink changes between index and save -- hardlink set [a
+ # b c d] changes to [a b] [c d]. At least right now bup should
+ # notice and recreate the latter.
+ setup-hardlink-test
+ cd "$TOP/bupmeta.tmp"/src
+ touch a
+ ln a b
+ ln a c
+ ln a d
+ cd ..
+ WVPASS bup index -vv src
+ rm src/c src/d
+ touch src/c
+ ln src/c src/d
+ WVPASS bup save -t -n src src
+ hardlink-test-run-restore
+ echo ./src/a > hardlink-sets.expected
+ echo ./src/b >> hardlink-sets.expected
+ echo >> hardlink-sets.expected
+ echo ./src/c >> hardlink-sets.expected
+ echo ./src/d >> hardlink-sets.expected
+ (cd src-restore && hardlink-sets .) > hardlink-sets.restored
+ WVPASS diff -u hardlink-sets.expected hardlink-sets.restored
+
+ # Test that we don't link outside restore tree.
+ setup-hardlink-test
+ cd "$TOP/bupmeta.tmp"
+ mkdir src/a src/b
+ touch src/a/1
+ ln src/a/1 src/b/1
+ WVPASS bup index -vv src
+ WVPASS bup save -t -n src src
+ force-delete src-restore
+ mkdir src-restore
+ WVPASS bup restore -C src-restore "/src/latest$(pwd)/src/a/"
+ WVPASS test -e src-restore/1
+ echo -n > hardlink-sets.expected
+ (cd src-restore && hardlink-sets .) > hardlink-sets.restored
+ WVPASS diff -u hardlink-sets.expected hardlink-sets.restored
+
+ # Test that we do link within separate sub-trees.
+ setup-hardlink-test
+ cd "$TOP/bupmeta.tmp"
+ mkdir src/a src/b
+ touch src/a/1
+ ln src/a/1 src/b/1
+ WVPASS bup index -vv src/a src/b
+ WVPASS bup save -t -n src src/a src/b
+ hardlink-test-run-restore
+ echo ./src/a/1 > hardlink-sets.expected
+ echo ./src/b/1 >> hardlink-sets.expected
+ (cd src-restore && hardlink-sets .) > hardlink-sets.restored
+ WVPASS diff -u hardlink-sets.expected hardlink-sets.restored
+)
+
WVSTART 'meta --edit'
(
force-delete "$TOP/bupmeta.tmp"