From b6e34519bfba5aea182b078e4153fe922bae479c Mon Sep 17 00:00:00 2001
From: achingbrain
Date: Tue, 16 Apr 2019 19:38:30 +0100
Subject: [PATCH] feat: convert to async/await

BREAKING CHANGES:

1. Everything is now async/await
2. No more callbacks, Readable Streams or Pull Streams
3. `stat` and `ls` commands return `cid` objects instead of string hashes
4. `stat` and `ls` commands return all fields; the `hash`, `long`, etc. options are now ignored
---
 package.json | 26 +-
 src/cli/flush.js | 2 +-
 src/cli/ls.js | 2 +-
 src/core/cp.js | 300 ++++--------
 src/core/flush.js | 29 +-
 src/core/index.js | 47 +-
 src/core/ls-pull-stream.js | 144 ------
 src/core/ls-readable-stream.js | 10 -
 src/core/ls.js | 61 ++-
 src/core/mkdir.js | 199 ++++----
 src/core/mv.js | 47 +-
 src/core/read-pull-stream.js | 74 ---
 src/core/read-readable-stream.js | 10 -
 src/core/read.js | 46 +-
 src/core/rm.js | 112 +++--
 src/core/stat.js | 161 +++---
 src/core/utils/add-link.js | 363 ++++++--------
 src/core/utils/apply-default-options.js | 53 ++
 src/core/utils/count-stream-bytes.js | 17 -
 src/core/utils/create-lock.js | 48 +-
 src/core/utils/create-node.js | 31 +-
 src/core/utils/dag-pb.js | 30 ++
 src/core/utils/format-cid.js | 15 -
 src/core/utils/hamt-utils.js | 260 +++++-----
 src/core/utils/index.js | 29 --
 src/core/utils/limit-stream-bytes.js | 24 -
 src/core/utils/load-node.js | 21 -
 src/core/utils/remove-link.js | 209 ++++----
 src/core/utils/to-async-iterator.js | 91 ++++
 src/core/utils/to-mfs-path.js | 168 +++----
 src/core/utils/to-pull-source.js | 68 ---
 src/core/utils/to-sources-and-destination.js | 22 +-
 src/core/utils/to-sources.js | 14 +-
 src/core/utils/to-trail.js | 69 +--
 src/core/utils/update-mfs-root.js | 14 +-
 src/core/utils/update-tree.js | 86 ++--
 src/core/utils/with-mfs-root.js | 69 ++-
 src/core/utils/zeros.js | 31 --
 src/core/write.js | 466 +++++++-----------
 src/http/read.js | 3 +-
 src/http/write.js | 15 +-
 src/index.js | 2 +-
 test/cp.spec.js | 71 +--
 test/flush.spec.js | 4 +-
 test/helpers/cid-at-path.js | 17 +-
 test/helpers/collect-leaf-cids.js | 40 --
 test/helpers/constants.js | 6 +
 test/helpers/create-mfs.js | 56 +++
 test/helpers/create-shard.js | 31 +-
 test/helpers/create-sharded-directory.js | 1 +
 test/helpers/index.js | 48 --
 test/helpers/print-tree.js | 14 +-
 test/helpers/random-bytes.js | 21 -
 test/helpers/stream-to-array.js | 11 +
 test/helpers/stream-to-buffer.js | 11 +
 test/helpers/traverse-leaf-nodes.js | 20 +
 test/ls.spec.js | 486 +++++++------
 test/mkdir.spec.js | 29 +-
 test/mv.spec.js | 19 +-
 test/read.spec.js | 301 +++++-------
 test/rm.spec.js | 70 ++-
 test/stat.spec.js | 88 +---
 test/write.spec.js | 230 ++++-----
 63 files changed, 2024 insertions(+), 3038 deletions(-)
 delete mode 100644 src/core/ls-pull-stream.js
 delete mode 100644 src/core/ls-readable-stream.js
 delete mode 100644 src/core/read-pull-stream.js
 delete mode 100644 src/core/read-readable-stream.js
 create mode 100644 src/core/utils/apply-default-options.js
 delete mode 100644 src/core/utils/count-stream-bytes.js
 create mode 100644 src/core/utils/dag-pb.js
 delete mode 100644 src/core/utils/format-cid.js
 delete mode 100644 src/core/utils/index.js
 delete mode 100644 src/core/utils/limit-stream-bytes.js
 delete mode 100644 src/core/utils/load-node.js
 create mode 100644 src/core/utils/to-async-iterator.js
 delete mode 100644 src/core/utils/to-pull-source.js
 delete mode 100644 src/core/utils/zeros.js
 delete mode 100644 test/helpers/collect-leaf-cids.js
 create mode 100644 test/helpers/constants.js
 create mode 100644 test/helpers/create-mfs.js
delete mode 100644 test/helpers/index.js delete mode 100644 test/helpers/random-bytes.js create mode 100644 test/helpers/stream-to-array.js create mode 100644 test/helpers/stream-to-buffer.js create mode 100644 test/helpers/traverse-leaf-nodes.js diff --git a/package.json b/package.json index 9453987..a47c077 100644 --- a/package.json +++ b/package.json @@ -18,7 +18,8 @@ "release": "aegir release", "release-minor": "aegir release --type minor", "release-major": "aegir release --type major", - "coverage": "aegir coverage" + "coverage": "aegir coverage", + "dep-check": "aegir dep-check" }, "repository": { "type": "git", @@ -42,18 +43,16 @@ "detect-node": "^2.0.4", "detect-webworker": "^1.0.0", "dirty-chai": "^2.0.1", - "ipld": "~0.21.1", - "ipld-in-memory": "^2.0.0", - "multihashes": "~0.4.14", - "pull-buffer-stream": "^1.0.1", - "pull-traverse": "^1.0.3", + "ipfs-block-service": "~0.15.2", + "ipfs-repo": "~0.26.4", + "ipld": "~0.22.0", + "memdown": "^4.0.0", "temp-write": "^3.4.0" }, "dependencies": { - "async": "^2.6.1", "cids": "~0.5.5", "debug": "^4.1.0", - "filereader-stream": "^2.0.0", + "err-code": "^1.1.2", "hamt-sharding": "~0.0.2", "interface-datastore": "~0.6.0", "ipfs-multipart": "~0.1.0", @@ -61,18 +60,13 @@ "ipfs-unixfs-exporter": "~0.36.1", "ipfs-unixfs-importer": "~0.38.5", "ipld-dag-pb": "~0.15.2", - "is-pull-stream": "~0.0.0", - "is-stream": "^1.1.0", "joi": "^14.3.0", "joi-browser": "^13.4.0", "mortice": "^1.2.1", + "multicodec": "~0.5.0", + "multihashes": "~0.4.14", "once": "^1.4.0", - "promisify-es6": "^1.0.3", - "pull-cat": "^1.1.11", - "pull-defer": "~0.2.3", - "pull-stream": "^3.6.9", - "pull-stream-to-stream": "^1.3.4", - "stream-to-pull-stream": "^1.7.2" + "promisify-es6": "^1.0.3" }, "contributors": [ "Alan Shaw ", diff --git a/src/cli/flush.js b/src/cli/flush.js index d58ac5d..9c9e11b 100644 --- a/src/cli/flush.js +++ b/src/cli/flush.js @@ -2,7 +2,7 @@ const { FILE_SEPARATOR -} = require('../core/utils') +} = require('../core/utils/constants') module.exports = { command: 'flush [path]', diff --git a/src/cli/ls.js b/src/cli/ls.js index a5c8b3c..f5dd288 100644 --- a/src/cli/ls.js +++ b/src/cli/ls.js @@ -9,7 +9,7 @@ const { } = require('./utils') const { FILE_SEPARATOR -} = require('../core/utils') +} = require('../core/utils/constants') module.exports = { command: 'ls [path]', diff --git a/src/core/cp.js b/src/core/cp.js index 6126f74..c77e2e8 100644 --- a/src/core/cp.js +++ b/src/core/cp.js @@ -1,233 +1,143 @@ 'use strict' -const waterfall = require('async/waterfall') -const parallel = require('async/parallel') -const { - addLink, - updateTree, - updateMfsRoot, - toTrail, - toSourcesAndDestination, - toMfsPath -} = require('./utils') -const stat = require('./stat') const mkdir = require('./mkdir') const log = require('debug')('ipfs:mfs:cp') +const errCode = require('err-code') +const updateTree = require('./utils/update-tree') +const updateMfsRoot = require('./utils/update-mfs-root') +const addLink = require('./utils/add-link') +const applyDefaultOptions = require('./utils/apply-default-options') +const toMfsPath = require('./utils/to-mfs-path') +const toSourcesAndDestination = require('./utils/to-sources-and-destination') +const toTrail = require('./utils/to-trail') const defaultOptions = { parents: false, flush: true, format: 'dag-pb', hashAlg: 'sha2-256', + cidVersion: 0, shardSplitThreshold: 1000 } module.exports = (context) => { - return function mfsCp () { - const args = Array.from(arguments) - const callback = args.pop() - - waterfall([ - (cb) => 
toSourcesAndDestination(context, args, defaultOptions, cb), - ({ sources, destination, options }, cb) => { - if (!sources.length) { - return cb(new Error('Please supply at least one source')) - } + return async function mfsCp (...args) { + const options = applyDefaultOptions(args, defaultOptions) + let { + sources, destination + } = await toSourcesAndDestination(context, args) + + if (!sources.length) { + throw errCode(new Error('Please supply at least one source'), 'EINVALIDPARAMS') + } - if (!destination) { - return cb(new Error('Please supply a destination')) - } + if (!destination) { + throw errCode(new Error('Please supply a destination'), 'EINVALIDPARAMS') + } - options.parents = options.p || options.parents + options.parents = options.p || options.parents - cb(null, { sources, destination, options }) - }, - ({ sources, destination, options }, cb) => toTrail(context, destination.mfsPath, options, (error, trail) => { - if (error) { - return cb(error) - } + // make sure all sources exist + const missing = sources.find(source => !source.exists) - if (trail.length === destination.parts.length) { - log('Destination does not exist') + if (missing) { + throw errCode(new Error(`${missing.path} does not exist`), 'EINVALIDPARAMS') + } - if (sources.length === 1) { - log('Only one source, copying to a file') - return copyToFile(context, sources.pop(), destination, trail, options, cb) - } + const destinationIsDirectory = isDirectory(destination) - log('Multiple sources, copying to a directory') - return copyToDirectory(context, sources, destination, trail, options, cb) - } + if (destination.exists) { + log('Destination exists') - const parent = trail[trail.length - 1] + if (sources.length === 1 && !destinationIsDirectory) { + throw errCode(new Error('directory already has entry by that name'), 'EALREADYEXISTS') + } + } else { + log('Destination does not exist') - if (parent.type === 'dir') { - log('Destination is a directory') - return copyToDirectory(context, sources, destination, trail, options, cb) + if (sources.length > 1) { + if (!options.parents) { + throw errCode(new Error('destination did not exist, pass -p to create intermediate directories'), 'EINVALIDPARAMS') } - cb(new Error('directory already has entry by that name')) - }) - ], callback) + await mkdir(context)(destination.path, options) + destination = await toMfsPath(context, destination.path) + } + } + + const destinationPath = isDirectory(destination) ? destination.mfsPath : destination.mfsDirectory + const trail = await toTrail(context, destinationPath, options) + + if (sources.length === 1) { + const source = sources.pop() + const destinationName = destinationIsDirectory ? source.name : destination.name + + log(`Only one source, copying to destination ${destinationIsDirectory ? 
'directory' : 'file'} ${destinationName}`) + + return copyToFile(context, source, destinationName, trail, options) + } + + log('Multiple sources, wrapping in a directory') + return copyToDirectory(context, sources, destination, trail, options) } } -const copyToFile = (context, source, destination, destinationTrail, options, callback) => { - waterfall([ - (cb) => asExistentTrail(context, source, options, cb), - (sourceTrail, cb) => { - const parent = destinationTrail[destinationTrail.length - 1] - const child = sourceTrail[sourceTrail.length - 1] - - waterfall([ - (next) => context.ipld.get(parent.cid, next), - (result, next) => addLink(context, { - parent: result.value, - parentCid: parent.cid, - size: child.size, - cid: child.cid, - name: destination.parts[destination.parts.length - 1] - }, next), - ({ node, cid }, next) => { - parent.node = node - parent.cid = cid - parent.size = node.size - - next(null, destinationTrail) - } - ], cb) - }, +const isDirectory = (destination) => { + return destination.unixfs && + destination.unixfs.type && + destination.unixfs.type.includes('directory') +} - // update the tree with the new child - (trail, cb) => updateTree(context, trail, options, cb), +const copyToFile = async (context, source, destination, destinationTrail, options) => { + let parent = destinationTrail.pop() - // Update the MFS record with the new CID for the root of the tree - ({ cid }, cb) => updateMfsRoot(context, cid, cb) - ], (error) => callback(error)) -} + parent = await addSourceToParent(context, source, destination, parent, options) -const copyToDirectory = (context, sources, destination, destinationTrail, options, callback) => { - waterfall([ - (cb) => { - if (destinationTrail.length !== (destination.parts.length + 1)) { - log(`Making destination directory`, destination.path) - - return waterfall([ - (cb) => mkdir(context)(destination.path, options, cb), - (cb) => toMfsPath(context, destination.path, cb), - (mfsPath, cb) => { - destination = mfsPath - - toTrail(context, destination.mfsPath, options, cb) - } - ], (err, trail) => { - if (err) { - return cb(err) - } - - destinationTrail = trail - - cb() - }) - } + // update the tree with the new containg directory + destinationTrail.push(parent) - cb() - }, - (cb) => parallel( - sources.map(source => (next) => asExistentTrail(context, source, options, next)), - cb - ), - (sourceTrails, cb) => { - waterfall([ - // ensure targets do not exist - (next) => { - parallel( - sources.map(source => { - return (cb) => { - stat(context)(`${destination.path}/${source.name}`, options, (error) => { - if (error) { - if (error.message.includes('does not exist')) { - return cb() - } - - return cb(error) - } - - cb(new Error('directory already has entry by that name')) - }) - } - }), - (error) => next(error) - ) - }, - // add links to target directory - (next) => { - const parent = destinationTrail[destinationTrail.length - 1] - - waterfall([ - (next) => context.ipld.get(parent.cid, next), - (result, next) => next(null, { cid: parent.cid, node: result.value }) - ].concat( - sourceTrails.map((sourceTrail, index) => { - return (parent, done) => { - const child = sourceTrail[sourceTrail.length - 1] - - log(`Adding ${sources[index].name} to ${parent.cid.toBaseEncodedString()}`) - - addLink(context, { - parent: parent.node, - parentCid: parent.cid, - size: child.size, - cid: child.cid, - name: sources[index].name - }, (err, result) => { - if (err) { - return done(err) - } - - log(`New directory hash ${result.cid.toBaseEncodedString()}`) - - done(err, 
result) - }) - } - }) - ), next) - }, - - ({ node, cid }, next) => { - const parent = destinationTrail[destinationTrail.length - 1] - - parent.node = node - parent.cid = cid - parent.size = node.size - - next(null, destinationTrail) - }, - - // update the tree with the new child - (trail, next) => updateTree(context, trail, options, next), - - // Update the MFS record with the new CID for the root of the tree - ({ cid }, next) => updateMfsRoot(context, cid, next) - ], cb) - } - ], (error) => callback(error)) + const newRootCid = await updateTree(context, destinationTrail, options) + + // Update the MFS record with the new CID for the root of the tree + await updateMfsRoot(context, newRootCid) } -const asExistentTrail = (context, source, options, callback) => { - toTrail(context, source.mfsPath, options, (err, trail) => { - if (err) { - return callback(err) - } +const copyToDirectory = async (context, sources, destination, destinationTrail, options) => { + // copy all the sources to the destination + for (let i = 0; i < sources.length; i++) { + const source = sources[i] - if (source.type === 'ipfs') { - return callback(null, trail) - } + destination = await addSourceToParent(context, source, source.name, destination, options) + } - if (trail.length !== (source.parts.length + 1)) { - return callback(new Error(`${source.path} does not exist`)) - } + // update the tree with the new containg directory + destinationTrail[destinationTrail.length - 1] = destination - callback(null, trail) + const newRootCid = await updateTree(context, destinationTrail, options) + + // Update the MFS record with the new CID for the root of the tree + await updateMfsRoot(context, newRootCid) +} + +const addSourceToParent = async (context, source, childName, parent, options) => { + const sourceBlock = await context.repo.blocks.get(source.cid) + + const { + node, + cid + } = await addLink(context, { + parentCid: parent.cid, + size: sourceBlock.data.length, + cid: source.cid, + name: childName, + format: options.format, + hashAlg: options.hashAlg, + cidVersion: options.cidVersion }) + + parent.node = node + parent.cid = cid + parent.size = node.size + + return parent } diff --git a/src/core/flush.js b/src/core/flush.js index fb3c79c..ecfc333 100644 --- a/src/core/flush.js +++ b/src/core/flush.js @@ -1,36 +1,17 @@ 'use strict' -const waterfall = require('async/waterfall') +const applyDefaultOptions = require('./utils/apply-default-options') const stat = require('./stat') - const { FILE_SEPARATOR -} = require('./utils') +} = require('./utils/constants') const defaultOptions = {} module.exports = (context) => { - return function mfsFlush (path, options, callback) { - if (typeof options === 'function') { - callback = options - options = {} - } - - if (typeof path === 'function') { - callback = path - options = {} - path = FILE_SEPARATOR - } - - if (!path) { - path = FILE_SEPARATOR - } - - options = Object.assign({}, defaultOptions, options) + return async function mfsFlush (path = FILE_SEPARATOR, options = defaultOptions) { + options = applyDefaultOptions(options, defaultOptions) - waterfall([ - (cb) => stat(context)(path, options, cb), - (stats, cb) => cb() - ], callback) + await stat(context)(path, options) } } diff --git a/src/core/index.js b/src/core/index.js index ee99c09..58d19e3 100644 --- a/src/core/index.js +++ b/src/core/index.js @@ -2,13 +2,10 @@ const assert = require('assert') const promisify = require('promisify-es6') -const { - createLock -} = require('./utils') +const createLock = 
require('./utils/create-lock') // These operations are read-locked at the function level and will execute simultaneously const readOperations = { - ls: require('./ls'), stat: require('./stat') } @@ -24,22 +21,15 @@ const writeOperations = { // These operations are asynchronous and manage their own locking const unwrappedOperations = { write: require('./write'), - read: require('./read') -} - -// These operations are synchronous and manage their own locking -const unwrappedSynchronousOperations = { - readPullStream: require('./read-pull-stream'), - readReadableStream: require('./read-readable-stream'), - lsPullStream: require('./ls-pull-stream'), - lsReadableStream: require('./ls-readable-stream') + read: require('./read'), + ls: require('./ls') } const wrap = ({ options, mfs, operations, lock }) => { Object.keys(operations).forEach(key => { - mfs[key] = promisify(lock(operations[key](options))) + mfs[key] = lock(operations[key](options)) }) } @@ -55,7 +45,28 @@ module.exports = (options) => { } = Object.assign({}, defaultOptions || {}, options) assert(options.ipld, 'MFS requires an IPLD instance') - assert(options.repo, 'MFS requires an ipfs-repo instance') + assert(options.blocks, 'MFS requires an BlockStore instance') + assert(options.datastore, 'MFS requires a DataStore instance') + + // should be able to remove this when async/await PRs are in for datastore, blockstore & repo + options.repo = { + blocks: { + get: promisify(options.blocks.get, { + context: options.blocks + }) + }, + datastore: { + open: promisify(options.datastore.open, { + context: options.datastore + }), + get: promisify(options.datastore.get, { + context: options.datastore + }), + put: promisify(options.datastore.put, { + context: options.datastore + }) + } + } const lock = createLock(repoOwner) @@ -77,11 +88,7 @@ module.exports = (options) => { }) Object.keys(unwrappedOperations).forEach(key => { - mfs[key] = promisify(unwrappedOperations[key](options)) - }) - - Object.keys(unwrappedSynchronousOperations).forEach(key => { - mfs[key] = unwrappedSynchronousOperations[key](options) + mfs[key] = unwrappedOperations[key](options) }) return mfs diff --git a/src/core/ls-pull-stream.js b/src/core/ls-pull-stream.js deleted file mode 100644 index 8d95573..0000000 --- a/src/core/ls-pull-stream.js +++ /dev/null @@ -1,144 +0,0 @@ -'use strict' - -const waterfall = require('async/waterfall') -const UnixFs = require('ipfs-unixfs') -const exporter = require('ipfs-unixfs-exporter') -const { - loadNode, - formatCid, - toMfsPath, - FILE_SEPARATOR, - FILE_TYPES -} = require('./utils') -const pull = require('pull-stream/pull') -const collect = require('pull-stream/sinks/collect') -const asyncMap = require('pull-stream/throughs/async-map') -const filter = require('pull-stream/throughs/filter') -const once = require('pull-stream/sources/once') -const error = require('pull-stream/sources/error') -const defer = require('pull-defer') - -const defaultOptions = { - long: false, - cidBase: 'base58btc' -} - -module.exports = (context) => { - return function mfsLs (path, options = {}) { - if (typeof path === 'object') { - options = path - path = FILE_SEPARATOR - } - - if (path === undefined) { - path = FILE_SEPARATOR - } - - options = Object.assign({}, defaultOptions, options) - - options.long = options.l || options.long - - const deferred = defer.source() - - waterfall([ - (cb) => toMfsPath(context, path, cb), - ({ mfsPath, depth }, cb) => { - pull( - exporter(mfsPath, context.ipld, { - maxDepth: depth - }), - - collect((err, files) => { - if 
(err) { - return cb(err) - } - - if (files.length > 1) { - return cb(new Error(`Path ${path} had ${files.length} roots`)) - } - - const file = files[0] - - if (!file) { - return cb(new Error(`${path} does not exist`)) - } - - if (file.type !== 'dir') { - return cb(null, once(file)) - } - - let first = true - - return cb(null, pull( - exporter(mfsPath, context.ipld, { - maxDepth: depth + 1 - }), - // first item in list is the directory node - filter(() => { - if (first) { - first = false - return false - } - - return true - }) - )) - }) - ) - }, - (source, cb) => { - cb(null, - pull( - source, - - // load DAGNodes for each file - asyncMap((file, cb) => { - if (!options.long) { - return cb(null, { - name: file.name, - type: 0, - size: 0, - hash: '' - }) - } - - loadNode(context, { - cid: file.cid - }, (err, result) => { - if (err) { - return cb(err) - } - - if (Buffer.isBuffer(result.node)) { - return cb(null, { - name: file.name, - type: 0, - hash: formatCid(file.cid, options.cidBase), - size: result.node.length - }) - } - - const meta = UnixFs.unmarshal(result.node.data) - - cb(null, { - name: file.name, - type: FILE_TYPES[meta.type], - hash: formatCid(file.cid, options.cidBase), - size: meta.fileSize() || 0 - }) - }) - }) - ) - ) - } - ], (err, source) => { - if (err) { - return deferred.resolve(error(err)) - } - - deferred.resolve(source) - }) - - return deferred - } -} diff --git a/src/core/ls-readable-stream.js b/src/core/ls-readable-stream.js deleted file mode 100644 index 69d546d..0000000 --- a/src/core/ls-readable-stream.js +++ /dev/null @@ -1,10 +0,0 @@ -'use strict' - -const lsPullStream = require('./ls-pull-stream') -const toStream = require('pull-stream-to-stream') - -module.exports = (context) => { - return function mfsLsReadableStream (path, options = {}) { - return toStream.source(lsPullStream(context)(path, options)) - } -} diff --git a/src/core/ls.js b/src/core/ls.js index 471ac2d..8114d69 100644 --- a/src/core/ls.js +++ b/src/core/ls.js @@ -1,28 +1,57 @@ 'use strict' +const exporter = require('ipfs-unixfs-exporter') +const applyDefaultOptions = require('./utils/apply-default-options') +const toMfsPath = require('./utils/to-mfs-path') const { - FILE_SEPARATOR -} = require('./utils') -const pull = require('pull-stream/pull') -const collect = require('pull-stream/sinks/collect') -const lsPullStream = require('./ls-pull-stream') + FILE_SEPARATOR, + FILE_TYPES +} = require('./utils/constants') + +const defaultOptions = { + +} + +const toOutput = (fsEntry) => { + let type = 0 + let size = fsEntry.node.size || fsEntry.node.length + + if (fsEntry.unixfs) { + size = fsEntry.unixfs.fileSize() + type = FILE_TYPES[fsEntry.unixfs.type] + } + + return { + cid: fsEntry.cid, + name: fsEntry.name, + type, + size + } +} module.exports = (context) => { - return function mfsLs (path, options, callback) { - if (typeof path === 'function') { - callback = path + return async function * mfsLs (path = FILE_SEPARATOR, options = {}) { + if (typeof path === 'object' && !(path instanceof String)) { + options = path path = FILE_SEPARATOR - options = {} } - if (typeof options === 'function') { - callback = options - options = {} + options = applyDefaultOptions(options, defaultOptions) + options.long = options.l || options.long + + const mfsPath = await toMfsPath(context, path) + const fsDir = await exporter(mfsPath.mfsPath, context.ipld) + + // single file/node + if (!fsDir.unixfs || !fsDir.unixfs.type.includes('directory')) { + yield toOutput(fsDir) + + return } - pull( - lsPullStream(context)(path, 
options), - collect(callback) - ) + // directory, perhaps sharded + for await (const fsEntry of fsDir.content(options)) { + yield toOutput(fsEntry) + } } } diff --git a/src/core/mkdir.js b/src/core/mkdir.js index 534e72a..3856b6f 100644 --- a/src/core/mkdir.js +++ b/src/core/mkdir.js @@ -1,21 +1,18 @@ 'use strict' -const waterfall = require('async/waterfall') -const asyncMap = require('async/map') +const errCode = require('err-code') const log = require('debug')('ipfs:mfs:mkdir') const exporter = require('ipfs-unixfs-exporter') -const pull = require('pull-stream/pull') -const filter = require('pull-stream/throughs/filter') -const map = require('pull-stream/throughs/map') -const collect = require('pull-stream/sinks/collect') +const createNode = require('./utils/create-node') +const toPathComponents = require('./utils/to-path-components') +const updateMfsRoot = require('./utils/update-mfs-root') +const updateTree = require('./utils/update-tree') +const addLink = require('./utils/add-link') +const withMfsRoot = require('./utils/with-mfs-root') +const applyDefaultOptions = require('./utils/apply-default-options') const { - createNode, - toMfsPath, - toPathComponents, - updateMfsRoot, - updateTree, FILE_SEPARATOR -} = require('./utils') +} = require('./utils/constants') const defaultOptions = { parents: false, @@ -27,123 +24,105 @@ const defaultOptions = { } module.exports = (context) => { - return function mfsMkdir (path, options, callback) { - if (typeof options === 'function') { - callback = options - options = {} - } - - options = Object.assign({}, defaultOptions, options) - - options.parents = options.p || options.parents - options.cidVersion = options.cidVersion || 0 + return async function mfsMkdir (path, options) { + options = applyDefaultOptions(options, defaultOptions) if (!path) { - return callback(new Error('no path given to Mkdir')) + throw new Error('no path given to Mkdir') } path = path.trim() if (path === FILE_SEPARATOR) { - return callback(options.parents ? null : new Error(`cannot create directory '${FILE_SEPARATOR}': Already exists`)) + if (options.parents) { + return + } + + throw errCode(new Error(`cannot create directory '${FILE_SEPARATOR}': Already exists`), 'EINVALIDPATH') + } + + if (path.substring(0, 1) !== FILE_SEPARATOR) { + throw errCode(new Error('paths must start with a leading /'), 'EINVALIDPATH') } log(`Creating ${path}`) const pathComponents = toPathComponents(path) - waterfall([ - (cb) => toMfsPath(context, path, cb), - // figure out the CID of the containing folder - ({ mfsDirectory, mfsPath, root }, cb) => { - const toExport = toPathComponents(mfsPath) - .slice(1) - - let depth = 0 - - let exported = '' - - pull( - exporter(mfsPath, context.ipld, { - fullPath: true - }), - // find the directory from each level in the filesystem - filter(node => { - if (node.name === toExport[depth]) { - depth++ - - return true - } - - return false - }), - // load DAGNode for the containing folder - map((node) => { - const currentPath = `${exported}${exported ? 
'/' : ''}${toExport[node.depth]}` - - if (node.type !== 'dir') { - throw new Error(`cannot access ${currentPath}: Not a directory`) - } - exported = currentPath - - return { - cid: node.cid, - name: node.name - } - }), - collect(cb) - ) - }, - // Update the MFS tree from the containingFolder upwards - (trail, cb) => { - pathComponents.unshift('/') - - // we managed to load all of the requested path segments so the - // directory already exists - if (trail.length === pathComponents.length) { - return cb(new Error('file already exists')) - } + if (pathComponents[0] === 'ipfs') { + throw errCode(new Error("path cannot have the prefix 'ipfs'"), 'EINVALIDPATH') + } + + let root = await withMfsRoot(context) + let parent + let trail = [] + const emptyDir = await createNode(context, 'directory', options) - asyncMap(pathComponents.map((part, index) => ({ part, index })), ({ part, index }, cb) => { - if (trail[index]) { - return cb(null, { - name: part, - ...trail[index] - }) + // make sure the containing folder exists, creating it if necessary + for (let i = 0; i <= pathComponents.length; i++) { + const subPathComponents = pathComponents.slice(0, i) + const subPath = `/ipfs/${root.toBaseEncodedString()}/${subPathComponents.join('/')}` + + try { + parent = await exporter(subPath, context.ipld) + log(`${subPath} existed`) + log(`${subPath} had children ${parent.node.links.map(link => link.name)}`) + + if (i === pathComponents.length) { + if (options.parents) { + return } - // if we are not at the last path component and we are - // not creating intermediate directories make a fuss - if (index !== pathComponents.length - 1 && !options.parents) { - return cb(new Error('file does not exist')) + throw errCode(new Error('file already exists'), 'EALREADYEXISTS') + } + + trail.push({ + name: parent.name, + cid: parent.cid + }) + } catch (err) { + if (err.code === 'ERR_NOT_FOUND') { + if (i < pathComponents.length && !options.parents) { + throw errCode(new Error(`Intermediate directory path ${subPath} does not exist, use the -p flag to create it`), 'ERR_NOT_FOUND') } - waterfall([ - (done) => createNode(context, 'directory', options, done), - ({ cid, node }, done) => { - done(null, { - cid, - size: node.size, - name: part - }) - } - ], cb) - }, cb) - }, - - // update the tree from the leaf to the root - (trail, cb) => updateTree(context, trail, options, cb), - - // Update the MFS record with the new CID for the root of the tree - ({ cid }, cb) => updateMfsRoot(context, cid, cb) - ], (error) => { - if (error && error.message.includes('file already exists') && options.parents) { - // when the directory already exists and we are creating intermediate - // directories, do not error out (consistent with mkdir -p) - error = null + // add the intermediate directory + await addEmptyDir(context, subPathComponents[subPathComponents.length - 1], emptyDir, trail[trail.length - 1], trail, options) + } else { + throw err + } } + } + + // add an empty dir to the last path component + // await addEmptyDir(context, pathComponents[pathComponents.length - 1], emptyDir, parent, trail) + + // update the tree from the leaf to the root + const newRootCid = await updateTree(context, trail, options) - callback(error) - }) + // Update the MFS record with the new CID for the root of the tree + await updateMfsRoot(context, newRootCid) } } + +const addEmptyDir = async (context, childName, emptyDir, parent, trail, options) => { + log(`Adding empty dir called ${childName} to ${parent.cid.toBaseEncodedString()}`) + + const result = await 
addLink(context, { + parent: parent.node, + parentCid: parent.cid, + size: emptyDir.node.size, + cid: emptyDir.cid, + name: childName, + format: options.format, + hashAlg: options.hashAlg, + cidVersion: options.cidVersion + }) + + trail[trail.length - 1].cid = result.cid + + trail.push({ + name: childName, + cid: emptyDir.cid + }) +} diff --git a/src/core/mv.js b/src/core/mv.js index 084b293..00ae295 100644 --- a/src/core/mv.js +++ b/src/core/mv.js @@ -1,10 +1,7 @@ 'use strict' -const series = require('async/series') -const waterfall = require('async/waterfall') -const { - toSources -} = require('./utils') +const applyDefaultOptions = require('./utils/apply-default-options') +const toSources = require('./utils/to-sources') const cp = require('./cp') const rm = require('./rm') @@ -18,34 +15,28 @@ const defaultOptions = { } module.exports = (context) => { - return function mfsMv () { - let args = Array.from(arguments) - const callback = args.pop() - + return async function mfsMv (...args) { if (Array.isArray(args[0])) { args = args[0].concat(args.slice(1)) } - waterfall([ - (cb) => toSources(context, args, defaultOptions, cb), - ({ sources, options }, cb) => { - // remove the callback - const cpArgs = sources - .map(source => source.path).concat(options) + const { + sources + } = await toSources(context, args) + const options = applyDefaultOptions(args, defaultOptions) + + const cpArgs = sources + .map(source => source.path).concat(options) - // remove the last source as it'll be the destination - const rmArgs = sources - .slice(0, -1) - .map(source => source.path) - .concat(Object.assign(options, { - recursive: true - })) + // remove the last source as it'll be the destination + const rmArgs = sources + .slice(0, -1) + .map(source => source.path) + .concat(Object.assign(options, { + recursive: true + })) - series([ - (cb) => cp(context).apply(null, cpArgs.concat(cb)), - (cb) => rm(context).apply(null, rmArgs.concat(cb)) - ], cb) - } - ], callback) + await cp(context).apply(null, cpArgs) + await rm(context).apply(null, rmArgs) } } diff --git a/src/core/read-pull-stream.js b/src/core/read-pull-stream.js deleted file mode 100644 index 99b5cc1..0000000 --- a/src/core/read-pull-stream.js +++ /dev/null @@ -1,74 +0,0 @@ -'use strict' - -const exporter = require('ipfs-unixfs-exporter') -const pull = require('pull-stream/pull') -const once = require('pull-stream/sources/once') -const asyncMap = require('pull-stream/throughs/async-map') -const flatten = require('pull-stream/throughs/flatten') -const filter = require('pull-stream/throughs/filter') -const defer = require('pull-defer') -const collect = require('pull-stream/sinks/collect') -const { - toMfsPath -} = require('./utils') -const log = require('debug')('ipfs:mfs:read-pull-stream') - -const defaultOptions = { - offset: 0, - length: undefined -} - -module.exports = (context) => { - return function mfsReadPullStream (path, options = {}) { - options = Object.assign({}, defaultOptions, options) - - // support legacy go arguments - options.length = options.length || options.count - - log(`Reading ${path}`) - - const deferred = defer.source() - - pull( - once(path), - asyncMap((path, cb) => toMfsPath(context, path, cb)), - asyncMap(({ mfsPath, root }, cb) => { - log(`Exporting ${mfsPath}`) - - return pull( - exporter(mfsPath, context.ipld, { - offset: options.offset, - length: options.length - }), - collect(cb) - ) - }), - flatten(), - filter(), - collect((error, files) => { - if (error) { - return deferred.abort(error) - } - - if (!files || 
!files.length) { - return deferred.abort(new Error(`${path} does not exist`)) - } - - const file = files[0] - - if (file.type !== 'file') { - return deferred.abort(new Error(`${path} was not a file`)) - } - - if (!file.content) { - return deferred.abort(new Error(`Could not load content stream from ${path}`)) - } - - log(`Got ${path} content`) - deferred.resolve(files[0].content) - }) - ) - - return deferred - } -} diff --git a/src/core/read-readable-stream.js b/src/core/read-readable-stream.js deleted file mode 100644 index a851722..0000000 --- a/src/core/read-readable-stream.js +++ /dev/null @@ -1,10 +0,0 @@ -'use strict' - -const readPullStream = require('./read-pull-stream') -const toStream = require('pull-stream-to-stream') - -module.exports = (context) => { - return function mfsReadReadableStream (path, options = {}) { - return toStream.source(readPullStream(context)(path, options)) - } -} diff --git a/src/core/read.js b/src/core/read.js index 9160427..ce601bf 100644 --- a/src/core/read.js +++ b/src/core/read.js @@ -1,25 +1,39 @@ 'use strict' -const pull = require('pull-stream/pull') -const collect = require('pull-stream/sinks/collect') -const readPullStream = require('./read-pull-stream') +const exporter = require('ipfs-unixfs-exporter') +const applyDefaultOptions = require('./utils/apply-default-options') +const toMfsPath = require('./utils/to-mfs-path') +const errCode = require('err-code') + +const defaultOptions = { + offset: 0, + length: Infinity +} module.exports = (context) => { - return function mfsRead (path, options, callback) { - if (typeof options === 'function') { - callback = options - options = {} - } + return function mfsRead (path, options = {}) { + options = applyDefaultOptions(options, defaultOptions) + + return { + [Symbol.asyncIterator]: async function * read () { + const mfsPath = await toMfsPath(context, path) + const result = await exporter(mfsPath.mfsPath, context.ipld) - pull( - readPullStream(context)(path, options), - collect((error, buffers) => { - if (error) { - return callback(error) + if (result.unixfs.type !== 'file') { + throw errCode(new Error(`${path} was not a file`), 'ENOTFILE') } - return callback(null, Buffer.concat(buffers)) - }) - ) + if (!result.content) { + throw errCode(new Error(`Could not load content stream from ${path}`), 'ENOCONTENT') + } + + for await (const buf of result.content({ + offset: options.offset, + length: options.length + })) { + yield buf + } + } + } } } diff --git a/src/core/rm.js b/src/core/rm.js index a5288ab..fb1bfae 100644 --- a/src/core/rm.js +++ b/src/core/rm.js @@ -1,84 +1,80 @@ 'use strict' -const waterfall = require('async/waterfall') -const series = require('async/series') +const errCode = require('err-code') +const updateTree = require('./utils/update-tree') +const updateMfsRoot = require('./utils/update-mfs-root') +const toSources = require('./utils/to-sources') +const removeLink = require('./utils/remove-link') +const toMfsPath = require('./utils/to-mfs-path') +const toTrail = require('./utils/to-trail') +const applyDefaultOptions = require('./utils/apply-default-options') const { - updateTree, - updateMfsRoot, - toSources, - removeLink, - toMfsPath, - toTrail, FILE_SEPARATOR -} = require('./utils') +} = require('./utils/constants') const defaultOptions = { recursive: false, cidVersion: 0, hashAlg: 'sha2-256', - format: 'dag-pb' + format: 'dag-pb', + flush: true } module.exports = (context) => { - return function mfsRm () { + return async function mfsRm () { const args = Array.from(arguments) - const 
callback = args.pop() - waterfall([ - (cb) => toSources(context, args, defaultOptions, cb), - ({ sources, options }, cb) => { - if (!sources.length) { - return cb(new Error('Please supply at least one path to remove')) - } + const { + sources + } = await toSources(context, args, defaultOptions) + const options = applyDefaultOptions(args, defaultOptions) - series( - sources.map(source => { - return (done) => removePath(context, source.path, options, done) - }), - (error) => cb(error) - ) + if (!sources.length) { + throw errCode(new Error('Please supply at least one path to remove'), 'EINVALIDPARAMS') + } + + sources.forEach(source => { + if (source.path === FILE_SEPARATOR) { + throw errCode(new Error('Cannot delete root'), 'EINVALIDPARAMS') } - ], callback) - } -} + }) -const removePath = (context, path, options, callback) => { - if (path === FILE_SEPARATOR) { - return callback(new Error('Cannot delete root')) + for (const source of sources) { + await removePath(context, source.path, options) + } } +} - waterfall([ - (cb) => toMfsPath(context, path, cb), - ({ mfsPath, parts }, cb) => toTrail(context, mfsPath, options, (err, trail) => cb(err, { mfsPath, parts, trail })), - ({ trail }, cb) => { - const child = trail.pop() - const parent = trail[trail.length - 1] +const removePath = async (context, path, options) => { + const mfsPath = await toMfsPath(context, path) + const trail = await toTrail(context, mfsPath.mfsPath, options) + const child = trail.pop() + const parent = trail[trail.length - 1] - if (!parent) { - return cb(new Error(`${path} does not exist`)) - } + if (!parent) { + throw errCode(new Error(`${path} does not exist`), 'ERR_NOT_FOUND') + } - if (child.type === 'dir' && !options.recursive) { - return cb(new Error(`${path} is a directory, use -r to remove directories`)) - } + if (child.type === 'directory' && !options.recursive) { + throw errCode(new Error(`${path} is a directory, use -r to remove directories`), 'EDIR') + } - waterfall([ - (done) => removeLink(context, { - parentCid: parent.cid, - name: child.name - }, done), - ({ cid }, done) => { - parent.cid = cid + const { + cid + } = await removeLink(context, { + parentCid: parent.cid, + name: child.name, + format: options.format, + hashAlg: options.hashAlg, + cidVersion: options.cidVersion, + flush: options.flush + }) - done(null, trail) - } - ], cb) - }, + parent.cid = cid - // update the tree with the new child - (trail, cb) => updateTree(context, trail, options, cb), + // update the tree with the new child + const newRootCid = await updateTree(context, trail, options) - // Update the MFS record with the new CID for the root of the tree - ({ cid }, cb) => updateMfsRoot(context, cid, cb) - ], callback) + // Update the MFS record with the new CID for the root of the tree + await updateMfsRoot(context, newRootCid) } diff --git a/src/core/stat.js b/src/core/stat.js index f1b3b06..ad43cf7 100644 --- a/src/core/stat.js +++ b/src/core/stat.js @@ -1,115 +1,90 @@ 'use strict' -const unmarshal = require('ipfs-unixfs').unmarshal -const { - formatCid, - toMfsPath, - loadNode -} = require('./utils') -const waterfall = require('async/waterfall') -const pull = require('pull-stream/pull') -const collect = require('pull-stream/sinks/collect') -const asyncMap = require('pull-stream/throughs/async-map') +const applyDefaultOptions = require('./utils/apply-default-options') +const toMfsPath = require('./utils/to-mfs-path') const exporter = require('ipfs-unixfs-exporter') const log = require('debug')('ipfs:mfs:stat') +const errCode = 
require('err-code') const defaultOptions = { - hash: false, - size: false, - withLocal: false, - cidBase: 'base58btc' + withLocal: false } module.exports = (context) => { - return function mfsStat (path, options, callback) { - if (typeof options === 'function') { - callback = options - options = {} - } - - options = Object.assign({}, defaultOptions, options) + return async function mfsStat (path, options) { + options = applyDefaultOptions(options, defaultOptions) log(`Fetching stats for ${path}`) - waterfall([ - (cb) => toMfsPath(context, path, cb), - ({ mfsPath, depth }, cb) => { - pull( - exporter(mfsPath, context.ipld, { - maxDepth: depth - }), - - asyncMap((file, cb) => { - if (options.hash) { - return cb(null, { - hash: formatCid(file.cid, options.cidBase) - }) - } - - if (options.size) { - return cb(null, { - size: file.size - }) - } + const { + type, + cid, + mfsPath + } = await toMfsPath(context, path) - loadNode(context, { - cid: file.cid - }, (err, result) => { - if (err) { - return cb(err) - } + let exportPath = type === 'ipfs' && cid ? cid : mfsPath + let file - const { - node, cid - } = result - - if (Buffer.isBuffer(node)) { - return cb(null, { - hash: formatCid(cid, options.cidBase), - size: node.length, - cumulativeSize: node.length, - blocks: 0, - type: 'file', // really? - local: undefined, - sizeLocal: undefined, - withLocality: false - }) - } - - const meta = unmarshal(node.data) - let blocks = node.links.length + try { + file = await exporter(exportPath, context.ipld) + } catch (err) { + if (err.code === 'ERR_NOT_FOUND') { + throw errCode(new Error(`${path} does not exist`), 'ERR_NOT_FOUND') + } - if (meta.type === 'file') { - blocks = meta.blockSizes.length - } + throw err + } - cb(null, { - hash: formatCid(cid, options.cidBase), - size: meta.fileSize() || 0, - cumulativeSize: node.size, - blocks: blocks, - type: meta.type, - local: undefined, - sizeLocal: undefined, - withLocality: false - }) - }) - }), - collect((error, results) => { - if (error) { - return cb(error) - } + if (!statters[file.cid.codec]) { + throw new Error(`Cannot stat codec ${file.cid.codec}`) + } - if (!results.length) { - return cb(new Error(`${path} does not exist`)) - } + return statters[file.cid.codec](file, options) + } +} - log(`Stats for ${path}`, results[0]) +const statters = { + raw: (file) => { + return { + cid: file.cid, + size: file.node.length, + cumulativeSize: file.node.length, + blocks: 0, + type: 'file', // for go compatibility + local: undefined, + sizeLocal: undefined, + withLocality: false + } + }, + 'dag-pb': (file) => { + let blocks = file.node.links.length + let size = file.node.size + let cumulativeSize = file.node.size + let nodeType = null + + if (file.unixfs) { + size = file.unixfs.fileSize() + nodeType = file.unixfs.type + + if (nodeType.includes('directory')) { + size = 0 + cumulativeSize = file.node.size + } - return cb(null, results[0]) - }) - ) + if (nodeType === 'file') { + blocks = file.unixfs.blockSizes.length } - ], callback) + } + + return { + cid: file.cid, + size: size, + cumulativeSize: cumulativeSize, + blocks: blocks, + type: nodeType, + local: undefined, + sizeLocal: undefined, + withLocality: false + } } } diff --git a/src/core/utils/add-link.js b/src/core/utils/add-link.js index e8c8692..9a02b2e 100644 --- a/src/core/utils/add-link.js +++ b/src/core/utils/add-link.js @@ -3,10 +3,8 @@ const { DAGNode, DAGLink -} = require('ipld-dag-pb') +} = require('./dag-pb') const CID = require('cids') -const waterfall = require('async/waterfall') -const whilst = 
require('async/whilst') const log = require('debug')('ipfs:mfs:core:utils:add-link') const UnixFS = require('ipfs-unixfs') const DirSharded = require('ipfs-unixfs-importer/src/importer/dir-sharded') @@ -17,49 +15,32 @@ const { toPrefix, addLinksToHamtBucket } = require('./hamt-utils') - -const defaultOptions = { - parent: undefined, - cid: undefined, - name: '', - size: undefined, - flush: true, - cidVersion: 0, - hashAlg: 'sha2-256', - codec: 'dag-pb', - shardSplitThreshold: 1000 -} - -const addLink = (context, options, callback) => { - options = Object.assign({}, defaultOptions, options) - - if (!options.parentCid) { - return callback(new Error('No parent CID passed to addLink')) +const errCode = require('err-code') +const promisify = require('promisify-es6') +const mc = require('multicodec') +const mh = require('multihashes') + +const addLink = async (context, options) => { + if (!options.parentCid && !options.parent) { + throw errCode(new Error('No parent node or CID passed to addLink'), 'EINVALIDPARENT') } - if (!CID.isCID(options.parentCid)) { - return callback(new Error('Invalid CID passed to addLink')) + if (options.parentCid && !CID.isCID(options.parentCid)) { + throw errCode(new Error('Invalid CID passed to addLink'), 'EINVALIDPARENTCID') } if (!options.parent) { log('Loading parent node', options.parentCid.toBaseEncodedString()) - return waterfall([ - (cb) => context.ipld.get(options.parentCid, cb), - (result, cb) => cb(null, result.value), - (node, cb) => addLink(context, { - ...options, - parent: node - }, cb) - ], callback) + options.parent = await context.ipld.get(options.parentCid) } if (!options.cid) { - return callback(new Error('No child cid passed to addLink')) + throw errCode(new Error('No child cid passed to addLink'), 'EINVALIDCHILDCID') } if (!options.name) { - return callback(new Error('No child name passed to addLink')) + throw errCode(new Error('No child name passed to addLink'), 'EINVALIDCHILDNAME') } if (!CID.isCID(options.cid)) { @@ -67,7 +48,7 @@ const addLink = (context, options, callback) => { } if (!options.size && options.size !== 0) { - return callback(new Error('No child size passed to addLink')) + throw errCode(new Error('No child size passed to addLink'), 'EINVALIDCHILDSIZE') } const meta = UnixFS.unmarshal(options.parent.data) @@ -75,95 +56,82 @@ const addLink = (context, options, callback) => { if (meta.type === 'hamt-sharded-directory') { log('Adding link to sharded directory') - return addToShardedDirectory(context, options, callback) + return addToShardedDirectory(context, options) } if (options.parent.links.length >= options.shardSplitThreshold) { log('Converting directory to sharded directory') - return convertToShardedDirectory(context, options, callback) + return convertToShardedDirectory(context, options) } - log(`Adding ${options.name} to regular directory`) + log(`Adding ${options.name} (${options.cid.toBaseEncodedString()}) to regular directory`) - addToDirectory(context, options, callback) + return addToDirectory(context, options) } -const convertToShardedDirectory = (context, options, callback) => { - createShard(context, options.parent.links.map(link => ({ +const convertToShardedDirectory = async (context, options) => { + const result = await createShard(context, options.parent.links.map(link => ({ name: link.name, size: link.size, - multihash: link.cid.buffer + cid: link.cid })).concat({ name: options.name, size: options.size, - multihash: options.cid.buffer - }), {}, (err, result) => { - if (!err) { - log('Converted directory to 
sharded directory', result.cid.toBaseEncodedString()) - } + cid: options.cid + })) - callback(err, result) - }) -} + log('Converted directory to sharded directory', result.cid.toBaseEncodedString()) -const addToDirectory = (context, options, callback) => { - waterfall([ - (done) => DAGNode.rmLink(options.parent, options.name, done), - (parent, done) => DAGNode.addLink(parent, new DAGLink(options.name, options.size, options.cid), done), - (parent, done) => { - // Persist the new parent DAGNode - context.ipld.put(parent, { - version: options.cidVersion, - format: options.codec, - hashAlg: options.hashAlg, - hashOnly: !options.flush - }, (error, cid) => done(error, { - node: parent, - cid - })) - } - ], callback) + return result } -const addToShardedDirectory = (context, options, callback) => { - return addFileToShardedDirectoryy(context, options, (err, result) => { - if (err) { - return callback(err) - } +const addToDirectory = async (context, options) => { + let parent = await DAGNode.rmLink(options.parent, options.name) + parent = await DAGNode.addLink(parent, await DAGLink.create(options.name, options.size, options.cid)) - const { - shard, path - } = result - - shard.flush('', context.ipld, null, async (err, result) => { - if (err) { - return callback(err) - } - - // we have written out the shard, but only one sub-shard will have been written so replace it in the original shard - const oldLink = options.parent.links - .find(link => link.name.substring(0, 2) === path[0].prefix) - - const newLink = result.node.links - .find(link => link.name.substring(0, 2) === path[0].prefix) - - waterfall([ - (done) => { - if (!oldLink) { - return done(null, options.parent) - } - - DAGNode.rmLink(options.parent, oldLink.name, done) - }, - (parent, done) => DAGNode.addLink(parent, newLink, done), - (parent, done) => updateHamtDirectory(context, parent.links, path[0].bucket, options, done) - ], callback) - }) + const format = mc[options.format.toUpperCase().replace(/-/g, '_')] + const hashAlg = mh.names[options.hashAlg] + + // Persist the new parent DAGNode + const cid = await context.ipld.put(parent, format, { + cidVersion: options.cidVersion, + hashAlg, + hashOnly: !options.flush }) + + return { + node: parent, + cid + } } -const addFileToShardedDirectoryy = (context, options, callback) => { +const addToShardedDirectory = async (context, options) => { + const { + shard, path + } = await addFileToShardedDirectory(context, options) + + const result = await shard.flush('', context.ipld, null) + + // we have written out the shard, but only one sub-shard will have been written so replace it in the original shard + const oldLink = options.parent.links + .find(link => link.name.substring(0, 2) === path[0].prefix) + + const newLink = result.node.links + .find(link => link.name.substring(0, 2) === path[0].prefix) + + let parent = options.parent + + if (oldLink) { + parent = await DAGNode.rmLink(options.parent, oldLink.name) + } + + parent = await DAGNode.addLink(parent, newLink) + + return updateHamtDirectory(context, parent.links, path[0].bucket, options) +} + +const addFileToShardedDirectory = async (context, options) => { const file = { name: options.name, cid: options.cid, @@ -171,115 +139,98 @@ const addFileToShardedDirectoryy = (context, options, callback) => { } // start at the root bucket and descend, loading nodes as we go - recreateHamtLevel(options.parent.links, null, null, null, async (err, rootBucket) => { - if (err) { - return callback(err) + const rootBucket = await 
recreateHamtLevel(options.parent.links) + + const shard = new DirSharded({ + root: true, + dir: true, + parent: null, + parentKey: null, + path: '', + dirty: true, + flat: false + }) + shard._bucket = rootBucket + + shard.flush = promisify(shard.flush, { + context: shard + }) + + // load subshards until the bucket & position no longer changes + const position = await rootBucket._findNewBucketAndPos(file.name) + const path = toBucketPath(position) + path[0].node = options.parent + let index = 0 + + while (index < path.length) { + let segment = path[index] + index++ + let node = segment.node + + let link = node.links + .find(link => link.name.substring(0, 2) === segment.prefix) + + if (!link) { + // prefix is new, file will be added to the current bucket + log(`Link ${segment.prefix}${file.name} will be added`) + index = path.length + + break } - const shard = new DirSharded({ - root: true, - dir: true, - parent: null, - parentKey: null, - path: '', - dirty: true, - flat: false - }) - shard._bucket = rootBucket - - // load subshards until the bucket & position no longer changes - const position = await rootBucket._findNewBucketAndPos(file.name) - const path = toBucketPath(position) - path[0].node = options.parent - let index = 0 - - whilst( - () => index < path.length, - (next) => { - let segment = path[index] - index++ - let node = segment.node - - let link = node.links - .find(link => link.name.substring(0, 2) === segment.prefix) - - if (!link) { - // prefix is new, file will be added to the current bucket - log(`Link ${segment.prefix}${file.name} will be added`) - index = path.length - return next(null, shard) - } - - if (link.name === `${segment.prefix}${file.name}`) { - // file already existed, file will be added to the current bucket - log(`Link ${segment.prefix}${file.name} will be replaced`) - index = path.length - return next(null, shard) - } - - if (link.name.length > 2) { - // another file had the same prefix, will be replaced with a subshard - log(`Link ${link.name} will be replaced with a subshard`) - index = path.length - return next(null, shard) - } - - // load sub-shard - log(`Found subshard ${segment.prefix}`) - context.ipld.get(link.cid, (err, result) => { - if (err) { - return next(err) - } - - // subshard hasn't been loaded, descend to the next level of the HAMT - if (!path[index]) { - log(`Loaded new subshard ${segment.prefix}`) - const node = result.value - - return recreateHamtLevel(node.links, rootBucket, segment.bucket, parseInt(segment.prefix, 16), async (err) => { - if (err) { - return next(err) - } - - const position = await rootBucket._findNewBucketAndPos(file.name) - - path.push({ - bucket: position.bucket, - prefix: toPrefix(position.pos), - node: node - }) - - return next(null, shard) - }) - } - - const nextSegment = path[index] - - // add next level's worth of links to bucket - addLinksToHamtBucket(result.value.links, nextSegment.bucket, rootBucket, (error) => { - nextSegment.node = result.value - - next(error, shard) - }) - }) - }, - (err, shard) => { - if (err) { - return callback(err) - } - - // finally add the new file into the shard - shard.put(file.name, { - size: file.size, - multihash: file.cid.buffer - }, (err) => { - callback(err, { - shard, path - }) - }) - } - ) + if (link.name === `${segment.prefix}${file.name}`) { + // file already existed, file will be added to the current bucket + log(`Link ${segment.prefix}${file.name} will be replaced`) + index = path.length + + break + } + + if (link.name.length > 2) { + // another file had the same prefix, 
will be replaced with a subshard + log(`Link ${link.name} will be replaced with a subshard`) + index = path.length + + break + } + + // load sub-shard + log(`Found subshard ${segment.prefix}`) + const subShard = await context.ipld.get(link.cid) + + // subshard hasn't been loaded, descend to the next level of the HAMT + if (!path[index]) { + log(`Loaded new subshard ${segment.prefix}`) + await recreateHamtLevel(subShard.links, rootBucket, segment.bucket, parseInt(segment.prefix, 16)) + + const position = await rootBucket._findNewBucketAndPos(file.name) + + path.push({ + bucket: position.bucket, + prefix: toPrefix(position.pos), + node: subShard + }) + + break + } + + const nextSegment = path[index] + + // add next level's worth of links to bucket + await addLinksToHamtBucket(subShard.links, nextSegment.bucket, rootBucket) + + nextSegment.node = subShard + } + + // finally add the new file into the shard + await shard._bucket.put(file.name, { + size: file.size, + cid: file.cid }) + + return { + shard, path + } } const toBucketPath = (position) => { diff --git a/src/core/utils/apply-default-options.js b/src/core/utils/apply-default-options.js new file mode 100644 index 0000000..303c38e --- /dev/null +++ b/src/core/utils/apply-default-options.js @@ -0,0 +1,53 @@ +'use strict' + +const errCode = require('err-code') + +module.exports = (options = {}, defaults) => { + if (Array.isArray(options)) { + options = options.filter(arg => typeof arg === 'object').pop() || {} + } + + const output = {} + + for (let key in defaults) { + if (options[key] !== null && options[key] !== undefined) { + output[key] = options[key] + } else { + output[key] = defaults[key] + } + } + + const format = output.format || output.codec + + if (format && isNaN(format)) { + output.format = format + delete output.codec + } + + // support legacy go arguments + if (options.count !== undefined) { + output.length = options.count + } + + if (options.p !== undefined) { + output.parents = options.p + } + + if (options.l !== undefined) { + output.long = options.l + } + + if (!output.length && output.length !== 0) { + output.length = Infinity + } + + if (output.offset < 0) { + throw errCode(new Error('cannot have negative write offset'), 'EINVALIDPARAMS') + } + + if (output.length < 0) { + throw errCode(new Error('cannot have negative byte count'), 'EINVALIDPARAMS') + } + + return output +} diff --git a/src/core/utils/count-stream-bytes.js b/src/core/utils/count-stream-bytes.js deleted file mode 100644 index 7627cac..0000000 --- a/src/core/utils/count-stream-bytes.js +++ /dev/null @@ -1,17 +0,0 @@ -'use strict' - -const through = require('pull-stream/throughs/through') - -const countStreamBytes = (callback) => { - let bytesRead = 0 - - return through((buffer) => { - bytesRead += buffer.length - - return buffer - }, () => { - callback(bytesRead) - }) -} - -module.exports = countStreamBytes diff --git a/src/core/utils/create-lock.js b/src/core/utils/create-lock.js index a7155fa..12002cc 100644 --- a/src/core/utils/create-lock.js +++ b/src/core/utils/create-lock.js @@ -1,7 +1,6 @@ 'use strict' const mortice = require('mortice') -const log = require('debug')('ipfs:mfs:lock') let lock @@ -17,51 +16,20 @@ module.exports = (repoOwner) => { singleProcess: repoOwner }) - const performOperation = (type, func, args, callback) => { - log(`Queuing ${type} operation`) - - mutex[`${type}Lock`](() => { - return new Promise((resolve, reject) => { - args.push((error, result) => { - log(`${type.substring(0, 1).toUpperCase()}${type.substring(1)} operation 
callback invoked${error ? ' with error: ' + error.message : ''}`) - - if (error) { - return reject(error) - } - - resolve(result) - }) - log(`Starting ${type} operation`) - func.apply(null, args) - }) - }) - .then((result) => { - log(`Finished ${type} operation`) - - callback(null, result) - }, (error) => { - log(`Finished ${type} operation with error: ${error.message}`) - - callback(error) - }) - } - lock = { readLock: (func) => { - return function () { - const args = Array.from(arguments) - let callback = args.pop() - - performOperation('read', func, args, callback) + return (...args) => { + return mutex.readLock(() => { + return func.apply(null, args) + }) } }, writeLock: (func) => { - return function () { - const args = Array.from(arguments) - let callback = args.pop() - - performOperation('write', func, args, callback) + return (...args) => { + return mutex.writeLock(() => { + return func.apply(null, args) + }) } } } diff --git a/src/core/utils/create-node.js b/src/core/utils/create-node.js index c38399d..eacd70e 100644 --- a/src/core/utils/create-node.js +++ b/src/core/utils/create-node.js @@ -1,23 +1,26 @@ 'use strict' -const waterfall = require('async/waterfall') const UnixFS = require('ipfs-unixfs') const { DAGNode -} = require('ipld-dag-pb') +} = require('./dag-pb') +const mc = require('multicodec') +const mh = require('multihashes') -const createNode = (context, type, options, callback) => { - waterfall([ - (done) => DAGNode.create(new UnixFS(type).marshal(), [], done), - (node, done) => context.ipld.put(node, { - version: options.cidVersion, - format: options.format, - hashAlg: options.hashAlg - }, (err, cid) => done(err, { - cid, - node - })) - ], callback) +const createNode = async (context, type, options) => { + const format = mc[options.format.toUpperCase().replace(/-/g, '_')] + const hashAlg = mh.names[options.hashAlg] + + const node = await DAGNode.create(new UnixFS(type).marshal(), []) + const cid = await context.ipld.put(node, format, { + cidVersion: options.cidVersion, + hashAlg + }) + + return { + cid, + node + } } module.exports = createNode diff --git a/src/core/utils/dag-pb.js b/src/core/utils/dag-pb.js new file mode 100644 index 0000000..947422b --- /dev/null +++ b/src/core/utils/dag-pb.js @@ -0,0 +1,30 @@ +'use strict' + +const { + DAGNode, + DAGLink +} = require('ipld-dag-pb') +const promisify = require('promisify-es6') + +const node = { + create: promisify(DAGNode.create, { + context: DAGNode + }), + addLink: promisify(DAGNode.addLink, { + context: DAGNode + }), + rmLink: promisify(DAGNode.rmLink, { + context: DAGNode + }) +} + +const link = { + create: promisify(DAGLink.create, { + context: DAGLink + }) +} + +module.exports = { + DAGNode: node, + DAGLink: link +} diff --git a/src/core/utils/format-cid.js b/src/core/utils/format-cid.js deleted file mode 100644 index ab9bae3..0000000 --- a/src/core/utils/format-cid.js +++ /dev/null @@ -1,15 +0,0 @@ -'use strict' - -const CID = require('cids') - -module.exports = (cid, base) => { - if (Buffer.isBuffer(cid)) { - cid = new CID(cid) - } - - if (base === 'base58btc') { - return cid.toBaseEncodedString() - } - - return cid.toV1().toBaseEncodedString(base) -} diff --git a/src/core/utils/hamt-utils.js b/src/core/utils/hamt-utils.js index 00ec1e2..e852c3b 100644 --- a/src/core/utils/hamt-utils.js +++ b/src/core/utils/hamt-utils.js @@ -2,42 +2,39 @@ const { DAGNode -} = require('ipld-dag-pb') -const waterfall = require('async/waterfall') -const whilst = require('async/whilst') -const series = require('async/series') +} = 
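// A brief sketch of why the promisified dag-pb wrapper above exists: ipld-dag-pb
// at this version still takes callbacks, so the async/await code awaits the
// wrapped DAGNode/DAGLink helpers instead. The empty-directory example below is
// illustrative only.
const { DAGNode, DAGLink } = require('./dag-pb')
const UnixFS = require('ipfs-unixfs')

const emptyDirWithChild = async (childCid, childSize) => {
  let node = await DAGNode.create(new UnixFS('directory').marshal(), [])
  node = await DAGNode.addLink(node, await DAGLink.create('child', childSize, childCid))

  return node
}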
require('./dag-pb') const Bucket = require('hamt-sharding/src/bucket') const DirSharded = require('ipfs-unixfs-importer/src/importer/dir-sharded') const log = require('debug')('ipfs:mfs:core:utils:hamt-utils') const UnixFS = require('ipfs-unixfs') +const promisify = require('promisify-es6') +const mc = require('multicodec') +const mh = require('multihashes') -const updateHamtDirectory = (context, links, bucket, options, callback) => { +const updateHamtDirectory = async (context, links, bucket, options) => { // update parent with new bit field - waterfall([ - (cb) => { - const data = Buffer.from(bucket._children.bitField().reverse()) - const dir = new UnixFS('hamt-sharded-directory', data) - dir.fanout = bucket.tableSize() - dir.hashType = DirSharded.hashFn.code - - DAGNode.create(dir.marshal(), links, cb) - }, - (parent, done) => { - // Persist the new parent DAGNode - context.ipld.put(parent, { - version: options.cidVersion, - format: options.codec, - hashAlg: options.hashAlg, - hashOnly: !options.flush - }, (error, cid) => done(error, { - node: parent, - cid - })) - } - ], callback) + const data = Buffer.from(bucket._children.bitField().reverse()) + const dir = new UnixFS('hamt-sharded-directory', data) + dir.fanout = bucket.tableSize() + dir.hashType = DirSharded.hashFn.code + + const format = mc[options.format.toUpperCase().replace(/-/g, '_')] + const hashAlg = mh.names[options.hashAlg] + + const parent = await DAGNode.create(dir.marshal(), links) + const cid = await context.ipld.put(parent, format, { + cidVersion: options.cidVersion, + hashAlg, + hashOnly: !options.flush + }) + + return { + node: parent, + cid + } } -const recreateHamtLevel = (links, rootBucket, parentBucket, positionAtParent, callback) => { +const recreateHamtLevel = async (links, rootBucket, parentBucket, positionAtParent) => { // recreate this level of the HAMT const bucket = new Bucket({ hashFn: DirSharded.hashFn, @@ -48,11 +45,13 @@ const recreateHamtLevel = (links, rootBucket, parentBucket, positionAtParent, ca parentBucket._putObjectAt(positionAtParent, bucket) } - addLinksToHamtBucket(links, bucket, rootBucket, callback) + await addLinksToHamtBucket(links, bucket, rootBucket) + + return bucket } -const addLinksToHamtBucket = (links, bucket, rootBucket, callback) => { - Promise.all( +const addLinksToHamtBucket = async (links, bucket, rootBucket) => { + await Promise.all( links.map(link => { if (link.name.length === 2) { const pos = parseInt(link.name, 16) @@ -66,11 +65,10 @@ const addLinksToHamtBucket = (links, bucket, rootBucket, callback) => { return (rootBucket || bucket).put(link.name.substring(2), { size: link.size, - multihash: link.cid + cid: link.cid }) }) ) - .then(() => callback(null, bucket), callback) } const toPrefix = (position) => { @@ -81,110 +79,95 @@ const toPrefix = (position) => { .substring(0, 2) } -const generatePath = (context, fileName, rootNode, callback) => { +const generatePath = async (context, fileName, rootNode) => { // start at the root bucket and descend, loading nodes as we go - recreateHamtLevel(rootNode.links, null, null, null, async (err, rootBucket) => { - if (err) { - return callback(err) + const rootBucket = await recreateHamtLevel(rootNode.links, null, null, null) + const position = await rootBucket._findNewBucketAndPos(fileName) + + // the path to the root bucket + let path = [{ + bucket: position.bucket, + prefix: toPrefix(position.pos) + }] + let currentBucket = position.bucket + + while (currentBucket !== rootBucket) { + path.push({ + bucket: currentBucket, + prefix: 
toPrefix(currentBucket._posAtParent) + }) + + currentBucket = currentBucket._parent + } + + path.reverse() + path[0].node = rootNode + + // load DAGNode for each path segment + for (let i = 0; i < path.length; i++) { + const segment = path[i] + + // find prefix in links + const link = segment.node.links + .filter(link => link.name.substring(0, 2) === segment.prefix) + .pop() + + // entry was not in shard + if (!link) { + // reached bottom of tree, file will be added to the current bucket + log(`Link ${segment.prefix}${fileName} will be added`) + // return path + continue } - const position = await rootBucket._findNewBucketAndPos(fileName) + // found entry + if (link.name === `${segment.prefix}${fileName}`) { + log(`Link ${segment.prefix}${fileName} will be replaced`) + // file already existed, file will be added to the current bucket + // return path + continue + } + + // found subshard + log(`Found subshard ${segment.prefix}`) + const node = await context.ipld.get(link.cid) - // the path to the root bucket - let path = [{ - bucket: position.bucket, - prefix: toPrefix(position.pos) - }] - let currentBucket = position.bucket + // subshard hasn't been loaded, descend to the next level of the HAMT + if (!path[i + 1]) { + log(`Loaded new subshard ${segment.prefix}`) - while (currentBucket !== rootBucket) { + await recreateHamtLevel(node.links, rootBucket, segment.bucket, parseInt(segment.prefix, 16)) + const position = await rootBucket._findNewBucketAndPos(fileName) + + // i-- path.push({ - bucket: currentBucket, - prefix: toPrefix(currentBucket._posAtParent) + bucket: position.bucket, + prefix: toPrefix(position.pos), + node: node }) - currentBucket = currentBucket._parent + continue } - path[path.length - 1].node = rootNode - - let index = path.length - - // load DAGNode for each path segment - whilst( - () => index > 0, - (next) => { - index-- - - const segment = path[index] - - // find prefix in links - const link = segment.node.links - .filter(link => link.name.substring(0, 2) === segment.prefix) - .pop() - - if (!link) { - // reached bottom of tree, file will be added to the current bucket - log(`Link ${segment.prefix}${fileName} will be added`) - return next(null, path) - } - - if (link.name === `${segment.prefix}${fileName}`) { - log(`Link ${segment.prefix}${fileName} will be replaced`) - // file already existed, file will be added to the current bucket - return next(null, path) - } - - // found subshard - log(`Found subshard ${segment.prefix}`) - context.ipld.get(link.cid, (err, result) => { - if (err) { - return next(err) - } - - // subshard hasn't been loaded, descend to the next level of the HAMT - if (!path[index - 1]) { - log(`Loaded new subshard ${segment.prefix}`) - const node = result.value - - return recreateHamtLevel(node.links, rootBucket, segment.bucket, parseInt(segment.prefix, 16), async (err, bucket) => { - if (err) { - return next(err) - } - - const position = await rootBucket._findNewBucketAndPos(fileName) - - index++ - path.unshift({ - bucket: position.bucket, - prefix: toPrefix(position.pos), - node: node - }) - - next() - }) - } - - const nextSegment = path[index - 1] - - // add intermediate links to bucket - addLinksToHamtBucket(result.value.links, nextSegment.bucket, rootBucket, (error) => { - nextSegment.node = result.value - - next(error) - }) - }) - }, - async (err, path) => { - await rootBucket.put(fileName, true) - - callback(err, { rootBucket, path }) - } - ) - }) + const nextSegment = path[i + 1] + + // add intermediate links to bucket + await 
addLinksToHamtBucket(node.links, nextSegment.bucket, rootBucket) + + nextSegment.node = node + } + + await rootBucket.put(fileName, true) + + path.reverse() + + return { + rootBucket, + path + } } -const createShard = (context, contents, options, callback) => { +const createShard = async (context, contents, options) => { const shard = new DirSharded({ root: true, dir: true, @@ -197,25 +180,18 @@ const createShard = (context, contents, options, callback) => { ...options }) - const operations = contents.map(contents => { - return (cb) => { - shard.put(contents.name, { - size: contents.size, - multihash: contents.multihash - }, cb) - } - }) + for (let i = 0; i < contents.length; i++) { + await shard._bucket.put(contents[i].name, { + size: contents[i].size, + cid: contents[i].cid + }) + } - return series( - operations, - (err) => { - if (err) { - return callback(err) - } + shard.flush = promisify(shard.flush, { + context: shard + }) - shard.flush('', context.ipld, null, callback) - } - ) + return shard.flush('', context.ipld, null) } module.exports = { diff --git a/src/core/utils/index.js b/src/core/utils/index.js deleted file mode 100644 index 43e2299..0000000 --- a/src/core/utils/index.js +++ /dev/null @@ -1,29 +0,0 @@ -'use strict' - -const constants = require('./constants') - -module.exports = { - addLink: require('./add-link'), - countStreamBytes: require('./count-stream-bytes'), - createLock: require('./create-lock'), - createNode: require('./create-node'), - formatCid: require('./format-cid'), - limitStreamBytes: require('./limit-stream-bytes'), - loadNode: require('./load-node'), - removeLink: require('./remove-link'), - toMfsPath: require('./to-mfs-path'), - toPathComponents: require('./to-path-components'), - toPullSource: require('./to-pull-source'), - toSourcesAndDestination: require('./to-sources-and-destination'), - toSources: require('./to-sources'), - toTrail: require('./to-trail'), - updateMfsRoot: require('./update-mfs-root'), - updateTree: require('./update-tree'), - withMfsRoot: require('./with-mfs-root'), - zeros: require('./zeros'), - - FILE_SEPARATOR: constants.FILE_SEPARATOR, - MAX_CHUNK_SIZE: constants.MAX_CHUNK_SIZE, - MAX_LINKS: constants.MAX_LINKS, - FILE_TYPES: constants.FILE_TYPES -} diff --git a/src/core/utils/limit-stream-bytes.js b/src/core/utils/limit-stream-bytes.js deleted file mode 100644 index 40094a6..0000000 --- a/src/core/utils/limit-stream-bytes.js +++ /dev/null @@ -1,24 +0,0 @@ -'use strict' - -const asyncMap = require('pull-stream/throughs/async-map') - -const limitStreamBytes = (limit) => { - let bytesRead = 0 - - return asyncMap((buffer, cb) => { - if (bytesRead > limit) { - return cb(true) // eslint-disable-line standard/no-callback-literal - } - - // If we only need to return part of this buffer, slice it to make it smaller - if (bytesRead + buffer.length > limit) { - buffer = buffer.slice(0, limit - bytesRead) - } - - bytesRead = bytesRead + buffer.length - - cb(null, buffer) - }) -} - -module.exports = limitStreamBytes diff --git a/src/core/utils/load-node.js b/src/core/utils/load-node.js deleted file mode 100644 index 2572a33..0000000 --- a/src/core/utils/load-node.js +++ /dev/null @@ -1,21 +0,0 @@ -'use strict' - -const waterfall = require('async/waterfall') -const CID = require('cids') -const log = require('debug')('ipfs:mfs:utils:load-node') - -const loadNode = (context, dagLink, callback) => { - const cid = new CID(dagLink.cid) - - log(`Loading DAGNode for child ${cid.toBaseEncodedString()}`) - - waterfall([ - (cb) => context.ipld.get(cid, 
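// A hedged sketch of driving createShard above, assuming it is exported from
// hamt-utils alongside generatePath and updateHamtDirectory: given { name, size,
// cid } entries it fills a DirSharded instance and resolves with the flushed
// HAMT root. The entries and option values here are placeholders.
const { createShard } = require('./hamt-utils')

const shardFromEntries = async (context, entries) => {
  return createShard(context, entries, {
    format: 'dag-pb',
    hashAlg: 'sha2-256',
    cidVersion: 0
  })
}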
cb), - (result, cb) => cb(null, { - node: result.value, - cid - }) - ], callback) -} - -module.exports = loadNode diff --git a/src/core/utils/remove-link.js b/src/core/utils/remove-link.js index f795c23..b459841 100644 --- a/src/core/utils/remove-link.js +++ b/src/core/utils/remove-link.js @@ -3,8 +3,7 @@ const { DAGNode, DAGLink -} = require('ipld-dag-pb') -const waterfall = require('async/waterfall') +} = require('./dag-pb') const CID = require('cids') const log = require('debug')('ipfs:mfs:core:utils:remove-link') const UnixFS = require('ipfs-unixfs') @@ -12,44 +11,27 @@ const { generatePath, updateHamtDirectory } = require('./hamt-utils') +const errCode = require('err-code') +const mc = require('multicodec') +const mh = require('multihashes') -const defaultOptions = { - parent: undefined, - parentCid: undefined, - name: '', - flush: true, - cidVersion: 0, - hashAlg: 'sha2-256', - codec: 'dag-pb', - shardSplitThreshold: 1000 -} - -const removeLink = (context, options, callback) => { - options = Object.assign({}, defaultOptions, options) - - if (!options.parentCid) { - return callback(new Error('No parent CID passed to removeLink')) +const removeLink = async (context, options) => { + if (!options.parentCid && !options.parent) { + throw errCode(new Error('No parent node or CID passed to removeLink'), 'EINVALIDPARENT') } - if (!CID.isCID(options.parentCid)) { - return callback(new Error('Invalid CID passed to addLink')) + if (options.parentCid && !CID.isCID(options.parentCid)) { + throw errCode(new Error('Invalid CID passed to removeLink'), 'EINVALIDPARENTCID') } if (!options.parent) { log('Loading parent node', options.parentCid.toBaseEncodedString()) - return waterfall([ - (cb) => context.ipld.get(options.parentCid, cb), - (result, cb) => cb(null, result.value), - (node, cb) => removeLink(context, { - ...options, - parent: node - }, cb) - ], callback) + options.parent = await context.ipld.get(options.parentCid) } if (!options.name) { - return callback(new Error('No child name passed to removeLink')) + throw errCode(new Error('No child name passed to removeLink'), 'EINVALIDCHILDNAME') } const meta = UnixFS.unmarshal(options.parent.data) @@ -57,54 +39,55 @@ const removeLink = (context, options, callback) => { if (meta.type === 'hamt-sharded-directory') { log(`Removing ${options.name} from sharded directory`) - return removeFromShardedDirectory(context, options, callback) + return removeFromShardedDirectory(context, options) } log(`Removing link ${options.name} regular directory`) - return removeFromDirectory(context, options, callback) + return removeFromDirectory(context, options) } -const removeFromDirectory = (context, options, callback) => { - waterfall([ - (cb) => DAGNode.rmLink(options.parent, options.name, cb), - (newParentNode, cb) => { - context.ipld.put(newParentNode, { - version: options.cidVersion, - format: options.codec, - hashAlg: options.hashAlg - }, (error, cid) => cb(error, { - node: newParentNode, - cid - })) - }, - (result, cb) => { - log('Updated regular directory', result.cid.toBaseEncodedString()) - - cb(null, result) - } - ], callback) +const removeFromDirectory = async (context, options) => { + const format = mc[options.format.toUpperCase().replace(/-/g, '_')] + const hashAlg = mh.names[options.hashAlg] + + const newParentNode = await DAGNode.rmLink(options.parent, options.name) + const cid = await context.ipld.put(newParentNode, format, { + cidVersion: options.cidVersion, + hashAlg + }) + + log('Updated regular directory', cid.toBaseEncodedString()) + + return { + 
node: newParentNode, + cid + } } -const removeFromShardedDirectory = (context, options, callback) => { - return waterfall([ - (cb) => generatePath(context, options.name, options.parent, cb), - ({ rootBucket, path }, cb) => { - rootBucket.del(options.name) - .then(() => cb(null, { rootBucket, path }), cb) - }, - ({ rootBucket, path }, cb) => { - updateShard(context, path, { - name: options.name, - cid: options.cid, - size: options.size - }, options, (err, result = {}) => cb(err, { rootBucket, ...result })) - }, - ({ rootBucket, node }, cb) => updateHamtDirectory(context, node.links, rootBucket, options, cb) - ], callback) +const removeFromShardedDirectory = async (context, options) => { + const { + rootBucket, path + } = await generatePath(context, options.name, options.parent) + + await rootBucket.del(options.name) + + const { + node + } = await updateShard(context, path, { + name: options.name, + cid: options.cid, + size: options.size, + hashAlg: options.hashAlg, + format: options.format, + cidVersion: options.cidVersion, + flush: options.flush + }, options) + + return updateHamtDirectory(context, node.links, rootBucket, options) } -const updateShard = (context, positions, child, options, callback) => { +const updateShard = async (context, positions, child, options) => { const { bucket, prefix, @@ -115,67 +98,45 @@ const updateShard = (context, positions, child, options, callback) => { .find(link => link.name.substring(0, 2) === prefix) if (!link) { - return callback(new Error(`No link found with prefix ${prefix} for file ${child.name}`)) + throw errCode(new Error(`No link found with prefix ${prefix} for file ${child.name}`), 'ERR_NOT_FOUND') + } + + if (link.name === `${prefix}${child.name}`) { + log(`Removing existing link ${link.name}`) + + const newNode = await DAGNode.rmLink(node, link.name) + + await bucket.del(child.name) + + return updateHamtDirectory(context, newNode.links, bucket, options) } - return waterfall([ - (cb) => { - if (link.name === `${prefix}${child.name}`) { - log(`Removing existing link ${link.name}`) - - return waterfall([ - (done) => DAGNode.rmLink(node, link.name, done), - (node, done) => { - context.ipld.put(node, { - version: options.cidVersion, - format: options.codec, - hashAlg: options.hashAlg, - hashOnly: !options.flush - }, (error, cid) => done(error, { - node, - cid - })) - }, - (result, done) => { - bucket.del(child.name) - .then(() => done(null, result), done) - }, - (result, done) => updateHamtDirectory(context, result.node.links, bucket, options, done) - ], cb) - } - - log(`Descending into sub-shard ${link.name} for ${prefix}${child.name}`) - - return waterfall([ - (cb) => updateShard(context, positions, child, options, cb), - (result, cb) => { - let newName = prefix - - if (result.node.links.length === 1) { - log(`Removing subshard for ${prefix}`) - - // convert shard back to normal dir - result.cid = result.node.links[0].cid - result.node = result.node.links[0] - - newName = `${prefix}${result.node.name.substring(2)}` - } - - log(`Updating shard ${prefix} with name ${newName}`) - - updateShardParent(context, bucket, node, prefix, newName, result.node.size, result.cid, options, cb) - } - ], cb) - } - ], callback) + log(`Descending into sub-shard ${link.name} for ${prefix}${child.name}`) + + const result = await updateShard(context, positions, child, options) + + let newName = prefix + + if (result.node.links.length === 1) { + log(`Removing subshard for ${prefix}`) + + // convert shard back to normal dir + result.cid = result.node.links[0].cid + 
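// the sub-shard is dropped entirely: its one remaining child is re-linked
// directly under this level, and the name swaps the child's two-character
// sub-shard prefix for this level's prefix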
result.node = result.node.links[0] + + newName = `${prefix}${result.node.name.substring(2)}` + } + + log(`Updating shard ${prefix} with name ${newName}`) + + return updateShardParent(context, bucket, node, prefix, newName, result.node.size, result.cid, options) } -const updateShardParent = async (context, bucket, parent, oldName, newName, size, cid, options, callback) => { - waterfall([ - (done) => DAGNode.rmLink(parent, oldName, done), - (parent, done) => DAGNode.addLink(parent, new DAGLink(newName, size, cid), done), - (parent, done) => updateHamtDirectory(context, parent.links, bucket, options, done) - ], callback) +const updateShardParent = async (context, bucket, parent, oldName, newName, size, cid, options) => { + parent = await DAGNode.rmLink(parent, oldName) + parent = await DAGNode.addLink(parent, await DAGLink.create(newName, size, cid)) + + return updateHamtDirectory(context, parent.links, bucket, options) } module.exports = removeLink diff --git a/src/core/utils/to-async-iterator.js b/src/core/utils/to-async-iterator.js new file mode 100644 index 0000000..6f35083 --- /dev/null +++ b/src/core/utils/to-async-iterator.js @@ -0,0 +1,91 @@ +'use strict' + +const errCode = require('err-code') +const fs = require('fs') +const log = require('debug')('ipfs:mfs:utils:to-pull-source') +const { + MAX_CHUNK_SIZE +} = require('./constants') + +const toAsyncIterator = async (content) => { + if (!content) { + throw errCode(new Error('paths must start with a leading /'), 'EINVALIDPATH') + } + + if (typeof content === 'string' || content instanceof String) { + // Paths, node only + log('Content was a path') + + return fs.createReadStream(content) + } + + if (content.length) { + log('Content was array-like') + + return { + [Symbol.asyncIterator]: async function * bufferContent () { + yield content + } + } + } + + if (content[Symbol.asyncIterator]) { + log('Content was an async iterator') + return content + } + + if (content[Symbol.iterator]) { + log('Content was an iterator') + return content + } + + if (global.Blob && content instanceof global.Blob) { + // HTML5 Blob objects (including Files) + log('Content was an HTML5 Blob') + + let index = 0 + + const iterator = { + next: async () => { + if (index > content.size) { + return { + done: true + } + } + + return new Promise((resolve, reject) => { + const chunk = content.slice(index, MAX_CHUNK_SIZE) + index += MAX_CHUNK_SIZE + + const reader = new global.FileReader() + + const handleLoad = (ev) => { + reader.removeEventListener('loadend', handleLoad, false) + + if (ev.error) { + return reject(ev.error) + } + + resolve({ + done: false, + value: Buffer.from(reader.result) + }) + } + + reader.addEventListener('loadend', handleLoad) + reader.readAsArrayBuffer(chunk) + }) + } + } + + return { + [Symbol.asyncIterator]: () => { + return iterator + } + } + } + + throw errCode(new Error(`Don't know how to convert ${content} into an async iterator`), 'EINVALIDPARAMS') +} + +module.exports = toAsyncIterator diff --git a/src/core/utils/to-mfs-path.js b/src/core/utils/to-mfs-path.js index 49c7a92..a1263bb 100644 --- a/src/core/utils/to-mfs-path.js +++ b/src/core/utils/to-mfs-path.js @@ -3,98 +3,104 @@ const { FILE_SEPARATOR } = require('./constants') -const withMfsRoot = require('./with-mfs-root') -const waterfall = require('async/waterfall') -const parallel = require('async/parallel') +const loadMfsRoot = require('./with-mfs-root') const toPathComponents = require('./to-path-components') +const exporter = require('ipfs-unixfs-exporter') +const errCode = 
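// A short sketch of the new toAsyncIterator contract above: whatever form the
// content takes (Buffer, string path, iterator, async iterator or Blob), the
// caller consumes it with for-await. The Buffer input below is just an example.
const toAsyncIterator = require('./to-async-iterator')

const byteLength = async (content) => {
  const source = await toAsyncIterator(content)
  let bytes = 0

  for await (const chunk of source) {
    bytes += chunk.length
  }

  return bytes
}

// byteLength(Buffer.from('hello world')) resolves to 11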
require('err-code') const IPFS_PREFIX = 'ipfs' -const toMfsPath = (context, path, callback) => { +const toMfsPath = async (context, path) => { let outputArray = Array.isArray(path) - const paths = Array.isArray(path) ? path : [path] - - waterfall([ - (cb) => { - parallel({ - paths: (done) => { - let p - try { - p = paths.map(path => { - path = (path || '').trim() - path = path.replace(/(\/\/+)/g, '/') - - if (!path) { - throw new Error('paths must not be empty') - } - - if (path.substring(0, 1) !== FILE_SEPARATOR) { - throw new Error(`paths must start with a leading ${FILE_SEPARATOR}`) - } - - if (path.substring(path.length - FILE_SEPARATOR.length) === FILE_SEPARATOR) { - path = path.substring(0, path.length - FILE_SEPARATOR.length) - } - - return toPathComponents(path) - }) - } catch (err) { - return done(err) - } - - done(null, p) - }, - root: (done) => withMfsRoot(context, done) - }, cb) - }, - ({ paths, root }, cb) => { - cb(null, paths.map(parts => { - if (parts[0] === IPFS_PREFIX) { - let mfsDirectory - - if (parts.length === 2) { - mfsDirectory = `${FILE_SEPARATOR}${parts.join(FILE_SEPARATOR)}` - } else { - mfsDirectory = `${FILE_SEPARATOR}${parts.slice(0, parts.length - 1).join(FILE_SEPARATOR)}` - } - - return { - type: 'ipfs', - depth: parts.length - 2, - - mfsPath: `${FILE_SEPARATOR}${parts.join(FILE_SEPARATOR)}`, - mfsDirectory, - root, - parts, - path: `${FILE_SEPARATOR}${parts.join(FILE_SEPARATOR)}`, - name: parts[parts.length - 1] - } - } + let paths = Array.isArray(path) ? path : [path] + const root = await loadMfsRoot(context) - const mfsPath = `/${IPFS_PREFIX}/${root.toBaseEncodedString()}/${parts.join(FILE_SEPARATOR)}` - const mfsDirectory = `/${IPFS_PREFIX}/${root.toBaseEncodedString()}/${parts.slice(0, parts.length - 1).join(FILE_SEPARATOR)}` + paths = paths.map(path => { + path = (path || '').trim() + path = path.replace(/(\/\/+)/g, '/') - return { - type: 'mfs', - depth: parts.length, + if (path.endsWith('/') && path.length > 1) { + path = path.substring(0, path.length - 1) + } - mfsDirectory, - mfsPath, - root, - parts, - path: `${FILE_SEPARATOR}${parts.join(FILE_SEPARATOR)}`, - name: parts[parts.length - 1] - } - })) - }, - (mfsPaths, cb) => { - if (outputArray) { - return cb(null, mfsPaths) + if (!path) { + throw errCode(new Error('paths must not be empty'), 'ENOPATH') + } + + if (path.substring(0, 1) !== FILE_SEPARATOR) { + throw errCode(new Error(`paths must start with a leading ${FILE_SEPARATOR}`), 'EINVALIDPATH') + } + + if (path.substring(path.length - FILE_SEPARATOR.length) === FILE_SEPARATOR) { + path = path.substring(0, path.length - FILE_SEPARATOR.length) + } + + const pathComponents = toPathComponents(path) + + if (pathComponents[0] === IPFS_PREFIX) { + // e.g. /ipfs/QMfoo or /ipfs/Qmfoo/sub/path + let mfsDirectory + + if (pathComponents.length === 2) { + mfsDirectory = `${FILE_SEPARATOR}${pathComponents.join(FILE_SEPARATOR)}` + } else { + mfsDirectory = `${FILE_SEPARATOR}${pathComponents.slice(0, pathComponents.length - 1).join(FILE_SEPARATOR)}` + } + + return { + type: 'ipfs', + depth: pathComponents.length - 2, + + mfsPath: `${FILE_SEPARATOR}${pathComponents.join(FILE_SEPARATOR)}`, + mfsDirectory, + parts: pathComponents, + path: `${FILE_SEPARATOR}${pathComponents.join(FILE_SEPARATOR)}`, + name: pathComponents[pathComponents.length - 1] } + } + + const mfsPath = `/${IPFS_PREFIX}/${root.toBaseEncodedString()}${pathComponents.length ? 
'/' + pathComponents.join(FILE_SEPARATOR) : ''}` + const mfsDirectory = `/${IPFS_PREFIX}/${root.toBaseEncodedString()}/${pathComponents.slice(0, pathComponents.length - 1).join(FILE_SEPARATOR)}` + + return { + type: 'mfs', + depth: pathComponents.length, - cb(null, mfsPaths[0]) + mfsDirectory, + mfsPath, + parts: pathComponents, + path: `${FILE_SEPARATOR}${pathComponents.join(FILE_SEPARATOR)}`, + name: pathComponents[pathComponents.length - 1] } - ], callback) + }) + + await Promise.all( + paths.map(async (path) => { + const cidPath = path.type === 'mfs' ? path.mfsPath : path.path + + try { + const res = await exporter(cidPath, context.ipld) + + path.cid = res.cid + path.mfsPath = `/ipfs/${res.path}` + path.unixfs = res.unixfs + path.content = res.content + + } catch (err) { + if (err.code !== 'ERR_NOT_FOUND') { + throw err + } + } + + path.exists = Boolean(path.cid) + }) + ) + + if (outputArray) { + return paths + } + + return paths[0] } module.exports = toMfsPath diff --git a/src/core/utils/to-pull-source.js b/src/core/utils/to-pull-source.js deleted file mode 100644 index c0f2f3d..0000000 --- a/src/core/utils/to-pull-source.js +++ /dev/null @@ -1,68 +0,0 @@ -'use strict' - -const toPull = require('stream-to-pull-stream') -const isStream = require('is-stream') -const fileReaderStream = require('filereader-stream') -const isPullStream = require('is-pull-stream') -const fs = require('fs') -const values = require('pull-stream/sources/values') -const log = require('debug')('ipfs:mfs:utils:to-pull-source') -const waterfall = require('async/waterfall') - -const toPullSource = (content, options, callback) => { - if (!content) { - return callback(new Error('paths must start with a leading /')) - } - - // Buffers - if (Buffer.isBuffer(content)) { - log('Content was a buffer') - - if (!options.length && options.length !== 0) { - options.length = options.length || content.length - } - - return callback(null, values([content])) - } - - // Paths, node only - if (typeof content === 'string' || content instanceof String) { - log('Content was a path') - - // Find out the file size if options.length has not been specified - return waterfall([ - (done) => options.length ? 
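// A sketch of what the rewritten toMfsPath above resolves with for an MFS path:
// a descriptor object rather than a plain string. The example path and the field
// values in the comments are illustrative.
const toMfsPath = require('./to-mfs-path')

const describePath = async (context) => {
  const entry = await toMfsPath(context, '/foo/bar.txt')

  // entry.type    -> 'mfs'
  // entry.parts   -> ['foo', 'bar.txt']
  // entry.name    -> 'bar.txt'
  // entry.mfsPath -> '/ipfs/<mfs root cid>/foo/bar.txt', rewritten to the
  //                  resolved exporter path when the entry exists
  // entry.exists  -> true/false; cid, unixfs and content are only set when it does

  return entry
}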
done(null, { - size: options.length - }) : fs.stat(content, done), - (stats, done) => { - options.length = stats.size - - done(null, toPull.source(fs.createReadStream(content))) - } - ], callback) - } - - // HTML5 Blob objects (including Files) - if (global.Blob && content instanceof global.Blob) { - log('Content was an HTML5 Blob') - options.length = options.length || content.size - - content = fileReaderStream(content) - } - - // Node streams - if (isStream(content)) { - log('Content was a Node stream') - return callback(null, toPull.source(content)) - } - - // Pull stream - if (isPullStream.isSource(content)) { - log('Content was a pull-stream') - return callback(null, content) - } - - callback(new Error(`Don't know how to convert ${content} into a pull stream source`)) -} - -module.exports = toPullSource diff --git a/src/core/utils/to-sources-and-destination.js b/src/core/utils/to-sources-and-destination.js index 5690373..b41d3c8 100644 --- a/src/core/utils/to-sources-and-destination.js +++ b/src/core/utils/to-sources-and-destination.js @@ -2,19 +2,19 @@ const toSources = require('./to-sources') -function toSourcesAndDestination (context, args, defaultOptions, callback) { - toSources(context, args, defaultOptions, (err, result) => { - if (err) { - return callback(err) - } +async function toSourcesAndDestination (context, args) { + const { + sources, + options + } = await toSources(context, args) - const destination = result.sources.pop() + const destination = sources.pop() - callback(null, { - destination, - ...result - }) - }) + return { + destination, + sources, + options + } } module.exports = toSourcesAndDestination diff --git a/src/core/utils/to-sources.js b/src/core/utils/to-sources.js index cc952dd..81c3923 100644 --- a/src/core/utils/to-sources.js +++ b/src/core/utils/to-sources.js @@ -2,10 +2,7 @@ const toMfsPath = require('./to-mfs-path') -function toSources (context, args, defaultOptions, callback) { - args = args.slice() - const options = Object.assign({}, defaultOptions, args.filter(arg => typeof arg === 'object').pop() || {}) - +async function toSources (context, args) { // Support weird mfs.mv([source, dest], options, callback) signature if (Array.isArray(args[0])) { args = args[0] @@ -15,12 +12,9 @@ function toSources (context, args, defaultOptions, callback) { .filter(arg => typeof arg === 'string') .map(source => source.trim()) - toMfsPath(context, sources, (err, sources) => { - callback(err, { - sources, - options - }) - }) + return { + sources: await toMfsPath(context, sources) + } } module.exports = toSources diff --git a/src/core/utils/to-trail.js b/src/core/utils/to-trail.js index 25a3c74..92632db 100644 --- a/src/core/utils/to-trail.js +++ b/src/core/utils/to-trail.js @@ -1,68 +1,23 @@ 'use strict' -const toPathComponents = require('./to-path-components') const exporter = require('ipfs-unixfs-exporter') -const pull = require('pull-stream/pull') -const filter = require('pull-stream/throughs/filter') -const map = require('pull-stream/throughs/map') -const collect = require('pull-stream/sinks/collect') const log = require('debug')('ipfs:mfs:utils:to-trail') -const toTrail = (context, path, options, callback) => { - const toExport = toPathComponents(path) - .slice(1) - const finalPath = `/${toExport - .slice(1) - .join('/')}` +const toTrail = async (context, path) => { + log(`Creating trail for path ${path}`) - let depth = 0 + const output = [] - log(`Creating trail for path ${path} ${toExport}`) + for await (const fsEntry of exporter.path(path, context.ipld)) { + 
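// exporter.path yields one entry per segment of the requested path, from the
// root down to the final component; only the fields the mfs commands need are
// kept on the trail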
output.push({ + name: fsEntry.name, + cid: fsEntry.cid, + size: fsEntry.node.size, + type: fsEntry.unixfs.type + }) + } - let exported = '' - - pull( - exporter(path, context.ipld, { - fullPath: true, - maxDepth: toExport.length - 1 - }), - // find the directory from each level in the filesystem - filter(node => { - log(`Saw node ${node.name} for segment ${toExport[depth]} at depth ${node.depth}`) - - if (node.name === toExport[depth]) { - depth++ - - return true - } - - return false - }), - // load DAGNode for the containing folder - map((node) => { - let currentPath = '/' - let name = currentPath - - if (exported) { - currentPath = `${exported === '/' ? '' : exported}/${toExport[node.depth]}` - name = node.name - } - - exported = currentPath - - if (exported !== finalPath && node.type !== 'dir') { - throw new Error(`cannot access ${exported}: Not a directory ${finalPath}`) - } - - return { - name, - cid: node.cid, - size: node.size, - type: node.type - } - }), - collect(callback) - ) + return output } module.exports = toTrail diff --git a/src/core/utils/update-mfs-root.js b/src/core/utils/update-mfs-root.js index 3dac620..a175430 100644 --- a/src/core/utils/update-mfs-root.js +++ b/src/core/utils/update-mfs-root.js @@ -1,20 +1,16 @@ 'use strict' -const log = require('debug')('ipfs:mfs:utils:update-mfs:root') -const waterfall = require('async/waterfall') -const CID = require('cids') +const log = require('debug')('ipfs:mfs:utils:update-mfs-root') const { MFS_ROOT_KEY } = require('./constants') -const updateMfsRoot = (context, buffer, callback) => { - const cid = new CID(buffer) - +const updateMfsRoot = async (context, cid) => { log(`New MFS root will be ${cid.toBaseEncodedString()}`) - waterfall([ - (cb) => context.repo.datastore.put(MFS_ROOT_KEY, cid.buffer, (error) => cb(error)) - ], (error) => callback(error, cid)) + await context.repo.datastore.put(MFS_ROOT_KEY, cid.buffer) + + return cid } module.exports = updateMfsRoot diff --git a/src/core/utils/update-tree.js b/src/core/utils/update-tree.js index 5797dd9..4572d04 100644 --- a/src/core/utils/update-tree.js +++ b/src/core/utils/update-tree.js @@ -1,54 +1,60 @@ 'use strict' -const waterfall = require('async/waterfall') -const reduceRight = require('async/reduceRight') +const log = require('debug')('ipfs:mfs:utils:update-tree') const addLink = require('./add-link') const defaultOptions = { shardSplitThreshold: 1000 } -const updateTree = (context, trail, options, callback) => { +// loop backwards through the trail, replacing links of all components to update CIDs +const updateTree = async (context, trail, options) => { options = Object.assign({}, defaultOptions, options) - waterfall([ - (cb) => context.ipld.getMany(trail.map(node => node.cid), cb), - (nodes, cb) => { - let index = trail.length - 1 - - reduceRight(trail, null, (child, node, done) => { - const dagNode = nodes[index] - const cid = trail[index].cid - index-- - - if (!child) { - // first item in the list - return done(null, node) - } - - addLink(context, { - parent: dagNode, - parentCid: cid, - name: child.name, - cid: child.cid, - size: child.size, - flush: options.flush, - shardSplitThreshold: options.shardSplitThreshold - }, (err, result) => { - if (err) { - return done(err) - } - - done(err, { - cid: result.cid, - node: result.node, - name: node.name, - size: result.node.size - }) - }) - }, cb) + log('Trail', trail) + trail = trail.slice().reverse() + + let index = 0 + let child + + for await (const node of context.ipld.getMany(trail.map(node => node.cid))) { + const cid 
= trail[index].cid + const name = trail[index].name + index++ + + if (!child) { + child = { + cid, + name, + size: node.size + } + + continue } - ], callback) + + const result = await addLink(context, { + parent: node, + name: child.name, + cid: child.cid, + size: child.size, + flush: options.flush, + shardSplitThreshold: options.shardSplitThreshold, + format: options.format, + hashAlg: options.hashAlg, + cidVersion: options.cidVersion + }) + + // new child for next loop + child = { + cid: result.cid, + name, + size: result.node.size + } + } + + log(`Final CID ${child.cid.toBaseEncodedString('')}`) + + return child.cid } module.exports = updateTree diff --git a/src/core/utils/with-mfs-root.js b/src/core/utils/with-mfs-root.js index 9dcedd2..0938c30 100644 --- a/src/core/utils/with-mfs-root.js +++ b/src/core/utils/with-mfs-root.js @@ -4,49 +4,44 @@ const CID = require('cids') const UnixFs = require('ipfs-unixfs') const { DAGNode -} = require('ipld-dag-pb') +} = require('./dag-pb') const log = require('debug')('ipfs:mfs:utils:with-mfs-root') -const waterfall = require('async/waterfall') +const mc = require('multicodec') +const mh = require('multihashes') const { MFS_ROOT_KEY } = require('./constants') -const withMfsRoot = (context, callback) => { - waterfall([ - // Open the repo if it's been closed - (cb) => context.repo.datastore.open((error) => cb(error)), - (cb) => { - // Load the MFS root CID - context.repo.datastore.get(MFS_ROOT_KEY, (error, result) => { - // Once datastore-level releases its error.code addition, we can remove error.notFound logic - if (error && (error.notFound || error.code === 'ERR_NOT_FOUND')) { - log('Creating new MFS root') - - return waterfall([ - // Store an empty node as the root - (next) => DAGNode.create(new UnixFs('directory').marshal(), next), - (node, next) => context.ipld.put(node, { - version: 0, - hashAlg: 'sha2-256', - format: 'dag-pb' - }, next), - // Store the Buffer in the datastore - (cid, next) => context.repo.datastore.put(MFS_ROOT_KEY, cid.buffer, (error) => next(error, cid)) - ], cb) - } - - cb(error, result ? new CID(result) : null) - }) - }, - // Turn the Buffer into a CID - (cid, cb) => { - log(`Fetched MFS root ${cid.toBaseEncodedString()}`) - - cb(null, cid) +const loadMfsRoot = async (context) => { + // Open the repo if it's been closed + await context.repo.datastore.open() + + // Load the MFS root CID + let cid + + try { + const buf = await context.repo.datastore.get(MFS_ROOT_KEY) + + cid = new CID(buf) + } catch (err) { + if (err.code !== 'ERR_NOT_FOUND') { + throw err } - // Invoke the API function with the root CID - ], callback) + + log('Creating new MFS root') + const node = await DAGNode.create(new UnixFs('directory').marshal()) + cid = await context.ipld.put(node, mc.DAG_PB, { + cidVersion: 0, + hashAlg: mh.names['sha2-256'] // why can't ipld look this up? + }) + + await context.repo.datastore.put(MFS_ROOT_KEY, cid.buffer) + } + + log(`Loaded MFS root /ipfs/${cid.toBaseEncodedString()}`) + + return cid } -module.exports = withMfsRoot +module.exports = loadMfsRoot diff --git a/src/core/utils/zeros.js b/src/core/utils/zeros.js deleted file mode 100644 index 6a27aeb..0000000 --- a/src/core/utils/zeros.js +++ /dev/null @@ -1,31 +0,0 @@ -'use strict' - -// A pull stream source that will emit buffers full of zeros up to the specified length -const zeros = (max = Infinity, increment = 4096) => { - let i = 0 - - return (end, cb) => { - if (end) { - return cb && cb(end) - } - - if (i >= max) { - // Ugh. 
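// A sketch, assuming the helpers keep the signatures above, of the root-update
// sequence most commands now share: build a trail to the containing directory,
// swap in the new CID for the entry that changed, bubble it up with updateTree,
// then persist the new root with updateMfsRoot. options is expected to carry
// format, hashAlg and cidVersion as in the command defaults.
const toTrail = require('./to-trail')
const updateTree = require('./update-tree')
const updateMfsRoot = require('./update-mfs-root')

const commitChange = async (context, mfsDirectory, newDirCid, options) => {
  const trail = await toTrail(context, mfsDirectory)

  // the caller has already produced a new version of the final trail entry,
  // e.g. via addLink or removeLink
  trail[trail.length - 1].cid = newDirCid

  const newRootCid = await updateTree(context, trail, options)

  return updateMfsRoot(context, newRootCid)
}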
https://github.com/standard/standard/issues/623 - const foo = true - return cb(foo) - } - - let nextLength = increment - - if ((i + nextLength) > max) { - // final chunk doesn't divide neatly into increment - nextLength = max - i - } - - i += nextLength - - cb(null, Buffer.alloc(nextLength, 0)) - } -} - -module.exports = zeros diff --git a/src/core/write.js b/src/core/write.js index 1b258d3..45f93c8 100644 --- a/src/core/write.js +++ b/src/core/write.js @@ -1,39 +1,23 @@ 'use strict' -const promisify = require('promisify-es6') -const waterfall = require('async/waterfall') -const parallel = require('async/parallel') -const series = require('async/series') -const { - createLock, - updateMfsRoot, - addLink, - updateTree, - toMfsPath, - toPathComponents, - toPullSource, - loadNode, - limitStreamBytes, - countStreamBytes, - toTrail, - zeros -} = require('./utils') -const { - unmarshal -} = require('ipfs-unixfs') -const pull = require('pull-stream/pull') -const cat = require('pull-cat') -const collect = require('pull-stream/sinks/collect') -const empty = require('pull-stream/sources/empty') -const err = require('pull-stream/sources/error') const log = require('debug')('ipfs:mfs:write') -const values = require('pull-stream/sources/values') -const exporter = require('ipfs-unixfs-exporter') const importer = require('ipfs-unixfs-importer') -const deferred = require('pull-defer') -const CID = require('cids') const stat = require('./stat') const mkdir = require('./mkdir') +const addLink = require('./utils/add-link') +const applyDefaultOptions = require('./utils/apply-default-options') +const createLock = require('./utils/create-lock') +const toAsyncIterator = require('./utils/to-async-iterator') +const toMfsPath = require('./utils/to-mfs-path') +const toPathComponents = require('./utils/to-path-components') +const toTrail = require('./utils/to-trail') +const updateTree = require('./utils/update-tree') +const updateMfsRoot = require('./utils/update-mfs-root') +const errCode = require('err-code') +const { + MAX_CHUNK_SIZE +} = require('./utils/constants') +const last = require('async-iterator-last') const defaultOptions = { offset: 0, // the offset in the file to begin writing @@ -53,187 +37,90 @@ const defaultOptions = { shardSplitThreshold: 1000 } -module.exports = function mfsWrite (context) { - return promisify((path, content, options, callback) => { - if (typeof options === 'function') { - callback = options - options = {} +module.exports = (context) => { + return async function mfsWrite (path, content, options) { + log('Hello world, writing', path, content, options) + options = applyDefaultOptions(options, defaultOptions) + + let source, destination, parent + log('Reading source, destination and parent') + await createLock().readLock(async () => { + source = await toAsyncIterator(content, options) + destination = await toMfsPath(context, path) + parent = await toMfsPath(context, destination.mfsDirectory) + })() + log('Read source, destination and parent') + if (!options.parents && !parent.exists) { + throw errCode(new Error('directory does not exist'), 'ENOEXIST') + } + + if (!options.create && !destination.exists) { + throw errCode(new Error('file does not exist'), 'ENOEXIST') } - options = Object.assign({}, defaultOptions, options) + return updateOrImport(context, path, source, destination, options) + } +} - if (options.offset < 0) { - return callback(new Error('cannot have negative write offset')) +const updateOrImport = async (context, path, source, destination, options) => { + const child = 
await write(context, source, destination, options) + + // The slow bit is done, now add or replace the DAGLink in the containing directory + // re-reading the path to the containing folder in case it has changed in the interim + await createLock().writeLock(async () => { + const pathComponents = toPathComponents(path) + const fileName = pathComponents.pop() + let parentExists = false + + try { + await stat(context)(`/${pathComponents.join('/')}`, options) + parentExists = true + } catch (err) { + if (err.code !== 'ERR_NOT_FOUND') { + throw err + } } - if (options.length < 0) { - return callback(new Error('cannot have negative byte count')) + if (!parentExists) { + await mkdir(context)(`/${pathComponents.join('/')}`, options) } - if (!options.length && options.length !== 0) { - options.length = Infinity + // get an updated mfs path in case the root changed while we were writing + const updatedPath = await toMfsPath(context, path) + const trail = await toTrail(context, updatedPath.mfsDirectory, options) + const parent = trail[trail.length - 1] + + if (!parent.type.includes('directory')) { + throw errCode(new Error(`cannot write to ${parent.name}: Not a directory`), 'ENOTADIRECTORY') } - options.cidVersion = options.cidVersion || 0 - - waterfall([ - (done) => { - createLock().readLock((callback) => { - waterfall([ - (done) => { - parallel({ - source: (next) => toPullSource(content, options, next), - path: (next) => toMfsPath(context, path, next) - }, done) - }, - ({ source, path: { mfsPath, mfsDirectory } }, done) => { - series({ - mfsDirectory: (next) => stat(context)(mfsDirectory, { - unsorted: true, - long: true - }, (error, result) => { - if (error && error.message.includes('does not exist')) { - error = null - } - - next(error, result) - }), - mfsPath: (next) => stat(context)(mfsPath, { - unsorted: true, - long: true - }, (error, result) => { - if (error && error.message.includes('does not exist')) { - error = null - } - - next(error, result) - }) - }, (error, result = {}) => { - done(error, { - source, - path, - mfsDirectory: result.mfsDirectory, - mfsPath: result.mfsPath - }) - }) - } - ], callback) - })(done) - }, - ({ source, path, mfsDirectory, mfsPath }, done) => { - if (!options.parents && !mfsDirectory) { - return done(new Error('directory does not exist')) - } + const parentNode = await context.ipld.get(parent.cid) - if (!options.create && !mfsPath) { - return done(new Error('file does not exist')) - } + const result = await addLink(context, { + parent: parentNode, + name: fileName, + cid: child.cid, + size: child.size, + flush: options.flush, + shardSplitThreshold: options.shardSplitThreshold, + format: options.format, + hashAlg: options.hashAlg, + cidVersion: options.cidVersion + }) - updateOrImport(context, options, path, source, mfsPath, done) - } - ], (error) => callback(error)) - }) -} + parent.cid = result.cid -const updateOrImport = (context, options, path, source, existingChild, callback) => { - waterfall([ - (next) => { - if (existingChild) { - return loadNode(context, { - cid: existingChild.hash - }, next) - } + // update the tree with the new child + const newRootCid = await updateTree(context, trail, options) - next(null, null) - }, - - (result, next) => { - const { - cid, node - } = result || {} - - write(context, cid, node, source, options, next) - }, - - // The slow bit is done, now add or replace the DAGLink in the containing directory - // re-reading the path to the containing folder in case it has changed in the interim - (child, next) => { - 
createLock().writeLock((writeLockCallback) => { - const pathComponents = toPathComponents(path) - const fileName = pathComponents.pop() - - waterfall([ - (cb) => stat(context)(`/${pathComponents.join('/')}`, options, (error, result) => { - if (error && error.message.includes('does not exist')) { - error = null - } - - cb(null, Boolean(result)) - }), - (parentExists, cb) => { - if (parentExists) { - return cb() - } - - mkdir(context)(`/${pathComponents.join('/')}`, options, cb) - }, - // get an updated mfs path in case the root changed while we were writing - (cb) => toMfsPath(context, path, cb), - ({ mfsDirectory, root }, cb) => { - toTrail(context, mfsDirectory, options, (err, trail) => { - if (err) { - return cb(err) - } - - const parent = trail[trail.length - 1] - - if (parent.type !== 'dir') { - return cb(new Error(`cannot write to ${parent.name}: Not a directory`)) - } - - context.ipld.get(parent.cid, (err, result) => { - if (err) { - return cb(err) - } - - addLink(context, { - parent: result.value, - parentCid: parent.cid, - name: fileName, - cid: child.cid, - size: child.size, - flush: options.flush, - shardSplitThreshold: options.shardSplitThreshold - }, (err, result) => { - if (err) { - return cb(err) - } - - parent.cid = result.cid - parent.size = result.node.size - - cb(null, trail) - }) - }) - }) - }, - - // update the tree with the new child - (trail, cb) => updateTree(context, trail, options, cb), - - // Update the MFS record with the new CID for the root of the tree - ({ cid }, cb) => updateMfsRoot(context, cid, cb) - ], writeLockCallback) - })(next) - }], callback) + // Update the MFS record with the new CID for the root of the tree + await updateMfsRoot(context, newRootCid) + })() } -const write = (context, existingNodeCid, existingNode, source, options, callback) => { - let existingNodeMeta - - if (existingNode) { - existingNodeMeta = unmarshal(existingNode.data) - log(`Overwriting file ${existingNodeCid.toBaseEncodedString()} offset ${options.offset} length ${options.length}`) +const write = async (context, source, destination, options) => { + if (destination.exists) { + log(`Overwriting file ${destination.cid.toBaseEncodedString()} offset ${options.offset} length ${options.length}`) } else { log(`Writing file offset ${options.offset} length ${options.length}`) } @@ -242,106 +129,123 @@ const write = (context, existingNodeCid, existingNode, source, options, callback // pad start of file if necessary if (options.offset > 0) { - if (existingNode && existingNodeMeta.fileSize() > options.offset) { + if (destination.unixfs && destination.unixfs.fileSize() > options.offset) { log(`Writing first ${options.offset} bytes of original file`) - const startFile = deferred.source() - - sources.push(startFile) - - pull( - exporter(existingNodeCid, context.ipld, { - offset: 0, - length: options.offset - }), - collect((error, files) => { - if (error) { - return startFile.resolve(err(error)) - } - - startFile.resolve(files[0].content) - }) + sources.push( + () => { + return destination.content({ + offset: 0, + length: options.offset + }) + } ) } else { log(`Writing zeros for first ${options.offset} bytes`) - sources.push(zeros(options.offset)) + sources.push( + asyncZeroes(options.offset) + ) } } - const endFile = deferred.source() - - // add the new source sources.push( - pull( - source, - limitStreamBytes(options.length), - countStreamBytes((bytesRead) => { - log(`Wrote ${bytesRead} bytes`) - - if (existingNode && !options.truncate) { - // if we've done reading from the new source and 
we are not going - // to truncate the file, add the end of the existing file to the output - const fileSize = existingNodeMeta.fileSize() - const offset = options.offset + bytesRead - - if (fileSize > offset) { - log(`Writing last ${fileSize - offset} of ${fileSize} bytes from original file`) - pull( - exporter(existingNodeCid, context.ipld, { - offset - }), - collect((error, files) => { - if (error) { - return endFile.resolve(err(error)) - } - - endFile.resolve(files[0].content) - }) - ) - } else { - log(`Not writing last bytes from original file`) - endFile.resolve(empty()) - } - } - }) - ) + limitAsyncStreamBytes(source, options.length) ) - // add the end of the file if necessary - if (existingNode && !options.truncate) { - sources.push( - endFile - ) + const content = countBytesStreamed(catAsyncInterators(sources), (bytesWritten) => { + if (destination.unixfs && !options.truncate) { + // if we've done reading from the new source and we are not going + // to truncate the file, add the end of the existing file to the output + const fileSize = destination.unixfs.fileSize() + + if (fileSize > bytesWritten) { + log(`Writing last ${fileSize - bytesWritten} of ${fileSize} bytes from original file starting at offset ${bytesWritten}`) + + return destination.content({ + offset: bytesWritten + }) + } else { + log(`Not writing last bytes from original file`) + } + } + + return { + [Symbol.asyncIterator]: async function * () {} + } + }) + + let result = await last(importer([{ + content: content + }], context.ipld, { + progress: options.progress, + hashAlg: options.hashAlg, + cidVersion: options.cidVersion, + strategy: options.strategy, + rawLeaves: options.rawLeaves, + reduceSingleLeafToSelf: options.reduceSingleLeafToSelf, + leafType: options.leafType + })) + + log(`Wrote ${result.cid.toBaseEncodedString()}`) + + return { + cid: result.cid, + size: result.size } +} - pull( - values([{ - path: '', - content: cat(sources) - }]), - importer(context.ipld, { - progress: options.progress, - hashAlg: options.hashAlg, - cidVersion: options.cidVersion, - strategy: options.strategy, - rawLeaves: options.rawLeaves, - reduceSingleLeafToSelf: options.reduceSingleLeafToSelf, - leafType: options.leafType - }), - collect((error, results) => { - if (error) { - return callback(error) +const limitAsyncStreamBytes = (stream, limit) => { + return async function * _limitAsyncStreamBytes () { + let emitted = 0 + + for await (const buf of stream) { + emitted += buf.length + + if (emitted > limit) { + yield buf.slice(0, limit - emitted) + + return } - const result = results.pop() - const cid = new CID(result.multihash) + yield buf + } + } +} - log(`Wrote ${cid.toBaseEncodedString()}`) +const asyncZeroes = (count, chunkSize = MAX_CHUNK_SIZE) => { + const buf = Buffer.alloc(chunkSize, 0) - callback(null, { - cid, - size: result.size - }) - }) - ) + const stream = { + [Symbol.asyncIterator]: async function * _asyncZeroes () { + while (true) { + yield buf.slice() + } + } + } + + return limitAsyncStreamBytes(stream, count) +} + +const catAsyncInterators = async function * (sources) { + for (let i = 0; i < sources.length; i++) { + for await (const buf of sources[i]()) { + yield buf + } + } +} + +const countBytesStreamed = async function * (source, notify) { + let wrote = 0 + + for await (const buf of source) { + wrote += buf.length + + yield buf + } + + for await (const buf of notify(wrote)) { + wrote += buf.length + + yield buf + } } diff --git a/src/http/read.js b/src/http/read.js index e389b41..5538121 100644 --- 
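// A self-contained restatement of the zero-padding idea used by asyncZeroes and
// limitAsyncStreamBytes above (those helpers are local to write.js and not
// exported); the chunk size and offset handling below are illustrative.
const zeroes = async function * (count, chunkSize = 4096) {
  while (count > 0) {
    const len = Math.min(count, chunkSize)
    count -= len

    yield Buffer.alloc(len, 0)
  }
}

async function * paddedContent (content, offset) {
  // pad the gap before the write offset, then emit the new file content
  yield * zeroes(offset)
  yield * content
}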
a/src/http/read.js +++ b/src/http/read.js @@ -51,8 +51,7 @@ const mfsRead = { query: Joi.object().keys({ arg: Joi.string().required(), offset: Joi.number().integer().min(0), - length: Joi.number().integer().min(0), - count: Joi.number().integer().min(0) + length: Joi.number().integer().min(0) }) .rename('o', 'offset', { override: true, diff --git a/src/http/write.js b/src/http/write.js index 0308843..05cd45a 100644 --- a/src/http/write.js +++ b/src/http/write.js @@ -2,11 +2,13 @@ const Joi = require('joi') const multipart = require('ipfs-multipart') +const Boom = require('boom') const mfsWrite = { method: 'POST', path: '/api/v0/files/write', async handler (request, h) { + console.info('ok, starting write') const { ipfs } = request.server.app @@ -29,14 +31,23 @@ const mfsWrite = { const fileStream = await new Promise((resolve, reject) => { const parser = multipart.reqParser(request.payload) + let fileStream parser.on('file', (_, stream) => { - resolve(stream) + if (fileStream) { + return reject(Boom.badRequest('Please only send one file')) + } + + fileStream = stream }) parser.on('error', (error) => { reject(error) }) + + parser.on('end', () => { + resolve(fileStream) + }) }) await ipfs.files.write(arg, fileStream, { @@ -54,7 +65,7 @@ const mfsWrite = { flush, shardSplitThreshold }) - +console.info('wrote') return h.response() }, options: { diff --git a/src/index.js b/src/index.js index 2a29e8f..522e8d2 100644 --- a/src/index.js +++ b/src/index.js @@ -5,7 +5,7 @@ const core = require('./core') const http = require('./http') const { FILE_TYPES -} = require('./core/utils') +} = require('./core/utils/constants') module.exports = { cli, diff --git a/test/cp.spec.js b/test/cp.spec.js index e75dc32..0a1e6a2 100644 --- a/test/cp.spec.js +++ b/test/cp.spec.js @@ -4,11 +4,11 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect -const bufferStream = require('pull-buffer-stream') -const { - createMfs, - createShardedDirectory -} = require('./helpers') +const createMfs = require('./helpers/create-mfs') +const createShardedDirectory = require('./helpers/create-sharded-directory') +const streamToBuffer = require('./helpers/stream-to-buffer') +const streamToArray = require('./helpers/stream-to-array') +const crypto = require('crypto') describe('cp', () => { let mfs @@ -44,7 +44,7 @@ describe('cp', () => { } }) - it('refuses to copy a file to a non-existent directory', async () => { + it('refuses to copy a non-existent file', async () => { try { await mfs.cp('/i-do-not-exist', '/output') throw new Error('No error was thrown for a non-existent file') @@ -57,16 +57,16 @@ describe('cp', () => { const source = `/source-file-${Math.random()}.txt` const destination = `/dest-file-${Math.random()}.txt` - await mfs.write(source, bufferStream(100), { + await mfs.write(source, crypto.randomBytes(100), { create: true }) - await mfs.write(destination, bufferStream(100), { + await mfs.write(destination, crypto.randomBytes(100), { create: true }) try { await mfs.cp(source, destination) - throw new Error('No error was thrown for a non-existent file') + throw new Error('No error was thrown when trying to overwrite a file') } catch (err) { expect(err.message).to.contain('directory already has entry by that name') } @@ -75,7 +75,7 @@ describe('cp', () => { it('refuses to copy a file to itself', async () => { const source = `/source-file-${Math.random()}.txt` - await mfs.write(source, bufferStream(100), { + await mfs.write(source, crypto.randomBytes(100), { create: true }) @@ -90,18 
+90,15 @@ describe('cp', () => { it('copies a file to new location', async () => { const source = `/source-file-${Math.random()}.txt` const destination = `/dest-file-${Math.random()}.txt` - let data = Buffer.alloc(0) + let data = crypto.randomBytes(500) - await mfs.write(source, bufferStream(500, { - collector: (bytes) => { - data = Buffer.concat([data, bytes]) - } - }), { + await mfs.write(source, data, { create: true }) await mfs.cp(source, destination) - const buffer = await mfs.read(destination) + + let buffer = await streamToBuffer(mfs.read(destination)) expect(buffer).to.deep.equal(data) }) @@ -111,7 +108,7 @@ describe('cp', () => { const directory = `/dest-directory-${Math.random()}` const destination = `${directory}${source}` - await mfs.write(source, bufferStream(500), { + await mfs.write(source, crypto.randomBytes(500), { create: true }) await mfs.mkdir(directory) @@ -153,19 +150,15 @@ describe('cp', () => { it('copies multiple files to new location', async () => { const sources = [{ path: `/source-file-${Math.random()}.txt`, - data: Buffer.alloc(0) + data: crypto.randomBytes(500) }, { path: `/source-file-${Math.random()}.txt`, - data: Buffer.alloc(0) + data: crypto.randomBytes(500) }] const destination = `/dest-dir-${Math.random()}` for (const source of sources) { - await mfs.write(source.path, bufferStream(500, { - collector: (bytes) => { - source.data = Buffer.concat([source.data, bytes]) - } - }), { + await mfs.write(source.path, source.data, { create: true }) } @@ -175,7 +168,7 @@ describe('cp', () => { }) for (const source of sources) { - const buffer = await mfs.read(`${destination}${source.path}`) + const buffer = await streamToBuffer(mfs.read(`${destination}${source.path}`)) expect(buffer).to.deep.equal(source.data) } @@ -185,12 +178,30 @@ describe('cp', () => { const source = `/source-file-${Math.random()}.txt` const destination = `/dest-file-${Math.random()}.txt` - await mfs.write(source, bufferStream(100), { + await mfs.write(source, crypto.randomBytes(100), { create: true }) const stats = await mfs.stat(source) - await mfs.cp(`/ipfs/${stats.hash}`, destination) + await mfs.cp(`/ipfs/${stats.cid.toBaseEncodedString()}`, destination) + + const destinationStats = await mfs.stat(destination) + expect(destinationStats.size).to.equal(100) + }) + + it('copies files from deep ipfs paths', async () => { + const dir = `dir-${Math.random()}` + const file = `source-file-${Math.random()}.txt` + const source = `/${dir}/${file}` + const destination = `/dest-file-${Math.random()}.txt` + + await mfs.write(source, crypto.randomBytes(100), { + create: true, + parents: true + }) + + const stats = await mfs.stat(`/${dir}`) + await mfs.cp(`/ipfs/${stats.cid.toBaseEncodedString()}/${file}`, destination) const destinationStats = await mfs.stat(destination) expect(destinationStats.size).to.equal(100) @@ -211,9 +222,7 @@ describe('cp', () => { // should still be a sharded directory expect((await mfs.stat(finalShardedDirPath)).type).to.equal('hamt-sharded-directory') - const files = await mfs.ls(finalShardedDirPath, { - long: true - }) + const files = await streamToArray(mfs.ls(finalShardedDirPath)) expect(files.length).to.be.ok() }) diff --git a/test/flush.spec.js b/test/flush.spec.js index d9176f1..fced064 100644 --- a/test/flush.spec.js +++ b/test/flush.spec.js @@ -4,9 +4,7 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect -const { - createMfs -} = require('./helpers') +const createMfs = require('./helpers/create-mfs') describe('flush', () => { let 
mfs diff --git a/test/helpers/cid-at-path.js b/test/helpers/cid-at-path.js index 830aa54..c0e10c8 100644 --- a/test/helpers/cid-at-path.js +++ b/test/helpers/cid-at-path.js @@ -1,23 +1,22 @@ 'use strict' -const CID = require('cids') -const { - toPathComponents -} = require('../../src/core/utils') +const toPathComponents = require('../../src/core/utils/to-path-components') module.exports = async (path, mfs) => { const parts = toPathComponents(path) const fileName = parts.pop() const directory = `/${parts.join('/')}` - const files = (await mfs.ls(directory, { + const files = [] + + for await (const file of mfs.ls(directory, { long: true - })) + })) { + files.push(file) + } const file = files .filter(file => file.name === fileName) .pop() - return new CID( - file.hash - ) + return file.cid } diff --git a/test/helpers/collect-leaf-cids.js b/test/helpers/collect-leaf-cids.js deleted file mode 100644 index d71a9c7..0000000 --- a/test/helpers/collect-leaf-cids.js +++ /dev/null @@ -1,40 +0,0 @@ -'use strict' - -const pull = require('pull-stream') -const traverse = require('pull-traverse') -const CID = require('cids') - -module.exports = (mfs, multihash) => { - return new Promise((resolve, reject) => { - pull( - traverse.depthFirst(new CID(multihash), (cid) => { - return pull( - pull.values([cid]), - pull.asyncMap((cid, callback) => { - mfs.ipld.get(cid, (error, result) => { - callback(error, !error && result.value) - }) - }), - pull.asyncMap((node, callback) => { - if (!node.links) { - return callback() - } - - return callback( - null, node.links.map(link => link.cid) - ) - }), - pull.filter(Boolean), - pull.flatten() - ) - }), - pull.collect((error, cids) => { - if (error) { - return reject(error) - } - - resolve(cids) - }) - ) - }) -} diff --git a/test/helpers/constants.js b/test/helpers/constants.js new file mode 100644 index 0000000..fbe8c53 --- /dev/null +++ b/test/helpers/constants.js @@ -0,0 +1,6 @@ +'use strict' + +module.exports = { + EMPTY_DIRECTORY_HASH: 'QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn', + EMPTY_DIRECTORY_HASH_BASE32: 'bafybeiczsscdsbs7ffqz55asqdf3smv6klcw3gofszvwlyarci47bgf354' +} diff --git a/test/helpers/create-mfs.js b/test/helpers/create-mfs.js new file mode 100644 index 0000000..cb52642 --- /dev/null +++ b/test/helpers/create-mfs.js @@ -0,0 +1,56 @@ +'use strict' + +const core = require('../../src/core') +const isWebWorker = require('detect-webworker') +const promisify = require('promisify-es6') +const { + MemoryDatastore +} = require('interface-datastore') +const Ipld = require('ipld') +const Repo = require('ipfs-repo') +const BlockService = require('ipfs-block-service') + +const createMfs = async () => { + let repo = new Repo(`test-repo-${Date.now()}`, { + lock: 'memory', + storageBackends: { + root: MemoryDatastore, + blocks: MemoryDatastore, + keys: MemoryDatastore, + datastore: MemoryDatastore + } + }) + + repo.init = promisify(repo.init, { + context: repo + }) + repo.open = promisify(repo.open, { + context: repo + }) + + await repo.init({}) + await repo.open() + + const bs = new BlockService(repo) + + const ipld = new Ipld({ + blockService: bs + }) + + const mfs = core({ + ipld, + datastore: repo.datastore, + blocks: bs, + + // https://github.com/Joris-van-der-Wel/karma-mocha-webworker/issuses/4 + // There is no IPFS node running on the main thread so run it on the + // worker along with the tests + repoOwner: isWebWorker + }) + + mfs.ipld = ipld + + return mfs +} + +module.exports = createMfs diff --git a/test/helpers/create-shard.js 
b/test/helpers/create-shard.js index dc2cbcf..34fc36c 100644 --- a/test/helpers/create-shard.js +++ b/test/helpers/create-shard.js @@ -1,31 +1,16 @@ 'use strict' -const pull = require('pull-stream/pull') -const values = require('pull-stream/sources/values') -const collect = require('pull-stream/sinks/collect') const importer = require('ipfs-unixfs-importer') -const CID = require('cids') +const last = require('async-iterator-last') -const createShard = (ipld, files, shardSplitThreshold = 10) => { - return new Promise((resolve, reject) => { - pull( - values(files), - importer(ipld, { - shardSplitThreshold, - reduceSingleLeafToSelf: false, // same as go-ipfs-mfs implementation, differs from `ipfs add`(!) - leafType: 'raw' // same as go-ipfs-mfs implementation, differs from `ipfs add`(!) - }), - collect((err, files) => { - if (err) { - return reject(err) - } +const createShard = async (ipld, files, shardSplitThreshold = 10) => { + let result = await last(importer(files, ipld, { + shardSplitThreshold, + reduceSingleLeafToSelf: false, // same as go-ipfs-mfs implementation, differs from `ipfs add`(!) + leafType: 'raw' // same as go-ipfs-mfs implementation, differs from `ipfs add`(!) + })) - const dir = files[files.length - 1] - - resolve(new CID(dir.multihash)) - }) - ) - }) + return result.cid } module.exports = createShard diff --git a/test/helpers/create-sharded-directory.js b/test/helpers/create-sharded-directory.js index 8730dbd..076fdac 100644 --- a/test/helpers/create-sharded-directory.js +++ b/test/helpers/create-sharded-directory.js @@ -14,6 +14,7 @@ module.exports = async (mfs, shardSplitThreshold = 10, files = shardSplitThresho await mfs.cp(`/ipfs/${cid.toBaseEncodedString()}`, dirPath) + expect((await mfs.stat(`/ipfs/${cid.toBaseEncodedString()}`)).type).to.equal('hamt-sharded-directory') expect((await mfs.stat(dirPath)).type).to.equal('hamt-sharded-directory') return dirPath diff --git a/test/helpers/index.js b/test/helpers/index.js deleted file mode 100644 index d817c27..0000000 --- a/test/helpers/index.js +++ /dev/null @@ -1,48 +0,0 @@ -'use strict' - -const core = require('../../src/core') -const isWebWorker = require('detect-webworker') -const promisify = require('promisify-es6') -const InMemoryDataStore = require('interface-datastore').MemoryDatastore -const Ipld = require('ipld') -const inMemoryIpld = promisify(require('ipld-in-memory').bind(null, Ipld)) - -const createMfs = async () => { - let ipld = await inMemoryIpld() - let datastore = new InMemoryDataStore() - - const mfs = core({ - ipld, - repo: { - datastore - }, - - // https://github.com/Joris-van-der-Wel/karma-mocha-webworker/issues/4 - // There is no IPFS node running on the main thread so run it on the - // worker along with the tests - repoOwner: isWebWorker - }) - - // to allow tests to verify information - mfs.ipld = { - get: promisify(ipld.get.bind(ipld)), - getMany: promisify(ipld.getMany.bind(ipld)), - put: promisify(ipld.put.bind(ipld)) - } - mfs.datastore = datastore - - return mfs -} - -module.exports = { - createMfs, - cidAtPath: require('./cid-at-path'), - collectLeafCids: require('./collect-leaf-cids'), - createShard: require('./create-shard'), - createShardedDirectory: require('./create-sharded-directory'), - createTwoShards: require('./create-two-shards'), - findTreeWithDepth: require('./find-tree-with-depth'), - printTree: require('./print-tree'), - EMPTY_DIRECTORY_HASH: 'QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn', - EMPTY_DIRECTORY_HASH_BASE32: 
'bafybeiczsscdsbs7ffqz55asqdf3smv6klcw3gofszvwlyarci47bgf354'
-}
diff --git a/test/helpers/print-tree.js b/test/helpers/print-tree.js
index 34bce64..4a795b4 100644
--- a/test/helpers/print-tree.js
+++ b/test/helpers/print-tree.js
@@ -1,21 +1,9 @@
 'use strict'
 
-const load = async (cid, ipld) => {
-  return new Promise((resolve, reject) => {
-    ipld.get(cid, (err, res) => {
-      if (err) {
-        return reject(err)
-      }
-
-      resolve(res.value)
-    })
-  })
-}
-
 const printTree = async (ipld, cid, indentation = '', name = '') => {
   console.info(indentation, name, cid.toBaseEncodedString()) // eslint-disable-line no-console
 
-  const node = await load(cid, ipld)
+  const node = await ipld.get(cid)
 
   const fileLinks = node.links
     .filter(link => link.name)
diff --git a/test/helpers/random-bytes.js b/test/helpers/random-bytes.js
deleted file mode 100644
index 7ed4191..0000000
--- a/test/helpers/random-bytes.js
+++ /dev/null
@@ -1,21 +0,0 @@
-'use strict'
-
-const crypto = require('crypto')
-const MAX_BYTES = 65536
-
-// One day this will be merged: https://github.com/crypto-browserify/randombytes/pull/16
-module.exports = function randomBytes (num) {
-  const bytes = Buffer.allocUnsafe(num)
-
-  for (let offset = 0; offset < num; offset += MAX_BYTES) {
-    let size = MAX_BYTES
-
-    if ((offset + size) > num) {
-      size = num - offset
-    }
-
-    crypto.randomFillSync(bytes, offset, size)
-  }
-
-  return bytes
-}
diff --git a/test/helpers/stream-to-array.js b/test/helpers/stream-to-array.js
new file mode 100644
index 0000000..882930e
--- /dev/null
+++ b/test/helpers/stream-to-array.js
@@ -0,0 +1,11 @@
+'use strict'
+
+module.exports = async (stream) => {
+  const arr = []
+
+  for await (const entry of stream) {
+    arr.push(entry)
+  }
+
+  return arr
+}
diff --git a/test/helpers/stream-to-buffer.js b/test/helpers/stream-to-buffer.js
new file mode 100644
index 0000000..caab7c8
--- /dev/null
+++ b/test/helpers/stream-to-buffer.js
@@ -0,0 +1,11 @@
+'use strict'
+
+module.exports = async (stream) => {
+  let buffer = Buffer.alloc(0)
+
+  for await (const buf of stream) {
+    buffer = Buffer.concat([buffer, buf], buffer.length + buf.length)
+  }
+
+  return buffer
+}
diff --git a/test/helpers/traverse-leaf-nodes.js b/test/helpers/traverse-leaf-nodes.js
new file mode 100644
index 0000000..aa34ff6
--- /dev/null
+++ b/test/helpers/traverse-leaf-nodes.js
@@ -0,0 +1,20 @@
+'use strict'
+
+module.exports = async function * traverseLeafNodes (mfs, cid) {
+  async function * traverse (cid) {
+    const node = await mfs.ipld.get(cid)
+
+    if (Buffer.isBuffer(node) || !node.links.length) {
+      yield {
+        node,
+        cid
+      }
+
+      return
+    }
+
+    for (const link of node.links) yield * traverse(link.cid)
+  }
+
+  yield * traverse(cid)
+}
diff --git a/test/ls.spec.js b/test/ls.spec.js
index 60887b4..52c1a6f 100644
--- a/test/ls.spec.js
+++ b/test/ls.spec.js
@@ -4,325 +4,201 @@ const chai = require('chai')
 chai.use(require('dirty-chai'))
 const expect = chai.expect
-const pull = require('pull-stream/pull')
-const collect = require('pull-stream/sinks/collect')
-const randomBytes = require('./helpers/random-bytes')
 const CID = require('cids')
 const {
   FILE_TYPES
 } = require('../src')
-
-const {
-  createMfs,
-  createShardedDirectory
-} = require('./helpers')
+const createMfs = require('./helpers/create-mfs')
+const createShardedDirectory = require('./helpers/create-sharded-directory')
+const streamToArray = require('./helpers/stream-to-array')
+const crypto = require('crypto')
 
 describe('ls', () => {
   let mfs
-  let largeFile = randomBytes(490668)
+  let largeFile = crypto.randomBytes(490668)
before(async () => { mfs = await createMfs() }) - const methods = [{ - name: 'ls', - ls: function () { - return mfs.ls.apply(mfs, arguments) - }, - collect: (entries) => entries - }, { - name: 'lsPullStream', - ls: function () { - return Promise.resolve(mfs.lsPullStream.apply(mfs, arguments)) - }, - collect: (stream) => { - return new Promise((resolve, reject) => { - pull( - stream, - collect((error, entries) => { - if (error) { - return reject(error) - } - - resolve(entries) - }) - ) - }) + it('lists the root directory by default', async () => { + const fileName = `small-file-${Math.random()}.txt` + const content = Buffer.from('Hello world') + + await mfs.write(`/${fileName}`, content, { + create: true + }) + + const files = await streamToArray(mfs.ls()) + + expect(files.find(file => file.name === fileName)).to.be.ok() + }) + + it('refuses to lists files with an empty path', async () => { + try { + for await (const _ of mfs.ls('')) { // eslint-disable-line no-unused-vars + // falala + } + + throw new Error('No error was thrown for an empty path') + } catch (err) { + expect(err.code).to.equal('ENOPATH') } - }, { - name: 'lsReadableStream', - ls: function () { - return Promise.resolve(mfs.lsReadableStream.apply(mfs, arguments)) - }, - collect: (stream) => { - return new Promise((resolve, reject) => { - let entries = [] - - stream.on('data', (entry) => { - entries.push(entry) - }) - - stream.on('end', (entry) => { - if (entry) { - entries.push(entry) - } - - resolve(entries) - }) - - stream.on('error', (error) => { - reject(error) - }) - }) + }) + + it('refuses to lists files with an invalid path', async () => { + try { + for await (const _ of mfs.ls('not-valid')) { // eslint-disable-line no-unused-vars + // falala + } + + throw new Error('No error was thrown for an empty path') + } catch (err) { + expect(err.code).to.equal('EINVALIDPATH') } - }] - - methods.forEach(method => { - describe(`ls ${method.name}`, () => { - it('lists the root directory by default', async () => { - const fileName = `small-file-${Math.random()}.txt` - const content = Buffer.from('Hello world') - - await mfs.write(`/${fileName}`, content, { - create: true - }) - const result = await method.ls() - const files = await method.collect(result) - - expect(files.find(file => file.name === fileName)).to.be.ok() - }) - - it('refuses to lists files with an empty path', async () => { - try { - await method.collect(await method.ls('')) - throw new Error('No error was thrown for an empty path') - } catch (err) { - expect(err.message).to.contain('paths must not be empty') - } - }) - - it('refuses to lists files with an invalid path', async () => { - try { - await method.collect(await method.ls('not-valid')) - throw new Error('No error was thrown for an empty path') - } catch (err) { - expect(err.message).to.contain('paths must start with a leading /') - } - }) - - it('lists files in a directory', async () => { - const dirName = `dir-${Math.random()}` - const fileName = `small-file-${Math.random()}.txt` - const content = Buffer.from('Hello world') - - await mfs.write(`/${dirName}/${fileName}`, content, { - create: true, - parents: true - }) - - const stream = await method.ls(`/${dirName}`, {}) - const files = await method.collect(stream) - - expect(files.length).to.equal(1) - expect(files[0].name).to.equal(fileName) - expect(files[0].type).to.equal(FILE_TYPES.file) - expect(files[0].size).to.equal(0) - expect(files[0].hash).to.equal('') - }) - - it('lists files in a directory with meta data', async () => { - const dirName = 
`dir-${Math.random()}` - const fileName = `small-file-${Math.random()}.txt` - const content = Buffer.from('Hello world') - - await mfs.write(`/${dirName}/${fileName}`, content, { - create: true, - parents: true - }) - - const stream = await method.ls(`/${dirName}`, { - long: true - }) - const files = await method.collect(stream) - - expect(files.length).to.equal(1) - expect(files[0].name).to.equal(fileName) - expect(files[0].type).to.equal(FILE_TYPES.file) - expect(files[0].size).to.equal(content.length) - }) - - it('lists a file', async () => { - const fileName = `small-file-${Math.random()}.txt` - const content = Buffer.from('Hello world') - - await mfs.write(`/${fileName}`, content, { - create: true - }) - - const stream = await method.ls(`/${fileName}`) - const files = await method.collect(stream) - - expect(files.length).to.equal(1) - expect(files[0].name).to.equal(fileName) - expect(files[0].type).to.equal(FILE_TYPES.file) - expect(files[0].size).to.equal(0) - expect(files[0].hash).to.equal('') - }) - - it('lists a file with meta data', async () => { - const fileName = `small-file-${Math.random()}.txt` - const content = Buffer.from('Hello world') - - await mfs.write(`/${fileName}`, content, { - create: true - }) - const stream = await method.ls(`/${fileName}`, { - long: true - }) - const files = await method.collect(stream) - - expect(files.length).to.equal(1) - expect(files[0].name).to.equal(fileName) - expect(files[0].type).to.equal(FILE_TYPES.file) - expect(files[0].size).to.equal(content.length) - }) - - it('lists a file with a base32 hash', async () => { - const fileName = `small-file-${Math.random()}.txt` - const content = Buffer.from('Hello world') - - await mfs.write(`/${fileName}`, content, { - create: true - }) - - const stream = await method.ls(`/${fileName}`, { - long: true, - cidBase: 'base32' - }) - const files = await method.collect(stream) - - expect(files.length).to.equal(1) - expect(files[0].name).to.equal(fileName) - expect(files[0].type).to.equal(FILE_TYPES.file) - expect(files[0].size).to.equal(content.length) - expect(files[0].hash.startsWith('b')).to.equal(true) - }) - - it('fails to list non-existent file', async () => { - try { - const stream = await method.ls('/i-do-not-exist') - await method.collect(stream) - throw new Error('No error was thrown for a non-existent file') - } catch (err) { - expect(err.message).to.contain('does not exist') - } - }) - - it('lists a raw node', async () => { - const filePath = '/stat/large-file.txt' - - await mfs.write(filePath, largeFile, { - create: true, - parents: true, - rawLeaves: true - }) - - const stats = await mfs.stat(filePath) - const result = await mfs.ipld.get(new CID(stats.hash)) - const node = result.value - const child = node.links[0] - - expect(child.cid.codec).to.equal('raw') - - const rawNodeContents = await mfs.ls(`/ipfs/${child.cid}/`, { - long: true - }) - - expect(rawNodeContents[0].type).to.equal(0) // this is what go does - expect(rawNodeContents[0].hash).to.equal(child.cid.toBaseEncodedString()) - }) - - it('lists a raw node in an mfs directory', async () => { - const filePath = '/stat/large-file.txt' - - await mfs.write(filePath, largeFile, { - create: true, - parents: true, - rawLeaves: true - }) - - const stats = await mfs.stat(filePath) - const cid = new CID(stats.hash) - const result = await mfs.ipld.get(cid) - const node = result.value - const child = node.links[0] - - expect(child.cid.codec).to.equal('raw') - - const dir = `/dir-with-raw-${Date.now()}` - const path = `${dir}/raw-${Date.now()}` 
- - await mfs.mkdir(dir) - await mfs.cp(`/ipfs/${child.cid.toBaseEncodedString()}`, path) - - const rawNodeContents = await mfs.ls(path, { - long: true - }) - - expect(rawNodeContents[0].type).to.equal(0) // this is what go does - expect(rawNodeContents[0].hash).to.equal(child.cid.toBaseEncodedString()) - }) - - it('lists a sharded directory contents', async () => { - const shardSplitThreshold = 10 - const fileCount = 11 - const dirPath = await createShardedDirectory(mfs, shardSplitThreshold, fileCount) - - const files = await method.collect(await method.ls(dirPath, { - long: true - })) - - expect(files.length).to.equal(fileCount) - - files.forEach(file => { - // should be a file - expect(file.type).to.equal(0) - }) - }) - - it('lists a file inside a sharded directory directly', async () => { - const dirPath = await createShardedDirectory(mfs) - - const files = await method.collect(await method.ls(dirPath, { - long: true - })) - - const filePath = `${dirPath}/${files[0].name}` - - // should be able to ls new file directly - expect(await method.collect(await method.ls(filePath, { - long: true - }))).to.not.be.empty() - }) - - it('lists the contents of a directory inside a sharded directory', async () => { - const shardedDirPath = await createShardedDirectory(mfs) - const dirPath = `${shardedDirPath}/subdir-${Math.random()}` - const fileName = `small-file-${Math.random()}.txt` - - await mfs.mkdir(`${dirPath}`) - await mfs.write(`${dirPath}/${fileName}`, Buffer.from([0, 1, 2, 3]), { - create: true - }) - - const files = await method.collect(await method.ls(dirPath, { - long: true - })) - - expect(files.length).to.equal(1) - expect(files.filter(file => file.name === fileName)).to.be.ok() - }) + }) + + it('lists files in a directory', async () => { + const dirName = `dir-${Math.random()}` + const fileName = `small-file-${Math.random()}.txt` + const content = Buffer.from('Hello world') + + await mfs.write(`/${dirName}/${fileName}`, content, { + create: true, + parents: true + }) + + const files = await streamToArray(mfs.ls(`/${dirName}`)) + + expect(files.find(file => file.name === fileName)).to.be.ok() + expect(files.length).to.equal(1) + expect(files[0].name).to.equal(fileName) + expect(files[0].type).to.equal(FILE_TYPES.file) + expect(files[0].size).to.equal(content.length) + expect(CID.isCID(files[0].cid)).to.be.ok() + }) + + it('lists a file', async () => { + const fileName = `small-file-${Math.random()}.txt` + const content = Buffer.from('Hello world') + + await mfs.write(`/${fileName}`, content, { + create: true + }) + + const files = await streamToArray(mfs.ls(`/${fileName}`)) + + expect(files.length).to.equal(1) + expect(files[0].name).to.equal(fileName) + expect(files[0].type).to.equal(FILE_TYPES.file) + expect(files[0].size).to.equal(content.length) + expect(CID.isCID(files[0].cid)).to.be.ok() + }) + + it('fails to list non-existent file', async () => { + try { + for await (const _ of mfs.ls('/i-do-not-exist')) { // eslint-disable-line no-unused-vars + // falala + } + + throw new Error('No error was thrown for a non-existent file') + } catch (err) { + expect(err.code).to.equal('ERR_NOT_FOUND') + } + }) + + it('lists a raw node', async () => { + const filePath = '/stat/large-file.txt' + + await mfs.write(filePath, largeFile, { + create: true, + parents: true, + rawLeaves: true }) + + const stats = await mfs.stat(filePath) + const node = await mfs.ipld.get(stats.cid) + const child = node.links[0] + + expect(child.cid.codec).to.equal('raw') + + const files = await 
streamToArray(mfs.ls(`/ipfs/${child.cid}`)) + + expect(files.length).to.equal(1) + expect(files[0].type).to.equal(0) // this is what go does + expect(files[0].cid.toBaseEncodedString()).to.equal(child.cid.toBaseEncodedString()) + }) + + it('lists a raw node in an mfs directory', async () => { + const filePath = '/stat/large-file.txt' + + await mfs.write(filePath, largeFile, { + create: true, + parents: true, + rawLeaves: true + }) + + const stats = await mfs.stat(filePath) + const cid = stats.cid + const node = await mfs.ipld.get(cid) + const child = node.links[0] + + expect(child.cid.codec).to.equal('raw') + + const dir = `/dir-with-raw-${Date.now()}` + const path = `${dir}/raw-${Date.now()}` + + await mfs.mkdir(dir) + await mfs.cp(`/ipfs/${child.cid.toBaseEncodedString()}`, path) + + const files = await streamToArray(mfs.ls(`/ipfs/${child.cid}`)) + + expect(files.length).to.equal(1) + expect(files[0].type).to.equal(0) // this is what go does + expect(files[0].cid.toBaseEncodedString()).to.equal(child.cid.toBaseEncodedString()) + }) + + it('lists a sharded directory contents', async () => { + const shardSplitThreshold = 10 + const fileCount = 11 + const dirPath = await createShardedDirectory(mfs, shardSplitThreshold, fileCount) + + const files = await streamToArray(mfs.ls(dirPath)) + + expect(files.length).to.equal(fileCount) + + files.forEach(file => { + // should be a file + expect(file.type).to.equal(0) + }) + }) + + it('lists a file inside a sharded directory directly', async () => { + const dirPath = await createShardedDirectory(mfs) + const files = await streamToArray(mfs.ls(dirPath)) + + const filePath = `${dirPath}/${files[0].name}` + + // should be able to ls new file directly + const file = await streamToArray(mfs.ls(filePath)) + + expect(file.length).to.equal(1) + expect(file[0].name).to.equal(files[0].name) + }) + + it('lists the contents of a directory inside a sharded directory', async () => { + const shardedDirPath = await createShardedDirectory(mfs) + const dirPath = `${shardedDirPath}/subdir-${Math.random()}` + const fileName = `small-file-${Math.random()}.txt` + + await mfs.mkdir(`${dirPath}`) + await mfs.write(`${dirPath}/${fileName}`, Buffer.from([0, 1, 2, 3]), { + create: true + }) + + const files = await streamToArray(mfs.ls(dirPath)) + + expect(files.length).to.equal(1) + expect(files.filter(file => file.name === fileName)).to.be.ok() }) }) diff --git a/test/mkdir.spec.js b/test/mkdir.spec.js index 34860a1..fd81b54 100644 --- a/test/mkdir.spec.js +++ b/test/mkdir.spec.js @@ -5,11 +5,10 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect const multihash = require('multihashes') -const { - createMfs, - cidAtPath, - createShardedDirectory -} = require('./helpers') +const createMfs = require('./helpers/create-mfs') +const cidAtPath = require('./helpers/cid-at-path') +const createShardedDirectory = require('./helpers/create-sharded-directory') +const all = require('async-iterator-all') describe('mkdir', () => { let mfs @@ -32,7 +31,7 @@ describe('mkdir', () => { await mfs.mkdir('foo') throw new Error('No error was thrown when creating an directory with no leading slash') } catch (err) { - expect(err.message).to.contain('paths must start with a leading /') + expect(err.code).to.equal('EINVALIDPATH') } }) @@ -66,7 +65,8 @@ describe('mkdir', () => { const stats = await mfs.stat(path) expect(stats.type).to.equal('directory') - const files = await mfs.ls(path) + const files = await all(mfs.ls(path)) + expect(files.length).to.equal(0) }) 
@@ -81,9 +81,10 @@ describe('mkdir', () => {
       await mfs.mkdir(path, {
         parents: false
       })
+
       throw new Error('Did not refuse to create a path that already exists')
     } catch (err) {
-      expect(err.message).to.contain('file already exists')
+      expect(err.code).to.equal('EALREADYEXISTS')
     }
   })
 
@@ -106,11 +107,21 @@ describe('mkdir', () => {
       parents: true
     })
 
-    const files = await mfs.ls(path)
+    const files = await all(mfs.ls(path))
 
     expect(files.length).to.equal(0)
   })
 
+  it('creates nested directories', async () => {
+    await mfs.mkdir('/foo')
+    await mfs.mkdir('/bar')
+    await mfs.mkdir('/bar/baz')
+
+    const files = await all(mfs.ls('/bar'))
+
+    expect(files.length).to.equal(1)
+  })
+
   it('creates a nested directory with a different CID version to the parent', async () => {
     const directory = `cid-versions-${Math.random()}`
     const directoryPath = `/${directory}`
diff --git a/test/mv.spec.js b/test/mv.spec.js
index 7e9641f..422c3c7 100644
--- a/test/mv.spec.js
+++ b/test/mv.spec.js
@@ -4,11 +4,10 @@ const chai = require('chai')
 chai.use(require('dirty-chai'))
 const expect = chai.expect
 
-const bufferStream = require('pull-buffer-stream')
-const {
-  createMfs,
-  createShardedDirectory
-} = require('./helpers')
+const createMfs = require('./helpers/create-mfs')
+const createShardedDirectory = require('./helpers/create-sharded-directory')
+const streamToBuffer = require('./helpers/stream-to-buffer')
+const crypto = require('crypto')
 
 describe('mv', () => {
   let mfs
@@ -38,18 +37,14 @@ describe('mv', () => {
   it('moves a file', async () => {
     const source = `/source-file-${Math.random()}.txt`
     const destination = `/dest-file-${Math.random()}.txt`
-    let data = Buffer.alloc(0)
+    let data = crypto.randomBytes(500)
 
-    await mfs.write(source, bufferStream(500, {
-      collector: (bytes) => {
-        data = Buffer.concat([data, bytes])
-      }
-    }), {
+    await mfs.write(source, data, {
       create: true
     })
     await mfs.mv(source, destination)
-    const buffer = await mfs.read(destination)
+    const buffer = await streamToBuffer(mfs.read(destination))
 
     expect(buffer).to.deep.equal(data)
 
     try {
diff --git a/test/read.spec.js b/test/read.spec.js
index 3f1af26..b525cf1 100644
--- a/test/read.spec.js
+++ b/test/read.spec.js
@@ -4,231 +4,148 @@ const chai = require('chai')
 chai.use(require('dirty-chai'))
 const expect = chai.expect
 
-const bufferStream = require('pull-buffer-stream')
-const pull = require('pull-stream/pull')
-const collect = require('pull-stream/sinks/collect')
-const {
-  createMfs,
-  createShardedDirectory
-} = require('./helpers')
-const randomBytes = require('./helpers/random-bytes')
+const createMfs = require('./helpers/create-mfs')
+const createShardedDirectory = require('./helpers/create-sharded-directory')
+const crypto = require('crypto')
+const streamToBuffer = require('./helpers/stream-to-buffer')
 
 describe('read', () => {
   let mfs
-  let smallFile = randomBytes(13)
+  let smallFile = crypto.randomBytes(13)
 
   before(async () => {
     mfs = await createMfs()
   })
 
-  const methods = [{
-    name: 'read',
-    read: function () {
-      return mfs.read.apply(mfs, arguments)
-    },
-    collect: (buffer) => buffer
-  }, {
-    name: 'readPullStream',
-    read: function () {
-      return Promise.resolve(mfs.readPullStream.apply(mfs, arguments))
-    },
-    collect: (stream) => {
-      return new Promise((resolve, reject) => {
-        pull(
-          stream,
-          collect((err, buffers) => {
-            if (err) {
-              return reject(err)
-            }
-
-            resolve(Buffer.concat(buffers))
-          })
-        )
-      })
-    }
-  }, {
-    name: 'readReadableStream',
-    read: function () {
-      return Promise.resolve(mfs.readReadableStream.apply(mfs,
arguments)) - }, - collect: (stream) => { - return new Promise((resolve, reject) => { - let data = Buffer.alloc(0) - - stream.on('data', (buffer) => { - data = Buffer.concat([data, buffer]) - }) - - stream.on('end', () => { - resolve(data) - }) - - stream.on('error', (err) => { - reject(err) - }) + describe(`read`, () => { + it('reads a small file', async () => { + const filePath = '/small-file.txt' + + await mfs.write(filePath, smallFile, { + create: true }) - } - }] - methods.forEach(method => { - describe(`read ${method.name}`, () => { - it('reads a small file', async () => { - const filePath = '/small-file.txt' + const buffer = await streamToBuffer(mfs.read(filePath)) - await mfs.write(filePath, smallFile, { - create: true - }) + expect(buffer).to.deep.equal(smallFile) + }) - const result = await method.read(filePath) - const buffer = await method.collect(result) - expect(buffer).to.deep.equal(smallFile) - }) + it('reads a file with an offset', async () => { + const path = `/some-file-${Math.random()}.txt` + let data = crypto.randomBytes(100) + const offset = 10 - it('reads a file with an offset', async () => { - const path = `/some-file-${Math.random()}.txt` - let data = Buffer.alloc(0) - const offset = 10 - - await mfs.write(path, bufferStream(100, { - collector: (bytes) => { - data = Buffer.concat([data, bytes]) - } - }), { - create: true - }) - - const result = await method.read(path, { - offset - }) - const buffer = await method.collect(result) - - expect(buffer).to.deep.equal(data.slice(offset)) + await mfs.write(path, data, { + create: true }) - it('reads a file with a length', async () => { - const path = `/some-file-${Math.random()}.txt` - let data = Buffer.alloc(0) - const length = 10 - - await mfs.write(path, bufferStream(100, { - collector: (bytes) => { - data = Buffer.concat([data, bytes]) - } - }), { - create: true - }) - - const result = await method.read(path, { - length - }) - const buffer = await method.collect(result) - - expect(buffer).to.deep.equal(data.slice(0, length)) - }) + const buffer = await streamToBuffer(mfs.read(path, { + offset + })) - it('reads a file with a legacy count argument', async () => { - const path = `/some-file-${Math.random()}.txt` - let data = Buffer.alloc(0) - const length = 10 - - await mfs.write(path, bufferStream(100, { - collector: (bytes) => { - data = Buffer.concat([data, bytes]) - } - }), { - create: true - }) - - const result = await method.read(path, { - count: length - }) - const buffer = await method.collect(result) - - expect(buffer).to.deep.equal(data.slice(0, length)) - }) + expect(buffer).to.deep.equal(data.slice(offset)) + }) - it('reads a file with an offset and a length', async () => { - const path = `/some-file-${Math.random()}.txt` - let data = Buffer.alloc(0) - const offset = 10 - const length = 10 - - await mfs.write(path, bufferStream(100, { - collector: (bytes) => { - data = Buffer.concat([data, bytes]) - } - }), { - create: true - }) - - const result = await method.read(path, { - offset, - length - }) - const buffer = await method.collect(result) - - expect(buffer).to.deep.equal(data.slice(offset, offset + length)) - }) + it('reads a file with a length', async () => { + const path = `/some-file-${Math.random()}.txt` + let data = crypto.randomBytes(100) + const length = 10 - it('reads a file with an offset and a legacy count argument', async () => { - const path = `/some-file-${Math.random()}.txt` - let data = Buffer.alloc(0) - const offset = 10 - const length = 10 + await mfs.write(path, data, { + create: true + }) 
- await mfs.write(path, bufferStream(100, { - collector: (bytes) => { - data = Buffer.concat([data, bytes]) - } - }), { - create: true - }) + const buffer = await streamToBuffer(mfs.read(path, { + length + })) - const result = await method.read(path, { - offset, - count: length - }) + expect(buffer).to.deep.equal(data.slice(0, length)) + }) - const buffer = await method.collect(result) + it('reads a file with a legacy count argument', async () => { + const path = `/some-file-${Math.random()}.txt` + let data = crypto.randomBytes(100) + const length = 10 - expect(buffer).to.deep.equal(data.slice(offset, offset + length)) + await mfs.write(path, data, { + create: true }) - it('refuses to read a directory', async () => { - const path = '/' + const buffer = await streamToBuffer(mfs.read(path, { + count: length + })) + + expect(buffer).to.deep.equal(data.slice(0, length)) + }) + + it('reads a file with an offset and a length', async () => { + const path = `/some-file-${Math.random()}.txt` + let data = crypto.randomBytes(100) + const offset = 10 + const length = 10 - try { - const result = await method.read(path) - await method.collect(result) - throw new Error('Should have errored on trying to read a directory') - } catch (err) { - expect(err.message).to.contain('was not a file') - } + await mfs.write(path, data, { + create: true }) - it('refuses to read a non-existent file', async () => { - try { - const stream = await method.read(`/file-${Math.random()}.txt`) - await method.collect(stream) - throw new Error('Should have errored on non-existent file') - } catch (err) { - expect(err.message).to.contain('does not exist') - } + const buffer = await streamToBuffer(mfs.read(path, { + offset, + length + })) + + expect(buffer).to.deep.equal(data.slice(offset, offset + length)) + }) + + it('reads a file with an offset and a legacy count argument', async () => { + const path = `/some-file-${Math.random()}.txt` + let data = crypto.randomBytes(100) + const offset = 10 + const length = 10 + + await mfs.write(path, data, { + create: true }) - it('reads file from inside a sharded directory', async () => { - const shardedDirPath = await createShardedDirectory(mfs) - const filePath = `${shardedDirPath}/file-${Math.random()}.txt` - const content = Buffer.from([0, 1, 2, 3, 4]) + const buffer = await streamToBuffer(mfs.read(path, { + offset, + count: length + })) + + expect(buffer).to.deep.equal(data.slice(offset, offset + length)) + }) + + it('refuses to read a directory', async () => { + const path = '/' + + try { + await streamToBuffer(mfs.read(path)) + throw new Error('Should have errored on trying to read a directory') + } catch (err) { + expect(err.code).to.equal('ENOTFILE') + } + }) - await mfs.write(filePath, content, { - create: true - }) + it('refuses to read a non-existent file', async () => { + try { + await streamToBuffer(mfs.read(`/file-${Math.random()}.txt`)) + throw new Error('Should have errored on non-existent file') + } catch (err) { + expect(err.code).to.equal('ERR_NOT_FOUND') + } + }) - const stream = await method.read(filePath) + it('reads file from inside a sharded directory', async () => { + const shardedDirPath = await createShardedDirectory(mfs) + const filePath = `${shardedDirPath}/file-${Math.random()}.txt` + const content = Buffer.from([0, 1, 2, 3, 4]) - expect(await method.collect(stream)).to.deep.equal(content) + await mfs.write(filePath, content, { + create: true }) + + const buffer = await streamToBuffer(mfs.read(filePath)) + + expect(buffer).to.deep.equal(content) }) }) }) diff 
--git a/test/rm.spec.js b/test/rm.spec.js index 08725ca..087475c 100644 --- a/test/rm.spec.js +++ b/test/rm.spec.js @@ -4,16 +4,14 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect -const bufferStream = require('pull-buffer-stream') const CID = require('cids') -const { - createMfs, - createShardedDirectory, - createTwoShards -} = require('./helpers') +const createMfs = require('./helpers/create-mfs') +const createShardedDirectory = require('./helpers/create-sharded-directory') +const createTwoShards = require('./helpers/create-two-shards') +const crypto = require('crypto') const { FILE_SEPARATOR -} = require('../src/core/utils') +} = require('../src/core/utils/constants') describe('rm', () => { let mfs @@ -27,7 +25,7 @@ describe('rm', () => { await mfs.rm() throw new Error('No error was thrown for missing paths') } catch (err) { - expect(err.message).to.contain('Please supply at least one path to remove') + expect(err.code).to.equal('EINVALIDPARAMS') } }) @@ -36,7 +34,7 @@ describe('rm', () => { await mfs.rm(FILE_SEPARATOR) throw new Error('No error was thrown for missing paths') } catch (err) { - expect(err.message).to.contain('Cannot delete root') + expect(err.code).to.equal('EINVALIDPARAMS') } }) @@ -49,7 +47,7 @@ describe('rm', () => { await mfs.rm(path) throw new Error('No error was thrown for missing recursive flag') } catch (err) { - expect(err.message).to.contain(`${path} is a directory, use -r to remove directories`) + expect(err.code).to.equal('EDIR') } }) @@ -58,14 +56,14 @@ describe('rm', () => { await mfs.rm(`/file-${Math.random()}`) throw new Error('No error was thrown for non-existent file') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('ERR_NOT_FOUND') } }) it('removes a file', async () => { const file = `/some-file-${Math.random()}.txt` - await mfs.write(file, bufferStream(100), { + await mfs.write(file, crypto.randomBytes(100), { create: true, parents: true }) @@ -86,11 +84,11 @@ describe('rm', () => { const file1 = `/some-file-${Math.random()}.txt` const file2 = `/some-file-${Math.random()}.txt` - await mfs.write(file1, bufferStream(100), { + await mfs.write(file1, crypto.randomBytes(100), { create: true, parents: true }) - await mfs.write(file2, bufferStream(100), { + await mfs.write(file2, crypto.randomBytes(100), { create: true, parents: true }) @@ -102,14 +100,14 @@ describe('rm', () => { await mfs.stat(file1) throw new Error('File #1 was not removed') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('ERR_NOT_FOUND') } try { await mfs.stat(file2) throw new Error('File #2 was not removed') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('ERR_NOT_FOUND') } }) @@ -125,7 +123,7 @@ describe('rm', () => { await mfs.stat(directory) throw new Error('Directory was not removed') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('ERR_NOT_FOUND') } }) @@ -142,17 +140,17 @@ describe('rm', () => { }) try { - await mfs.ls(subdirectory) + await mfs.stat(path) throw new Error('File was not removed') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('ERR_NOT_FOUND') } try { - await mfs.ls(directory) + await mfs.stat(directory) throw new Error('Directory was not removed') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('EERR_NOT_FOUNDNOLINK') } }) @@ -160,7 +158,7 @@ 
describe('rm', () => { const directory = `directory-${Math.random()}` const file = `/${directory}/some-file-${Math.random()}.txt` - await mfs.write(file, bufferStream(100), { + await mfs.write(file, crypto.randomBytes(100), { create: true, parents: true }) @@ -172,14 +170,14 @@ describe('rm', () => { await mfs.stat(file) throw new Error('File was not removed') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('ERR_NOT_FOUND') } try { await mfs.stat(`/${directory}`) throw new Error('Directory was not removed') } catch (err) { - expect(err.message).to.contain('does not exist') + expect(err.code).to.equal('ERR_NOT_FOUND') } }) @@ -203,15 +201,15 @@ describe('rm', () => { try { await mfs.stat(dirPath) throw new Error('Directory was not removed') - } catch (error) { - expect(error.message).to.contain('does not exist') + } catch (err) { + expect(err.code).to.equal('ERR_NOT_FOUND') } try { await mfs.stat(shardedDirPath) throw new Error('Directory was not removed') - } catch (error) { - expect(error.message).to.contain('does not exist') + } catch (err) { + expect(err.code).to.equal('ERR_NOT_FOUND') } }) @@ -233,15 +231,15 @@ describe('rm', () => { try { await mfs.stat(otherDirPath) throw new Error('Directory was not removed') - } catch (error) { - expect(error.message).to.contain('does not exist') + } catch (err) { + expect(err.code).to.equal('ERR_NOT_FOUND') } try { await mfs.stat(finalShardedDirPath) throw new Error('Directory was not removed') - } catch (error) { - expect(error.message).to.contain('does not exist') + } catch (err) { + expect(err.code).to.equal('ERR_NOT_FOUND') } }) @@ -260,7 +258,7 @@ describe('rm', () => { await mfs.rm(nextFile.path) const stats = await mfs.stat(dirPath) - const updatedDirCid = new CID(stats.hash) + const updatedDirCid = stats.cid expect(stats.type).to.equal('hamt-sharded-directory') expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithSomeFiles.toBaseEncodedString()) @@ -281,7 +279,7 @@ describe('rm', () => { await mfs.rm(nextFile.path) const stats = await mfs.stat(dirPath) - const updatedDirCid = new CID(stats.hash) + const updatedDirCid = stats.cid expect(stats.type).to.equal('hamt-sharded-directory') expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithSomeFiles.toBaseEncodedString()) @@ -302,7 +300,7 @@ describe('rm', () => { await mfs.rm(nextFile.path) const stats = await mfs.stat(dirPath) - const updatedDirCid = new CID(stats.hash) + const updatedDirCid = stats.cid expect(stats.type).to.equal('hamt-sharded-directory') expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithSomeFiles.toBaseEncodedString()) @@ -323,7 +321,7 @@ describe('rm', () => { await mfs.rm(nextFile.path) const stats = await mfs.stat(dirPath) - const updatedDirCid = new CID(stats.hash) + const updatedDirCid = stats.cid expect(stats.type).to.equal('hamt-sharded-directory') expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithSomeFiles.toBaseEncodedString()) diff --git a/test/stat.spec.js b/test/stat.spec.js index 633ed04..a8d9808 100644 --- a/test/stat.spec.js +++ b/test/stat.spec.js @@ -4,20 +4,19 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect -const randomBytes = require('./helpers/random-bytes') +const crypto = require('crypto') const CID = require('cids') - +const createMfs = require('./helpers/create-mfs') +const createShardedDirectory = require('./helpers/create-sharded-directory') const { - createMfs, - createShardedDirectory, 
EMPTY_DIRECTORY_HASH, EMPTY_DIRECTORY_HASH_BASE32 -} = require('./helpers') +} = require('./helpers/constants') describe('stat', () => { let mfs - let smallFile = randomBytes(13) - let largeFile = randomBytes(490668) + let smallFile = crypto.randomBytes(13) + let largeFile = crypto.randomBytes(490668) before(async () => { mfs = await createMfs() @@ -62,46 +61,6 @@ describe('stat', () => { expect(stats.type).to.equal('directory') }) - it('returns only a hash', async () => { - const path = `/directory-${Math.random()}` - - await mfs.mkdir(path) - - const stats = await mfs.stat(path, { - hash: true - }) - - expect(Object.keys(stats).length).to.equal(1) - expect(stats.hash).to.equal(EMPTY_DIRECTORY_HASH) - }) - - it('returns only a base32 hash', async () => { - const path = `/directory-${Math.random()}` - - await mfs.mkdir(path) - - const stats = await mfs.stat(path, { - hash: true, - cidBase: 'base32' - }) - - expect(Object.keys(stats).length).to.equal(1) - expect(stats.hash).to.equal(EMPTY_DIRECTORY_HASH_BASE32) - }) - - it('returns only the size', async () => { - const path = `/directory-${Math.random()}` - - await mfs.mkdir(path) - - const stats = await mfs.stat(path, { - size: true - }) - - expect(Object.keys(stats).length).to.equal(1) - expect(stats.size).to.equal(0) - }) - it.skip('computes how much of the DAG is local', async () => { }) @@ -136,24 +95,6 @@ describe('stat', () => { expect(stats.type).to.equal('file') }) - it('stats a large file with base32', async () => { - const filePath = '/stat/large-file.txt' - - await mfs.write(filePath, largeFile, { - create: true, - parents: true - }) - - const stats = await mfs.stat(filePath, { - cidBase: 'base32' - }) - expect(stats.hash.startsWith('b')).to.equal(true) - expect(stats.size).to.equal(largeFile.length) - expect(stats.cumulativeSize).to.equal(490800) - expect(stats.blocks).to.equal(2) - expect(stats.type).to.equal('file') - }) - it('stats a raw node', async () => { const filePath = '/stat/large-file.txt' @@ -164,15 +105,14 @@ describe('stat', () => { }) const stats = await mfs.stat(filePath) - const result = await mfs.ipld.get(new CID(stats.hash)) - const node = result.value + const node = await mfs.ipld.get(stats.cid) const child = node.links[0] expect(child.cid.codec).to.equal('raw') const rawNodeStats = await mfs.stat(`/ipfs/${child.cid.toBaseEncodedString()}`) - expect(rawNodeStats.hash).to.equal(child.cid.toBaseEncodedString()) + expect(rawNodeStats.cid.toBaseEncodedString()).to.equal(child.cid.toBaseEncodedString()) expect(rawNodeStats.type).to.equal('file') // this is what go does }) @@ -186,8 +126,7 @@ describe('stat', () => { }) const stats = await mfs.stat(filePath) - const result = await mfs.ipld.get(new CID(stats.hash)) - const node = result.value + const node = await mfs.ipld.get(stats.cid) const child = node.links[0] expect(child.cid.codec).to.equal('raw') @@ -200,7 +139,7 @@ describe('stat', () => { const rawNodeStats = await mfs.stat(path) - expect(rawNodeStats.hash).to.equal(child.cid.toBaseEncodedString()) + expect(rawNodeStats.cid.toBaseEncodedString()).to.equal(child.cid.toBaseEncodedString()) expect(rawNodeStats.type).to.equal('file') // this is what go does }) @@ -215,7 +154,12 @@ describe('stat', () => { it('stats a file inside a sharded directory', async () => { const shardedDirPath = await createShardedDirectory(mfs) - const files = await mfs.ls(`${shardedDirPath}`) + const files = [] + + for await (const file of mfs.ls(`${shardedDirPath}`)) { + files.push(file) + } + const stats = await 
mfs.stat(`${shardedDirPath}/${files[0].name}`) expect(stats.type).to.equal('file') diff --git a/test/write.spec.js b/test/write.spec.js index 7f3bce2..41f26cd 100644 --- a/test/write.spec.js +++ b/test/write.spec.js @@ -5,21 +5,16 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect const isNode = require('detect-node') -const values = require('pull-stream/sources/values') -const bufferStream = require('pull-buffer-stream') const multihash = require('multihashes') -const randomBytes = require('./helpers/random-bytes') const util = require('util') -const { - collectLeafCids, - createMfs, - cidAtPath, - createShardedDirectory, - createTwoShards, - createShard -} = require('./helpers') -const CID = require('cids') +const createMfs = require('./helpers/create-mfs') +const cidAtPath = require('./helpers/cid-at-path') +const traverseLeafNodes = require('./helpers/traverse-leaf-nodes') +const createShard = require('./helpers/create-shard') +const createShardedDirectory = require('./helpers/create-sharded-directory') +const createTwoShards = require('./helpers/create-two-shards') const crypto = require('crypto') +const all = require('async-iterator-all') let fs, tempWrite @@ -30,11 +25,10 @@ if (isNode) { describe('write', () => { let mfs - let smallFile = randomBytes(13) - let largeFile = randomBytes(490668) + let smallFile = crypto.randomBytes(13) + let largeFile = crypto.randomBytes(490668) const runTest = (fn) => { - let i = 0 const iterations = 5 const files = [{ type: 'Small file', @@ -49,19 +43,12 @@ describe('write', () => { }, { type: 'Really large file', path: `/really-large-file-${Math.random()}.jpg`, - content: (end, callback) => { - if (end) { - return callback(end) + content: { + [Symbol.asyncIterator]: async function * () { + for (let i = 0; i < iterations; i++) { + yield largeFile + } } - - if (i === iterations) { - // Ugh. 
          https://github.com/standard/standard/issues/623
-          const foo = true
-          return callback(foo)
         }
-
-        i++
-        callback(null, largeFile)
       },
       contentSize: largeFile.length * iterations
     }]
@@ -82,7 +69,7 @@ describe('write', () => {
       })
       throw new Error('Did not fail to convert -1 into a pull stream source')
     } catch (err) {
-      expect(err.message).to.contain('Don\'t know how to convert -1 into a pull stream source')
+      expect(err.code).to.equal('EINVALIDPARAMS')
     }
   })

@@ -93,7 +80,7 @@ describe('write', () => {
       })
       throw new Error('Did not object to invalid paths')
     } catch (err) {
-      expect(err.message).to.contain('paths must start with a leading /')
+      expect(err.code).to.equal('EINVALIDPATH')
     }
   })

@@ -104,7 +91,7 @@ describe('write', () => {
       })
       throw new Error('Did not object to negative write offset')
     } catch (err) {
-      expect(err.message).to.contain('cannot have negative write offset')
+      expect(err.code).to.equal('EINVALIDPARAMS')
     }
   })

@@ -115,7 +102,7 @@ describe('write', () => {
       })
       throw new Error('Did not object to negative byte count')
     } catch (err) {
-      expect(err.message).to.contain('cannot have negative byte count')
+      expect(err.code).to.equal('EINVALIDPARAMS')
     }
   })

@@ -125,9 +112,7 @@ describe('write', () => {
       create: true
     })

-    const files = await mfs.ls('/', {
-      long: true
-    })
+    const files = await all(mfs.ls('/'))

     expect(files.length).to.equal(1)
     expect(files[0].name).to.equal('foo.txt')
@@ -199,18 +184,6 @@ describe('write', () => {
     expect(stats.size).to.equal(smallFile.length)
   })

-  it('writes a small file using a pull stream source', async function () {
-    const filePath = `/small-file-${Math.random()}.txt`
-
-    await mfs.write(filePath, values([smallFile]), {
-      create: true
-    })
-
-    const stats = await mfs.stat(filePath)
-
-    expect(stats.size).to.equal(smallFile.length)
-  })
-
   it('writes a small file using an HTML5 Blob (Browser only)', async function () {
     if (!global.Blob) {
       return this.skip()
@@ -310,7 +283,8 @@ describe('write', () => {
         length: 2
       })

-      const buffer = await mfs.read(path)
+      const buffer = Buffer.concat(await all(mfs.read(path)))
+
      expect(buffer.length).to.equal(2)
     })
   })
@@ -322,15 +296,19 @@ describe('write', () => {
       await mfs.write(path, content, {
         create: true
       })
+
+      expect((await mfs.stat(path)).size).to.equal(contentSize)
+
       await mfs.write(path, newContent)

       const stats = await mfs.stat(path)
       expect(stats.size).to.equal(contentSize)

-      const buffer = await mfs.read(path, {
+      const buffer = Buffer.concat(await all(mfs.read(path, {
         offset: 0,
         length: newContent.length
-      })
+      })))
+
       expect(buffer).to.deep.equal(newContent)
     })
   })
@@ -347,10 +325,11 @@ describe('write', () => {
       const stats = await mfs.stat(path)
       expect(stats.size).to.equal(offset + contentSize)

-      const buffer = await mfs.read(path, {
+      const buffer = Buffer.concat(await all(mfs.read(path, {
         offset: 0,
         length: offset
-      })
+      })))
+
       expect(buffer).to.deep.equal(Buffer.alloc(offset, 0))
     })
   })
@@ -371,9 +350,10 @@ describe('write', () => {
       const stats = await mfs.stat(path)
       expect(stats.size).to.equal(contentSize + newContent.length - 1)

-      const buffer = await mfs.read(path, {
-        offset
-      })
+      const buffer = Buffer.concat(await all(mfs.read(path, {
+        offset: offset
+      })))
+
       expect(buffer).to.deep.equal(newContent)
     })
   })
@@ -393,9 +373,10 @@ describe('write', () => {
       const stats = await mfs.stat(path)
       expect(stats.size).to.equal(newContent.length + offset)

-      const buffer = await mfs.read(path, {
+      const buffer = Buffer.concat(await all(mfs.read(path, {
         offset: offset - 5
-      })
+      })))
+
       expect(buffer).to.deep.equal(Buffer.concat([Buffer.from([0, 0, 0, 0, 0]), newContent]))
     })
   })
@@ -414,50 +395,12 @@ describe('write', () => {
       const stats = await mfs.stat(path)
       expect(stats.size).to.equal(newContent.length)

-      const buffer = await mfs.read(path)
-      expect(buffer).to.deep.equal(newContent)
-    })
-  })
-
-  runTest(({ type, path, content }) => {
-    it(`truncates a file after writing with a stream (${type})`, async () => {
-      const newContent = Buffer.from('Oh hai!')
-      const stream = values([newContent])
-
-      await mfs.write(path, content, {
-        create: true
-      })
-      await mfs.write(path, stream, {
-        truncate: true
-      })
-
-      const stats = await mfs.stat(path)
-      expect(stats.size).to.equal(newContent.length)
+      const buffer = Buffer.concat(await all(mfs.read(path)))

-      const buffer = await mfs.read(path)
       expect(buffer).to.deep.equal(newContent)
     })
   })

-  runTest(({ type, path, content }) => {
-    it(`truncates a file after writing with a stream with an offset (${type})`, async () => {
-      const offset = 100
-      const newContent = Buffer.from('Oh hai!')
-      const stream = values([newContent])
-
-      await mfs.write(path, content, {
-        create: true
-      })
-      await mfs.write(path, stream, {
-        truncate: true,
-        offset
-      })
-
-      const stats = await mfs.stat(path)
-      expect(stats.size).to.equal(offset + newContent.length)
-    })
-  })
-
   runTest(({ type, path, content }) => {
     it(`writes a file with raw blocks for newly created leaf nodes (${type})`, async () => {
       await mfs.write(path, content, {
@@ -466,11 +409,10 @@ describe('write', () => {
       })

       const stats = await mfs.stat(path)
-      const cids = await collectLeafCids(mfs, stats.hash)
-      const rawNodes = cids
-        .filter(cid => cid.codec === 'raw')

-      expect(rawNodes).to.not.be.empty()
+      for await (const { cid } of traverseLeafNodes(mfs, stats.cid)) {
+        expect(cid.codec).to.equal('raw')
+      }
     })
   })

@@ -480,7 +422,7 @@ describe('write', () => {
     for (let i = 0; i < 10; i++) {
       files.push({
         name: `source-file-${Math.random()}.txt`,
-        source: bufferStream(100)
+        source: crypto.randomBytes(100)
       })
     }

@@ -491,7 +433,7 @@ describe('write', () => {
       }))
     )

-    const listing = await mfs.ls('/concurrent')
+    const listing = await all(mfs.ls('/concurrent'))

     expect(listing.length).to.equal(files.length)
     listing.forEach(listedFile => {
@@ -500,18 +442,8 @@ describe('write', () => {
   })

   it('rewrites really big files', async function () {
-    let expectedBytes = Buffer.alloc(0)
-    let originalBytes = Buffer.alloc(0)
-    const initialStream = bufferStream(1024 * 300, {
-      collector: (bytes) => {
-        originalBytes = Buffer.concat([originalBytes, bytes])
-      }
-    })
-    const newDataStream = bufferStream(1024 * 300, {
-      collector: (bytes) => {
-        expectedBytes = Buffer.concat([expectedBytes, bytes])
-      }
-    })
+    const initialStream = crypto.randomBytes(1024 * 300)
+    const newDataStream = crypto.randomBytes(1024 * 300)

     const fileName = `/rewrite/file-${Math.random()}.txt`

@@ -524,19 +456,19 @@ describe('write', () => {
       offset: 0
     })

-    const actualBytes = await mfs.read(fileName)
+    const actualBytes = Buffer.concat(await all(mfs.read(fileName)))

-    for (var i = 0; i < expectedBytes.length; i++) {
-      if (expectedBytes[i] !== actualBytes[i]) {
-        if (originalBytes[i] === actualBytes[i]) {
-          throw new Error(`Bytes at index ${i} were not overwritten - expected ${expectedBytes[i]} actual ${originalBytes[i]}`)
+    for (var i = 0; i < newDataStream.length; i++) {
+      if (newDataStream[i] !== actualBytes[i]) {
+        if (initialStream[i] === actualBytes[i]) {
+          throw new Error(`Bytes at index ${i} were not overwritten - expected ${newDataStream[i]} actual ${initialStream[i]}`)
         }

-        throw new Error(`Bytes at index ${i} not equal - expected ${expectedBytes[i]} actual ${actualBytes[i]}`)
+        throw new Error(`Bytes at index ${i} not equal - expected ${newDataStream[i]} actual ${actualBytes[i]}`)
       }
     }

-    expect(actualBytes).to.deep.equal(expectedBytes)
+    expect(actualBytes).to.deep.equal(newDataStream)
   })

   it('shards a large directory when writing too many links to it', async () => {
@@ -565,9 +497,9 @@ describe('write', () => {

     expect((await mfs.stat(dirPath)).type).to.equal('hamt-sharded-directory')

-    const files = await mfs.ls(dirPath, {
+    const files = await all(mfs.ls(dirPath, {
       long: true
-    })
+    }))

     // new file should be in directory
     expect(files.filter(file => file.name === newFile).pop()).to.be.ok()
@@ -586,17 +518,17 @@ describe('write', () => {
     // should still be a sharded directory
     expect((await mfs.stat(shardedDirPath)).type).to.equal('hamt-sharded-directory')

-    const files = await mfs.ls(shardedDirPath, {
+    const files = await all(mfs.ls(shardedDirPath, {
       long: true
-    })
+    }))

     // new file should be in the directory
     expect(files.filter(file => file.name === newFile).pop()).to.be.ok()

     // should be able to ls new file directly
-    expect(await mfs.ls(newFilePath, {
+    expect(await all(mfs.ls(newFilePath, {
       long: true
-    })).to.not.be.empty()
+    }))).to.not.be.empty()
   })

   it('overwrites a file in a sharded directory when positions do not match', async () => {
@@ -618,12 +550,14 @@ describe('write', () => {
     })

     // read the file back
-    expect(await mfs.read(newFilePath)).to.deep.equal(newContent)
+    const buffer = Buffer.concat(await all(mfs.read(newFilePath)))
+
+    expect(buffer).to.deep.equal(newContent)

     // should be able to ls new file directly
-    expect(await mfs.ls(newFilePath, {
+    expect(await all(mfs.ls(newFilePath, {
       long: true
-    })).to.not.be.empty()
+    }))).to.not.be.empty()
   })

   it('overwrites file in a sharded directory', async () => {
@@ -645,12 +579,14 @@ describe('write', () => {
     })

     // read the file back
-    expect(await mfs.read(newFilePath)).to.deep.equal(newContent)
+    const buffer = Buffer.concat(await all(mfs.read(newFilePath)))
+
+    expect(buffer).to.deep.equal(newContent)

     // should be able to ls new file directly
-    expect(await mfs.ls(newFilePath, {
+    expect(await all(mfs.ls(newFilePath, {
       long: true
-    })).to.not.be.empty()
+    }))).to.not.be.empty()
   })

   it('overwrites a file in a subshard of a sharded directory', async () => {
@@ -672,12 +608,14 @@ describe('write', () => {
     })

     // read the file back
-    expect(await mfs.read(newFilePath)).to.deep.equal(newContent)
+    const buffer = Buffer.concat(await all(mfs.read(newFilePath)))
+
+    expect(buffer).to.deep.equal(newContent)

     // should be able to ls new file directly
-    expect(await mfs.ls(newFilePath, {
+    expect(await all(mfs.ls(newFilePath, {
       long: true
-    })).to.not.be.empty()
+    }))).to.not.be.empty()
   })

   it('writes a file with a different CID version to the parent', async () => {
@@ -700,7 +638,7 @@ describe('write', () => {

     expect((await cidAtPath(filePath, mfs)).version).to.equal(1)

-    const actualBytes = await mfs.read(filePath)
+    const actualBytes = Buffer.concat(await all(mfs.read(filePath)))

     expect(actualBytes).to.deep.equal(expectedBytes)
   })
@@ -731,7 +669,7 @@ describe('write', () => {

     expect((await cidAtPath(filePath, mfs)).version).to.equal(1)

-    const actualBytes = await mfs.read(filePath)
+    const actualBytes = Buffer.concat(await all(mfs.read(filePath)))

     expect(actualBytes).to.deep.equal(expectedBytes)
   })
@@ -762,7 +700,7 @@ describe('write', () => {

     expect((await cidAtPath(filePath, mfs)).version).to.equal(1)

-    const actualBytes = await mfs.read(filePath)
+    const actualBytes = Buffer.concat(await all(mfs.read(filePath)))

     expect(actualBytes).to.deep.equal(Buffer.from([5, 0, 1, 2, 3, 10, 11]))
   })
@@ -788,7 +726,7 @@ describe('write', () => {

     expect(multihash.decode((await cidAtPath(filePath, mfs)).multihash).name).to.equal('sha2-512')

-    const actualBytes = await mfs.read(filePath)
+    const actualBytes = Buffer.concat(await all(mfs.read(filePath)))

     expect(actualBytes).to.deep.equal(expectedBytes)
   })
@@ -810,7 +748,7 @@ describe('write', () => {
     })

     const stats = await mfs.stat(dirPath)
-    const updatedDirCid = new CID(stats.hash)
+    const updatedDirCid = stats.cid

     expect(stats.type).to.equal('hamt-sharded-directory')
     expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithAllFiles.toBaseEncodedString())
@@ -833,7 +771,7 @@ describe('write', () => {
     })

     const stats = await mfs.stat(dirPath)
-    const updatedDirCid = new CID(stats.hash)
+    const updatedDirCid = stats.cid

     expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithAllFiles.toBaseEncodedString())
   })
@@ -855,7 +793,7 @@ describe('write', () => {
     })

     const stats = await mfs.stat(dirPath)
-    const updatedDirCid = new CID(stats.hash)
+    const updatedDirCid = stats.cid

     expect(stats.type).to.equal('hamt-sharded-directory')
     expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithAllFiles.toBaseEncodedString())
@@ -878,7 +816,7 @@ describe('write', () => {
     })

     const stats = await mfs.stat(dirPath)
-    const updatedDirCid = new CID(stats.hash)
+    const updatedDirCid = stats.cid

     expect(stats.type).to.equal('hamt-sharded-directory')
     expect(updatedDirCid.toBaseEncodedString()).to.deep.equal(dirWithAllFiles.toBaseEncodedString())
@@ -921,8 +859,8 @@ describe('write', () => {
     await mfs.stat(`${dir}/supermodule_test`)
     await mfs.stat(`${dir}/node-gr`)

-    expect(await mfs.read(`${dir}/node-gr`)).to.deep.equal(nodeGrContent)
-    expect(await mfs.read(`${dir}/supermodule_test`)).to.deep.equal(superModuleContent)
+    expect(Buffer.concat(await all(mfs.read(`${dir}/node-gr`)))).to.deep.equal(nodeGrContent)
+    expect(Buffer.concat(await all(mfs.read(`${dir}/supermodule_test`)))).to.deep.equal(superModuleContent)

     await mfs.rm(`${dir}/supermodule_test`)

@@ -952,7 +890,7 @@ describe('write', () => {

     await mfs.stat(`${dir}/file-1011.txt`)

-    expect(await mfs.read(`${dir}/file-1011.txt`)).to.deep.equal(buf)
+    expect(Buffer.concat(await all(mfs.read(`${dir}/file-1011.txt`)))).to.deep.equal(buf)
   })

   it('removes files that cause sub-sub-shards to be removed', async function () {