diff --git a/node_modules/tar/.npmignore b/node_modules/node-gyp/node_modules/tar/.npmignore similarity index 100% rename from node_modules/tar/.npmignore rename to node_modules/node-gyp/node_modules/tar/.npmignore diff --git a/node_modules/tar/.travis.yml b/node_modules/node-gyp/node_modules/tar/.travis.yml similarity index 100% rename from node_modules/tar/.travis.yml rename to node_modules/node-gyp/node_modules/tar/.travis.yml diff --git a/node_modules/tar/node_modules/block-stream/LICENSE b/node_modules/node-gyp/node_modules/tar/LICENSE similarity index 99% rename from node_modules/tar/node_modules/block-stream/LICENSE rename to node_modules/node-gyp/node_modules/tar/LICENSE index 19129e315fe..019b7e40ea0 100644 --- a/node_modules/tar/node_modules/block-stream/LICENSE +++ b/node_modules/node-gyp/node_modules/tar/LICENSE @@ -1,11 +1,8 @@ The ISC License - Copyright (c) Isaac Z. Schlueter and Contributors - Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR diff --git a/node_modules/node-gyp/node_modules/tar/README.md b/node_modules/node-gyp/node_modules/tar/README.md new file mode 100644 index 00000000000..cfda2ac1806 --- /dev/null +++ b/node_modules/node-gyp/node_modules/tar/README.md @@ -0,0 +1,50 @@ +# node-tar + +Tar for Node.js. + +[![NPM](https://nodei.co/npm/tar.png)](https://nodei.co/npm/tar/) + +## API + +See `examples/` for usage examples. + +### var tar = require('tar') + +Returns an object with `.Pack`, `.Extract` and `.Parse` methods. + +### tar.Pack([properties]) + +Returns a through stream. Use +[fstream](https://npmjs.org/package/fstream) to write files into the +pack stream and you will receive tar archive data from the pack +stream. + +This only works with directories; it does not work with individual files. + +The optional `properties` object is used to set properties in the tar +'Global Extended Header'. If the `fromBase` property is set to true, +the tar will contain files relative to the path passed, and not with +the path included. + +### tar.Extract([options]) + +Returns a through stream. Write tar data to the stream and the files +in the tarball will be extracted onto the filesystem. + +`options` can be: + +```js +{ + path: '/path/to/extract/tar/into', + strip: 0, // how many path segments to strip from the root when extracting +} +``` + +`options` also gets passed to the `fstream.Writer` instance that `tar` +uses internally. + +### tar.Parse() + +Returns a writable stream. Write tar data to it and it will emit +`entry` events for each entry parsed from the tarball. This is used by +`tar.Extract`.
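For quick reference, a minimal sketch of the streams this README describes; `archive.tar` and `dest` are hypothetical paths:

```js
// extract a tarball onto the filesystem (tar@2 API, as documented above)
var tar = require('tar')
var fs = require('fs')

fs.createReadStream('archive.tar')
  .pipe(tar.Extract({ path: 'dest', strip: 1 }))
  .on('end', function () { console.log('extracted') })

// or inspect entries with the lower-level parser
fs.createReadStream('archive.tar')
  .pipe(tar.Parse())
  .on('entry', function (entry) { console.log(entry.path) })
```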
diff --git a/node_modules/tar/examples/extracter.js b/node_modules/node-gyp/node_modules/tar/examples/extracter.js similarity index 100% rename from node_modules/tar/examples/extracter.js rename to node_modules/node-gyp/node_modules/tar/examples/extracter.js diff --git a/node_modules/tar/examples/packer.js b/node_modules/node-gyp/node_modules/tar/examples/packer.js similarity index 100% rename from node_modules/tar/examples/packer.js rename to node_modules/node-gyp/node_modules/tar/examples/packer.js diff --git a/node_modules/tar/examples/reader.js b/node_modules/node-gyp/node_modules/tar/examples/reader.js similarity index 100% rename from node_modules/tar/examples/reader.js rename to node_modules/node-gyp/node_modules/tar/examples/reader.js diff --git a/node_modules/tar/lib/buffer-entry.js b/node_modules/node-gyp/node_modules/tar/lib/buffer-entry.js similarity index 100% rename from node_modules/tar/lib/buffer-entry.js rename to node_modules/node-gyp/node_modules/tar/lib/buffer-entry.js diff --git a/node_modules/tar/lib/entry-writer.js b/node_modules/node-gyp/node_modules/tar/lib/entry-writer.js similarity index 100% rename from node_modules/tar/lib/entry-writer.js rename to node_modules/node-gyp/node_modules/tar/lib/entry-writer.js diff --git a/node_modules/tar/lib/entry.js b/node_modules/node-gyp/node_modules/tar/lib/entry.js similarity index 100% rename from node_modules/tar/lib/entry.js rename to node_modules/node-gyp/node_modules/tar/lib/entry.js diff --git a/node_modules/tar/lib/extended-header-writer.js b/node_modules/node-gyp/node_modules/tar/lib/extended-header-writer.js similarity index 100% rename from node_modules/tar/lib/extended-header-writer.js rename to node_modules/node-gyp/node_modules/tar/lib/extended-header-writer.js diff --git a/node_modules/tar/lib/extended-header.js b/node_modules/node-gyp/node_modules/tar/lib/extended-header.js similarity index 100% rename from node_modules/tar/lib/extended-header.js rename to node_modules/node-gyp/node_modules/tar/lib/extended-header.js diff --git a/node_modules/node-gyp/node_modules/tar/lib/extract.js b/node_modules/node-gyp/node_modules/tar/lib/extract.js new file mode 100644 index 00000000000..fe1bb976eb0 --- /dev/null +++ b/node_modules/node-gyp/node_modules/tar/lib/extract.js @@ -0,0 +1,94 @@ +// give it a tarball and a path, and it'll dump the contents + +module.exports = Extract + +var tar = require("../tar.js") + , fstream = require("fstream") + , inherits = require("inherits") + , path = require("path") + +function Extract (opts) { + if (!(this instanceof Extract)) return new Extract(opts) + tar.Parse.apply(this) + + if (typeof opts !== "object") { + opts = { path: opts } + } + + // better to drop in cwd? seems more standard. + opts.path = opts.path || path.resolve("node-tar-extract") + opts.type = "Directory" + opts.Directory = true + + // similar to --strip or --strip-components + opts.strip = +opts.strip + if (!opts.strip || opts.strip <= 0) opts.strip = 0 + + this._fst = fstream.Writer(opts) + + this.pause() + var me = this + + // Hardlinks in tarballs are relative to the root + // of the tarball. So, they need to be resolved against + // the target directory in order to be created properly. + me.on("entry", function (entry) { + // if there's a "strip" argument, then strip off that many + // path components. 
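+ // e.g. strip: 1 rewrites "package/lib/index.js" to "lib/index.js"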
+ if (opts.strip) { + var p = entry.path.split("/").slice(opts.strip).join("/") + entry.path = entry.props.path = p + if (entry.linkpath) { + var lp = entry.linkpath.split("/").slice(opts.strip).join("/") + entry.linkpath = entry.props.linkpath = lp + } + } + if (entry.type === "Link") { + entry.linkpath = entry.props.linkpath = + path.join(opts.path, path.join("/", entry.props.linkpath)) + } + + if (entry.type === "SymbolicLink") { + var dn = path.dirname(entry.path) || "" + var linkpath = entry.props.linkpath + var target = path.resolve(opts.path, dn, linkpath) + if (target.indexOf(opts.path) !== 0) { + linkpath = path.join(opts.path, path.join("/", linkpath)) + } + entry.linkpath = entry.props.linkpath = linkpath + } + }) + + this._fst.on("ready", function () { + me.pipe(me._fst, { end: false }) + me.resume() + }) + + this._fst.on('error', function(err) { + me.emit('error', err) + }) + + this._fst.on('drain', function() { + me.emit('drain') + }) + + // this._fst.on("end", function () { + // console.error("\nEEEE Extract End", me._fst.path) + // }) + + this._fst.on("close", function () { + // console.error("\nEEEE Extract End", me._fst.path) + me.emit("finish") + me.emit("end") + me.emit("close") + }) +} + +inherits(Extract, tar.Parse) + +Extract.prototype._streamEnd = function () { + var me = this + if (!me._ended || me._entry) me.error("unexpected eof") + me._fst.end() + // my .end() is coming later. +} diff --git a/node_modules/tar/lib/global-header-writer.js b/node_modules/node-gyp/node_modules/tar/lib/global-header-writer.js similarity index 100% rename from node_modules/tar/lib/global-header-writer.js rename to node_modules/node-gyp/node_modules/tar/lib/global-header-writer.js diff --git a/node_modules/node-gyp/node_modules/tar/lib/header.js b/node_modules/node-gyp/node_modules/tar/lib/header.js new file mode 100644 index 00000000000..05b237c0c7b --- /dev/null +++ b/node_modules/node-gyp/node_modules/tar/lib/header.js @@ -0,0 +1,385 @@ +// parse a 512-byte header block to a data object, or vice-versa +// If the data won't fit nicely in a simple header, then generate +// the appropriate extended header file, and return that. + +module.exports = TarHeader + +var tar = require("../tar.js") + , fields = tar.fields + , fieldOffs = tar.fieldOffs + , fieldEnds = tar.fieldEnds + , fieldSize = tar.fieldSize + , numeric = tar.numeric + , assert = require("assert").ok + , space = " ".charCodeAt(0) + , slash = "/".charCodeAt(0) + , bslash = process.platform === "win32" ? "\\".charCodeAt(0) : null + +function TarHeader (block) { + if (!(this instanceof TarHeader)) return new TarHeader(block) + if (block) this.decode(block) +} + +TarHeader.prototype = + { decode : decode + , encode: encode + , calcSum: calcSum + , checkSum: checkSum + } + +TarHeader.parseNumeric = parseNumeric +TarHeader.encode = encode +TarHeader.decode = decode + +// note that this will only do the normal ustar header, not any kind +// of extended posix header file. If something doesn't fit comfortably, +// then it will set obj.needExtended = true, and set the block to +// the closest approximation. +function encode (obj) { + if (!obj && !(this instanceof TarHeader)) throw new Error( + "encode must be called on a TarHeader, or supplied an object") + + obj = obj || this + var block = obj.block = new Buffer(512) + + // if the object has a "prefix", then that's actually an extension of + // the path field. 
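+ // e.g. prefix "foo" joined with path "bar/baz" is encoded as the single path "foo/bar/baz"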
+ if (obj.prefix) { + // console.error("%% header encoding, got a prefix", obj.prefix) + obj.path = obj.prefix + "/" + obj.path + // console.error("%% header encoding, prefixed path", obj.path) + obj.prefix = "" + } + + obj.needExtended = false + + if (obj.mode) { + if (typeof obj.mode === "string") obj.mode = parseInt(obj.mode, 8) + obj.mode = obj.mode & 0777 + } + + for (var f = 0; fields[f] !== null; f ++) { + var field = fields[f] + , off = fieldOffs[f] + , end = fieldEnds[f] + , ret + + switch (field) { + case "cksum": + // special, done below, after all the others + break + + case "prefix": + // special, this is an extension of the "path" field. + // console.error("%% header encoding, skip prefix later") + break + + case "type": + // convert from long name to a single char. + var type = obj.type || "0" + if (type.length > 1) { + type = tar.types[obj.type] + if (!type) type = "0" + } + writeText(block, off, end, type) + break + + case "path": + // uses the "prefix" field if > 100 bytes, but <= 255 + var pathLen = Buffer.byteLength(obj.path) + , pathFSize = fieldSize[fields.path] + , prefFSize = fieldSize[fields.prefix] + + // paths between 100 and 255 should use the prefix field. + // (paths longer than 255 are handled further down) + if (pathLen > pathFSize && + pathLen <= pathFSize + prefFSize) { + // need to find a slash somewhere in the middle so that + // path and prefix both fit in their respective fields + var searchStart = pathLen - 1 - pathFSize + , searchEnd = prefFSize + , found = false + , pathBuf = new Buffer(obj.path) + + for ( var s = searchStart + ; (s <= searchEnd) + ; s ++ ) { + if (pathBuf[s] === slash || pathBuf[s] === bslash) { + found = s + break + } + } + + if (found !== false) { + var prefix = pathBuf.slice(0, found).toString("utf8") + var path = pathBuf.slice(found + 1).toString("utf8") + + ret = writeText(block, off, end, path) + off = fieldOffs[fields.prefix] + end = fieldEnds[fields.prefix] + // console.error("%% header writing prefix", off, end, prefix) + ret = writeText(block, off, end, prefix) || ret + break + } + } + + // paths less than 100 chars don't need a prefix + // and paths longer than 255 need an extended header and will fail + // on old implementations no matter what we do here. + // Null out the prefix, and fallthrough to default. + // console.error("%% header writing no prefix") + var poff = fieldOffs[fields.prefix] + , pend = fieldEnds[fields.prefix] + writeText(block, poff, pend, "") + // fallthrough + + // all other fields are numeric or text + default: + ret = numeric[field] + ? writeNumeric(block, off, end, obj[field]) + : writeText(block, off, end, obj[field] || "") + break + } + obj.needExtended = obj.needExtended || ret + } + + var off = fieldOffs[fields.cksum] + , end = fieldEnds[fields.cksum] + + writeNumeric(block, off, end, calcSum.call(this, block)) + + return block +} + +// if it's a negative number, or greater than will fit, +// then use write256. +var MAXNUM = { 12: 077777777777 + , 11: 07777777777 + , 8 : 07777777 + , 7 : 0777777 } +function writeNumeric (block, off, end, num) { + var writeLen = end - off + , maxNum = MAXNUM[writeLen] || 0 + + num = num || 0 + // console.error(" numeric", num) + + if (num instanceof Date || + Object.prototype.toString.call(num) === "[object Date]") { + num = num.getTime() / 1000 + } + + if (num > maxNum || num < 0) { + write256(block, off, end, num) + // need an extended header if negative or too big.
+ return true + } + + // god, tar is so annoying + // if the string is small enough, you should put a space + // between the octal string and the \0, but if it doesn't + // fit, then don't. + var numStr = Math.floor(num).toString(8) + if (num < MAXNUM[writeLen - 1]) numStr += " " + + // pad with "0" chars + if (numStr.length < writeLen) { + numStr = (new Array(writeLen - numStr.length).join("0")) + numStr + } + + if (numStr.length !== writeLen - 1) { + throw new Error("invalid length: " + JSON.stringify(numStr) + "\n" + + "expected: " + (writeLen - 1)) + } + block.write(numStr, off, writeLen, "utf8") + block[end - 1] = 0 +} + +function write256 (block, off, end, num) { + var buf = block.slice(off, end) + var positive = num >= 0 + buf[0] = positive ? 0x80 : 0xFF + + // get the number as a base-256 tuple + if (!positive) num *= -1 + var tuple = [] + do { + var n = num % 256 + tuple.push(n) + num = (num - n) / 256 + } while (num) + + var bytes = tuple.length + + var fill = buf.length - bytes + for (var i = 1; i < fill; i ++) { + buf[i] = positive ? 0 : 0xFF + } + + // tuple is a base256 number, with [0] as the *least* significant byte + // if it's negative, then we need to flip all the bits once we hit the + // first non-zero bit. The 2's-complement is (0x100 - n), and the 1's- + // complement is (0xFF - n). + var zero = true + for (i = bytes; i > 0; i --) { + var byte = tuple[bytes - i] + if (positive) buf[fill + i - 1] = byte + else if (zero && byte === 0) buf[fill + i - 1] = 0 + else if (zero) { + zero = false + buf[fill + i - 1] = 0x100 - byte + } else buf[fill + i - 1] = 0xFF - byte + } +} + +function writeText (block, off, end, str) { + // strings are written as utf8, then padded with \0 + var strLen = Buffer.byteLength(str) + , writeLen = Math.min(strLen, end - off) + // non-ascii fields need extended headers + // long fields get truncated + , needExtended = strLen !== str.length || strLen > writeLen + + // write the string, and null-pad + if (writeLen > 0) block.write(str, off, writeLen, "utf8") + for (var i = off + writeLen; i < end; i ++) block[i] = 0 + + return needExtended +} + +function calcSum (block) { + block = block || this.block + assert(Buffer.isBuffer(block) && block.length === 512) + + if (!block) throw new Error("Need block to checksum") + + // now figure out what it would be if the cksum was " " + var sum = 0 + , start = fieldOffs[fields.cksum] + , end = fieldEnds[fields.cksum] + + for (var i = 0; i < fieldOffs[fields.cksum]; i ++) { + sum += block[i] + } + + for (var i = start; i < end; i ++) { + sum += space + } + + for (var i = end; i < 512; i ++) { + sum += block[i] + } + + return sum +} + + +function checkSum (block) { + var sum = calcSum.call(this, block) + block = block || this.block + + var cksum = block.slice(fieldOffs[fields.cksum], fieldEnds[fields.cksum]) + cksum = parseNumeric(cksum) + + return cksum === sum +} + +function decode (block) { + block = block || this.block + assert(Buffer.isBuffer(block) && block.length === 512) + + this.block = block + this.cksumValid = this.checkSum() + + var prefix = null + + // slice off each field. + for (var f = 0; fields[f] !== null; f ++) { + var field = fields[f] + , val = block.slice(fieldOffs[f], fieldEnds[f]) + + switch (field) { + case "ustar": + // if not ustar, then everything after that is just padding.
+ if (val.toString() !== "ustar\0") { + this.ustar = false + return + } else { + // console.error("ustar:", val, val.toString()) + this.ustar = val.toString() + } + break + + // prefix is special, since it might signal the xstar header + case "prefix": + var atime = parseNumeric(val.slice(131, 131 + 12)) + , ctime = parseNumeric(val.slice(131 + 12, 131 + 12 + 12)) + if ((val[130] === 0 || val[130] === space) && + typeof atime === "number" && + typeof ctime === "number" && + val[131 + 12] === space && + val[131 + 12 + 12] === space) { + this.atime = atime + this.ctime = ctime + val = val.slice(0, 130) + } + prefix = val.toString("utf8").replace(/\0+$/, "") + // console.error("%% header reading prefix", prefix) + break + + // all other fields are null-padding text + // or a number. + default: + if (numeric[field]) { + this[field] = parseNumeric(val) + } else { + this[field] = val.toString("utf8").replace(/\0+$/, "") + } + break + } + } + + // if we got a prefix, then prepend it to the path. + if (prefix) { + this.path = prefix + "/" + this.path + // console.error("%% header got a prefix", this.path) + } +} + +function parse256 (buf) { + // first byte MUST be either 80 or FF + // 80 for positive, FF for 2's comp + var positive + if (buf[0] === 0x80) positive = true + else if (buf[0] === 0xFF) positive = false + else return null + + // build up a base-256 tuple from the least sig to the highest + var zero = false + , tuple = [] + for (var i = buf.length - 1; i > 0; i --) { + var byte = buf[i] + if (positive) tuple.push(byte) + else if (zero && byte === 0) tuple.push(0) + else if (zero) { + zero = false + tuple.push(0x100 - byte) + } else tuple.push(0xFF - byte) + } + + for (var sum = 0, i = 0, l = tuple.length; i < l; i ++) { + sum += tuple[i] * Math.pow(256, i) + } + + return positive ? sum : -1 * sum +} + +function parseNumeric (f) { + if (f[0] & 0x80) return parse256(f) + + var str = f.toString("utf8").split("\0")[0].trim() + , res = parseInt(str, 8) + + return isNaN(res) ? null : res +} + diff --git a/node_modules/node-gyp/node_modules/tar/lib/pack.js b/node_modules/node-gyp/node_modules/tar/lib/pack.js new file mode 100644 index 00000000000..5a3bb95a121 --- /dev/null +++ b/node_modules/node-gyp/node_modules/tar/lib/pack.js @@ -0,0 +1,236 @@ +// pipe in an fstream, and it'll make a tarball. +// key-value pair argument is global extended header props. 
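+//
+// a minimal usage sketch (hypothetical paths; fstream is a dependency of this package):
+//
+//   var tar = require("tar")
+//     , fstream = require("fstream")
+//     , fs = require("fs")
+//
+//   fstream.Reader({ path: "some/dir", type: "Directory" })
+//     .pipe(tar.Pack({ noProprietary: true }))
+//     .pipe(fs.createWriteStream("out.tar"))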
+ +module.exports = Pack + +var EntryWriter = require("./entry-writer.js") + , Stream = require("stream").Stream + , path = require("path") + , inherits = require("inherits") + , GlobalHeaderWriter = require("./global-header-writer.js") + , collect = require("fstream").collect + , eof = new Buffer(512) + +for (var i = 0; i < 512; i ++) eof[i] = 0 + +inherits(Pack, Stream) + +function Pack (props) { + // console.error("-- p ctor") + var me = this + if (!(me instanceof Pack)) return new Pack(props) + + if (props) me._noProprietary = props.noProprietary + else me._noProprietary = false + + me._global = props + + me.readable = true + me.writable = true + me._buffer = [] + // console.error("-- -- set current to null in ctor") + me._currentEntry = null + me._processing = false + + me._pipeRoot = null + me.on("pipe", function (src) { + if (src.root === me._pipeRoot) return + me._pipeRoot = src + src.on("end", function () { + me._pipeRoot = null + }) + me.add(src) + }) +} + +Pack.prototype.addGlobal = function (props) { + // console.error("-- p addGlobal") + if (this._didGlobal) return + this._didGlobal = true + + var me = this + GlobalHeaderWriter(props) + .on("data", function (c) { + me.emit("data", c) + }) + .end() +} + +Pack.prototype.add = function (stream) { + if (this._global && !this._didGlobal) this.addGlobal(this._global) + + if (this._ended) return this.emit("error", new Error("add after end")) + + collect(stream) + this._buffer.push(stream) + this._process() + this._needDrain = this._buffer.length > 0 + return !this._needDrain +} + +Pack.prototype.pause = function () { + this._paused = true + if (this._currentEntry) this._currentEntry.pause() + this.emit("pause") +} + +Pack.prototype.resume = function () { + this._paused = false + if (this._currentEntry) this._currentEntry.resume() + this.emit("resume") + this._process() +} + +Pack.prototype.end = function () { + this._ended = true + this._buffer.push(eof) + this._process() +} + +Pack.prototype._process = function () { + var me = this + if (me._paused || me._processing) { + return + } + + var entry = me._buffer.shift() + + if (!entry) { + if (me._needDrain) { + me.emit("drain") + } + return + } + + if (entry.ready === false) { + // console.error("-- entry is not ready", entry) + me._buffer.unshift(entry) + entry.on("ready", function () { + // console.error("-- -- ready!", entry) + me._process() + }) + return + } + + me._processing = true + + if (entry === eof) { + // need 2 ending null blocks. + me.emit("data", eof) + me.emit("data", eof) + me.emit("end") + me.emit("close") + return + } + + // Change the path to be relative to the root dir that was + // added to the tarball. + // + // XXX This should be more like how -C works, so you can + // explicitly set a root dir, and also explicitly set a pathname + // in the tarball to use. That way we can skip a lot of extra + // work when resolving symlinks for bundled dependencies in npm. + + var root = path.dirname((entry.root || entry).path); + if (me._global && me._global.fromBase && entry.root && entry.root.path) { + // user set 'fromBase: true' indicating tar root should be directory itself + root = entry.root.path; + } + + var wprops = {} + + Object.keys(entry.props || {}).forEach(function (k) { + wprops[k] = entry.props[k] + }) + + if (me._noProprietary) wprops.noProprietary = true + + wprops.path = path.relative(root, entry.path || '') + + // actually not a matter of opinion or taste. 
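+ // (the ustar format mandates "/" as the path separator, even on Windows)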
+ if (process.platform === "win32") { + wprops.path = wprops.path.replace(/\\/g, "/") + } + + if (!wprops.type) + wprops.type = 'Directory' + + switch (wprops.type) { + // sockets not supported + case "Socket": + return + + case "Directory": + wprops.path += "/" + wprops.size = 0 + break + + case "Link": + var lp = path.resolve(path.dirname(entry.path), entry.linkpath) + wprops.linkpath = path.relative(root, lp) || "." + wprops.size = 0 + break + + case "SymbolicLink": + var lp = path.resolve(path.dirname(entry.path), entry.linkpath) + wprops.linkpath = path.relative(path.dirname(entry.path), lp) || "." + wprops.size = 0 + break + } + + // console.error("-- new writer", wprops) + // if (!wprops.type) { + // // console.error("-- no type?", entry.constructor.name, entry) + // } + + // console.error("-- -- set current to new writer", wprops.path) + var writer = me._currentEntry = EntryWriter(wprops) + + writer.parent = me + + // writer.on("end", function () { + // // console.error("-- -- writer end", writer.path) + // }) + + writer.on("data", function (c) { + me.emit("data", c) + }) + + writer.on("header", function () { + Buffer.prototype.toJSON = function () { + return this.toString().split(/\0/).join(".") + } + // console.error("-- -- writer header %j", writer.props) + if (writer.props.size === 0) nextEntry() + }) + writer.on("close", nextEntry) + + var ended = false + function nextEntry () { + if (ended) return + ended = true + + // console.error("-- -- writer close", writer.path) + // console.error("-- -- set current to null", wprops.path) + me._currentEntry = null + me._processing = false + me._process() + } + + writer.on("error", function (er) { + // console.error("-- -- writer error", writer.path) + me.emit("error", er) + }) + + // if it's the root, then there's no need to add its entries, + // or data, since they'll be added directly. + if (entry === me._pipeRoot) { + // console.error("-- is the root, don't auto-add") + writer.add = null + } + + entry.pipe(writer) +} + +Pack.prototype.destroy = function () {} +Pack.prototype.write = function () {} diff --git a/node_modules/node-gyp/node_modules/tar/lib/parse.js b/node_modules/node-gyp/node_modules/tar/lib/parse.js new file mode 100644 index 00000000000..600ad782f0f --- /dev/null +++ b/node_modules/node-gyp/node_modules/tar/lib/parse.js @@ -0,0 +1,275 @@ + +// A writable stream. +// It emits "entry" events, which provide a readable stream that has +// header info attached. + +module.exports = Parse.create = Parse + +var stream = require("stream") + , Stream = stream.Stream + , BlockStream = require("block-stream") + , tar = require("../tar.js") + , TarHeader = require("./header.js") + , Entry = require("./entry.js") + , BufferEntry = require("./buffer-entry.js") + , ExtendedHeader = require("./extended-header.js") + , assert = require("assert").ok + , inherits = require("inherits") + , fstream = require("fstream") + +// reading a tar is a lot like reading a directory +// However, we're actually not going to run the ctor, +// since it does a stat and various other stuff. +// This inheritance gives us the pause/resume/pipe +// behavior that is desired. +inherits(Parse, fstream.Reader) + +function Parse () { + var me = this + if (!(me instanceof Parse)) return new Parse() + + // doesn't apply fstream.Reader ctor? 
+ // no, because we don't want to stat/etc, we just + // want to get the entry/add logic from .pipe() + Stream.apply(me) + + me.writable = true + me.readable = true + me._stream = new BlockStream(512) + me.position = 0 + me._ended = false + + me._stream.on("error", function (e) { + me.emit("error", e) + }) + + me._stream.on("data", function (c) { + me._process(c) + }) + + me._stream.on("end", function () { + me._streamEnd() + }) + + me._stream.on("drain", function () { + me.emit("drain") + }) +} + +// overridden in Extract class, since it needs to +// wait for its DirWriter part to finish before +// emitting "end" +Parse.prototype._streamEnd = function () { + var me = this + if (!me._ended || me._entry) me.error("unexpected eof") + me.emit("end") +} + +// a tar reader is actually a filter, not just a readable stream. +// So, you should pipe a tarball stream into it, and it needs these +// write/end methods to do that. +Parse.prototype.write = function (c) { + if (this._ended) { + // gnutar puts a LOT of nulls at the end. + // you can keep writing these things forever. + // Just ignore them. + for (var i = 0, l = c.length; i < l; i ++) { + if (c[i] !== 0) return this.error("write() after end()") + } + return + } + return this._stream.write(c) +} + +Parse.prototype.end = function (c) { + this._ended = true + return this._stream.end(c) +} + +// don't need to do anything, since we're just +// proxying the data up from the _stream. +// Just need to override the parent's "Not Implemented" +// error-thrower. +Parse.prototype._read = function () {} + +Parse.prototype._process = function (c) { + assert(c && c.length === 512, "block size should be 512") + + // one of three cases. + // 1. A new header + // 2. A part of a file/extended header + // 3. One of two or more EOF null blocks + + if (this._entry) { + var entry = this._entry + if(!entry._abort) entry.write(c) + else { + entry._remaining -= c.length + if(entry._remaining < 0) entry._remaining = 0 + } + if (entry._remaining === 0) { + entry.end() + this._entry = null + } + } else { + // either zeroes or a header + var zero = true + for (var i = 0; i < 512 && zero; i ++) { + zero = c[i] === 0 + } + + // eof is *at least* 2 blocks of nulls, and then the end of the + // file. you can put blocks of nulls between entries anywhere, + // so appending one tarball to another is technically valid. + // ending without the eof null blocks is not allowed, however. + if (zero) { + if (this._eofStarted) + this._ended = true + this._eofStarted = true + } else { + this._eofStarted = false + this._startEntry(c) + } + } + + this.position += 512 +} + +// take a header chunk, start the right kind of entry. +Parse.prototype._startEntry = function (c) { + var header = new TarHeader(c) + , self = this + , entry + , ev + , EntryType + , onend + , meta = false + + if (null === header.size || !header.cksumValid) { + var e = new Error("invalid tar file") + e.header = header + e.tar_file_offset = this.position + e.tar_block = this.position / 512 + return this.emit("error", e) + } + + switch (tar.types[header.type]) { + case "File": + case "OldFile": + case "Link": + case "SymbolicLink": + case "CharacterDevice": + case "BlockDevice": + case "Directory": + case "FIFO": + case "ContiguousFile": + case "GNUDumpDir": + // start a file. + // pass in any extended headers + // These are the entry types consumers are typically most interested in.
+ EntryType = Entry + ev = "entry" + break + + case "GlobalExtendedHeader": + // extended headers that apply to the rest of the tarball + EntryType = ExtendedHeader + onend = function () { + self._global = self._global || {} + Object.keys(entry.fields).forEach(function (k) { + self._global[k] = entry.fields[k] + }) + } + ev = "globalExtendedHeader" + meta = true + break + + case "ExtendedHeader": + case "OldExtendedHeader": + // extended headers that apply to the next entry + EntryType = ExtendedHeader + onend = function () { + self._extended = entry.fields + } + ev = "extendedHeader" + meta = true + break + + case "NextFileHasLongLinkpath": + // set linkpath= in extended header + EntryType = BufferEntry + onend = function () { + self._extended = self._extended || {} + self._extended.linkpath = entry.body + } + ev = "longLinkpath" + meta = true + break + + case "NextFileHasLongPath": + case "OldGnuLongPath": + // set path= in file-extended header + EntryType = BufferEntry + onend = function () { + self._extended = self._extended || {} + self._extended.path = entry.body + } + ev = "longPath" + meta = true + break + + default: + // all the rest we skip, but still set the _entry + // member, so that we can skip over their data appropriately. + // emit an event to say that this is an ignored entry type? + EntryType = Entry + ev = "ignoredEntry" + break + } + + var global, extended + if (meta) { + global = extended = null + } else { + global = this._global + extended = this._extended + + // extendedHeader only applies to one entry, so once we start + // an entry, it's over. + this._extended = null + } + entry = new EntryType(header, extended, global) + entry.meta = meta + var me = this + + // only proxy data events of normal files. + if (!meta) { + entry.on("data", function (c) { + me.emit("data", c) + }) + } + + if (onend) entry.on("end", onend) + + this._entry = entry + + entry.on("pause", function () { + me.pause() + }) + + entry.on("resume", function () { + me.resume() + }) + + if (this.listeners("*").length) { + this.emit("*", ev, entry) + } + + this.emit(ev, entry) + + // Zero-byte entry. End immediately.
+ if (entry.props.size === 0) { + entry.end() + this._entry = null + } +} diff --git a/node_modules/tar/node_modules/block-stream/LICENCE b/node_modules/node-gyp/node_modules/tar/node_modules/block-stream/LICENCE similarity index 100% rename from node_modules/tar/node_modules/block-stream/LICENCE rename to node_modules/node-gyp/node_modules/tar/node_modules/block-stream/LICENCE diff --git a/node_modules/pacote/node_modules/tar/LICENSE b/node_modules/node-gyp/node_modules/tar/node_modules/block-stream/LICENSE similarity index 100% rename from node_modules/pacote/node_modules/tar/LICENSE rename to node_modules/node-gyp/node_modules/tar/node_modules/block-stream/LICENSE diff --git a/node_modules/tar/node_modules/block-stream/README.md b/node_modules/node-gyp/node_modules/tar/node_modules/block-stream/README.md similarity index 100% rename from node_modules/tar/node_modules/block-stream/README.md rename to node_modules/node-gyp/node_modules/tar/node_modules/block-stream/README.md diff --git a/node_modules/tar/node_modules/block-stream/block-stream.js b/node_modules/node-gyp/node_modules/tar/node_modules/block-stream/block-stream.js similarity index 100% rename from node_modules/tar/node_modules/block-stream/block-stream.js rename to node_modules/node-gyp/node_modules/tar/node_modules/block-stream/block-stream.js diff --git a/node_modules/tar/node_modules/block-stream/package.json b/node_modules/node-gyp/node_modules/tar/node_modules/block-stream/package.json similarity index 69% rename from node_modules/tar/node_modules/block-stream/package.json rename to node_modules/node-gyp/node_modules/tar/node_modules/block-stream/package.json index 045ca8d2431..bf449e9633b 100644 --- a/node_modules/tar/node_modules/block-stream/package.json +++ b/node_modules/node-gyp/node_modules/tar/node_modules/block-stream/package.json @@ -1,41 +1,43 @@ { - "_from": "block-stream@*", + "_args": [ + [ + "block-stream@0.0.9", + "/Users/rebecca/code/npm" + ] + ], + "_from": "block-stream@0.0.9", "_id": "block-stream@0.0.9", + "_inBundle": false, "_integrity": "sha1-E+v+d4oDIFz+A3UUgeu0szAMEmo=", - "_location": "/tar/block-stream", + "_location": "/node-gyp/tar/block-stream", "_phantomChildren": {}, "_requested": { - "type": "range", + "type": "version", "registry": true, - "raw": "block-stream@*", + "raw": "block-stream@0.0.9", "name": "block-stream", "escapedName": "block-stream", - "rawSpec": "*", + "rawSpec": "0.0.9", "saveSpec": null, - "fetchSpec": "*" + "fetchSpec": "0.0.9" }, "_requiredBy": [ - "/tar" + "/node-gyp/tar" ], "_resolved": "https://registry.npmjs.org/block-stream/-/block-stream-0.0.9.tgz", - "_shasum": "13ebfe778a03205cfe03751481ebb4b3300c126a", - "_shrinkwrap": null, - "_spec": "block-stream@*", - "_where": "/Users/zkat/Documents/code/npm/node_modules/tar", + "_spec": "0.0.9", + "_where": "/Users/rebecca/code/npm", "author": { "name": "Isaac Z. 
Schlueter", "email": "i@izs.me", "url": "http://blog.izs.me/" }, - "bin": null, "bugs": { "url": "https://github.com/isaacs/block-stream/issues" }, - "bundleDependencies": false, "dependencies": { "inherits": "~2.0.0" }, - "deprecated": false, "description": "a stream of blocks", "devDependencies": { "tap": "^5.7.1" @@ -50,8 +52,6 @@ "license": "ISC", "main": "block-stream.js", "name": "block-stream", - "optionalDependencies": {}, - "peerDependencies": {}, "repository": { "type": "git", "url": "git://github.com/isaacs/block-stream.git" diff --git a/node_modules/node-gyp/node_modules/tar/package.json b/node_modules/node-gyp/node_modules/tar/package.json new file mode 100644 index 00000000000..0cb9624034e --- /dev/null +++ b/node_modules/node-gyp/node_modules/tar/package.json @@ -0,0 +1,64 @@ +{ + "_args": [ + [ + "tar@2.2.1", + "/Users/rebecca/code/npm" + ] + ], + "_from": "tar@2.2.1", + "_id": "tar@2.2.1", + "_inBundle": false, + "_integrity": "sha1-jk0qJWwOIYXGsYrWlK7JaLg8sdE=", + "_location": "/node-gyp/tar", + "_phantomChildren": { + "inherits": "2.0.3" + }, + "_requested": { + "type": "version", + "registry": true, + "raw": "tar@2.2.1", + "name": "tar", + "escapedName": "tar", + "rawSpec": "2.2.1", + "saveSpec": null, + "fetchSpec": "2.2.1" + }, + "_requiredBy": [ + "/node-gyp" + ], + "_resolved": "https://registry.npmjs.org/tar/-/tar-2.2.1.tgz", + "_spec": "2.2.1", + "_where": "/Users/rebecca/code/npm", + "author": { + "name": "Isaac Z. Schlueter", + "email": "i@izs.me", + "url": "http://blog.izs.me/" + }, + "bugs": { + "url": "https://github.com/isaacs/node-tar/issues" + }, + "dependencies": { + "block-stream": "*", + "fstream": "^1.0.2", + "inherits": "2" + }, + "description": "tar for node", + "devDependencies": { + "graceful-fs": "^4.1.2", + "mkdirp": "^0.5.0", + "rimraf": "1.x", + "tap": "0.x" + }, + "homepage": "https://github.com/isaacs/node-tar#readme", + "license": "ISC", + "main": "tar.js", + "name": "tar", + "repository": { + "type": "git", + "url": "git://github.com/isaacs/node-tar.git" + }, + "scripts": { + "test": "tap test/*.js" + }, + "version": "2.2.1" +} diff --git a/node_modules/tar/tar.js b/node_modules/node-gyp/node_modules/tar/tar.js similarity index 100% rename from node_modules/tar/tar.js rename to node_modules/node-gyp/node_modules/tar/tar.js diff --git a/node_modules/tar/test/00-setup-fixtures.js b/node_modules/node-gyp/node_modules/tar/test/00-setup-fixtures.js similarity index 100% rename from node_modules/tar/test/00-setup-fixtures.js rename to node_modules/node-gyp/node_modules/tar/test/00-setup-fixtures.js diff --git a/node_modules/tar/test/cb-never-called-1.0.1.tgz b/node_modules/node-gyp/node_modules/tar/test/cb-never-called-1.0.1.tgz similarity index 100% rename from node_modules/tar/test/cb-never-called-1.0.1.tgz rename to node_modules/node-gyp/node_modules/tar/test/cb-never-called-1.0.1.tgz diff --git a/node_modules/tar/test/dir-normalization.js b/node_modules/node-gyp/node_modules/tar/test/dir-normalization.js similarity index 100% rename from node_modules/tar/test/dir-normalization.js rename to node_modules/node-gyp/node_modules/tar/test/dir-normalization.js diff --git a/node_modules/tar/test/dir-normalization.tar b/node_modules/node-gyp/node_modules/tar/test/dir-normalization.tar similarity index 100% rename from node_modules/tar/test/dir-normalization.tar rename to node_modules/node-gyp/node_modules/tar/test/dir-normalization.tar diff --git a/node_modules/tar/test/error-on-broken.js 
b/node_modules/node-gyp/node_modules/tar/test/error-on-broken.js similarity index 100% rename from node_modules/tar/test/error-on-broken.js rename to node_modules/node-gyp/node_modules/tar/test/error-on-broken.js diff --git a/node_modules/tar/test/extract-move.js b/node_modules/node-gyp/node_modules/tar/test/extract-move.js similarity index 100% rename from node_modules/tar/test/extract-move.js rename to node_modules/node-gyp/node_modules/tar/test/extract-move.js diff --git a/node_modules/tar/test/extract.js b/node_modules/node-gyp/node_modules/tar/test/extract.js similarity index 100% rename from node_modules/tar/test/extract.js rename to node_modules/node-gyp/node_modules/tar/test/extract.js diff --git a/node_modules/tar/test/fixtures.tgz b/node_modules/node-gyp/node_modules/tar/test/fixtures.tgz similarity index 100% rename from node_modules/tar/test/fixtures.tgz rename to node_modules/node-gyp/node_modules/tar/test/fixtures.tgz diff --git a/node_modules/tar/test/header.js b/node_modules/node-gyp/node_modules/tar/test/header.js similarity index 100% rename from node_modules/tar/test/header.js rename to node_modules/node-gyp/node_modules/tar/test/header.js diff --git a/node_modules/tar/test/pack-no-proprietary.js b/node_modules/node-gyp/node_modules/tar/test/pack-no-proprietary.js similarity index 100% rename from node_modules/tar/test/pack-no-proprietary.js rename to node_modules/node-gyp/node_modules/tar/test/pack-no-proprietary.js diff --git a/node_modules/tar/test/pack.js b/node_modules/node-gyp/node_modules/tar/test/pack.js similarity index 100% rename from node_modules/tar/test/pack.js rename to node_modules/node-gyp/node_modules/tar/test/pack.js diff --git a/node_modules/tar/test/parse-discard.js b/node_modules/node-gyp/node_modules/tar/test/parse-discard.js similarity index 100% rename from node_modules/tar/test/parse-discard.js rename to node_modules/node-gyp/node_modules/tar/test/parse-discard.js diff --git a/node_modules/tar/test/parse.js b/node_modules/node-gyp/node_modules/tar/test/parse.js similarity index 100% rename from node_modules/tar/test/parse.js rename to node_modules/node-gyp/node_modules/tar/test/parse.js diff --git a/node_modules/tar/test/zz-cleanup.js b/node_modules/node-gyp/node_modules/tar/test/zz-cleanup.js similarity index 100% rename from node_modules/tar/test/zz-cleanup.js rename to node_modules/node-gyp/node_modules/tar/test/zz-cleanup.js diff --git a/node_modules/pacote/node_modules/tar/README.md b/node_modules/pacote/node_modules/tar/README.md deleted file mode 100644 index a356a78da20..00000000000 --- a/node_modules/pacote/node_modules/tar/README.md +++ /dev/null @@ -1,883 +0,0 @@ -# node-tar - -[![Build Status](https://travis-ci.org/npm/node-tar.svg?branch=master)](https://travis-ci.org/npm/node-tar) - -[Fast](./benchmarks) and full-featured Tar for Node.js - -The API is designed to mimic the behavior of `tar(1)` on unix systems. -If you are familiar with how tar works, most of this will hopefully be -straightforward for you. If not, then hopefully this module can teach -you useful unix skills that may come in handy someday :) - -## Background - -A "tar file" or "tarball" is an archive of file system entries -(directories, files, links, etc.) The name comes from "tape archive". -If you run `man tar` on almost any Unix command line, you'll learn -quite a bit about what it can do, and its history. 
- -Tar has 5 main top-level commands: - -* `c` Create an archive -* `r` Replace entries within an archive -* `u` Update entries within an archive (ie, replace if they're newer) -* `t` List out the contents of an archive -* `x` Extract an archive to disk - -The other flags and options modify how this top level function works. - -## High-Level API - -These 5 functions are the high-level API. All of them have a -single-character name (for unix nerds familiar with `tar(1)`) as well -as a long name (for everyone else). - -All the high-level functions take the following arguments, all three -of which are optional and may be omitted. - -1. `options` - An optional object specifying various options -2. `paths` - An array of paths to add or extract -3. `callback` - Called when the command is completed, if async. (If - sync or no file specified, providing a callback throws a - `TypeError`.) - -If the command is sync (ie, if `options.sync=true`), then the -callback is not allowed, since the action will be completed immediately. - -If a `file` argument is specified, and the command is async, then a -`Promise` is returned. In this case, a callback may also be -provided which is called when the command is completed. - -If a `file` option is not specified, then a stream is returned. For -`create`, this is a readable stream of the generated archive. For -`list` and `extract` this is a writable stream that an archive should -be written into. If a file is not specified, then a callback is not -allowed, because you're already getting a stream to work with. - -`replace` and `update` only work on existing archives, and so require -a `file` argument. - -Sync commands without a file argument return a stream that acts on its -input immediately in the same tick. For readable streams, this means -that all of the data is immediately available by calling -`stream.read()`. For writable streams, it will be acted upon as soon -as it is provided, but this can be at any time. - -### Warnings - -Some things cause tar to emit a warning, but should usually not cause -the entire operation to fail. There are three ways to handle -warnings: - -1. **Ignore them** (default) Invalid entries won't be put in the - archive, and invalid entries won't be unpacked. This is usually - fine, but can hide failures that you might care about. -2. **Notice them** Add an `onwarn` function to the options, or listen - to the `'warn'` event on any tar stream. The function will get - called as `onwarn(message, data)`. Handle as appropriate. -3. **Explode them.** Set `strict: true` in the options object, and - `warn` messages will be emitted as `'error'` events instead. If - there's no `error` handler, this causes the program to crash. If - used with a promise-returning/callback-taking method, then it'll - send the error to the promise/callback. - -### Examples - -The API mimics the `tar(1)` command line functionality, with aliases -for more human-readable option and function names. The goal is that -if you know how to use `tar(1)` in Unix, then you know how to use -`require('tar')` in JavaScript. - -To replicate `tar czf my-tarball.tgz files and folders`, you'd do: - -```js -tar.c( - { - gzip: <true|gzip options>, - file: 'my-tarball.tgz' - }, - ['some', 'files', 'and', 'folders'] -).then(_ => { .. tarball has been created ..
}) -``` - -To replicate `tar cz files and folders > my-tarball.tgz`, you'd do: - -```js -tar.c( // or tar.create - { - gzip: - }, - ['some', 'files', 'and', 'folders'] -).pipe(fs.createWriteStream('my-tarball.tgz') -``` - -To replicate `tar xf my-tarball.tgz` you'd do: - -```js -tar.x( // or tar.extract( - { - file: 'my-tarball.tgz' - } -).then(_=> { .. tarball has been dumped in cwd .. }) -``` - -To replicate `cat my-tarball.tgz | tar x -C some-dir --strip=1`: - -```js -fs.createReadStream('my-tarball.tgz').pipe( - tar.x({ - strip: 1, - C: 'some-dir' // alias for cwd:'some-dir', also ok - }) -) -``` - -To replicate `tar tf my-tarball.tgz`, do this: - -```js -tar.t({ - file: 'my-tarball.tgz', - onentry: entry => { .. do whatever with it .. } -}) -``` - -To replicate `cat my-tarball.tgz | tar t` do: - -```js -fs.createReadStream('my-tarball.tgz') - .pipe(tar.t()) - .on('entry', entry => { .. do whatever with it .. }) -``` - -To do anything synchronous, add `sync: true` to the options. Note -that sync functions don't take a callback and don't return a promise. -When the function returns, it's already done. Sync methods without a -file argument return a sync stream, which flushes immediately. But, -of course, it still won't be done until you `.end()` it. - -To filter entries, add `filter: ` to the options. -Tar-creating methods call the filter with `filter(path, stat)`. -Tar-reading methods (including extraction) call the filter with -`filter(path, entry)`. The filter is called in the `this`-context of -the `Pack` or `Unpack` stream object. - -The arguments list to `tar t` and `tar x` specify a list of filenames -to extract or list, so they're equivalent to a filter that tests if -the file is in the list. - -For those who _aren't_ fans of tar's single-character command names: - -``` -tar.c === tar.create -tar.r === tar.replace (appends to archive, file is required) -tar.u === tar.update (appends if newer, file is required) -tar.x === tar.extract -tar.t === tar.list -``` - -Keep reading for all the command descriptions and options, as well as -the low-level API that they are built on. - -### tar.c(options, fileList, callback) [alias: tar.create] - -Create a tarball archive. - -The `fileList` is an array of paths to add to the tarball. Adding a -directory also adds its children recursively. - -An entry in `fileList` that starts with an `@` symbol is a tar archive -whose entries will be added. To add a file that starts with `@`, -prepend it with `./`. - -The following options are supported: - -- `file` Write the tarball archive to the specified filename. If this - is specified, then the callback will be fired when the file has been - written, and a promise will be returned that resolves when the file - is written. If a filename is not specified, then a Readable Stream - will be returned which will emit the file data. [Alias: `f`] -- `sync` Act synchronously. If this is set, then any provided file - will be fully written after the call to `tar.c`. If this is set, - and a file is not provided, then the resulting stream will already - have the data ready to `read` or `emit('data')` as soon as you - request it. -- `onwarn` A function that will get called with `(message, data)` for - any warnings encountered. -- `strict` Treat warnings as crash-worthy errors. Default false. -- `cwd` The current working directory for creating the archive. - Defaults to `process.cwd()`. [Alias: `C`] -- `prefix` A path portion to prefix onto the entries in the archive. 
-- `gzip` Set to any truthy value to create a gzipped archive, or an - object with settings for `zlib.Gzip()` [Alias: `z`] -- `filter` A function that gets called with `(path, stat)` for each - entry being added. Return `true` to add the entry to the archive, - or `false` to omit it. -- `portable` Omit metadata that is system-specific: `ctime`, `atime`, - `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note - that `mtime` is still included, because this is necessary for other - time-based operations. -- `preservePaths` Allow absolute paths. By default, `/` is stripped - from absolute paths. [Alias: `P`] -- `mode` The mode to set on the created file archive. -- `noDirRecurse` Do not recursively archive the contents of - directories. [Alias: `n`] -- `follow` Set to true to pack the targets of symbolic links. Without - this option, symbolic links are archived as such. [Alias: `L`, `h`] -- `noPax` Suppress pax extended headers. Note that this means that - long paths and linkpaths will be truncated, and large or negative - numeric values may be interpreted incorrectly. - -The following options are mostly internal, but can be modified in some -advanced use cases, such as re-using caches between runs. - -- `linkCache` A Map object containing the device and inode value for - any file whose nlink is > 1, to identify hard links. -- `statCache` A Map object that caches calls to `lstat`. -- `readdirCache` A Map object that caches calls to `readdir`. -- `jobs` A number specifying how many concurrent jobs to run. - Defaults to 4. -- `maxReadSize` The maximum buffer size for `fs.read()` operations. - Defaults to 16 MB. - -### tar.x(options, fileList, callback) [alias: tar.extract] - -Extract a tarball archive. - -The `fileList` is an array of paths to extract from the tarball. If -no paths are provided, then all the entries are extracted. - -If the archive is gzipped, then tar will detect this and unzip it. - -Note that all directories that are created will be forced to be -writable, readable, and listable by their owner, to avoid cases where -a directory prevents extraction of child entries by virtue of its -mode. - -Most extraction errors will cause a `warn` event to be emitted. If -the `cwd` is missing, or not a directory, then the extraction will -fail completely. - -The following options are supported: - -- `cwd` Extract files relative to the specified directory. Defaults - to `process.cwd()`. If provided, this must exist and must be a - directory. [Alias: `C`] -- `file` The archive file to extract. If not specified, then a - Writable stream is returned where the archive data should be - written. [Alias: `f`] -- `sync` Create files and directories synchronously. -- `strict` Treat warnings as crash-worthy errors. Default false. -- `filter` A function that gets called with `(path, entry)` for each - entry being unpacked. Return `true` to unpack the entry from the - archive, or `false` to skip it. -- `newer` Set to true to keep the existing file on disk if it's newer - than the file in the archive. [Alias: `keep-newer`, - `keep-newer-files`] -- `keep` Do not overwrite existing files. In particular, if a file - appears more than once in an archive, later copies will not - overwrite earlier copies. [Alias: `k`, `keep-existing`] -- `preservePaths` Allow absolute paths, paths containing `..`, and - extracting through symbolic links. By default, `/` is stripped from - absolute paths, `..` paths are not extracted, and any file whose - location would be modified by a symbolic link is not extracted.
- [Alias: `P`] -- `unlink` Unlink files before creating them. Without this option, - tar overwrites existing files, which preserves existing hardlinks. - With this option, existing hardlinks will be broken, as will any - symlink that would affect the location of an extracted file. [Alias: - `U`] -- `strip` Remove the specified number of leading path elements. - Pathnames with fewer elements will be silently skipped. Note that - the pathname is edited after applying the filter, but before - security checks. [Alias: `strip-components`, `stripComponents`] -- `onwarn` A function that will get called with `(message, data)` for - any warnings encountered. -- `preserveOwner` If true, tar will set the `uid` and `gid` of - extracted entries to the `uid` and `gid` fields in the archive. - This defaults to true when run as root, and false otherwise. If - false, then files and directories will be set with the owner and - group of the user running the process. This is similar to `-p` in - `tar(1)`, but ACLs and other system-specific data is never unpacked - in this implementation, and modes are set by default already. - [Alias: `p`] -- `uid` Set to a number to force ownership of all extracted files and - folders, and all implicitly created directories, to be owned by the - specified user id, regardless of the `uid` field in the archive. - Cannot be used along with `preserveOwner`. Requires also setting a - `gid` option. -- `gid` Set to a number to force ownership of all extracted files and - folders, and all implicitly created directories, to be owned by the - specified group id, regardless of the `gid` field in the archive. - Cannot be used along with `preserveOwner`. Requires also setting a - `uid` option. - -The following options are mostly internal, but can be modified in some -advanced use cases, such as re-using caches between runs. - -- `maxReadSize` The maximum buffer size for `fs.read()` operations. - Defaults to 16 MB. -- `umask` Filter the modes of entries like `process.umask()`. -- `dmode` Default mode for directories -- `fmode` Default mode for files -- `dirCache` A Map object of which directories exist. -- `maxMetaEntrySize` The maximum size of meta entries that is - supported. Defaults to 1 MB. - -### tar.t(options, fileList, callback) [alias: tar.list] - -List the contents of a tarball archive. - -The `fileList` is an array of paths to list from the tarball. If -no paths are provided, then all the entries are listed. - -If the archive is gzipped, then tar will detect this and unzip it. - -Returns an event emitter that emits `entry` events with -`tar.ReadEntry` objects. However, they don't emit `'data'` or `'end'` -events. (If you want to get actual readable entries, use the -`tar.Parse` class instead.) - -The following options are supported: - -- `cwd` Extract files relative to the specified directory. Defaults - to `process.cwd()`. [Alias: `C`] -- `file` The archive file to list. If not specified, then a - Writable stream is returned where the archive data should be - written. [Alias: `f`] -- `sync` Read the specified file synchronously. (This has no effect - when a file option isn't specified, because entries are emitted as - fast as they are parsed from the stream anyway.) -- `strict` Treat warnings as crash-worthy errors. Default false. -- `filter` A function that gets called with `(path, entry)` for each - entry being listed. Return `true` to emit the entry from the - archive, or `false` to skip it. 
-- `onentry` A function that gets called with `(entry)` for each entry - that passes the filter. This is important for when both `file` and - `sync` are set, because it will be called synchronously. -- `maxReadSize` The maximum buffer size for `fs.read()` operations. - Defaults to 16 MB. -- `noResume` By default, `entry` streams are resumed immediately after - the call to `onentry`. Set `noResume: true` to suppress this - behavior. Note that by opting into this, the stream will never - complete until the entry data is consumed. - -### tar.u(options, fileList, callback) [alias: tar.update] - -Add files to an archive if they are newer than the entry already in -the tarball archive. - -The `fileList` is an array of paths to add to the tarball. Adding a -directory also adds its children recursively. - -An entry in `fileList` that starts with an `@` symbol is a tar archive -whose entries will be added. To add a file that starts with `@`, -prepend it with `./`. - -The following options are supported: - -- `file` Required. Write the tarball archive to the specified - filename. [Alias: `f`] -- `sync` Act synchronously. If this is set, then any provided file - will be fully written after the call to `tar.u`. -- `onwarn` A function that will get called with `(message, data)` for - any warnings encountered. -- `strict` Treat warnings as crash-worthy errors. Default false. -- `cwd` The current working directory for adding entries to the - archive. Defaults to `process.cwd()`. [Alias: `C`] -- `prefix` A path portion to prefix onto the entries in the archive. -- `gzip` Set to any truthy value to create a gzipped archive, or an - object with settings for `zlib.Gzip()` [Alias: `z`] -- `filter` A function that gets called with `(path, stat)` for each - entry being added. Return `true` to add the entry to the archive, - or `false` to omit it. -- `portable` Omit metadata that is system-specific: `ctime`, `atime`, - `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note - that `mtime` is still included, because this is necessary for other - time-based operations. -- `preservePaths` Allow absolute paths. By default, `/` is stripped - from absolute paths. [Alias: `P`] -- `maxReadSize` The maximum buffer size for `fs.read()` operations. - Defaults to 16 MB. -- `noDirRecurse` Do not recursively archive the contents of - directories. [Alias: `n`] -- `follow` Set to true to pack the targets of symbolic links. Without - this option, symbolic links are archived as such. [Alias: `L`, `h`] -- `noPax` Suppress pax extended headers. Note that this means that - long paths and linkpaths will be truncated, and large or negative - numeric values may be interpreted incorrectly. - -### tar.r(options, fileList, callback) [alias: tar.replace] - -Add files to an existing archive. Because later entries override -earlier entries, this effectively replaces any existing entries. - -The `fileList` is an array of paths to add to the tarball. Adding a -directory also adds its children recursively. - -An entry in `fileList` that starts with an `@` symbol is a tar archive -whose entries will be added. To add a file that starts with `@`, -prepend it with `./`. - -The following options are supported: - -- `file` Required. Write the tarball archive to the specified - filename. [Alias: `f`] -- `sync` Act synchronously. If this is set, then any provided file - will be fully written after the call to `tar.r`. -- `onwarn` A function that will get called with `(message, data)` for - any warnings encountered.
-- `strict` Treat warnings as crash-worthy errors. Default false. -- `cwd` The current working directory for adding entries to the - archive. Defaults to `process.cwd()`. [Alias: `C`] -- `prefix` A path portion to prefix onto the entries in the archive. -- `gzip` Set to any truthy value to create a gzipped archive, or an - object with settings for `zlib.Gzip()` [Alias: `z`] -- `filter` A function that gets called with `(path, stat)` for each - entry being added. Return `true` to add the entry to the archive, - or `false` to omit it. -- `portable` Omit metadata that is system-specific: `ctime`, `atime`, - `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note - that `mtime` is still included, because this is necessary other - time-based operations. -- `preservePaths` Allow absolute paths. By default, `/` is stripped - from absolute paths. [Alias: `P`] -- `maxReadSize` The maximum buffer size for `fs.read()` operations. - Defaults to 16 MB. -- `noDirRecurse` Do not recursively archive the contents of - directories. [Alias: `n`] -- `follow` Set to true to pack the targets of symbolic links. Without - this option, symbolic links are archived as such. [Alias: `L`, `h`] -- `noPax` Suppress pax extended headers. Note that this means that - long paths and linkpaths will be truncated, and large or negative - numeric values may be interpreted incorrectly. - -## Low-Level API - -### class tar.Pack - -A readable tar stream. - -Has all the standard readable stream interface stuff. `'data'` and -`'end'` events, `read()` method, `pause()` and `resume()`, etc. - -#### constructor(options) - -The following options are supported: - -- `onwarn` A function that will get called with `(message, data)` for - any warnings encountered. -- `strict` Treat warnings as crash-worthy errors. Default false. -- `cwd` The current working directory for creating the archive. - Defaults to `process.cwd()`. -- `prefix` A path portion to prefix onto the entries in the archive. -- `gzip` Set to any truthy value to create a gzipped archive, or an - object with settings for `zlib.Gzip()` -- `filter` A function that gets called with `(path, stat)` for each - entry being added. Return `true` to add the entry to the archive, - or `false` to omit it. -- `portable` Omit metadata that is system-specific: `ctime`, `atime`, - `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note - that `mtime` is still included, because this is necessary other - time-based operations. -- `preservePaths` Allow absolute paths. By default, `/` is stripped - from absolute paths. -- `linkCache` A Map object containing the device and inode value for - any file whose nlink is > 1, to identify hard links. -- `statCache` A Map object that caches calls `lstat`. -- `readdirCache` A Map object that caches calls to `readdir`. -- `jobs` A number specifying how many concurrent jobs to run. - Defaults to 4. -- `maxReadSize` The maximum buffer size for `fs.read()` operations. - Defaults to 16 MB. -- `noDirRecurse` Do not recursively archive the contents of - directories. -- `follow` Set to true to pack the targets of symbolic links. Without - this option, symbolic links are archived as such. -- `noPax` Suppress pax extended headers. Note that this means that - long paths and linkpaths will be truncated, and large or negative - numeric values may be interpreted incorrectly. - -#### add(path) - -Adds an entry to the archive. Returns the Pack stream. - -#### write(path) - -Adds an entry to the archive. Returns true if flushed. 
- -#### end() - -Finishes the archive. - -### class tar.Pack.Sync - -Synchronous version of `tar.Pack`. - -### class tar.Unpack - -A writable stream that unpacks a tar archive onto the file system. - -All the normal writable stream stuff is supported. `write()` and -`end()` methods, `'drain'` events, etc. - -Note that all directories that are created will be forced to be -writable, readable, and listable by their owner, to avoid cases where -a directory prevents extraction of child entries by virtue of its -mode. - -`'close'` is emitted when it's done writing stuff to the file system. - -Most unpack errors will cause a `warn` event to be emitted. If the -`cwd` is missing, or not a directory, then an error will be emitted. - -#### constructor(options) - -- `cwd` Extract files relative to the specified directory. Defaults - to `process.cwd()`. If provided, this must exist and must be a - directory. -- `filter` A function that gets called with `(path, entry)` for each - entry being unpacked. Return `true` to unpack the entry from the - archive, or `false` to skip it. -- `newer` Set to true to keep the existing file on disk if it's newer - than the file in the archive. -- `keep` Do not overwrite existing files. In particular, if a file - appears more than once in an archive, later copies will not - overwrite earlier copies. -- `preservePaths` Allow absolute paths, paths containing `..`, and - extracting through symbolic links. By default, `/` is stripped from - absolute paths, `..` paths are not extracted, and any file whose - location would be modified by a symbolic link is not extracted. -- `unlink` Unlink files before creating them. Without this option, - tar overwrites existing files, which preserves existing hardlinks. - With this option, existing hardlinks will be broken, as will any - symlink that would affect the location of an extracted file. -- `strip` Remove the specified number of leading path elements. - Pathnames with fewer elements will be silently skipped. Note that - the pathname is edited after applying the filter, but before - security checks. -- `onwarn` A function that will get called with `(message, data)` for - any warnings encountered. -- `umask` Filter the modes of entries like `process.umask()`. -- `dmode` Default mode for directories -- `fmode` Default mode for files -- `dirCache` A Map object of which directories exist. -- `maxMetaEntrySize` The maximum size of meta entries that is - supported. Defaults to 1 MB. -- `preserveOwner` If true, tar will set the `uid` and `gid` of - extracted entries to the `uid` and `gid` fields in the archive. - This defaults to true when run as root, and false otherwise. If - false, then files and directories will be set with the owner and - group of the user running the process. This is similar to `-p` in - `tar(1)`, but ACLs and other system-specific data is never unpacked - in this implementation, and modes are set by default already. -- `win32` True if on a windows platform. Causes behavior where - filenames containing `<|>?` chars are converted to - windows-compatible values while being unpacked. -- `uid` Set to a number to force ownership of all extracted files and - folders, and all implicitly created directories, to be owned by the - specified user id, regardless of the `uid` field in the archive. - Cannot be used along with `preserveOwner`. Requires also setting a - `gid` option. 
-- `gid` Set to a number to force ownership of all extracted files and - folders, and all implicitly created directories, to be owned by the - specified group id, regardless of the `gid` field in the archive. - Cannot be used along with `preserveOwner`. Requires also setting a - `uid` option. - -### class tar.Unpack.Sync - -Synchronous version of `tar.Unpack`. - -### class tar.Parse - -A writable stream that parses a tar archive stream. All the standard -writable stream stuff is supported. - -If the archive is gzipped, then tar will detect this and unzip it. - -Emits `'entry'` events with `tar.ReadEntry` objects, which are -themselves readable streams that you can pipe wherever. - -Each `entry` will not emit until the one before it is flushed through, -so make sure to either consume the data (with `on('data', ...)` or -`.pipe(...)`) or throw it away with `.resume()` to keep the stream -flowing. - -#### constructor(options) - -Returns an event emitter that emits `entry` events with -`tar.ReadEntry` objects. - -The following options are supported: - -- `strict` Treat warnings as crash-worthy errors. Default false. -- `filter` A function that gets called with `(path, entry)` for each - entry being listed. Return `true` to emit the entry from the - archive, or `false` to skip it. -- `onentry` A function that gets called with `(entry)` for each entry - that passes the filter. -- `onwarn` A function that will get called with `(message, data)` for - any warnings encountered. - -#### abort(message, error) - -Stop all parsing activities. This is called when there are zlib -errors. It also emits a warning with the message and error provided. - -### class tar.ReadEntry extends [MiniPass](http://npm.im/minipass) - -A representation of an entry that is being read out of a tar archive. - -It has the following fields: - -- `extended` The extended metadata object provided to the constructor. -- `globalExtended` The global extended metadata object provided to the - constructor. -- `remain` The number of bytes remaining to be written into the - stream. -- `blockRemain` The number of 512-byte blocks remaining to be written - into the stream. -- `ignore` Whether this entry should be ignored. -- `meta` True if this represents metadata about the next entry, false - if it represents a filesystem object. -- All the fields from the header, extended header, and global extended - header are added to the ReadEntry object. So it has `path`, `type`, - `size, `mode`, and so on. - -#### constructor(header, extended, globalExtended) - -Create a new ReadEntry object with the specified header, extended -header, and global extended header values. - -### class tar.WriteEntry extends [MiniPass](http://npm.im/minipass) - -A representation of an entry that is being written from the file -system into a tar archive. - -Emits data for the Header, and for the Pax Extended Header if one is -required, as well as any body data. - -Creating a WriteEntry for a directory does not also create -WriteEntry objects for all of the directory contents. - -It has the following fields: - -- `path` The path field that will be written to the archive. By - default, this is also the path from the cwd to the file system - object. -- `portable` Omit metadata that is system-specific: `ctime`, `atime`, - `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note - that `mtime` is still included, because this is necessary other - time-based operations. -- `myuid` If supported, the uid of the user running the current - process. 
-- `myuser` The `env.USER` string if set, or `''`. Set as the entry - `uname` field if the file's `uid` matches `this.myuid`. -- `maxReadSize` The maximum buffer size for `fs.read()` operations. - Defaults to 1 MB. -- `linkCache` A Map object containing the device and inode value for - any file whose nlink is > 1, to identify hard links. -- `statCache` A Map object that caches calls `lstat`. -- `preservePaths` Allow absolute paths. By default, `/` is stripped - from absolute paths. -- `cwd` The current working directory for creating the archive. - Defaults to `process.cwd()`. -- `absolute` The absolute path to the entry on the filesystem. By - default, this is `path.resolve(this.cwd, this.path)`, but it can be - overridden explicitly. -- `strict` Treat warnings as crash-worthy errors. Default false. -- `win32` True if on a windows platform. Causes behavior where paths - replace `\` with `/` and filenames containing the windows-compatible - forms of `<|>?:` characters are converted to actual `<|>?:` characters - in the archive. -- `noPax` Suppress pax extended headers. Note that this means that - long paths and linkpaths will be truncated, and large or negative - numeric values may be interpreted incorrectly. - -#### constructor(path, options) - -`path` is the path of the entry as it is written in the archive. - -The following options are supported: - -- `portable` Omit metadata that is system-specific: `ctime`, `atime`, - `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note - that `mtime` is still included, because this is necessary other - time-based operations. -- `maxReadSize` The maximum buffer size for `fs.read()` operations. - Defaults to 1 MB. -- `linkCache` A Map object containing the device and inode value for - any file whose nlink is > 1, to identify hard links. -- `statCache` A Map object that caches calls `lstat`. -- `preservePaths` Allow absolute paths. By default, `/` is stripped - from absolute paths. -- `cwd` The current working directory for creating the archive. - Defaults to `process.cwd()`. -- `absolute` The absolute path to the entry on the filesystem. By - default, this is `path.resolve(this.cwd, this.path)`, but it can be - overridden explicitly. -- `strict` Treat warnings as crash-worthy errors. Default false. -- `win32` True if on a windows platform. Causes behavior where paths - replace `\` with `/`. -- `onwarn` A function that will get called with `(message, data)` for - any warnings encountered. - -#### warn(message, data) - -If strict, emit an error with the provided message. - -Othewise, emit a `'warn'` event with the provided message and data. - -### class tar.WriteEntry.Sync - -Synchronous version of tar.WriteEntry - -### class tar.WriteEntry.Tar - -A version of tar.WriteEntry that gets its data from a tar.ReadEntry -instead of from the filesystem. - -#### constructor(readEntry, options) - -`readEntry` is the entry being read out of another archive. - -The following options are supported: - -- `portable` Omit metadata that is system-specific: `ctime`, `atime`, - `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note - that `mtime` is still included, because this is necessary other - time-based operations. -- `preservePaths` Allow absolute paths. By default, `/` is stripped - from absolute paths. -- `strict` Treat warnings as crash-worthy errors. Default false. -- `onwarn` A function that will get called with `(message, data)` for - any warnings encountered. - -### class tar.Header - -A class for reading and writing header blocks. 
- -It has the following fields: - -- `nullBlock` True if decoding a block which is entirely composed of - `0x00` null bytes. (Useful because tar files are terminated by - at least 2 null blocks.) -- `cksumValid` True if the checksum in the header is valid, false - otherwise. -- `needPax` True if the values, as encoded, will require a Pax - extended header. -- `path` The path of the entry. -- `mode` The 4 lowest-order octal digits of the file mode. That is, - read/write/execute permissions for world, group, and owner, and the - setuid, setgid, and sticky bits. -- `uid` Numeric user id of the file owner -- `gid` Numeric group id of the file owner -- `size` Size of the file in bytes -- `mtime` Modified time of the file -- `cksum` The checksum of the header. This is generated by adding all - the bytes of the header block, treating the checksum field itself as - all ascii space characters (that is, `0x20`). -- `type` The human-readable name of the type of entry this represents, - or the alphanumeric key if unknown. -- `typeKey` The alphanumeric key for the type of entry this header - represents. -- `linkpath` The target of Link and SymbolicLink entries. -- `uname` Human-readable user name of the file owner -- `gname` Human-readable group name of the file owner -- `devmaj` The major portion of the device number. Always `0` for - files, directories, and links. -- `devmin` The minor portion of the device number. Always `0` for - files, directories, and links. -- `atime` File access time. -- `ctime` File change time. - -#### constructor(data, [offset=0]) - -`data` is optional. It is either a Buffer that should be interpreted -as a tar Header starting at the specified offset and continuing for -512 bytes, or a data object of keys and values to set on the header -object, and eventually encode as a tar Header. - -#### decode(block, offset) - -Decode the provided buffer starting at the specified offset. - -Buffer length must be greater than 512 bytes. - -#### set(data) - -Set the fields in the data object. - -#### encode(buffer, offset) - -Encode the header fields into the buffer at the specified offset. - -Returns `this.needPax` to indicate whether a Pax Extended Header is -required to properly encode the specified data. - -### class tar.Pax - -An object representing a set of key-value pairs in an Pax extended -header entry. - -It has the following fields. Where the same name is used, they have -the same semantics as the tar.Header field of the same name. - -- `global` True if this represents a global extended header, or false - if it is for a single entry. -- `atime` -- `charset` -- `comment` -- `ctime` -- `gid` -- `gname` -- `linkpath` -- `mtime` -- `path` -- `size` -- `uid` -- `uname` -- `dev` -- `ino` -- `nlink` - -#### constructor(object, global) - -Set the fields set in the object. `global` is a boolean that defaults -to false. - -#### encode() - -Return a Buffer containing the header and body for the Pax extended -header entry, or `null` if there is nothing to encode. - -#### encodeBody() - -Return a string representing the body of the pax extended header -entry. - -#### encodeField(fieldName) - -Return a string representing the key/value encoding for the specified -fieldName, or `''` if the field is unset. - -### tar.Pax.parse(string, extended, global) - -Return a new Pax object created by parsing the contents of the string -provided. - -If the `extended` object is set, then also add the fields from that -object. (This is necessary because multiple metadata entries can -occur in sequence.) 
- -### tar.types - -A translation table for the `type` field in tar headers. - -#### tar.types.name.get(code) - -Get the human-readable name for a given alphanumeric code. - -#### tar.types.code.get(name) - -Get the alphanumeric code for a given human-readable name. diff --git a/node_modules/pacote/node_modules/tar/lib/extract.js b/node_modules/pacote/node_modules/tar/lib/extract.js deleted file mode 100644 index 53ecf67894c..00000000000 --- a/node_modules/pacote/node_modules/tar/lib/extract.js +++ /dev/null @@ -1,127 +0,0 @@ -'use strict' - -// tar -x -const hlo = require('./high-level-opt.js') -const Unpack = require('./unpack.js') -const fs = require('fs') -const path = require('path') - -const x = module.exports = (opt_, files, cb) => { - if (typeof opt_ === 'function') - cb = opt_, files = null, opt_ = {} - else if (Array.isArray(opt_)) - files = opt_, opt_ = {} - - if (typeof files === 'function') - cb = files, files = null - - if (!files) - files = [] - else - files = Array.from(files) - - const opt = hlo(opt_) - - if (opt.sync && typeof cb === 'function') - throw new TypeError('callback not supported for sync tar functions') - - if (!opt.file && typeof cb === 'function') - throw new TypeError('callback only supported with file option') - - if (files.length) - filesFilter(opt, files) - - return opt.file && opt.sync ? extractFileSync(opt) - : opt.file ? extractFile(opt, cb) - : opt.sync ? extractSync(opt) - : extract(opt) -} - -// construct a filter that limits the file entries listed -// include child entries if a dir is included -const filesFilter = (opt, files) => { - const map = new Map(files.map(f => [f.replace(/\/+$/, ''), true])) - const filter = opt.filter - - const mapHas = (file, r) => { - const root = r || path.parse(file).root || '.' - const ret = file === root ? false - : map.has(file) ? map.get(file) - : mapHas(path.dirname(file), root) - - map.set(file, ret) - return ret - } - - opt.filter = filter - ? (file, entry) => filter(file, entry) && mapHas(file.replace(/\/+$/, '')) - : file => mapHas(file.replace(/\/+$/, '')) -} - -const extractFileSync = opt => { - const u = new Unpack.Sync(opt) - - const file = opt.file - let threw = true - let fd - try { - const stat = fs.statSync(file) - const readSize = opt.maxReadSize || 16*1024*1024 - if (stat.size < readSize) - u.end(fs.readFileSync(file)) - else { - let pos = 0 - const buf = Buffer.allocUnsafe(readSize) - fd = fs.openSync(file, 'r') - while (pos < stat.size) { - let bytesRead = fs.readSync(fd, buf, 0, readSize, pos) - pos += bytesRead - u.write(buf.slice(0, bytesRead)) - } - u.end() - fs.closeSync(fd) - } - threw = false - } finally { - if (threw && fd) - try { fs.closeSync(fd) } catch (er) {} - } -} - -const extractFile = (opt, cb) => { - const u = new Unpack(opt) - const readSize = opt.maxReadSize || 16*1024*1024 - - const file = opt.file - const p = new Promise((resolve, reject) => { - u.on('error', reject) - u.on('close', resolve) - - fs.stat(file, (er, stat) => { - if (er) - reject(er) - else if (stat.size < readSize) - fs.readFile(file, (er, data) => { - if (er) - return reject(er) - u.end(data) - }) - else { - const stream = fs.createReadStream(file, { - highWaterMark: readSize - }) - stream.on('error', reject) - stream.pipe(u) - } - }) - }) - return cb ? 
p.then(cb, cb) : p -} - -const extractSync = opt => { - return new Unpack.Sync(opt) -} - -const extract = opt => { - return new Unpack(opt) -} diff --git a/node_modules/pacote/node_modules/tar/lib/header.js b/node_modules/pacote/node_modules/tar/lib/header.js deleted file mode 100644 index db002e8c188..00000000000 --- a/node_modules/pacote/node_modules/tar/lib/header.js +++ /dev/null @@ -1,272 +0,0 @@ -'use strict' -// parse a 512-byte header block to a data object, or vice-versa -// encode returns `true` if a pax extended header is needed, because -// the data could not be faithfully encoded in a simple header. -// (Also, check header.needPax to see if it needs a pax header.) - -const types = require('./types.js') -const pathModule = require('path') -const large = require('./large-numbers.js') - -const TYPE = Symbol('type') - -class Header { - constructor (data, off) { - this.cksumValid = false - this.needPax = false - this.nullBlock = false - - this.block = null - this.path = null - this.mode = null - this.uid = null - this.gid = null - this.size = null - this.mtime = null - this.cksum = null - this[TYPE] = '0' - this.linkpath = null - this.uname = null - this.gname = null - this.devmaj = 0 - this.devmin = 0 - this.atime = null - this.ctime = null - - if (Buffer.isBuffer(data)) { - this.decode(data, off || 0) - } else if (data) - this.set(data) - } - - decode (buf, off) { - if (!off) - off = 0 - - if (!buf || !(buf.length >= off + 512)) - throw new Error('need 512 bytes for header') - - this.path = decString(buf, off, 100) - this.mode = decNumber(buf, off + 100, 8) - this.uid = decNumber(buf, off + 108, 8) - this.gid = decNumber(buf, off + 116, 8) - this.size = decNumber(buf, off + 124, 12) - this.mtime = decDate(buf, off + 136, 12) - this.cksum = decNumber(buf, off + 148, 12) - - // old tar versions marked dirs as a file with a trailing / - this[TYPE] = decString(buf, off + 156, 1) - if (this[TYPE] === '') - this[TYPE] = '0' - if (this[TYPE] === '0' && this.path.substr(-1) === '/') - this[TYPE] = '5' - - // tar implementations sometimes incorrectly put the stat(dir).size - // as the size in the tarball, even though Directory entries are - // not able to have any body at all. In the very rare chance that - // it actually DOES have a body, we weren't going to do anything with - // it anyway, and it'll just be a warning about an invalid header. - if (this[TYPE] === '5') - this.size = 0 - - this.linkpath = decString(buf, off + 157, 100) - if (buf.slice(off + 257, off + 265).toString() === 'ustar\u000000') { - this.uname = decString(buf, off + 265, 32) - this.gname = decString(buf, off + 297, 32) - this.devmaj = decNumber(buf, off + 329, 8) - this.devmin = decNumber(buf, off + 337, 8) - if (buf[off + 475] !== 0) { - // definitely a prefix, definitely >130 chars. 
- const prefix = decString(buf, off + 345, 155) - this.path = prefix + '/' + this.path - } else { - const prefix = decString(buf, off + 345, 130) - if (prefix) - this.path = prefix + '/' + this.path - this.atime = decDate(buf, off + 476, 12) - this.ctime = decDate(buf, off + 488, 12) - } - } - - let sum = 8 * 0x20 - for (let i = off; i < off + 148; i++) { - sum += buf[i] - } - for (let i = off + 156; i < off + 512; i++) { - sum += buf[i] - } - this.cksumValid = sum === this.cksum - if (this.cksum === null && sum === 8 * 0x20) - this.nullBlock = true - } - - encode (buf, off) { - if (!buf) { - buf = this.block = Buffer.alloc(512) - off = 0 - } - - if (!off) - off = 0 - - if (!(buf.length >= off + 512)) - throw new Error('need 512 bytes for header') - - const prefixSize = this.ctime || this.atime ? 130 : 155 - const split = splitPrefix(this.path || '', prefixSize) - const path = split[0] - const prefix = split[1] - this.needPax = split[2] - - this.needPax = encString(buf, off, 100, path) || this.needPax - this.needPax = encNumber(buf, off + 100, 8, this.mode) || this.needPax - this.needPax = encNumber(buf, off + 108, 8, this.uid) || this.needPax - this.needPax = encNumber(buf, off + 116, 8, this.gid) || this.needPax - this.needPax = encNumber(buf, off + 124, 12, this.size) || this.needPax - this.needPax = encDate(buf, off + 136, 12, this.mtime) || this.needPax - buf[off + 156] = this[TYPE].charCodeAt(0) - this.needPax = encString(buf, off + 157, 100, this.linkpath) || this.needPax - buf.write('ustar\u000000', off + 257, 8) - this.needPax = encString(buf, off + 265, 32, this.uname) || this.needPax - this.needPax = encString(buf, off + 297, 32, this.gname) || this.needPax - this.needPax = encNumber(buf, off + 329, 8, this.devmaj) || this.needPax - this.needPax = encNumber(buf, off + 337, 8, this.devmin) || this.needPax - this.needPax = encString(buf, off + 345, prefixSize, prefix) || this.needPax - if (buf[off + 475] !== 0) - this.needPax = encString(buf, off + 345, 155, prefix) || this.needPax - else { - this.needPax = encString(buf, off + 345, 130, prefix) || this.needPax - this.needPax = encDate(buf, off + 476, 12, this.atime) || this.needPax - this.needPax = encDate(buf, off + 488, 12, this.ctime) || this.needPax - } - - let sum = 8 * 0x20 - for (let i = off; i < off + 148; i++) { - sum += buf[i] - } - for (let i = off + 156; i < off + 512; i++) { - sum += buf[i] - } - this.cksum = sum - encNumber(buf, off + 148, 8, this.cksum) - this.cksumValid = true - - return this.needPax - } - - set (data) { - for (let i in data) { - if (data[i] !== null && data[i] !== undefined) - this[i] = data[i] - } - } - - get type () { - return types.name.get(this[TYPE]) || this[TYPE] - } - - get typeKey () { - return this[TYPE] - } - - set type (type) { - if (types.code.has(type)) - this[TYPE] = types.code.get(type) - else - this[TYPE] = type - } -} - -const splitPrefix = (p, prefixSize) => { - const pathSize = 100 - let pp = p - let prefix = '' - let ret - const root = pathModule.parse(p).root || '.' - - if (Buffer.byteLength(pp) < pathSize) - ret = [pp, prefix, false] - else { - // first set prefix to the dir, and path to the base - prefix = pathModule.dirname(pp) - pp = pathModule.basename(pp) - - do { - // both fit! 
- if (Buffer.byteLength(pp) <= pathSize && - Buffer.byteLength(prefix) <= prefixSize) - ret = [pp, prefix, false] - - // prefix fits in prefix, but path doesn't fit in path - else if (Buffer.byteLength(pp) > pathSize && - Buffer.byteLength(prefix) <= prefixSize) - ret = [pp.substr(0, pathSize - 1), prefix, true] - - else { - // make path take a bit from prefix - pp = pathModule.join(pathModule.basename(prefix), pp) - prefix = pathModule.dirname(prefix) - } - } while (prefix !== root && !ret) - - // at this point, found no resolution, just truncate - if (!ret) - ret = [p.substr(0, pathSize - 1), '', true] - } - return ret -} - -const decString = (buf, off, size) => - buf.slice(off, off + size).toString('utf8').replace(/\0.*/, '') - -const decDate = (buf, off, size) => - numToDate(decNumber(buf, off, size)) - -const numToDate = num => num === null ? null : new Date(num * 1000) - -const decNumber = (buf, off, size) => - buf[off] & 0x80 ? large.parse(buf.slice(off, off + size)) - : decSmallNumber(buf, off, size) - -const nanNull = value => isNaN(value) ? null : value - -const decSmallNumber = (buf, off, size) => - nanNull(parseInt( - buf.slice(off, off + size) - .toString('utf8').replace(/\0.*$/, '').trim(), 8)) - -// the maximum encodable as a null-terminated octal, by field size -const MAXNUM = { - 12: 0o77777777777, - 8 : 0o7777777 -} - -const encNumber = (buf, off, size, number) => - number === null ? false : - number > MAXNUM[size] || number < 0 - ? (large.encode(number, buf.slice(off, off + size)), true) - : (encSmallNumber(buf, off, size, number), false) - -const encSmallNumber = (buf, off, size, number) => - buf.write(octalString(number, size), off, size, 'ascii') - -const octalString = (number, size) => - padOctal(Math.floor(number).toString(8), size) - -const padOctal = (string, size) => - (string.length === size - 1 ? string - : new Array(size - string.length - 1).join('0') + string + ' ') + '\0' - -const encDate = (buf, off, size, date) => - date === null ? false : - encNumber(buf, off, size, date.getTime() / 1000) - -// enough to fill the longest string we've got -const NULLS = new Array(156).join('\0') -// pad with nulls, return true if it's longer or non-ascii -const encString = (buf, off, size, string) => - string === null ? false : - (buf.write(string + NULLS, off, size, 'utf8'), - string.length !== Buffer.byteLength(string) || string.length > size) - -module.exports = Header diff --git a/node_modules/pacote/node_modules/tar/lib/pack.js b/node_modules/pacote/node_modules/tar/lib/pack.js deleted file mode 100644 index 09b6ac590b7..00000000000 --- a/node_modules/pacote/node_modules/tar/lib/pack.js +++ /dev/null @@ -1,399 +0,0 @@ -'use strict' - -// A readable tar stream creator -// Technically, this is a transform stream that you write paths into, -// and tar format comes out of. 
-// The `add()` method is like `write()` but returns this, -// and end() return `this` as well, so you can -// do `new Pack(opt).add('files').add('dir').end().pipe(output) -// You could also do something like: -// streamOfPaths().pipe(new Pack()).pipe(new fs.WriteStream('out.tar')) - -class PackJob { - constructor (path, absolute) { - this.path = path || './' - this.absolute = absolute - this.entry = null - this.stat = null - this.readdir = null - this.pending = false - this.ignore = false - this.piped = false - } -} - -const MiniPass = require('minipass') -const zlib = require('minizlib') -const ReadEntry = require('./read-entry.js') -const WriteEntry = require('./write-entry.js') -const WriteEntrySync = WriteEntry.Sync -const WriteEntryTar = WriteEntry.Tar -const Yallist = require('yallist') -const EOF = Buffer.alloc(1024) -const ONSTAT = Symbol('onStat') -const ENDED = Symbol('ended') -const QUEUE = Symbol('queue') -const CURRENT = Symbol('current') -const PROCESS = Symbol('process') -const PROCESSING = Symbol('processing') -const PROCESSJOB = Symbol('processJob') -const JOBS = Symbol('jobs') -const JOBDONE = Symbol('jobDone') -const ADDFSENTRY = Symbol('addFSEntry') -const ADDTARENTRY = Symbol('addTarEntry') -const STAT = Symbol('stat') -const READDIR = Symbol('readdir') -const ONREADDIR = Symbol('onreaddir') -const PIPE = Symbol('pipe') -const ENTRY = Symbol('entry') -const ENTRYOPT = Symbol('entryOpt') -const WRITEENTRYCLASS = Symbol('writeEntryClass') -const WRITE = Symbol('write') -const ONDRAIN = Symbol('ondrain') - -const fs = require('fs') -const path = require('path') -const warner = require('./warn-mixin.js') - -const Pack = warner(class Pack extends MiniPass { - constructor (opt) { - super(opt) - opt = opt || Object.create(null) - this.opt = opt - this.cwd = opt.cwd || process.cwd() - this.maxReadSize = opt.maxReadSize - this.preservePaths = !!opt.preservePaths - this.strict = !!opt.strict - this.noPax = !!opt.noPax - this.prefix = (opt.prefix || '').replace(/(\\|\/)+$/, '') - this.linkCache = opt.linkCache || new Map() - this.statCache = opt.statCache || new Map() - this.readdirCache = opt.readdirCache || new Map() - this[WRITEENTRYCLASS] = WriteEntry - if (typeof opt.onwarn === 'function') - this.on('warn', opt.onwarn) - - this.zip = null - if (opt.gzip) { - if (typeof opt.gzip !== 'object') - opt.gzip = {} - this.zip = new zlib.Gzip(opt.gzip) - this.zip.on('data', chunk => super.write(chunk)) - this.zip.on('end', _ => super.end()) - this.zip.on('drain', _ => this[ONDRAIN]()) - this.on('resume', _ => this.zip.resume()) - } else - this.on('drain', this[ONDRAIN]) - - this.portable = !!opt.portable - this.noDirRecurse = !!opt.noDirRecurse - this.follow = !!opt.follow - - this.filter = typeof opt.filter === 'function' ? 
opt.filter : _ => true - - this[QUEUE] = new Yallist - this[JOBS] = 0 - this.jobs = +opt.jobs || 4 - this[PROCESSING] = false - this[ENDED] = false - } - - [WRITE] (chunk) { - return super.write(chunk) - } - - add (path) { - this.write(path) - return this - } - - end (path) { - if (path) - this.write(path) - this[ENDED] = true - this[PROCESS]() - return this - } - - write (path) { - if (this[ENDED]) - throw new Error('write after end') - - if (path instanceof ReadEntry) - this[ADDTARENTRY](path) - else - this[ADDFSENTRY](path) - return this.flowing - } - - [ADDTARENTRY] (p) { - const absolute = path.resolve(this.cwd, p.path) - if (this.prefix) - p.path = this.prefix + '/' + p.path.replace(/^\.(\/+|$)/, '') - - // in this case, we don't have to wait for the stat - if (!this.filter(p.path, p)) - p.resume() - else { - const job = new PackJob(p.path, absolute, false) - job.entry = new WriteEntryTar(p, this[ENTRYOPT](job)) - job.entry.on('end', _ => this[JOBDONE](job)) - this[JOBS] += 1 - this[QUEUE].push(job) - } - - this[PROCESS]() - } - - [ADDFSENTRY] (p) { - const absolute = path.resolve(this.cwd, p) - if (this.prefix) - p = this.prefix + '/' + p.replace(/^\.(\/+|$)/, '') - - this[QUEUE].push(new PackJob(p, absolute)) - this[PROCESS]() - } - - [STAT] (job) { - job.pending = true - this[JOBS] += 1 - const stat = this.follow ? 'stat' : 'lstat' - fs[stat](job.absolute, (er, stat) => { - job.pending = false - this[JOBS] -= 1 - if (er) - this.emit('error', er) - else - this[ONSTAT](job, stat) - }) - } - - [ONSTAT] (job, stat) { - this.statCache.set(job.absolute, stat) - job.stat = stat - - // now we have the stat, we can filter it. - if (!this.filter(job.path, stat)) - job.ignore = true - - this[PROCESS]() - } - - [READDIR] (job) { - job.pending = true - this[JOBS] += 1 - fs.readdir(job.absolute, (er, entries) => { - job.pending = false - this[JOBS] -= 1 - if (er) - return this.emit('error', er) - this[ONREADDIR](job, entries) - }) - } - - [ONREADDIR] (job, entries) { - this.readdirCache.set(job.absolute, entries) - job.readdir = entries - this[PROCESS]() - } - - [PROCESS] () { - if (this[PROCESSING]) - return - - this[PROCESSING] = true - for (let w = this[QUEUE].head; - w !== null && this[JOBS] < this.jobs; - w = w.next) { - this[PROCESSJOB](w.value) - if (w.value.ignore) { - const p = w.next - this[QUEUE].removeNode(w) - w.next = p - } - } - - this[PROCESSING] = false - - if (this[ENDED] && !this[QUEUE].length && this[JOBS] === 0) { - if (this.zip) - this.zip.end(EOF) - else { - super.write(EOF) - super.end() - } - } - } - - get [CURRENT] () { - return this[QUEUE] && this[QUEUE].head && this[QUEUE].head.value - } - - [JOBDONE] (job) { - this[QUEUE].shift() - this[JOBS] -= 1 - this[PROCESS]() - } - - [PROCESSJOB] (job) { - if (job.pending) - return - - if (job.entry) { - if (job === this[CURRENT] && !job.piped) - this[PIPE](job) - return - } - - if (!job.stat) { - if (this.statCache.has(job.absolute)) - this[ONSTAT](job, this.statCache.get(job.absolute)) - else - this[STAT](job) - } - if (!job.stat) - return - - // filtered out! 
- if (job.ignore) - return - - if (!this.noDirRecurse && job.stat.isDirectory() && !job.readdir) { - if (this.readdirCache.has(job.absolute)) - this[ONREADDIR](job, this.readdirCache.get(job.absolute)) - else - this[READDIR](job) - if (!job.readdir) - return - } - - // we know it doesn't have an entry, because that got checked above - job.entry = this[ENTRY](job) - if (!job.entry) { - job.ignore = true - return - } - - if (job === this[CURRENT] && !job.piped) - this[PIPE](job) - } - - [ENTRYOPT] (job) { - return { - onwarn: (msg, data) => { - this.warn(msg, data) - }, - noPax: this.noPax, - cwd: this.cwd, - absolute: job.absolute, - preservePaths: this.preservePaths, - maxReadSize: this.maxReadSize, - strict: this.strict, - portable: this.portable, - linkCache: this.linkCache, - statCache: this.statCache - } - } - - [ENTRY] (job) { - this[JOBS] += 1 - try { - return new this[WRITEENTRYCLASS]( - job.path, this[ENTRYOPT](job)).on('end', _ => { - this[JOBDONE](job) - }).on('error', er => this.emit('error', er)) - } catch (er) { - this.emit('error', er) - } - } - - [ONDRAIN] () { - if (this[CURRENT] && this[CURRENT].entry) - this[CURRENT].entry.resume() - } - - // like .pipe() but using super, because our write() is special - [PIPE] (job) { - job.piped = true - - if (job.readdir) - job.readdir.forEach(entry => { - const p = this.prefix ? - job.path.slice(this.prefix.length + 1) || './' - : job.path - - const base = p === './' ? '' : p.replace(/\/*$/, '/') - this[ADDFSENTRY](base + entry) - }) - - const source = job.entry - const zip = this.zip - - if (zip) - source.on('data', chunk => { - if (!zip.write(chunk)) - source.pause() - }) - else - source.on('data', chunk => { - if (!super.write(chunk)) - source.pause() - }) - } - - pause () { - if (this.zip) - this.zip.pause() - return super.pause() - } -}) - -class PackSync extends Pack { - constructor (opt) { - super(opt) - this[WRITEENTRYCLASS] = WriteEntrySync - } - - // pause/resume are no-ops in sync streams. - pause () {} - resume () {} - - [STAT] (job) { - const stat = this.follow ? 'statSync' : 'lstatSync' - this[ONSTAT](job, fs[stat](job.absolute)) - } - - [READDIR] (job, stat) { - this[ONREADDIR](job, fs.readdirSync(job.absolute)) - } - - // gotta get it all in this tick - [PIPE] (job) { - const source = job.entry - const zip = this.zip - - if (job.readdir) - job.readdir.forEach(entry => { - const p = this.prefix ? - job.path.slice(this.prefix.length + 1) || './' - : job.path - - - const base = p === './' ? '' : p.replace(/\/*$/, '/') - this[ADDFSENTRY](base + entry) - }) - - if (zip) - source.on('data', chunk => { - zip.write(chunk) - }) - else - source.on('data', chunk => { - super[WRITE](chunk) - }) - } -} - -Pack.Sync = PackSync - -module.exports = Pack diff --git a/node_modules/pacote/node_modules/tar/lib/parse.js b/node_modules/pacote/node_modules/tar/lib/parse.js deleted file mode 100644 index 63c7ee9cefd..00000000000 --- a/node_modules/pacote/node_modules/tar/lib/parse.js +++ /dev/null @@ -1,415 +0,0 @@ -'use strict' - -// this[BUFFER] is the remainder of a chunk if we're waiting for -// the full 512 bytes of a header to come in. We will Buffer.concat() -// it to the next write(), which is a mem copy, but a small one. 
-// -// this[QUEUE] is a Yallist of entries that haven't been emitted -// yet this can only get filled up if the user keeps write()ing after -// a write() returns false, or does a write() with more than one entry -// -// We don't buffer chunks, we always parse them and either create an -// entry, or push it into the active entry. The ReadEntry class knows -// to throw data away if .ignore=true -// -// Shift entry off the buffer when it emits 'end', and emit 'entry' for -// the next one in the list. -// -// At any time, we're pushing body chunks into the entry at WRITEENTRY, -// and waiting for 'end' on the entry at READENTRY -// -// ignored entries get .resume() called on them straight away - -const warner = require('./warn-mixin.js') -const path = require('path') -const Header = require('./header.js') -const EE = require('events') -const Yallist = require('yallist') -const maxMetaEntrySize = 1024 * 1024 -const Entry = require('./read-entry.js') -const Pax = require('./pax.js') -const zlib = require('minizlib') - -const gzipHeader = new Buffer([0x1f, 0x8b]) -const STATE = Symbol('state') -const WRITEENTRY = Symbol('writeEntry') -const READENTRY = Symbol('readEntry') -const NEXTENTRY = Symbol('nextEntry') -const PROCESSENTRY = Symbol('processEntry') -const EX = Symbol('extendedHeader') -const GEX = Symbol('globalExtendedHeader') -const META = Symbol('meta') -const EMITMETA = Symbol('emitMeta') -const BUFFER = Symbol('buffer') -const QUEUE = Symbol('queue') -const ENDED = Symbol('ended') -const EMITTEDEND = Symbol('emittedEnd') -const EMIT = Symbol('emit') -const UNZIP = Symbol('unzip') -const CONSUMECHUNK = Symbol('consumeChunk') -const CONSUMECHUNKSUB = Symbol('consumeChunkSub') -const CONSUMEBODY = Symbol('consumeBody') -const CONSUMEMETA = Symbol('consumeMeta') -const CONSUMEHEADER = Symbol('consumeHeader') -const CONSUMING = Symbol('consuming') -const BUFFERCONCAT = Symbol('bufferConcat') -const MAYBEEND = Symbol('maybeEnd') -const WRITING = Symbol('writing') -const ABORTED = Symbol('aborted') -const DONE = Symbol('onDone') - -const noop = _ => true - -module.exports = warner(class Parser extends EE { - constructor (opt) { - opt = opt || {} - super(opt) - - if (opt.ondone) - this.on(DONE, opt.ondone) - else - this.on(DONE, _ => { - this.emit('prefinish') - this.emit('finish') - this.emit('end') - this.emit('close') - }) - - this.strict = !!opt.strict - this.maxMetaEntrySize = opt.maxMetaEntrySize || maxMetaEntrySize - this.filter = typeof opt.filter === 'function' ? 
opt.filter : noop - - // have to set this so that streams are ok piping into it - this.writable = true - this.readable = false - - this[QUEUE] = new Yallist() - this[BUFFER] = null - this[READENTRY] = null - this[WRITEENTRY] = null - this[STATE] = 'begin' - this[META] = '' - this[EX] = null - this[GEX] = null - this[ENDED] = false - this[UNZIP] = null - this[ABORTED] = false - if (typeof opt.onwarn === 'function') - this.on('warn', opt.onwarn) - if (typeof opt.onentry === 'function') - this.on('entry', opt.onentry) - } - - [CONSUMEHEADER] (chunk, position) { - const header = new Header(chunk, position) - - if (header.nullBlock) - this[EMIT]('nullBlock') - else if (!header.cksumValid) - this.warn('invalid entry', header) - else if (!header.path) - this.warn('invalid: path is required', header) - else { - const type = header.type - if (/^(Symbolic)?Link$/.test(type) && !header.linkpath) - this.warn('invalid: linkpath required', header) - else if (!/^(Symbolic)?Link$/.test(type) && header.linkpath) - this.warn('invalid: linkpath forbidden', header) - else { - const entry = this[WRITEENTRY] = new Entry(header, this[EX], this[GEX]) - - if (entry.meta) { - if (entry.size > this.maxMetaEntrySize) { - entry.ignore = true - this[EMIT]('ignoredEntry', entry) - this[STATE] = 'ignore' - } else if (entry.size > 0) { - this[META] = '' - entry.on('data', c => this[META] += c) - this[STATE] = 'meta' - } - } else { - - this[EX] = null - entry.ignore = entry.ignore || !this.filter(entry.path, entry) - if (entry.ignore) { - this[EMIT]('ignoredEntry', entry) - this[STATE] = entry.remain ? 'ignore' : 'begin' - } else { - if (entry.remain) - this[STATE] = 'body' - else { - this[STATE] = 'begin' - entry.end() - } - - if (!this[READENTRY]) { - this[QUEUE].push(entry) - this[NEXTENTRY]() - } else - this[QUEUE].push(entry) - } - } - } - } - } - - [PROCESSENTRY] (entry) { - let go = true - - if (!entry) { - this[READENTRY] = null - go = false - } else if (Array.isArray(entry)) - this.emit.apply(this, entry) - else { - this[READENTRY] = entry - this.emit('entry', entry) - if (!entry.emittedEnd) { - entry.on('end', _ => this[NEXTENTRY]()) - go = false - } - } - - return go - } - - [NEXTENTRY] () { - do {} while (this[PROCESSENTRY](this[QUEUE].shift())) - - if (!this[QUEUE].length) { - // At this point, there's nothing in the queue, but we may have an - // entry which is being consumed (readEntry). - // If we don't, then we definitely can handle more data. - // If we do, and either it's flowing, or it has never had any data - // written to it, then it needs more. - // The only other possibility is that it has returned false from a - // write() call, so we wait for the next drain to continue. - const re = this[READENTRY] - const drainNow = !re || re.flowing || re.size === re.remain - if (drainNow) { - if (!this[WRITING]) - this.emit('drain') - } else - re.once('drain', _ => this.emit('drain')) - } - } - - [CONSUMEBODY] (chunk, position) { - // write up to but no more than writeEntry.blockRemain - const entry = this[WRITEENTRY] - const br = entry.blockRemain - const c = (br >= chunk.length && position === 0) ? 
chunk - : chunk.slice(position, position + br) - - entry.write(c) - - if (!entry.blockRemain) { - this[STATE] = 'begin' - this[WRITEENTRY] = null - entry.end() - } - - return c.length - } - - [CONSUMEMETA] (chunk, position) { - const entry = this[WRITEENTRY] - const ret = this[CONSUMEBODY](chunk, position) - - // if we finished, then the entry is reset - if (!this[WRITEENTRY]) - this[EMITMETA](entry) - - return ret - } - - [EMIT] (ev, data, extra) { - if (!this[QUEUE].length && !this[READENTRY]) - this.emit(ev, data, extra) - else - this[QUEUE].push([ev, data, extra]) - } - - [EMITMETA] (entry) { - this[EMIT]('meta', this[META]) - switch (entry.type) { - case 'ExtendedHeader': - case 'OldExtendedHeader': - this[EX] = Pax.parse(this[META], this[EX], false) - break - - case 'GlobalExtendedHeader': - this[GEX] = Pax.parse(this[META], this[GEX], true) - break - - case 'NextFileHasLongPath': - case 'OldGnuLongPath': - this[EX] = this[EX] || Object.create(null) - this[EX].path = this[META].replace(/\0.*/, '') - break - - case 'NextFileHasLongLinkpath': - this[EX] = this[EX] || Object.create(null) - this[EX].linkpath = this[META].replace(/\0.*/, '') - break - - /* istanbul ignore next */ - default: throw new Error('unknown meta: ' + entry.type) - } - } - - abort (msg, error) { - this[ABORTED] = true - this.warn(msg, error) - this.emit('abort') - } - - write (chunk) { - if (this[ABORTED]) - return - - // first write, might be gzipped - if (this[UNZIP] === null && chunk) { - if (this[BUFFER]) { - chunk = Buffer.concat([this[BUFFER], chunk]) - this[BUFFER] = null - } - if (chunk.length < gzipHeader.length) { - this[BUFFER] = chunk - return true - } - for (let i = 0; this[UNZIP] === null && i < gzipHeader.length; i++) { - if (chunk[i] !== gzipHeader[i]) - this[UNZIP] = false - } - if (this[UNZIP] === null) { - const ended = this[ENDED] - this[ENDED] = false - this[UNZIP] = new zlib.Unzip() - this[UNZIP].on('data', chunk => this[CONSUMECHUNK](chunk)) - this[UNZIP].on('error', er => - this.abort('zlib error: ' + er.message, er)) - this[UNZIP].on('end', _ => { - this[ENDED] = true - this[CONSUMECHUNK]() - }) - return ended ? this[UNZIP].end(chunk) : this[UNZIP].write(chunk) - } - } - - this[WRITING] = true - if (this[UNZIP]) - this[UNZIP].write(chunk) - else - this[CONSUMECHUNK](chunk) - this[WRITING] = false - - // return false if there's a queue, or if the current entry isn't flowing - const ret = - this[QUEUE].length ? false : - this[READENTRY] ? this[READENTRY].flowing : - true - - // if we have no queue, then that means a clogged READENTRY - if (!ret && !this[QUEUE].length) - this[READENTRY].once('drain', _ => this.emit('drain')) - - return ret - } - - [BUFFERCONCAT] (c) { - if (c && !this[ABORTED]) - this[BUFFER] = this[BUFFER] ? Buffer.concat([this[BUFFER], c]) : c - } - - [MAYBEEND] () { - if (this[ENDED] && !this[EMITTEDEND] && !this[ABORTED]) { - this[EMITTEDEND] = true - const entry = this[WRITEENTRY] - if (entry && entry.blockRemain) { - const have = this[BUFFER] ? 
this[BUFFER].length : 0 - this.warn('Truncated input (needed ' + entry.blockRemain + - ' more bytes, only ' + have + ' available)', entry) - if (this[BUFFER]) - entry.write(this[BUFFER]) - entry.end() - } - this[EMIT](DONE) - } - } - - [CONSUMECHUNK] (chunk) { - if (this[CONSUMING]) { - this[BUFFERCONCAT](chunk) - } else if (!chunk && !this[BUFFER]) { - this[MAYBEEND]() - } else { - this[CONSUMING] = true - if (this[BUFFER]) { - this[BUFFERCONCAT](chunk) - const c = this[BUFFER] - this[BUFFER] = null - this[CONSUMECHUNKSUB](c) - } else { - this[CONSUMECHUNKSUB](chunk) - } - - while (this[BUFFER] && this[BUFFER].length >= 512 && !this[ABORTED]) { - const c = this[BUFFER] - this[BUFFER] = null - this[CONSUMECHUNKSUB](c) - } - this[CONSUMING] = false - } - - if (!this[BUFFER] || this[ENDED]) - this[MAYBEEND]() - } - - [CONSUMECHUNKSUB] (chunk) { - // we know that we are in CONSUMING mode, so anything written goes into - // the buffer. Advance the position and put any remainder in the buffer. - let position = 0 - let length = chunk.length - while (position + 512 <= length && !this[ABORTED]) { - switch (this[STATE]) { - case 'begin': - this[CONSUMEHEADER](chunk, position) - position += 512 - break - - case 'ignore': - case 'body': - position += this[CONSUMEBODY](chunk, position) - break - - case 'meta': - position += this[CONSUMEMETA](chunk, position) - break - - /* istanbul ignore next */ - default: - throw new Error('invalid state: ' + this[STATE]) - } - } - - if (position < length) { - if (this[BUFFER]) - this[BUFFER] = Buffer.concat([chunk.slice(position), this[BUFFER]]) - else - this[BUFFER] = chunk.slice(position) - } - } - - end (chunk) { - if (!this[ABORTED]) { - if (this[UNZIP]) - this[UNZIP].end(chunk) - else { - this[ENDED] = true - this.write(chunk) - } - } - } -}) diff --git a/node_modules/pacote/node_modules/tar/package.json b/node_modules/pacote/node_modules/tar/package.json deleted file mode 100644 index bb5dc29526d..00000000000 --- a/node_modules/pacote/node_modules/tar/package.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "_from": "tar@^4.0.0", - "_id": "tar@4.0.1", - "_inBundle": false, - "_integrity": "sha512-XBpU+/azPOMvE5m2Tn7Sl6U1ahpGfe77LkdrAlFilwrgHZsR+2iy0l8klQtfJNM+DACZO2Xrw10MTyQRB4du5A==", - "_location": "/pacote/tar", - "_phantomChildren": {}, - "_requested": { - "type": "range", - "registry": true, - "raw": "tar@^4.0.0", - "name": "tar", - "escapedName": "tar", - "rawSpec": "^4.0.0", - "saveSpec": null, - "fetchSpec": "^4.0.0" - }, - "_requiredBy": [ - "/pacote" - ], - "_resolved": "https://registry.npmjs.org/tar/-/tar-4.0.1.tgz", - "_shasum": "3f5b2e5289db30c2abe4c960f43d0d9fff96aaf0", - "_spec": "tar@^4.0.0", - "_where": "/Users/rebecca/code/npm/node_modules/pacote", - "author": { - "name": "Isaac Z. 
Schlueter", - "email": "i@izs.me", - "url": "http://blog.izs.me/" - }, - "bugs": { - "url": "https://github.com/npm/node-tar/issues" - }, - "bundleDependencies": false, - "dependencies": { - "chownr": "^1.0.1", - "minipass": "^2.0.2", - "minizlib": "^1.0.3", - "mkdirp": "^0.5.0", - "yallist": "^3.0.2" - }, - "deprecated": false, - "description": "tar for node", - "devDependencies": { - "chmodr": "^1.0.2", - "end-of-stream": "^1.4.0", - "events-to-array": "^1.1.2", - "mutate-fs": "^1.1.0", - "rimraf": "1.x", - "tap": "^10.3.3", - "tar-fs": "^1.15.2", - "tar-stream": "^1.5.2" - }, - "engines": { - "node": ">=4.5" - }, - "files": [ - "index.js", - "lib/" - ], - "homepage": "https://github.com/npm/node-tar#readme", - "license": "ISC", - "name": "tar", - "repository": { - "type": "git", - "url": "git+https://github.com/npm/node-tar.git" - }, - "scripts": { - "bench": "for i in benchmarks/*/*.js; do echo $i; for j in {1..5}; do node $i || break; done; done", - "genparse": "node scripts/generate-parse-fixtures.js", - "postpublish": "git push origin --all; git push origin --tags", - "postversion": "npm publish", - "preversion": "npm test", - "test": "tap test/*.js --100 -J --coverage-report=text" - }, - "version": "4.0.1" -} diff --git a/node_modules/tar/LICENSE b/node_modules/tar/LICENSE index 019b7e40ea0..19129e315fe 100644 --- a/node_modules/tar/LICENSE +++ b/node_modules/tar/LICENSE @@ -1,8 +1,11 @@ The ISC License + Copyright (c) Isaac Z. Schlueter and Contributors + Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR diff --git a/node_modules/tar/README.md b/node_modules/tar/README.md index cfda2ac1806..a356a78da20 100644 --- a/node_modules/tar/README.md +++ b/node_modules/tar/README.md @@ -1,50 +1,883 @@ # node-tar -Tar for Node.js. +[![Build Status](https://travis-ci.org/npm/node-tar.svg?branch=master)](https://travis-ci.org/npm/node-tar) -[![NPM](https://nodei.co/npm/tar.png)](https://nodei.co/npm/tar/) +[Fast](./benchmarks) and full-featured Tar for Node.js -## API +The API is designed to mimic the behavior of `tar(1)` on unix systems. +If you are familiar with how tar works, most of this will hopefully be +straightforward for you. If not, then hopefully this module can teach +you useful unix skills that may come in handy someday :) -See `examples/` for usage examples. +## Background -### var tar = require('tar') +A "tar file" or "tarball" is an archive of file system entries +(directories, files, links, etc.) The name comes from "tape archive". +If you run `man tar` on almost any Unix command line, you'll learn +quite a bit about what it can do, and its history. -Returns an object with `.Pack`, `.Extract` and `.Parse` methods. +Tar has 5 main top-level commands: -### tar.Pack([properties]) +* `c` Create an archive +* `r` Replace entries within an archive +* `u` Update entries within an archive (ie, replace if they're newer) +* `t` List out the contents of an archive +* `x` Extract an archive to disk -Returns a through stream. Use -[fstream](https://npmjs.org/package/fstream) to write files into the -pack stream and you will receive tar archive data from the pack -stream. 
+The other flags and options modify how this top level function works.
-This only works with directories, it does not work with individual files.
+## High-Level API
-The optional `properties` object are used to set properties in the tar
-'Global Extended Header'. If the `fromBase` property is set to true,
-the tar will contain files relative to the path passed, and not with
-the path included.
+These 5 functions are the high-level API. All of them have a
+single-character name (for unix nerds familiar with `tar(1)`) as well
+as a long name (for everyone else).
-### tar.Extract([options])
+All the high-level functions take the following arguments, all three
+of which are optional and may be omitted.
-Returns a through stream. Write tar data to the stream and the files
-in the tarball will be extracted onto the filesystem.
+1. `options` - An optional object specifying various options
+2. `paths` - An array of paths to add or extract
+3. `callback` - Called when the command is completed, if async. (If
+   sync or no file specified, providing a callback throws a
+   `TypeError`.)
-`options` can be:
+If the command is sync (ie, if `options.sync=true`), then the
+callback is not allowed, since the action will be completed immediately.
+
+If a `file` argument is specified, and the command is async, then a
+`Promise` is returned. In this case, a callback may also be
+provided; it will be called when the command is completed.
+
+If a `file` option is not specified, then a stream is returned. For
+`create`, this is a readable stream of the generated archive. For
+`list` and `extract` this is a writable stream that an archive should
+be written into. If a file is not specified, then a callback is not
+allowed, because you're already getting a stream to work with.
+
+`replace` and `update` only work on existing archives, and so require
+a `file` argument.
+
+Sync commands without a file argument return a stream that acts on its
+input immediately in the same tick. For readable streams, this means
+that all of the data is immediately available by calling
+`stream.read()`. For writable streams, it will be acted upon as soon
+as it is provided, but this can be at any time.
+
+### Warnings
+
+Some things cause tar to emit a warning, but should usually not cause
+the entire operation to fail. There are three ways to handle
+warnings:
+
+1. **Ignore them** (default) Invalid entries won't be put in the
+   archive, and invalid entries won't be unpacked. This is usually
+   fine, but can hide failures that you might care about.
+2. **Notice them** Add an `onwarn` function to the options, or listen
+   to the `'warn'` event on any tar stream. The function will get
+   called as `onwarn(message, data)`. Handle as appropriate.
+3. **Explode them.** Set `strict: true` in the options object, and
+   `warn` messages will be emitted as `'error'` events instead. If
+   there's no `error` handler, this causes the program to crash. If
+   used with a promise-returning/callback-taking method, then it'll
+   send the error to the promise/callback.
+
+### Examples
+
+The API mimics the `tar(1)` command line functionality, with aliases
+for more human-readable option and function names. The goal is that
+if you know how to use `tar(1)` in Unix, then you know how to use
+`require('tar')` in JavaScript.
+
+To replicate `tar czf my-tarball.tgz files and folders`, you'd do:
+
+```js
+tar.c(
+  {
+    gzip: <true|gzip options>,
+    file: 'my-tarball.tgz'
+  },
+  ['some', 'files', 'and', 'folders']
+).then(_ => { .. tarball has been created .. })
+```
+
+To replicate `tar cz files and folders > my-tarball.tgz`, you'd do:
+
+```js
+tar.c( // or tar.create
+  {
+    gzip: <true|gzip options>
+  },
+  ['some', 'files', 'and', 'folders']
+).pipe(fs.createWriteStream('my-tarball.tgz'))
+```
+
+To replicate `tar xf my-tarball.tgz` you'd do:
+
+```js
+tar.x( // or tar.extract(
+  {
+    file: 'my-tarball.tgz'
+  }
+).then(_ => { .. tarball has been dumped in cwd .. })
+```
+
+To replicate `cat my-tarball.tgz | tar x -C some-dir --strip=1`:
+
 ```js
-{
-  path: '/path/to/extract/tar/into',
-  strip: 0, // how many path segments to strip from the root when extracting
-}
+fs.createReadStream('my-tarball.tgz').pipe(
+  tar.x({
+    strip: 1,
+    C: 'some-dir' // alias for cwd:'some-dir', also ok
+  })
+)
 ```
-`options` also get passed to the `fstream.Writer` instance that `tar`
-uses internally.
+To replicate `tar tf my-tarball.tgz`, do this:
+
+```js
+tar.t({
+  file: 'my-tarball.tgz',
+  onentry: entry => { .. do whatever with it .. }
+})
+```
+
+To replicate `cat my-tarball.tgz | tar t` do:
+
+```js
+fs.createReadStream('my-tarball.tgz')
+  .pipe(tar.t())
+  .on('entry', entry => { .. do whatever with it .. })
+```
+
+To do anything synchronous, add `sync: true` to the options. Note
+that sync functions don't take a callback and don't return a promise.
+When the function returns, it's already done. Sync methods without a
+file argument return a sync stream, which flushes immediately. But,
+of course, it still won't be done until you `.end()` it.
+
+To filter entries, add `filter: <function>` to the options.
+Tar-creating methods call the filter with `filter(path, stat)`.
+Tar-reading methods (including extraction) call the filter with
+`filter(path, entry)`. The filter is called in the `this`-context of
+the `Pack` or `Unpack` stream object.
+
+The arguments list to `tar t` and `tar x` specify a list of filenames
+to extract or list, so they're equivalent to a filter that tests if
+the file is in the list.
+
+For those who _aren't_ fans of tar's single-character command names:
+
+```
+tar.c === tar.create
+tar.r === tar.replace (appends to archive, file is required)
+tar.u === tar.update (appends if newer, file is required)
+tar.x === tar.extract
+tar.t === tar.list
+```
+
+Keep reading for all the command descriptions and options, as well as
+the low-level API that they are built on.
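+
+The warning-handling approaches described above combine with any of
+these recipes. As a rough sketch (the archive name is a placeholder,
+and the `onwarn` handler here just logs):
+
+```js
+const tar = require('tar')
+
+tar.x({
+  file: 'my-tarball.tgz',      // extract this archive into cwd
+  onwarn: (message, data) => console.error('tar warning:', message),
+  strict: false                // set true to turn warnings into 'error' events
+}).then(_ => console.log('done'))
+  .catch(er => console.error('extraction failed:', er))
+```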
+
+### Examples
+
+The API mimics the `tar(1)` command line functionality, with aliases
+for more human-readable option and function names. The goal is that
+if you know how to use `tar(1)` in Unix, then you know how to use
+`require('tar')` in JavaScript.
+
+To replicate `tar czf my-tarball.tgz files and folders`, you'd do:
+
+```js
+tar.c(
+  {
+    gzip: true, // or an object of zlib.Gzip() settings
+    file: 'my-tarball.tgz'
+  },
+  ['some', 'files', 'and', 'folders']
+).then(_ => { .. tarball has been created .. })
+```
+
+To replicate `tar cz files and folders > my-tarball.tgz`, you'd do:
+
+```js
+tar.c( // or tar.create
+  {
+    gzip: true
+  },
+  ['some', 'files', 'and', 'folders']
+).pipe(fs.createWriteStream('my-tarball.tgz'))
+```
+
+To replicate `tar xf my-tarball.tgz` you'd do:
+
+```js
+tar.x( // or tar.extract(
+  {
+    file: 'my-tarball.tgz'
+  }
+).then(_ => { .. tarball has been dumped in cwd .. })
+```
+
+To replicate `cat my-tarball.tgz | tar x -C some-dir --strip=1`:
 
 ```js
-{
-  path: '/path/to/extract/tar/into',
-  strip: 0, // how many path segments to strip from the root when extracting
-}
+fs.createReadStream('my-tarball.tgz').pipe(
+  tar.x({
+    strip: 1,
+    C: 'some-dir' // alias for cwd:'some-dir', also ok
+  })
+)
 ```
 
-`options` also get passed to the `fstream.Writer` instance that `tar`
-uses internally.
+To replicate `tar tf my-tarball.tgz`, do this:
+
+```js
+tar.t({
+  file: 'my-tarball.tgz',
+  onentry: entry => { .. do whatever with it .. }
+})
+```
+
+To replicate `cat my-tarball.tgz | tar t` do:
+
+```js
+fs.createReadStream('my-tarball.tgz')
+  .pipe(tar.t())
+  .on('entry', entry => { .. do whatever with it .. })
+```
+
+To do anything synchronous, add `sync: true` to the options. Note
+that sync functions don't take a callback and don't return a promise.
+When the function returns, it's already done. Sync methods without a
+file argument return a sync stream, which flushes immediately. But,
+of course, it still won't be done until you `.end()` it.
+
+To filter entries, add `filter: <function>` to the options.
+Tar-creating methods call the filter with `filter(path, stat)`.
+Tar-reading methods (including extraction) call the filter with
+`filter(path, entry)`. The filter is called in the `this`-context of
+the `Pack` or `Unpack` stream object.
+
+The arguments list to `tar t` and `tar x` specify a list of filenames
+to extract or list, so they're equivalent to a filter that tests if
+the file is in the list.
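+
+To make the `filter` option described above concrete, here is a
+sketch that packs only JavaScript files (the `src` directory and the
+output name are hypothetical):
+
+```js
+tar.c(
+  {
+    file: 'js-only.tgz',
+    // let directories through so tar can recurse into them
+    filter: (path, stat) => stat.isDirectory() || path.endsWith('.js')
+  },
+  ['src']
+).then(_ => { .. archive created .. })
+```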
+
+For those who _aren't_ fans of tar's single-character command names:
+
+```
+tar.c === tar.create
+tar.r === tar.replace (appends to archive, file is required)
+tar.u === tar.update (appends if newer, file is required)
+tar.x === tar.extract
+tar.t === tar.list
+```
+
+Keep reading for all the command descriptions and options, as well as
+the low-level API that they are built on.
+
+### tar.c(options, fileList, callback) [alias: tar.create]
+
+Create a tarball archive.
+
+The `fileList` is an array of paths to add to the tarball. Adding a
+directory also adds its children recursively.
+
+An entry in `fileList` that starts with an `@` symbol is a tar archive
+whose entries will be added. To add a file that starts with `@`,
+prepend it with `./`.
+
+The following options are supported:
+
+- `file` Write the tarball archive to the specified filename. If this
+  is specified, then the callback will be fired when the file has been
+  written, and a promise will be returned that resolves when the file
+  is written. If a filename is not specified, then a Readable Stream
+  will be returned which will emit the file data. [Alias: `f`]
+- `sync` Act synchronously. If this is set, then any provided file
+  will be fully written after the call to `tar.c`. If this is set,
+  and a file is not provided, then the resulting stream will already
+  have the data ready to `read` or `emit('data')` as soon as you
+  request it.
+- `onwarn` A function that will get called with `(message, data)` for
+  any warnings encountered.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for creating the archive.
+  Defaults to `process.cwd()`. [Alias: `C`]
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+  object with settings for `zlib.Gzip()` [Alias: `z`]
+- `filter` A function that gets called with `(path, stat)` for each
+  entry being added. Return `true` to add the entry to the archive,
+  or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+  `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+  time-based operations.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+  from absolute paths. [Alias: `P`]
+- `mode` The mode to set on the created file archive.
+- `noDirRecurse` Do not recursively archive the contents of
+  directories. [Alias: `n`]
+- `follow` Set to true to pack the targets of symbolic links. Without
+  this option, symbolic links are archived as such. [Alias: `L`, `h`]
+- `noPax` Suppress pax extended headers. Note that this means that
+  long paths and linkpaths will be truncated, and large or negative
+  numeric values may be interpreted incorrectly.
+
+The following options are mostly internal, but can be modified in some
+advanced use cases, such as re-using caches between runs.
+
+- `linkCache` A Map object containing the device and inode value for
+  any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls to `lstat`.
+- `readdirCache` A Map object that caches calls to `readdir`.
+- `jobs` A number specifying how many concurrent jobs to run.
+  Defaults to 4.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+  Defaults to 16 MB.
+
+### tar.x(options, fileList, callback) [alias: tar.extract]
+
+Extract a tarball archive.
+
+The `fileList` is an array of paths to extract from the tarball. If
+no paths are provided, then all the entries are extracted.
+
+If the archive is gzipped, then tar will detect this and unzip it.
+
+Note that all directories that are created will be forced to be
+writable, readable, and listable by their owner, to avoid cases where
+a directory prevents extraction of child entries by virtue of its
+mode.
+
+Most extraction errors will cause a `warn` event to be emitted. If
+the `cwd` is missing, or not a directory, then the extraction will
+fail completely.
+
+The following options are supported:
+
+- `cwd` Extract files relative to the specified directory. Defaults
+  to `process.cwd()`. If provided, this must exist and must be a
+  directory. [Alias: `C`]
+- `file` The archive file to extract. If not specified, then a
+  Writable stream is returned where the archive data should be
+  written. [Alias: `f`]
+- `sync` Create files and directories synchronously.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `filter` A function that gets called with `(path, entry)` for each
+  entry being unpacked. Return `true` to unpack the entry from the
+  archive, or `false` to skip it.
+- `newer` Set to true to keep the existing file on disk if it's newer
+  than the file in the archive. [Alias: `keep-newer`,
+  `keep-newer-files`]
+- `keep` Do not overwrite existing files. In particular, if a file
+  appears more than once in an archive, later copies will not
+  overwrite earlier copies.
[Alias: `k`, `keep-existing`] +- `preservePaths` Allow absolute paths, paths containing `..`, and + extracting through symbolic links. By default, `/` is stripped from + absolute paths, `..` paths are not extracted, and any file whose + location would be modified by a symbolic link is not extracted. + [Alias: `P`] +- `unlink` Unlink files before creating them. Without this option, + tar overwrites existing files, which preserves existing hardlinks. + With this option, existing hardlinks will be broken, as will any + symlink that would affect the location of an extracted file. [Alias: + `U`] +- `strip` Remove the specified number of leading path elements. + Pathnames with fewer elements will be silently skipped. Note that + the pathname is edited after applying the filter, but before + security checks. [Alias: `strip-components`, `stripComponents`] +- `onwarn` A function that will get called with `(message, data)` for + any warnings encountered. +- `preserveOwner` If true, tar will set the `uid` and `gid` of + extracted entries to the `uid` and `gid` fields in the archive. + This defaults to true when run as root, and false otherwise. If + false, then files and directories will be set with the owner and + group of the user running the process. This is similar to `-p` in + `tar(1)`, but ACLs and other system-specific data is never unpacked + in this implementation, and modes are set by default already. + [Alias: `p`] +- `uid` Set to a number to force ownership of all extracted files and + folders, and all implicitly created directories, to be owned by the + specified user id, regardless of the `uid` field in the archive. + Cannot be used along with `preserveOwner`. Requires also setting a + `gid` option. +- `gid` Set to a number to force ownership of all extracted files and + folders, and all implicitly created directories, to be owned by the + specified group id, regardless of the `gid` field in the archive. + Cannot be used along with `preserveOwner`. Requires also setting a + `uid` option. + +The following options are mostly internal, but can be modified in some +advanced use cases, such as re-using caches between runs. + +- `maxReadSize` The maximum buffer size for `fs.read()` operations. + Defaults to 16 MB. +- `umask` Filter the modes of entries like `process.umask()`. +- `dmode` Default mode for directories +- `fmode` Default mode for files +- `dirCache` A Map object of which directories exist. +- `maxMetaEntrySize` The maximum size of meta entries that is + supported. Defaults to 1 MB. + +### tar.t(options, fileList, callback) [alias: tar.list] + +List the contents of a tarball archive. + +The `fileList` is an array of paths to list from the tarball. If +no paths are provided, then all the entries are listed. + +If the archive is gzipped, then tar will detect this and unzip it. + +Returns an event emitter that emits `entry` events with +`tar.ReadEntry` objects. However, they don't emit `'data'` or `'end'` +events. (If you want to get actual readable entries, use the +`tar.Parse` class instead.) + +The following options are supported: + +- `cwd` Extract files relative to the specified directory. Defaults + to `process.cwd()`. [Alias: `C`] +- `file` The archive file to list. If not specified, then a + Writable stream is returned where the archive data should be + written. [Alias: `f`] +- `sync` Read the specified file synchronously. (This has no effect + when a file option isn't specified, because entries are emitted as + fast as they are parsed from the stream anyway.) 
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `filter` A function that gets called with `(path, entry)` for each
+  entry being listed. Return `true` to emit the entry from the
+  archive, or `false` to skip it.
+- `onentry` A function that gets called with `(entry)` for each entry
+  that passes the filter. This is important when both `file` and
+  `sync` are set, because it will be called synchronously.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+  Defaults to 16 MB.
+- `noResume` By default, `entry` streams are resumed immediately after
+  the call to `onentry`. Set `noResume: true` to suppress this
+  behavior. Note that by opting into this, the stream will never
+  complete until the entry data is consumed.
+
+### tar.u(options, fileList, callback) [alias: tar.update]
+
+Add files to an archive if they are newer than the entry already in
+the tarball.
+
+The `fileList` is an array of paths to add to the tarball. Adding a
+directory also adds its children recursively.
+
+An entry in `fileList` that starts with an `@` symbol is a tar archive
+whose entries will be added. To add a file that starts with `@`,
+prepend it with `./`.
+
+The following options are supported:
+
+- `file` Required. Write the tarball archive to the specified
+  filename. [Alias: `f`]
+- `sync` Act synchronously. If this is set, then any provided file
+  will be fully written after the call to `tar.u`.
+- `onwarn` A function that will get called with `(message, data)` for
+  any warnings encountered.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for adding entries to the
+  archive. Defaults to `process.cwd()`. [Alias: `C`]
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+  object with settings for `zlib.Gzip()` [Alias: `z`]
+- `filter` A function that gets called with `(path, stat)` for each
+  entry being added. Return `true` to add the entry to the archive,
+  or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+  `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+  time-based operations.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+  from absolute paths. [Alias: `P`]
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+  Defaults to 16 MB.
+- `noDirRecurse` Do not recursively archive the contents of
+  directories. [Alias: `n`]
+- `follow` Set to true to pack the targets of symbolic links. Without
+  this option, symbolic links are archived as such. [Alias: `L`, `h`]
+- `noPax` Suppress pax extended headers. Note that this means that
+  long paths and linkpaths will be truncated, and large or negative
+  numeric values may be interpreted incorrectly.
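+
+For example, a minimal sketch of an update (the archive name is
+hypothetical, and the archive must already exist):
+
+```js
+// Re-add entries only where the on-disk file is newer.
+tar.u(
+  { file: 'my-tarball.tgz' },
+  ['some', 'files']
+).then(_ => { .. archive updated .. })
+```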
+
+### tar.r(options, fileList, callback) [alias: tar.replace]
+
+Add files to an existing archive. Because later entries override
+earlier entries, this effectively replaces any existing entries.
+
+The `fileList` is an array of paths to add to the tarball. Adding a
+directory also adds its children recursively.
+
+An entry in `fileList` that starts with an `@` symbol is a tar archive
+whose entries will be added. To add a file that starts with `@`,
+prepend it with `./`.
+
+The following options are supported:
+
+- `file` Required. Write the tarball archive to the specified
+  filename. [Alias: `f`]
+- `sync` Act synchronously. If this is set, then any provided file
+  will be fully written after the call to `tar.r`.
+- `onwarn` A function that will get called with `(message, data)` for
+  any warnings encountered.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for adding entries to the
+  archive. Defaults to `process.cwd()`. [Alias: `C`]
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+  object with settings for `zlib.Gzip()` [Alias: `z`]
+- `filter` A function that gets called with `(path, stat)` for each
+  entry being added. Return `true` to add the entry to the archive,
+  or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+  `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+  time-based operations.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+  from absolute paths. [Alias: `P`]
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+  Defaults to 16 MB.
+- `noDirRecurse` Do not recursively archive the contents of
+  directories. [Alias: `n`]
+- `follow` Set to true to pack the targets of symbolic links. Without
+  this option, symbolic links are archived as such. [Alias: `L`, `h`]
+- `noPax` Suppress pax extended headers. Note that this means that
+  long paths and linkpaths will be truncated, and large or negative
+  numeric values may be interpreted incorrectly.
+
+## Low-Level API
+
+### class tar.Pack
+
+A readable tar stream.
+
+Has all the standard readable stream interface stuff. `'data'` and
+`'end'` events, `read()` method, `pause()` and `resume()`, etc.
+
+#### constructor(options)
+
+The following options are supported:
+
+- `onwarn` A function that will get called with `(message, data)` for
+  any warnings encountered.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `cwd` The current working directory for creating the archive.
+  Defaults to `process.cwd()`.
+- `prefix` A path portion to prefix onto the entries in the archive.
+- `gzip` Set to any truthy value to create a gzipped archive, or an
+  object with settings for `zlib.Gzip()`
+- `filter` A function that gets called with `(path, stat)` for each
+  entry being added. Return `true` to add the entry to the archive,
+  or `false` to omit it.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+  `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+  time-based operations.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+  from absolute paths.
+- `linkCache` A Map object containing the device and inode value for
+  any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls to `lstat`.
+- `readdirCache` A Map object that caches calls to `readdir`.
+- `jobs` A number specifying how many concurrent jobs to run.
+  Defaults to 4.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+  Defaults to 16 MB.
+- `noDirRecurse` Do not recursively archive the contents of
+  directories.
+- `follow` Set to true to pack the targets of symbolic links. Without
+  this option, symbolic links are archived as such.
+- `noPax` Suppress pax extended headers.
Note that this means that + long paths and linkpaths will be truncated, and large or negative + numeric values may be interpreted incorrectly. + +#### add(path) + +Adds an entry to the archive. Returns the Pack stream. + +#### write(path) + +Adds an entry to the archive. Returns true if flushed. + +#### end() + +Finishes the archive. + +### class tar.Pack.Sync + +Synchronous version of `tar.Pack`. + +### class tar.Unpack + +A writable stream that unpacks a tar archive onto the file system. + +All the normal writable stream stuff is supported. `write()` and +`end()` methods, `'drain'` events, etc. + +Note that all directories that are created will be forced to be +writable, readable, and listable by their owner, to avoid cases where +a directory prevents extraction of child entries by virtue of its +mode. + +`'close'` is emitted when it's done writing stuff to the file system. + +Most unpack errors will cause a `warn` event to be emitted. If the +`cwd` is missing, or not a directory, then an error will be emitted. + +#### constructor(options) + +- `cwd` Extract files relative to the specified directory. Defaults + to `process.cwd()`. If provided, this must exist and must be a + directory. +- `filter` A function that gets called with `(path, entry)` for each + entry being unpacked. Return `true` to unpack the entry from the + archive, or `false` to skip it. +- `newer` Set to true to keep the existing file on disk if it's newer + than the file in the archive. +- `keep` Do not overwrite existing files. In particular, if a file + appears more than once in an archive, later copies will not + overwrite earlier copies. +- `preservePaths` Allow absolute paths, paths containing `..`, and + extracting through symbolic links. By default, `/` is stripped from + absolute paths, `..` paths are not extracted, and any file whose + location would be modified by a symbolic link is not extracted. +- `unlink` Unlink files before creating them. Without this option, + tar overwrites existing files, which preserves existing hardlinks. + With this option, existing hardlinks will be broken, as will any + symlink that would affect the location of an extracted file. +- `strip` Remove the specified number of leading path elements. + Pathnames with fewer elements will be silently skipped. Note that + the pathname is edited after applying the filter, but before + security checks. +- `onwarn` A function that will get called with `(message, data)` for + any warnings encountered. +- `umask` Filter the modes of entries like `process.umask()`. +- `dmode` Default mode for directories +- `fmode` Default mode for files +- `dirCache` A Map object of which directories exist. +- `maxMetaEntrySize` The maximum size of meta entries that is + supported. Defaults to 1 MB. +- `preserveOwner` If true, tar will set the `uid` and `gid` of + extracted entries to the `uid` and `gid` fields in the archive. + This defaults to true when run as root, and false otherwise. If + false, then files and directories will be set with the owner and + group of the user running the process. This is similar to `-p` in + `tar(1)`, but ACLs and other system-specific data is never unpacked + in this implementation, and modes are set by default already. +- `win32` True if on a windows platform. Causes behavior where + filenames containing `<|>?` chars are converted to + windows-compatible values while being unpacked. 
+- `uid` Set to a number to force ownership of all extracted files and
+  folders, and all implicitly created directories, to be owned by the
+  specified user id, regardless of the `uid` field in the archive.
+  Cannot be used along with `preserveOwner`. Requires also setting a
+  `gid` option.
+- `gid` Set to a number to force ownership of all extracted files and
+  folders, and all implicitly created directories, to be owned by the
+  specified group id, regardless of the `gid` field in the archive.
+  Cannot be used along with `preserveOwner`. Requires also setting a
+  `uid` option.
+
+### class tar.Unpack.Sync
+
+Synchronous version of `tar.Unpack`.
+
+### class tar.Parse
+
+A writable stream that parses a tar archive stream. All the standard
+writable stream stuff is supported.
+
+If the archive is gzipped, then tar will detect this and unzip it.
+
+Emits `'entry'` events with `tar.ReadEntry` objects, which are
+themselves readable streams that you can pipe wherever.
+
+Each `entry` will not emit until the one before it is flushed through,
+so make sure to either consume the data (with `on('data', ...)` or
+`.pipe(...)`) or throw it away with `.resume()` to keep the stream
+flowing.
+
+#### constructor(options)
+
+Returns an event emitter that emits `entry` events with
+`tar.ReadEntry` objects.
+
+The following options are supported:
+
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `filter` A function that gets called with `(path, entry)` for each
+  entry being listed. Return `true` to emit the entry from the
+  archive, or `false` to skip it.
+- `onentry` A function that gets called with `(entry)` for each entry
+  that passes the filter.
+- `onwarn` A function that will get called with `(message, data)` for
+  any warnings encountered.
+
+#### abort(message, error)
+
+Stop all parsing activities. This is called when there are zlib
+errors. It also emits a warning with the message and error provided.
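+
+For example, a minimal sketch of consuming a parse stream directly
+(the archive name is hypothetical):
+
+```js
+const fs = require('fs')
+const tar = require('tar')
+
+fs.createReadStream('my-tarball.tgz')
+  .pipe(new tar.Parse())
+  .on('entry', entry => {
+    console.log(entry.path, entry.size)
+    entry.resume() // discard the body so the stream keeps flowing
+  })
+```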
+
+### class tar.ReadEntry extends [MiniPass](http://npm.im/minipass)
+
+A representation of an entry that is being read out of a tar archive.
+
+It has the following fields:
+
+- `extended` The extended metadata object provided to the constructor.
+- `globalExtended` The global extended metadata object provided to the
+  constructor.
+- `remain` The number of bytes remaining to be written into the
+  stream.
+- `blockRemain` The number of 512-byte blocks remaining to be written
+  into the stream.
+- `ignore` Whether this entry should be ignored.
+- `meta` True if this represents metadata about the next entry, false
+  if it represents a filesystem object.
+- All the fields from the header, extended header, and global extended
+  header are added to the ReadEntry object. So it has `path`, `type`,
+  `size`, `mode`, and so on.
+
+#### constructor(header, extended, globalExtended)
+
+Create a new ReadEntry object with the specified header, extended
+header, and global extended header values.
+
+### class tar.WriteEntry extends [MiniPass](http://npm.im/minipass)
+
+A representation of an entry that is being written from the file
+system into a tar archive.
+
+Emits data for the Header, and for the Pax Extended Header if one is
+required, as well as any body data.
+
+Creating a WriteEntry for a directory does not also create
+WriteEntry objects for all of the directory contents.
+
+It has the following fields:
+
+- `path` The path field that will be written to the archive. By
+  default, this is also the path from the cwd to the file system
+  object.
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+  `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+  time-based operations.
+- `myuid` If supported, the uid of the user running the current
+  process.
+- `myuser` The `env.USER` string if set, or `''`. Set as the entry
+  `uname` field if the file's `uid` matches `this.myuid`.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+  Defaults to 1 MB.
+- `linkCache` A Map object containing the device and inode value for
+  any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls to `lstat`.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+  from absolute paths.
+- `cwd` The current working directory for creating the archive.
+  Defaults to `process.cwd()`.
+- `absolute` The absolute path to the entry on the filesystem. By
+  default, this is `path.resolve(this.cwd, this.path)`, but it can be
+  overridden explicitly.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `win32` True if on a windows platform. Causes behavior where paths
+  replace `\` with `/` and filenames containing the windows-compatible
+  forms of `<|>?:` characters are converted to actual `<|>?:` characters
+  in the archive.
+- `noPax` Suppress pax extended headers. Note that this means that
+  long paths and linkpaths will be truncated, and large or negative
+  numeric values may be interpreted incorrectly.
+
+#### constructor(path, options)
+
+`path` is the path of the entry as it is written in the archive.
+
+The following options are supported:
+
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+  `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+  time-based operations.
+- `maxReadSize` The maximum buffer size for `fs.read()` operations.
+  Defaults to 1 MB.
+- `linkCache` A Map object containing the device and inode value for
+  any file whose nlink is > 1, to identify hard links.
+- `statCache` A Map object that caches calls to `lstat`.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+  from absolute paths.
+- `cwd` The current working directory for creating the archive.
+  Defaults to `process.cwd()`.
+- `absolute` The absolute path to the entry on the filesystem. By
+  default, this is `path.resolve(this.cwd, this.path)`, but it can be
+  overridden explicitly.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `win32` True if on a windows platform. Causes behavior where paths
+  replace `\` with `/`.
+- `onwarn` A function that will get called with `(message, data)` for
+  any warnings encountered.
+
+#### warn(message, data)
+
+If strict, emit an error with the provided message.
+
+Otherwise, emit a `'warn'` event with the provided message and data.
+
+### class tar.WriteEntry.Sync
+
+Synchronous version of tar.WriteEntry.
+
+### class tar.WriteEntry.Tar
+
+A version of tar.WriteEntry that gets its data from a tar.ReadEntry
+instead of from the filesystem.
+
+#### constructor(readEntry, options)
+
+`readEntry` is the entry being read out of another archive.
+
+The following options are supported:
+
+- `portable` Omit metadata that is system-specific: `ctime`, `atime`,
+  `uid`, `gid`, `uname`, `gname`, `dev`, `ino`, and `nlink`. Note
+  that `mtime` is still included, because this is necessary for other
+  time-based operations.
+- `preservePaths` Allow absolute paths. By default, `/` is stripped
+  from absolute paths.
+- `strict` Treat warnings as crash-worthy errors. Default false.
+- `onwarn` A function that will get called with `(message, data)` for
+  any warnings encountered.
+
+### class tar.Header
+
+A class for reading and writing header blocks.
+
+It has the following fields:
+
+- `nullBlock` True if decoding a block which is entirely composed of
+  `0x00` null bytes. (Useful because tar files are terminated by
+  at least 2 null blocks.)
+- `cksumValid` True if the checksum in the header is valid, false
+  otherwise.
+- `needPax` True if the values, as encoded, will require a Pax
+  extended header.
+- `path` The path of the entry.
+- `mode` The 4 lowest-order octal digits of the file mode. That is,
+  read/write/execute permissions for world, group, and owner, and the
+  setuid, setgid, and sticky bits.
+- `uid` Numeric user id of the file owner.
+- `gid` Numeric group id of the file owner.
+- `size` Size of the file in bytes.
+- `mtime` Modified time of the file.
+- `cksum` The checksum of the header. This is generated by adding all
+  the bytes of the header block, treating the checksum field itself as
+  all ascii space characters (that is, `0x20`).
+- `type` The human-readable name of the type of entry this represents,
+  or the alphanumeric key if unknown.
+- `typeKey` The alphanumeric key for the type of entry this header
+  represents.
+- `linkpath` The target of Link and SymbolicLink entries.
+- `uname` Human-readable user name of the file owner.
+- `gname` Human-readable group name of the file owner.
+- `devmaj` The major portion of the device number. Always `0` for
+  files, directories, and links.
+- `devmin` The minor portion of the device number. Always `0` for
+  files, directories, and links.
+- `atime` File access time.
+- `ctime` File change time.
+
+#### constructor(data, [offset=0])
+
+`data` is optional. It is either a Buffer that should be interpreted
+as a tar Header starting at the specified offset and continuing for
+512 bytes, or a data object of keys and values to set on the header
+object, and eventually encode as a tar Header.
+
+#### decode(block, offset)
+
+Decode the provided buffer starting at the specified offset.
+
+The buffer must be at least `offset + 512` bytes long.
+
+#### set(data)
+
+Set the fields in the data object.
+
+#### encode(buffer, offset)
+
+Encode the header fields into the buffer at the specified offset.
+
+Returns `this.needPax` to indicate whether a Pax Extended Header is
+required to properly encode the specified data.
+
+### class tar.Pax
+
+An object representing a set of key-value pairs in a Pax extended
+header entry.
+
+It has the following fields. Where the same name is used, they have
+the same semantics as the tar.Header field of the same name.
+
+- `global` True if this represents a global extended header, or false
+  if it is for a single entry.
+- `atime`
+- `charset`
+- `comment`
+- `ctime`
+- `gid`
+- `gname`
+- `linkpath`
+- `mtime`
+- `path`
+- `size`
+- `uid`
+- `uname`
+- `dev`
+- `ino`
+- `nlink`
+
+#### constructor(object, global)
+
+Set the fields set in the object. `global` is a boolean that defaults
+to false.
+
+#### encode()
+
+Return a Buffer containing the header and body for the Pax extended
+header entry, or `null` if there is nothing to encode.
+
+#### encodeBody()
+
+Return a string representing the body of the pax extended header
+entry.
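+
+For example, a minimal sketch of encoding a Pax entry by hand (the
+path and size values are hypothetical):
+
+```js
+const tar = require('tar')
+
+const pax = new tar.Pax({ path: 'a/rather/long/path/name.txt', size: 1024 })
+
+// The body is a series of ' key=value' records, each prefixed with
+// its byte length; encode() wraps it in a full pax header block.
+const body = pax.encodeBody()
+const block = pax.encode() // Buffer, or null if nothing to encode
+```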
+ +#### encodeField(fieldName) + +Return a string representing the key/value encoding for the specified +fieldName, or `''` if the field is unset. + +### tar.Pax.parse(string, extended, global) + +Return a new Pax object created by parsing the contents of the string +provided. + +If the `extended` object is set, then also add the fields from that +object. (This is necessary because multiple metadata entries can +occur in sequence.) + +### tar.types + +A translation table for the `type` field in tar headers. + +#### tar.types.name.get(code) + +Get the human-readable name for a given alphanumeric code. -### tar.Parse() +#### tar.types.code.get(name) -Returns a writable stream. Write tar data to it and it will emit -`entry` events for each entry parsed from the tarball. This is used by -`tar.Extract`. +Get the alphanumeric code for a given human-readable name. diff --git a/node_modules/pacote/node_modules/tar/index.js b/node_modules/tar/index.js similarity index 100% rename from node_modules/pacote/node_modules/tar/index.js rename to node_modules/tar/index.js diff --git a/node_modules/pacote/node_modules/tar/lib/create.js b/node_modules/tar/lib/create.js similarity index 100% rename from node_modules/pacote/node_modules/tar/lib/create.js rename to node_modules/tar/lib/create.js diff --git a/node_modules/tar/lib/extract.js b/node_modules/tar/lib/extract.js index fe1bb976eb0..53ecf67894c 100644 --- a/node_modules/tar/lib/extract.js +++ b/node_modules/tar/lib/extract.js @@ -1,94 +1,127 @@ -// give it a tarball and a path, and it'll dump the contents +'use strict' -module.exports = Extract +// tar -x +const hlo = require('./high-level-opt.js') +const Unpack = require('./unpack.js') +const fs = require('fs') +const path = require('path') -var tar = require("../tar.js") - , fstream = require("fstream") - , inherits = require("inherits") - , path = require("path") +const x = module.exports = (opt_, files, cb) => { + if (typeof opt_ === 'function') + cb = opt_, files = null, opt_ = {} + else if (Array.isArray(opt_)) + files = opt_, opt_ = {} -function Extract (opts) { - if (!(this instanceof Extract)) return new Extract(opts) - tar.Parse.apply(this) + if (typeof files === 'function') + cb = files, files = null - if (typeof opts !== "object") { - opts = { path: opts } - } + if (!files) + files = [] + else + files = Array.from(files) - // better to drop in cwd? seems more standard. - opts.path = opts.path || path.resolve("node-tar-extract") - opts.type = "Directory" - opts.Directory = true - - // similar to --strip or --strip-components - opts.strip = +opts.strip - if (!opts.strip || opts.strip <= 0) opts.strip = 0 - - this._fst = fstream.Writer(opts) - - this.pause() - var me = this - - // Hardlinks in tarballs are relative to the root - // of the tarball. So, they need to be resolved against - // the target directory in order to be created properly. - me.on("entry", function (entry) { - // if there's a "strip" argument, then strip off that many - // path components. 
- if (opts.strip) { - var p = entry.path.split("/").slice(opts.strip).join("/") - entry.path = entry.props.path = p - if (entry.linkpath) { - var lp = entry.linkpath.split("/").slice(opts.strip).join("/") - entry.linkpath = entry.props.linkpath = lp - } - } - if (entry.type === "Link") { - entry.linkpath = entry.props.linkpath = - path.join(opts.path, path.join("/", entry.props.linkpath)) - } + const opt = hlo(opt_) - if (entry.type === "SymbolicLink") { - var dn = path.dirname(entry.path) || "" - var linkpath = entry.props.linkpath - var target = path.resolve(opts.path, dn, linkpath) - if (target.indexOf(opts.path) !== 0) { - linkpath = path.join(opts.path, path.join("/", linkpath)) - } - entry.linkpath = entry.props.linkpath = linkpath - } - }) + if (opt.sync && typeof cb === 'function') + throw new TypeError('callback not supported for sync tar functions') - this._fst.on("ready", function () { - me.pipe(me._fst, { end: false }) - me.resume() - }) + if (!opt.file && typeof cb === 'function') + throw new TypeError('callback only supported with file option') - this._fst.on('error', function(err) { - me.emit('error', err) - }) + if (files.length) + filesFilter(opt, files) - this._fst.on('drain', function() { - me.emit('drain') - }) + return opt.file && opt.sync ? extractFileSync(opt) + : opt.file ? extractFile(opt, cb) + : opt.sync ? extractSync(opt) + : extract(opt) +} + +// construct a filter that limits the file entries listed +// include child entries if a dir is included +const filesFilter = (opt, files) => { + const map = new Map(files.map(f => [f.replace(/\/+$/, ''), true])) + const filter = opt.filter - // this._fst.on("end", function () { - // console.error("\nEEEE Extract End", me._fst.path) - // }) + const mapHas = (file, r) => { + const root = r || path.parse(file).root || '.' + const ret = file === root ? false + : map.has(file) ? map.get(file) + : mapHas(path.dirname(file), root) - this._fst.on("close", function () { - // console.error("\nEEEE Extract End", me._fst.path) - me.emit("finish") - me.emit("end") - me.emit("close") + map.set(file, ret) + return ret + } + + opt.filter = filter + ? (file, entry) => filter(file, entry) && mapHas(file.replace(/\/+$/, '')) + : file => mapHas(file.replace(/\/+$/, '')) +} + +const extractFileSync = opt => { + const u = new Unpack.Sync(opt) + + const file = opt.file + let threw = true + let fd + try { + const stat = fs.statSync(file) + const readSize = opt.maxReadSize || 16*1024*1024 + if (stat.size < readSize) + u.end(fs.readFileSync(file)) + else { + let pos = 0 + const buf = Buffer.allocUnsafe(readSize) + fd = fs.openSync(file, 'r') + while (pos < stat.size) { + let bytesRead = fs.readSync(fd, buf, 0, readSize, pos) + pos += bytesRead + u.write(buf.slice(0, bytesRead)) + } + u.end() + fs.closeSync(fd) + } + threw = false + } finally { + if (threw && fd) + try { fs.closeSync(fd) } catch (er) {} + } +} + +const extractFile = (opt, cb) => { + const u = new Unpack(opt) + const readSize = opt.maxReadSize || 16*1024*1024 + + const file = opt.file + const p = new Promise((resolve, reject) => { + u.on('error', reject) + u.on('close', resolve) + + fs.stat(file, (er, stat) => { + if (er) + reject(er) + else if (stat.size < readSize) + fs.readFile(file, (er, data) => { + if (er) + return reject(er) + u.end(data) + }) + else { + const stream = fs.createReadStream(file, { + highWaterMark: readSize + }) + stream.on('error', reject) + stream.pipe(u) + } + }) }) + return cb ? 
p.then(cb, cb) : p } -inherits(Extract, tar.Parse) +const extractSync = opt => { + return new Unpack.Sync(opt) +} -Extract.prototype._streamEnd = function () { - var me = this - if (!me._ended || me._entry) me.error("unexpected eof") - me._fst.end() - // my .end() is coming later. +const extract = opt => { + return new Unpack(opt) } diff --git a/node_modules/tar/lib/header.js b/node_modules/tar/lib/header.js index 05b237c0c7b..db002e8c188 100644 --- a/node_modules/tar/lib/header.js +++ b/node_modules/tar/lib/header.js @@ -1,385 +1,272 @@ +'use strict' // parse a 512-byte header block to a data object, or vice-versa -// If the data won't fit nicely in a simple header, then generate -// the appropriate extended header file, and return that. - -module.exports = TarHeader - -var tar = require("../tar.js") - , fields = tar.fields - , fieldOffs = tar.fieldOffs - , fieldEnds = tar.fieldEnds - , fieldSize = tar.fieldSize - , numeric = tar.numeric - , assert = require("assert").ok - , space = " ".charCodeAt(0) - , slash = "/".charCodeAt(0) - , bslash = process.platform === "win32" ? "\\".charCodeAt(0) : null - -function TarHeader (block) { - if (!(this instanceof TarHeader)) return new TarHeader(block) - if (block) this.decode(block) -} - -TarHeader.prototype = - { decode : decode - , encode: encode - , calcSum: calcSum - , checkSum: checkSum +// encode returns `true` if a pax extended header is needed, because +// the data could not be faithfully encoded in a simple header. +// (Also, check header.needPax to see if it needs a pax header.) + +const types = require('./types.js') +const pathModule = require('path') +const large = require('./large-numbers.js') + +const TYPE = Symbol('type') + +class Header { + constructor (data, off) { + this.cksumValid = false + this.needPax = false + this.nullBlock = false + + this.block = null + this.path = null + this.mode = null + this.uid = null + this.gid = null + this.size = null + this.mtime = null + this.cksum = null + this[TYPE] = '0' + this.linkpath = null + this.uname = null + this.gname = null + this.devmaj = 0 + this.devmin = 0 + this.atime = null + this.ctime = null + + if (Buffer.isBuffer(data)) { + this.decode(data, off || 0) + } else if (data) + this.set(data) } -TarHeader.parseNumeric = parseNumeric -TarHeader.encode = encode -TarHeader.decode = decode - -// note that this will only do the normal ustar header, not any kind -// of extended posix header file. If something doesn't fit comfortably, -// then it will set obj.needExtended = true, and set the block to -// the closest approximation. -function encode (obj) { - if (!obj && !(this instanceof TarHeader)) throw new Error( - "encode must be called on a TarHeader, or supplied an object") - - obj = obj || this - var block = obj.block = new Buffer(512) - - // if the object has a "prefix", then that's actually an extension of - // the path field. 
- if (obj.prefix) { - // console.error("%% header encoding, got a prefix", obj.prefix) - obj.path = obj.prefix + "/" + obj.path - // console.error("%% header encoding, prefixed path", obj.path) - obj.prefix = "" - } - - obj.needExtended = false - - if (obj.mode) { - if (typeof obj.mode === "string") obj.mode = parseInt(obj.mode, 8) - obj.mode = obj.mode & 0777 - } + decode (buf, off) { + if (!off) + off = 0 + + if (!buf || !(buf.length >= off + 512)) + throw new Error('need 512 bytes for header') + + this.path = decString(buf, off, 100) + this.mode = decNumber(buf, off + 100, 8) + this.uid = decNumber(buf, off + 108, 8) + this.gid = decNumber(buf, off + 116, 8) + this.size = decNumber(buf, off + 124, 12) + this.mtime = decDate(buf, off + 136, 12) + this.cksum = decNumber(buf, off + 148, 12) + + // old tar versions marked dirs as a file with a trailing / + this[TYPE] = decString(buf, off + 156, 1) + if (this[TYPE] === '') + this[TYPE] = '0' + if (this[TYPE] === '0' && this.path.substr(-1) === '/') + this[TYPE] = '5' + + // tar implementations sometimes incorrectly put the stat(dir).size + // as the size in the tarball, even though Directory entries are + // not able to have any body at all. In the very rare chance that + // it actually DOES have a body, we weren't going to do anything with + // it anyway, and it'll just be a warning about an invalid header. + if (this[TYPE] === '5') + this.size = 0 + + this.linkpath = decString(buf, off + 157, 100) + if (buf.slice(off + 257, off + 265).toString() === 'ustar\u000000') { + this.uname = decString(buf, off + 265, 32) + this.gname = decString(buf, off + 297, 32) + this.devmaj = decNumber(buf, off + 329, 8) + this.devmin = decNumber(buf, off + 337, 8) + if (buf[off + 475] !== 0) { + // definitely a prefix, definitely >130 chars. + const prefix = decString(buf, off + 345, 155) + this.path = prefix + '/' + this.path + } else { + const prefix = decString(buf, off + 345, 130) + if (prefix) + this.path = prefix + '/' + this.path + this.atime = decDate(buf, off + 476, 12) + this.ctime = decDate(buf, off + 488, 12) + } + } - for (var f = 0; fields[f] !== null; f ++) { - var field = fields[f] - , off = fieldOffs[f] - , end = fieldEnds[f] - , ret - - switch (field) { - case "cksum": - // special, done below, after all the others - break - - case "prefix": - // special, this is an extension of the "path" field. - // console.error("%% header encoding, skip prefix later") - break - - case "type": - // convert from long name to a single char. - var type = obj.type || "0" - if (type.length > 1) { - type = tar.types[obj.type] - if (!type) type = "0" - } - writeText(block, off, end, type) - break - - case "path": - // uses the "prefix" field if > 100 bytes, but <= 255 - var pathLen = Buffer.byteLength(obj.path) - , pathFSize = fieldSize[fields.path] - , prefFSize = fieldSize[fields.prefix] - - // paths between 100 and 255 should use the prefix field. 
- // longer than 255 - if (pathLen > pathFSize && - pathLen <= pathFSize + prefFSize) { - // need to find a slash somewhere in the middle so that - // path and prefix both fit in their respective fields - var searchStart = pathLen - 1 - pathFSize - , searchEnd = prefFSize - , found = false - , pathBuf = new Buffer(obj.path) - - for ( var s = searchStart - ; (s <= searchEnd) - ; s ++ ) { - if (pathBuf[s] === slash || pathBuf[s] === bslash) { - found = s - break - } - } - - if (found !== false) { - prefix = pathBuf.slice(0, found).toString("utf8") - path = pathBuf.slice(found + 1).toString("utf8") - - ret = writeText(block, off, end, path) - off = fieldOffs[fields.prefix] - end = fieldEnds[fields.prefix] - // console.error("%% header writing prefix", off, end, prefix) - ret = writeText(block, off, end, prefix) || ret - break - } - } - - // paths less than 100 chars don't need a prefix - // and paths longer than 255 need an extended header and will fail - // on old implementations no matter what we do here. - // Null out the prefix, and fallthrough to default. - // console.error("%% header writing no prefix") - var poff = fieldOffs[fields.prefix] - , pend = fieldEnds[fields.prefix] - writeText(block, poff, pend, "") - // fallthrough - - // all other fields are numeric or text - default: - ret = numeric[field] - ? writeNumeric(block, off, end, obj[field]) - : writeText(block, off, end, obj[field] || "") - break + let sum = 8 * 0x20 + for (let i = off; i < off + 148; i++) { + sum += buf[i] + } + for (let i = off + 156; i < off + 512; i++) { + sum += buf[i] } - obj.needExtended = obj.needExtended || ret + this.cksumValid = sum === this.cksum + if (this.cksum === null && sum === 8 * 0x20) + this.nullBlock = true } - var off = fieldOffs[fields.cksum] - , end = fieldEnds[fields.cksum] - - writeNumeric(block, off, end, calcSum.call(this, block)) + encode (buf, off) { + if (!buf) { + buf = this.block = Buffer.alloc(512) + off = 0 + } - return block -} + if (!off) + off = 0 + + if (!(buf.length >= off + 512)) + throw new Error('need 512 bytes for header') + + const prefixSize = this.ctime || this.atime ? 
130 : 155 + const split = splitPrefix(this.path || '', prefixSize) + const path = split[0] + const prefix = split[1] + this.needPax = split[2] + + this.needPax = encString(buf, off, 100, path) || this.needPax + this.needPax = encNumber(buf, off + 100, 8, this.mode) || this.needPax + this.needPax = encNumber(buf, off + 108, 8, this.uid) || this.needPax + this.needPax = encNumber(buf, off + 116, 8, this.gid) || this.needPax + this.needPax = encNumber(buf, off + 124, 12, this.size) || this.needPax + this.needPax = encDate(buf, off + 136, 12, this.mtime) || this.needPax + buf[off + 156] = this[TYPE].charCodeAt(0) + this.needPax = encString(buf, off + 157, 100, this.linkpath) || this.needPax + buf.write('ustar\u000000', off + 257, 8) + this.needPax = encString(buf, off + 265, 32, this.uname) || this.needPax + this.needPax = encString(buf, off + 297, 32, this.gname) || this.needPax + this.needPax = encNumber(buf, off + 329, 8, this.devmaj) || this.needPax + this.needPax = encNumber(buf, off + 337, 8, this.devmin) || this.needPax + this.needPax = encString(buf, off + 345, prefixSize, prefix) || this.needPax + if (buf[off + 475] !== 0) + this.needPax = encString(buf, off + 345, 155, prefix) || this.needPax + else { + this.needPax = encString(buf, off + 345, 130, prefix) || this.needPax + this.needPax = encDate(buf, off + 476, 12, this.atime) || this.needPax + this.needPax = encDate(buf, off + 488, 12, this.ctime) || this.needPax + } -// if it's a negative number, or greater than will fit, -// then use write256. -var MAXNUM = { 12: 077777777777 - , 11: 07777777777 - , 8 : 07777777 - , 7 : 0777777 } -function writeNumeric (block, off, end, num) { - var writeLen = end - off - , maxNum = MAXNUM[writeLen] || 0 - - num = num || 0 - // console.error(" numeric", num) - - if (num instanceof Date || - Object.prototype.toString.call(num) === "[object Date]") { - num = num.getTime() / 1000 - } + let sum = 8 * 0x20 + for (let i = off; i < off + 148; i++) { + sum += buf[i] + } + for (let i = off + 156; i < off + 512; i++) { + sum += buf[i] + } + this.cksum = sum + encNumber(buf, off + 148, 8, this.cksum) + this.cksumValid = true - if (num > maxNum || num < 0) { - write256(block, off, end, num) - // need an extended header if negative or too big. - return true + return this.needPax } - // god, tar is so annoying - // if the string is small enough, you should put a space - // between the octal string and the \0, but if it doesn't - // fit, then don't. - var numStr = Math.floor(num).toString(8) - if (num < MAXNUM[writeLen - 1]) numStr += " " - - // pad with "0" chars - if (numStr.length < writeLen) { - numStr = (new Array(writeLen - numStr.length).join("0")) + numStr + set (data) { + for (let i in data) { + if (data[i] !== null && data[i] !== undefined) + this[i] = data[i] + } } - if (numStr.length !== writeLen - 1) { - throw new Error("invalid length: " + JSON.stringify(numStr) + "\n" + - "expected: "+writeLen) + get type () { + return types.name.get(this[TYPE]) || this[TYPE] } - block.write(numStr, off, writeLen, "utf8") - block[end - 1] = 0 -} -function write256 (block, off, end, num) { - var buf = block.slice(off, end) - var positive = num >= 0 - buf[0] = positive ? 0x80 : 0xFF - - // get the number as a base-256 tuple - if (!positive) num *= -1 - var tuple = [] - do { - var n = num % 256 - tuple.push(n) - num = (num - n) / 256 - } while (num) - - var bytes = tuple.length - - var fill = buf.length - bytes - for (var i = 1; i < fill; i ++) { - buf[i] = positive ? 
0 : 0xFF + get typeKey () { + return this[TYPE] } - // tuple is a base256 number, with [0] as the *least* significant byte - // if it's negative, then we need to flip all the bits once we hit the - // first non-zero bit. The 2's-complement is (0x100 - n), and the 1's- - // complement is (0xFF - n). - var zero = true - for (i = bytes; i > 0; i --) { - var byte = tuple[bytes - i] - if (positive) buf[fill + i] = byte - else if (zero && byte === 0) buf[fill + i] = 0 - else if (zero) { - zero = false - buf[fill + i] = 0x100 - byte - } else buf[fill + i] = 0xFF - byte + set type (type) { + if (types.code.has(type)) + this[TYPE] = types.code.get(type) + else + this[TYPE] = type } } -function writeText (block, off, end, str) { - // strings are written as utf8, then padded with \0 - var strLen = Buffer.byteLength(str) - , writeLen = Math.min(strLen, end - off) - // non-ascii fields need extended headers - // long fields get truncated - , needExtended = strLen !== str.length || strLen > writeLen - - // write the string, and null-pad - if (writeLen > 0) block.write(str, off, writeLen, "utf8") - for (var i = off + writeLen; i < end; i ++) block[i] = 0 - - return needExtended -} - -function calcSum (block) { - block = block || this.block - assert(Buffer.isBuffer(block) && block.length === 512) - - if (!block) throw new Error("Need block to checksum") - - // now figure out what it would be if the cksum was " " - var sum = 0 - , start = fieldOffs[fields.cksum] - , end = fieldEnds[fields.cksum] - - for (var i = 0; i < fieldOffs[fields.cksum]; i ++) { - sum += block[i] +const splitPrefix = (p, prefixSize) => { + const pathSize = 100 + let pp = p + let prefix = '' + let ret + const root = pathModule.parse(p).root || '.' + + if (Buffer.byteLength(pp) < pathSize) + ret = [pp, prefix, false] + else { + // first set prefix to the dir, and path to the base + prefix = pathModule.dirname(pp) + pp = pathModule.basename(pp) + + do { + // both fit! + if (Buffer.byteLength(pp) <= pathSize && + Buffer.byteLength(prefix) <= prefixSize) + ret = [pp, prefix, false] + + // prefix fits in prefix, but path doesn't fit in path + else if (Buffer.byteLength(pp) > pathSize && + Buffer.byteLength(prefix) <= prefixSize) + ret = [pp.substr(0, pathSize - 1), prefix, true] + + else { + // make path take a bit from prefix + pp = pathModule.join(pathModule.basename(prefix), pp) + prefix = pathModule.dirname(prefix) + } + } while (prefix !== root && !ret) + + // at this point, found no resolution, just truncate + if (!ret) + ret = [p.substr(0, pathSize - 1), '', true] } - - for (var i = start; i < end; i ++) { - sum += space - } - - for (var i = end; i < 512; i ++) { - sum += block[i] - } - - return sum + return ret } +const decString = (buf, off, size) => + buf.slice(off, off + size).toString('utf8').replace(/\0.*/, '') -function checkSum (block) { - var sum = calcSum.call(this, block) - block = block || this.block +const decDate = (buf, off, size) => + numToDate(decNumber(buf, off, size)) - var cksum = block.slice(fieldOffs[fields.cksum], fieldEnds[fields.cksum]) - cksum = parseNumeric(cksum) +const numToDate = num => num === null ? null : new Date(num * 1000) - return cksum === sum -} +const decNumber = (buf, off, size) => + buf[off] & 0x80 ? 
large.parse(buf.slice(off, off + size)) + : decSmallNumber(buf, off, size) -function decode (block) { - block = block || this.block - assert(Buffer.isBuffer(block) && block.length === 512) - - this.block = block - this.cksumValid = this.checkSum() - - var prefix = null - - // slice off each field. - for (var f = 0; fields[f] !== null; f ++) { - var field = fields[f] - , val = block.slice(fieldOffs[f], fieldEnds[f]) - - switch (field) { - case "ustar": - // if not ustar, then everything after that is just padding. - if (val.toString() !== "ustar\0") { - this.ustar = false - return - } else { - // console.error("ustar:", val, val.toString()) - this.ustar = val.toString() - } - break - - // prefix is special, since it might signal the xstar header - case "prefix": - var atime = parseNumeric(val.slice(131, 131 + 12)) - , ctime = parseNumeric(val.slice(131 + 12, 131 + 12 + 12)) - if ((val[130] === 0 || val[130] === space) && - typeof atime === "number" && - typeof ctime === "number" && - val[131 + 12] === space && - val[131 + 12 + 12] === space) { - this.atime = atime - this.ctime = ctime - val = val.slice(0, 130) - } - prefix = val.toString("utf8").replace(/\0+$/, "") - // console.error("%% header reading prefix", prefix) - break - - // all other fields are null-padding text - // or a number. - default: - if (numeric[field]) { - this[field] = parseNumeric(val) - } else { - this[field] = val.toString("utf8").replace(/\0+$/, "") - } - break - } - } +const nanNull = value => isNaN(value) ? null : value - // if we got a prefix, then prepend it to the path. - if (prefix) { - this.path = prefix + "/" + this.path - // console.error("%% header got a prefix", this.path) - } +const decSmallNumber = (buf, off, size) => + nanNull(parseInt( + buf.slice(off, off + size) + .toString('utf8').replace(/\0.*$/, '').trim(), 8)) + +// the maximum encodable as a null-terminated octal, by field size +const MAXNUM = { + 12: 0o77777777777, + 8 : 0o7777777 } -function parse256 (buf) { - // first byte MUST be either 80 or FF - // 80 for positive, FF for 2's comp - var positive - if (buf[0] === 0x80) positive = true - else if (buf[0] === 0xFF) positive = false - else return null - - // build up a base-256 tuple from the least sig to the highest - var zero = false - , tuple = [] - for (var i = buf.length - 1; i > 0; i --) { - var byte = buf[i] - if (positive) tuple.push(byte) - else if (zero && byte === 0) tuple.push(0) - else if (zero) { - zero = false - tuple.push(0x100 - byte) - } else tuple.push(0xFF - byte) - } +const encNumber = (buf, off, size, number) => + number === null ? false : + number > MAXNUM[size] || number < 0 + ? (large.encode(number, buf.slice(off, off + size)), true) + : (encSmallNumber(buf, off, size, number), false) - for (var sum = 0, i = 0, l = tuple.length; i < l; i ++) { - sum += tuple[i] * Math.pow(256, i) - } +const encSmallNumber = (buf, off, size, number) => + buf.write(octalString(number, size), off, size, 'ascii') - return positive ? sum : -1 * sum -} +const octalString = (number, size) => + padOctal(Math.floor(number).toString(8), size) -function parseNumeric (f) { - if (f[0] & 0x80) return parse256(f) +const padOctal = (string, size) => + (string.length === size - 1 ? string + : new Array(size - string.length - 1).join('0') + string + ' ') + '\0' - var str = f.toString("utf8").split("\0")[0].trim() - , res = parseInt(str, 8) +const encDate = (buf, off, size, date) => + date === null ? false : + encNumber(buf, off, size, date.getTime() / 1000) - return isNaN(res) ? 
null : res -} +// enough to fill the longest string we've got +const NULLS = new Array(156).join('\0') +// pad with nulls, return true if it's longer or non-ascii +const encString = (buf, off, size, string) => + string === null ? false : + (buf.write(string + NULLS, off, size, 'utf8'), + string.length !== Buffer.byteLength(string) || string.length > size) +module.exports = Header diff --git a/node_modules/pacote/node_modules/tar/lib/high-level-opt.js b/node_modules/tar/lib/high-level-opt.js similarity index 100% rename from node_modules/pacote/node_modules/tar/lib/high-level-opt.js rename to node_modules/tar/lib/high-level-opt.js diff --git a/node_modules/pacote/node_modules/tar/lib/large-numbers.js b/node_modules/tar/lib/large-numbers.js similarity index 100% rename from node_modules/pacote/node_modules/tar/lib/large-numbers.js rename to node_modules/tar/lib/large-numbers.js diff --git a/node_modules/pacote/node_modules/tar/lib/list.js b/node_modules/tar/lib/list.js similarity index 100% rename from node_modules/pacote/node_modules/tar/lib/list.js rename to node_modules/tar/lib/list.js diff --git a/node_modules/pacote/node_modules/tar/lib/mkdir.js b/node_modules/tar/lib/mkdir.js similarity index 100% rename from node_modules/pacote/node_modules/tar/lib/mkdir.js rename to node_modules/tar/lib/mkdir.js diff --git a/node_modules/tar/lib/pack.js b/node_modules/tar/lib/pack.js index 5a3bb95a121..09b6ac590b7 100644 --- a/node_modules/tar/lib/pack.js +++ b/node_modules/tar/lib/pack.js @@ -1,236 +1,399 @@ -// pipe in an fstream, and it'll make a tarball. -// key-value pair argument is global extended header props. - -module.exports = Pack - -var EntryWriter = require("./entry-writer.js") - , Stream = require("stream").Stream - , path = require("path") - , inherits = require("inherits") - , GlobalHeaderWriter = require("./global-header-writer.js") - , collect = require("fstream").collect - , eof = new Buffer(512) - -for (var i = 0; i < 512; i ++) eof[i] = 0 - -inherits(Pack, Stream) - -function Pack (props) { - // console.error("-- p ctor") - var me = this - if (!(me instanceof Pack)) return new Pack(props) - - if (props) me._noProprietary = props.noProprietary - else me._noProprietary = false - - me._global = props - - me.readable = true - me.writable = true - me._buffer = [] - // console.error("-- -- set current to null in ctor") - me._currentEntry = null - me._processing = false - - me._pipeRoot = null - me.on("pipe", function (src) { - if (src.root === me._pipeRoot) return - me._pipeRoot = src - src.on("end", function () { - me._pipeRoot = null - }) - me.add(src) - }) +'use strict' + +// A readable tar stream creator +// Technically, this is a transform stream that you write paths into, +// and tar format comes out of. 
+// The `add()` method is like `write()` but returns this,
+// and end() returns `this` as well, so you can
+// do `new Pack(opt).add('files').add('dir').end().pipe(output)`
+// You could also do something like:
+// streamOfPaths().pipe(new Pack()).pipe(new fs.WriteStream('out.tar'))
+
+class PackJob {
+  constructor (path, absolute) {
+    this.path = path || './'
+    this.absolute = absolute
+    this.entry = null
+    this.stat = null
+    this.readdir = null
+    this.pending = false
+    this.ignore = false
+    this.piped = false
+  }
+}
-Pack.prototype.addGlobal = function (props) {
-  // console.error("-- p addGlobal")
-  if (this._didGlobal) return
-  this._didGlobal = true
-
-  var me = this
-  GlobalHeaderWriter(props)
-    .on("data", function (c) {
-      me.emit("data", c)
-    })
-    .end()
-}
+const MiniPass = require('minipass')
+const zlib = require('minizlib')
+const ReadEntry = require('./read-entry.js')
+const WriteEntry = require('./write-entry.js')
+const WriteEntrySync = WriteEntry.Sync
+const WriteEntryTar = WriteEntry.Tar
+const Yallist = require('yallist')
+const EOF = Buffer.alloc(1024)
+const ONSTAT = Symbol('onStat')
+const ENDED = Symbol('ended')
+const QUEUE = Symbol('queue')
+const CURRENT = Symbol('current')
+const PROCESS = Symbol('process')
+const PROCESSING = Symbol('processing')
+const PROCESSJOB = Symbol('processJob')
+const JOBS = Symbol('jobs')
+const JOBDONE = Symbol('jobDone')
+const ADDFSENTRY = Symbol('addFSEntry')
+const ADDTARENTRY = Symbol('addTarEntry')
+const STAT = Symbol('stat')
+const READDIR = Symbol('readdir')
+const ONREADDIR = Symbol('onreaddir')
+const PIPE = Symbol('pipe')
+const ENTRY = Symbol('entry')
+const ENTRYOPT = Symbol('entryOpt')
+const WRITEENTRYCLASS = Symbol('writeEntryClass')
+const WRITE = Symbol('write')
+const ONDRAIN = Symbol('ondrain')
+
+const fs = require('fs')
+const path = require('path')
+const warner = require('./warn-mixin.js')
+
+const Pack = warner(class Pack extends MiniPass {
+  constructor (opt) {
+    super(opt)
+    opt = opt || Object.create(null)
+    this.opt = opt
+    this.cwd = opt.cwd || process.cwd()
+    this.maxReadSize = opt.maxReadSize
+    this.preservePaths = !!opt.preservePaths
+    this.strict = !!opt.strict
+    this.noPax = !!opt.noPax
+    this.prefix = (opt.prefix || '').replace(/(\\|\/)+$/, '')
+    this.linkCache = opt.linkCache || new Map()
+    this.statCache = opt.statCache || new Map()
+    this.readdirCache = opt.readdirCache || new Map()
+    this[WRITEENTRYCLASS] = WriteEntry
+    if (typeof opt.onwarn === 'function')
+      this.on('warn', opt.onwarn)
+
+    this.zip = null
+    if (opt.gzip) {
+      if (typeof opt.gzip !== 'object')
+        opt.gzip = {}
+      this.zip = new zlib.Gzip(opt.gzip)
+      this.zip.on('data', chunk => super.write(chunk))
+      this.zip.on('end', _ => super.end())
+      this.zip.on('drain', _ => this[ONDRAIN]())
+      this.on('resume', _ => this.zip.resume())
+    } else
+      this.on('drain', this[ONDRAIN])
+
+    this.portable = !!opt.portable
+    this.noDirRecurse = !!opt.noDirRecurse
+    this.follow = !!opt.follow
+
+    this.filter = typeof opt.filter === 'function' ?
opt.filter : _ => true + + this[QUEUE] = new Yallist + this[JOBS] = 0 + this.jobs = +opt.jobs || 4 + this[PROCESSING] = false + this[ENDED] = false + } -Pack.prototype.add = function (stream) { - if (this._global && !this._didGlobal) this.addGlobal(this._global) + [WRITE] (chunk) { + return super.write(chunk) + } - if (this._ended) return this.emit("error", new Error("add after end")) + add (path) { + this.write(path) + return this + } - collect(stream) - this._buffer.push(stream) - this._process() - this._needDrain = this._buffer.length > 0 - return !this._needDrain -} + end (path) { + if (path) + this.write(path) + this[ENDED] = true + this[PROCESS]() + return this + } -Pack.prototype.pause = function () { - this._paused = true - if (this._currentEntry) this._currentEntry.pause() - this.emit("pause") -} + write (path) { + if (this[ENDED]) + throw new Error('write after end') -Pack.prototype.resume = function () { - this._paused = false - if (this._currentEntry) this._currentEntry.resume() - this.emit("resume") - this._process() -} + if (path instanceof ReadEntry) + this[ADDTARENTRY](path) + else + this[ADDFSENTRY](path) + return this.flowing + } -Pack.prototype.end = function () { - this._ended = true - this._buffer.push(eof) - this._process() -} + [ADDTARENTRY] (p) { + const absolute = path.resolve(this.cwd, p.path) + if (this.prefix) + p.path = this.prefix + '/' + p.path.replace(/^\.(\/+|$)/, '') + + // in this case, we don't have to wait for the stat + if (!this.filter(p.path, p)) + p.resume() + else { + const job = new PackJob(p.path, absolute, false) + job.entry = new WriteEntryTar(p, this[ENTRYOPT](job)) + job.entry.on('end', _ => this[JOBDONE](job)) + this[JOBS] += 1 + this[QUEUE].push(job) + } -Pack.prototype._process = function () { - var me = this - if (me._paused || me._processing) { - return + this[PROCESS]() } - var entry = me._buffer.shift() + [ADDFSENTRY] (p) { + const absolute = path.resolve(this.cwd, p) + if (this.prefix) + p = this.prefix + '/' + p.replace(/^\.(\/+|$)/, '') - if (!entry) { - if (me._needDrain) { - me.emit("drain") - } - return + this[QUEUE].push(new PackJob(p, absolute)) + this[PROCESS]() } - if (entry.ready === false) { - // console.error("-- entry is not ready", entry) - me._buffer.unshift(entry) - entry.on("ready", function () { - // console.error("-- -- ready!", entry) - me._process() + [STAT] (job) { + job.pending = true + this[JOBS] += 1 + const stat = this.follow ? 'stat' : 'lstat' + fs[stat](job.absolute, (er, stat) => { + job.pending = false + this[JOBS] -= 1 + if (er) + this.emit('error', er) + else + this[ONSTAT](job, stat) }) - return } - me._processing = true + [ONSTAT] (job, stat) { + this.statCache.set(job.absolute, stat) + job.stat = stat + + // now we have the stat, we can filter it. + if (!this.filter(job.path, stat)) + job.ignore = true - if (entry === eof) { - // need 2 ending null blocks. - me.emit("data", eof) - me.emit("data", eof) - me.emit("end") - me.emit("close") - return + this[PROCESS]() } - // Change the path to be relative to the root dir that was - // added to the tarball. - // - // XXX This should be more like how -C works, so you can - // explicitly set a root dir, and also explicitly set a pathname - // in the tarball to use. That way we can skip a lot of extra - // work when resolving symlinks for bundled dependencies in npm. 
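Given the write()/add()/end() surface above, the chainable usage promised by the file's opening comment looks like this in practice. This is a hedged sketch: it assumes the tar 4 entry point exposes this class as `tar.Pack`, and the file names are placeholders; the `cwd`, `gzip`, and `filter` options are the ones handled by the constructor above.

```js
// Hedged usage sketch for the new Pack: paths go in, tar bytes come out.
// Assumes the package entry point exports Pack; file names are examples.
const tar = require('tar')
const fs = require('fs')

new tar.Pack({
  cwd: process.cwd(),
  gzip: true,                                   // routes output through minizlib
  filter: (path, stat) => !/\.log$/.test(path)  // same filter hook as above
})
  .add('lib')
  .add('package.json')
  .end()
  .pipe(fs.createWriteStream('out.tgz'))
```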
+ [READDIR] (job) { + job.pending = true + this[JOBS] += 1 + fs.readdir(job.absolute, (er, entries) => { + job.pending = false + this[JOBS] -= 1 + if (er) + return this.emit('error', er) + this[ONREADDIR](job, entries) + }) + } - var root = path.dirname((entry.root || entry).path); - if (me._global && me._global.fromBase && entry.root && entry.root.path) { - // user set 'fromBase: true' indicating tar root should be directory itself - root = entry.root.path; + [ONREADDIR] (job, entries) { + this.readdirCache.set(job.absolute, entries) + job.readdir = entries + this[PROCESS]() } - var wprops = {} + [PROCESS] () { + if (this[PROCESSING]) + return - Object.keys(entry.props || {}).forEach(function (k) { - wprops[k] = entry.props[k] - }) + this[PROCESSING] = true + for (let w = this[QUEUE].head; + w !== null && this[JOBS] < this.jobs; + w = w.next) { + this[PROCESSJOB](w.value) + if (w.value.ignore) { + const p = w.next + this[QUEUE].removeNode(w) + w.next = p + } + } - if (me._noProprietary) wprops.noProprietary = true + this[PROCESSING] = false - wprops.path = path.relative(root, entry.path || '') + if (this[ENDED] && !this[QUEUE].length && this[JOBS] === 0) { + if (this.zip) + this.zip.end(EOF) + else { + super.write(EOF) + super.end() + } + } + } - // actually not a matter of opinion or taste. - if (process.platform === "win32") { - wprops.path = wprops.path.replace(/\\/g, "/") + get [CURRENT] () { + return this[QUEUE] && this[QUEUE].head && this[QUEUE].head.value } - if (!wprops.type) - wprops.type = 'Directory' + [JOBDONE] (job) { + this[QUEUE].shift() + this[JOBS] -= 1 + this[PROCESS]() + } - switch (wprops.type) { - // sockets not supported - case "Socket": + [PROCESSJOB] (job) { + if (job.pending) return - case "Directory": - wprops.path += "/" - wprops.size = 0 - break - - case "Link": - var lp = path.resolve(path.dirname(entry.path), entry.linkpath) - wprops.linkpath = path.relative(root, lp) || "." - wprops.size = 0 - break + if (job.entry) { + if (job === this[CURRENT] && !job.piped) + this[PIPE](job) + return + } - case "SymbolicLink": - var lp = path.resolve(path.dirname(entry.path), entry.linkpath) - wprops.linkpath = path.relative(path.dirname(entry.path), lp) || "." - wprops.size = 0 - break - } + if (!job.stat) { + if (this.statCache.has(job.absolute)) + this[ONSTAT](job, this.statCache.get(job.absolute)) + else + this[STAT](job) + } + if (!job.stat) + return - // console.error("-- new writer", wprops) - // if (!wprops.type) { - // // console.error("-- no type?", entry.constructor.name, entry) - // } + // filtered out! 
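The [PROCESS] loop above is the scheduler for all of this: it walks the queue from the head, kicks off asynchronous stat/readdir work until this[JOBS] reaches the configured jobs cap, and unlinks ignored jobs as it goes. Reduced to its skeleton, the pattern looks like the following simplified sketch; it is not Pack's code, and Pack additionally keeps output ordered by only ever piping the queue head.

```js
// Simplified sketch of the bounded-parallelism scheduling in [PROCESS]:
// start queued work until the number of in-flight jobs hits the cap.
class BoundedQueue {
  constructor (limit, worker) {
    this.limit = limit    // analogous to opt.jobs (default 4 above)
    this.worker = worker  // async work such as fs.stat or fs.readdir
    this.queue = []
    this.running = 0
  }

  push (item) {
    this.queue.push(item)
    this.process()
  }

  process () {
    while (this.running < this.limit && this.queue.length) {
      this.running += 1
      this.worker(this.queue.shift(), () => {
        this.running -= 1
        this.process() // a completed job may admit the next one
      })
    }
  }
}
```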
+ if (job.ignore) + return - // console.error("-- -- set current to new writer", wprops.path) - var writer = me._currentEntry = EntryWriter(wprops) + if (!this.noDirRecurse && job.stat.isDirectory() && !job.readdir) { + if (this.readdirCache.has(job.absolute)) + this[ONREADDIR](job, this.readdirCache.get(job.absolute)) + else + this[READDIR](job) + if (!job.readdir) + return + } - writer.parent = me + // we know it doesn't have an entry, because that got checked above + job.entry = this[ENTRY](job) + if (!job.entry) { + job.ignore = true + return + } - // writer.on("end", function () { - // // console.error("-- -- writer end", writer.path) - // }) + if (job === this[CURRENT] && !job.piped) + this[PIPE](job) + } - writer.on("data", function (c) { - me.emit("data", c) - }) + [ENTRYOPT] (job) { + return { + onwarn: (msg, data) => { + this.warn(msg, data) + }, + noPax: this.noPax, + cwd: this.cwd, + absolute: job.absolute, + preservePaths: this.preservePaths, + maxReadSize: this.maxReadSize, + strict: this.strict, + portable: this.portable, + linkCache: this.linkCache, + statCache: this.statCache + } + } - writer.on("header", function () { - Buffer.prototype.toJSON = function () { - return this.toString().split(/\0/).join(".") + [ENTRY] (job) { + this[JOBS] += 1 + try { + return new this[WRITEENTRYCLASS]( + job.path, this[ENTRYOPT](job)).on('end', _ => { + this[JOBDONE](job) + }).on('error', er => this.emit('error', er)) + } catch (er) { + this.emit('error', er) } - // console.error("-- -- writer header %j", writer.props) - if (writer.props.size === 0) nextEntry() - }) - writer.on("close", nextEntry) + } + + [ONDRAIN] () { + if (this[CURRENT] && this[CURRENT].entry) + this[CURRENT].entry.resume() + } + + // like .pipe() but using super, because our write() is special + [PIPE] (job) { + job.piped = true + + if (job.readdir) + job.readdir.forEach(entry => { + const p = this.prefix ? + job.path.slice(this.prefix.length + 1) || './' + : job.path + + const base = p === './' ? '' : p.replace(/\/*$/, '/') + this[ADDFSENTRY](base + entry) + }) + + const source = job.entry + const zip = this.zip + + if (zip) + source.on('data', chunk => { + if (!zip.write(chunk)) + source.pause() + }) + else + source.on('data', chunk => { + if (!super.write(chunk)) + source.pause() + }) + } - var ended = false - function nextEntry () { - if (ended) return - ended = true + pause () { + if (this.zip) + this.zip.pause() + return super.pause() + } +}) - // console.error("-- -- writer close", writer.path) - // console.error("-- -- set current to null", wprops.path) - me._currentEntry = null - me._processing = false - me._process() +class PackSync extends Pack { + constructor (opt) { + super(opt) + this[WRITEENTRYCLASS] = WriteEntrySync } - writer.on("error", function (er) { - // console.error("-- -- writer error", writer.path) - me.emit("error", er) - }) + // pause/resume are no-ops in sync streams. + pause () {} + resume () {} + + [STAT] (job) { + const stat = this.follow ? 'statSync' : 'lstatSync' + this[ONSTAT](job, fs[stat](job.absolute)) + } - // if it's the root, then there's no need to add its entries, - // or data, since they'll be added directly. 
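The [PIPE] method above cannot use the stock stream .pipe(), because this class's public write() consumes paths rather than bytes, so it wires backpressure by hand: pause the source whenever a destination write() returns false. The bare pattern, outside of Pack, is sketched below; in Pack itself the resume side is handled by [ONDRAIN] and the zlib 'drain' listener set up in the constructor.

```js
// Sketch of the manual backpressure wiring in [PIPE]: stop the source
// when the destination's buffer is full, restart it on 'drain'.
function pipeWithBackpressure (source, dest) {
  source.on('data', chunk => {
    if (!dest.write(chunk)) // false means "buffer full, stop sending"
      source.pause()
  })
  dest.on('drain', () => source.resume())
}
```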
- if (entry === me._pipeRoot) { - // console.error("-- is the root, don't auto-add") - writer.add = null + [READDIR] (job, stat) { + this[ONREADDIR](job, fs.readdirSync(job.absolute)) } - entry.pipe(writer) + // gotta get it all in this tick + [PIPE] (job) { + const source = job.entry + const zip = this.zip + + if (job.readdir) + job.readdir.forEach(entry => { + const p = this.prefix ? + job.path.slice(this.prefix.length + 1) || './' + : job.path + + + const base = p === './' ? '' : p.replace(/\/*$/, '/') + this[ADDFSENTRY](base + entry) + }) + + if (zip) + source.on('data', chunk => { + zip.write(chunk) + }) + else + source.on('data', chunk => { + super[WRITE](chunk) + }) + } } -Pack.prototype.destroy = function () {} -Pack.prototype.write = function () {} +Pack.Sync = PackSync + +module.exports = Pack diff --git a/node_modules/tar/lib/parse.js b/node_modules/tar/lib/parse.js index 600ad782f0f..63c7ee9cefd 100644 --- a/node_modules/tar/lib/parse.js +++ b/node_modules/tar/lib/parse.js @@ -1,275 +1,415 @@ +'use strict' + +// this[BUFFER] is the remainder of a chunk if we're waiting for +// the full 512 bytes of a header to come in. We will Buffer.concat() +// it to the next write(), which is a mem copy, but a small one. +// +// this[QUEUE] is a Yallist of entries that haven't been emitted +// yet this can only get filled up if the user keeps write()ing after +// a write() returns false, or does a write() with more than one entry +// +// We don't buffer chunks, we always parse them and either create an +// entry, or push it into the active entry. The ReadEntry class knows +// to throw data away if .ignore=true +// +// Shift entry off the buffer when it emits 'end', and emit 'entry' for +// the next one in the list. +// +// At any time, we're pushing body chunks into the entry at WRITEENTRY, +// and waiting for 'end' on the entry at READENTRY +// +// ignored entries get .resume() called on them straight away + +const warner = require('./warn-mixin.js') +const path = require('path') +const Header = require('./header.js') +const EE = require('events') +const Yallist = require('yallist') +const maxMetaEntrySize = 1024 * 1024 +const Entry = require('./read-entry.js') +const Pax = require('./pax.js') +const zlib = require('minizlib') + +const gzipHeader = new Buffer([0x1f, 0x8b]) +const STATE = Symbol('state') +const WRITEENTRY = Symbol('writeEntry') +const READENTRY = Symbol('readEntry') +const NEXTENTRY = Symbol('nextEntry') +const PROCESSENTRY = Symbol('processEntry') +const EX = Symbol('extendedHeader') +const GEX = Symbol('globalExtendedHeader') +const META = Symbol('meta') +const EMITMETA = Symbol('emitMeta') +const BUFFER = Symbol('buffer') +const QUEUE = Symbol('queue') +const ENDED = Symbol('ended') +const EMITTEDEND = Symbol('emittedEnd') +const EMIT = Symbol('emit') +const UNZIP = Symbol('unzip') +const CONSUMECHUNK = Symbol('consumeChunk') +const CONSUMECHUNKSUB = Symbol('consumeChunkSub') +const CONSUMEBODY = Symbol('consumeBody') +const CONSUMEMETA = Symbol('consumeMeta') +const CONSUMEHEADER = Symbol('consumeHeader') +const CONSUMING = Symbol('consuming') +const BUFFERCONCAT = Symbol('bufferConcat') +const MAYBEEND = Symbol('maybeEnd') +const WRITING = Symbol('writing') +const ABORTED = Symbol('aborted') +const DONE = Symbol('onDone') + +const noop = _ => true + +module.exports = warner(class Parser extends EE { + constructor (opt) { + opt = opt || {} + super(opt) + + if (opt.ondone) + this.on(DONE, opt.ondone) + else + this.on(DONE, _ => { + this.emit('prefinish') + 
this.emit('finish')
+        this.emit('end')
+        this.emit('close')
+      })
+
+    this.strict = !!opt.strict
+    this.maxMetaEntrySize = opt.maxMetaEntrySize || maxMetaEntrySize
+    this.filter = typeof opt.filter === 'function' ? opt.filter : noop
+
+    // have to set this so that streams are ok piping into it
+    this.writable = true
+    this.readable = false
+
+    this[QUEUE] = new Yallist()
+    this[BUFFER] = null
+    this[READENTRY] = null
+    this[WRITEENTRY] = null
+    this[STATE] = 'begin'
+    this[META] = ''
+    this[EX] = null
+    this[GEX] = null
+    this[ENDED] = false
+    this[UNZIP] = null
+    this[ABORTED] = false
+    if (typeof opt.onwarn === 'function')
+      this.on('warn', opt.onwarn)
+    if (typeof opt.onentry === 'function')
+      this.on('entry', opt.onentry)
+  }
+
+  [CONSUMEHEADER] (chunk, position) {
+    const header = new Header(chunk, position)
-// A writable stream.
-// It emits "entry" events, which provide a readable stream that has
-// header info attached.
-
-module.exports = Parse.create = Parse
-
-var stream = require("stream")
-  , Stream = stream.Stream
-  , BlockStream = require("block-stream")
-  , tar = require("../tar.js")
-  , TarHeader = require("./header.js")
-  , Entry = require("./entry.js")
-  , BufferEntry = require("./buffer-entry.js")
-  , ExtendedHeader = require("./extended-header.js")
-  , assert = require("assert").ok
-  , inherits = require("inherits")
-  , fstream = require("fstream")
-
-// reading a tar is a lot like reading a directory
-// However, we're actually not going to run the ctor,
-// since it does a stat and various other stuff.
-// This inheritance gives us the pause/resume/pipe
-// behavior that is desired.
-inherits(Parse, fstream.Reader)
-
-function Parse () {
-  var me = this
-  if (!(me instanceof Parse)) return new Parse()
-
-  // doesn't apply fstream.Reader ctor?
-  // no, because we don't want to stat/etc, we just
-  // want to get the entry/add logic from .pipe()
-  Stream.apply(me)
-
-  me.writable = true
-  me.readable = true
-  me._stream = new BlockStream(512)
-  me.position = 0
-  me._ended = false
-
-  me._stream.on("error", function (e) {
-    me.emit("error", e)
-  })
-
-  me._stream.on("data", function (c) {
-    me._process(c)
-  })
-
-  me._stream.on("end", function () {
-    me._streamEnd()
-  })
-
-  me._stream.on("drain", function () {
-    me.emit("drain")
-  })
-}
-
-// overridden in Extract class, since it needs to
-// wait for its DirWriter part to finish before
-// emitting "end"
-Parse.prototype._streamEnd = function () {
-  var me = this
-  if (!me._ended || me._entry) me.error("unexpected eof")
-  me.emit("end")
-}
-
-// a tar reader is actually a filter, not just a readable stream.
-// So, you should pipe a tarball stream into it, and it needs these
-// write/end methods to do that.
-Parse.prototype.write = function (c) {
-  if (this._ended) {
-    // gnutar puts a LOT of nulls at the end.
-    // you can keep writing these things forever.
-    // Just ignore them.
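For contrast with the fstream-based Parse being deleted around it, the new Parser is driven by writing raw tarball bytes in and handling 'entry' events, with gzip detected automatically from the 0x1f 0x8b magic bytes. A hedged usage sketch, assuming the tar 4 entry point re-exposes this class as `tar.Parse` and that `archive.tgz` is a placeholder file name:

```js
// Hedged sketch: list entries from a (possibly gzipped) tarball using
// the new Parser. Assumes the entry point exports it as tar.Parse.
const tar = require('tar')
const fs = require('fs')

fs.createReadStream('archive.tgz')
  .pipe(new tar.Parse({
    filter: path => !path.endsWith('.map'), // same filter hook as above
    onentry: entry => {                     // shortcut for on('entry', ...)
      console.log(entry.type, entry.path, entry.size)
      entry.resume()                        // drain bodies we don't keep
    }
  }))
```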
- for (var i = 0, l = c.length; i > l; i ++) { - if (c[i] !== 0) return this.error("write() after end()") + if (header.nullBlock) + this[EMIT]('nullBlock') + else if (!header.cksumValid) + this.warn('invalid entry', header) + else if (!header.path) + this.warn('invalid: path is required', header) + else { + const type = header.type + if (/^(Symbolic)?Link$/.test(type) && !header.linkpath) + this.warn('invalid: linkpath required', header) + else if (!/^(Symbolic)?Link$/.test(type) && header.linkpath) + this.warn('invalid: linkpath forbidden', header) + else { + const entry = this[WRITEENTRY] = new Entry(header, this[EX], this[GEX]) + + if (entry.meta) { + if (entry.size > this.maxMetaEntrySize) { + entry.ignore = true + this[EMIT]('ignoredEntry', entry) + this[STATE] = 'ignore' + } else if (entry.size > 0) { + this[META] = '' + entry.on('data', c => this[META] += c) + this[STATE] = 'meta' + } + } else { + + this[EX] = null + entry.ignore = entry.ignore || !this.filter(entry.path, entry) + if (entry.ignore) { + this[EMIT]('ignoredEntry', entry) + this[STATE] = entry.remain ? 'ignore' : 'begin' + } else { + if (entry.remain) + this[STATE] = 'body' + else { + this[STATE] = 'begin' + entry.end() + } + + if (!this[READENTRY]) { + this[QUEUE].push(entry) + this[NEXTENTRY]() + } else + this[QUEUE].push(entry) + } + } + } } - return } - return this._stream.write(c) -} - -Parse.prototype.end = function (c) { - this._ended = true - return this._stream.end(c) -} - -// don't need to do anything, since we're just -// proxying the data up from the _stream. -// Just need to override the parent's "Not Implemented" -// error-thrower. -Parse.prototype._read = function () {} - -Parse.prototype._process = function (c) { - assert(c && c.length === 512, "block size should be 512") - - // one of three cases. - // 1. A new header - // 2. A part of a file/extended header - // 3. One of two or more EOF null blocks - - if (this._entry) { - var entry = this._entry - if(!entry._abort) entry.write(c) + + [PROCESSENTRY] (entry) { + let go = true + + if (!entry) { + this[READENTRY] = null + go = false + } else if (Array.isArray(entry)) + this.emit.apply(this, entry) else { - entry._remaining -= c.length - if(entry._remaining < 0) entry._remaining = 0 + this[READENTRY] = entry + this.emit('entry', entry) + if (!entry.emittedEnd) { + entry.on('end', _ => this[NEXTENTRY]()) + go = false + } } - if (entry._remaining === 0) { + + return go + } + + [NEXTENTRY] () { + do {} while (this[PROCESSENTRY](this[QUEUE].shift())) + + if (!this[QUEUE].length) { + // At this point, there's nothing in the queue, but we may have an + // entry which is being consumed (readEntry). + // If we don't, then we definitely can handle more data. + // If we do, and either it's flowing, or it has never had any data + // written to it, then it needs more. + // The only other possibility is that it has returned false from a + // write() call, so we wait for the next drain to continue. + const re = this[READENTRY] + const drainNow = !re || re.flowing || re.size === re.remain + if (drainNow) { + if (!this[WRITING]) + this.emit('drain') + } else + re.once('drain', _ => this.emit('drain')) + } + } + + [CONSUMEBODY] (chunk, position) { + // write up to but no more than writeEntry.blockRemain + const entry = this[WRITEENTRY] + const br = entry.blockRemain + const c = (br >= chunk.length && position === 0) ? 
chunk + : chunk.slice(position, position + br) + + entry.write(c) + + if (!entry.blockRemain) { + this[STATE] = 'begin' + this[WRITEENTRY] = null entry.end() - this._entry = null - } - } else { - // either zeroes or a header - var zero = true - for (var i = 0; i < 512 && zero; i ++) { - zero = c[i] === 0 } - // eof is *at least* 2 blocks of nulls, and then the end of the - // file. you can put blocks of nulls between entries anywhere, - // so appending one tarball to another is technically valid. - // ending without the eof null blocks is not allowed, however. - if (zero) { - if (this._eofStarted) - this._ended = true - this._eofStarted = true - } else { - this._eofStarted = false - this._startEntry(c) + return c.length + } + + [CONSUMEMETA] (chunk, position) { + const entry = this[WRITEENTRY] + const ret = this[CONSUMEBODY](chunk, position) + + // if we finished, then the entry is reset + if (!this[WRITEENTRY]) + this[EMITMETA](entry) + + return ret + } + + [EMIT] (ev, data, extra) { + if (!this[QUEUE].length && !this[READENTRY]) + this.emit(ev, data, extra) + else + this[QUEUE].push([ev, data, extra]) + } + + [EMITMETA] (entry) { + this[EMIT]('meta', this[META]) + switch (entry.type) { + case 'ExtendedHeader': + case 'OldExtendedHeader': + this[EX] = Pax.parse(this[META], this[EX], false) + break + + case 'GlobalExtendedHeader': + this[GEX] = Pax.parse(this[META], this[GEX], true) + break + + case 'NextFileHasLongPath': + case 'OldGnuLongPath': + this[EX] = this[EX] || Object.create(null) + this[EX].path = this[META].replace(/\0.*/, '') + break + + case 'NextFileHasLongLinkpath': + this[EX] = this[EX] || Object.create(null) + this[EX].linkpath = this[META].replace(/\0.*/, '') + break + + /* istanbul ignore next */ + default: throw new Error('unknown meta: ' + entry.type) } } - this.position += 512 -} - -// take a header chunk, start the right kind of entry. -Parse.prototype._startEntry = function (c) { - var header = new TarHeader(c) - , self = this - , entry - , ev - , EntryType - , onend - , meta = false - - if (null === header.size || !header.cksumValid) { - var e = new Error("invalid tar file") - e.header = header - e.tar_file_offset = this.position - e.tar_block = this.position / 512 - return this.emit("error", e) + abort (msg, error) { + this[ABORTED] = true + this.warn(msg, error) + this.emit('abort') } - switch (tar.types[header.type]) { - case "File": - case "OldFile": - case "Link": - case "SymbolicLink": - case "CharacterDevice": - case "BlockDevice": - case "Directory": - case "FIFO": - case "ContiguousFile": - case "GNUDumpDir": - // start a file. - // pass in any extended headers - // These ones consumers are typically most interested in. 
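The [EMITMETA] switch above hands extended-header bodies to Pax.parse. Those bodies are sequences of '<len> <key>=<value>\n' records in which <len> counts the entire record, length digits and trailing newline included. A minimal standalone decode of that wire format is sketched below; it is not lib/pax.js, which also merges global headers, type-checks fields, and validates input.

```js
// Minimal sketch of pax extended-header records: "<len> <key>=<value>\n",
// where <len> covers the whole record including itself and the newline.
// No validation: a sketch of the format only.
function parsePaxBody (body) {
  const fields = Object.create(null)
  let pos = 0
  while (pos < body.length) {
    const sp = body.indexOf(' ', pos)
    const len = parseInt(body.slice(pos, sp), 10)
    const record = body.slice(pos, pos + len)
    const eq = record.indexOf('=')
    fields[record.slice(sp - pos + 1, eq)] = record.slice(eq + 1, -1)
    pos += len
  }
  return fields
}

// "30 mtime=1503671465.255128000\n" is exactly 30 bytes, so:
// parsePaxBody('30 mtime=1503671465.255128000\n')
//   => { mtime: '1503671465.255128000' }
```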
- EntryType = Entry - ev = "entry" - break - - case "GlobalExtendedHeader": - // extended headers that apply to the rest of the tarball - EntryType = ExtendedHeader - onend = function () { - self._global = self._global || {} - Object.keys(entry.fields).forEach(function (k) { - self._global[k] = entry.fields[k] - }) + write (chunk) { + if (this[ABORTED]) + return + + // first write, might be gzipped + if (this[UNZIP] === null && chunk) { + if (this[BUFFER]) { + chunk = Buffer.concat([this[BUFFER], chunk]) + this[BUFFER] = null } - ev = "globalExtendedHeader" - meta = true - break - - case "ExtendedHeader": - case "OldExtendedHeader": - // extended headers that apply to the next entry - EntryType = ExtendedHeader - onend = function () { - self._extended = entry.fields + if (chunk.length < gzipHeader.length) { + this[BUFFER] = chunk + return true } - ev = "extendedHeader" - meta = true - break - - case "NextFileHasLongLinkpath": - // set linkpath= in extended header - EntryType = BufferEntry - onend = function () { - self._extended = self._extended || {} - self._extended.linkpath = entry.body + for (let i = 0; this[UNZIP] === null && i < gzipHeader.length; i++) { + if (chunk[i] !== gzipHeader[i]) + this[UNZIP] = false } - ev = "longLinkpath" - meta = true - break - - case "NextFileHasLongPath": - case "OldGnuLongPath": - // set path= in file-extended header - EntryType = BufferEntry - onend = function () { - self._extended = self._extended || {} - self._extended.path = entry.body + if (this[UNZIP] === null) { + const ended = this[ENDED] + this[ENDED] = false + this[UNZIP] = new zlib.Unzip() + this[UNZIP].on('data', chunk => this[CONSUMECHUNK](chunk)) + this[UNZIP].on('error', er => + this.abort('zlib error: ' + er.message, er)) + this[UNZIP].on('end', _ => { + this[ENDED] = true + this[CONSUMECHUNK]() + }) + return ended ? this[UNZIP].end(chunk) : this[UNZIP].write(chunk) } - ev = "longPath" - meta = true - break - - default: - // all the rest we skip, but still set the _entry - // member, so that we can skip over their data appropriately. - // emit an event to say that this is an ignored entry type? - EntryType = Entry - ev = "ignoredEntry" - break - } + } - var global, extended - if (meta) { - global = extended = null - } else { - var global = this._global - var extended = this._extended + this[WRITING] = true + if (this[UNZIP]) + this[UNZIP].write(chunk) + else + this[CONSUMECHUNK](chunk) + this[WRITING] = false - // extendedHeader only applies to one entry, so once we start - // an entry, it's over. - this._extended = null - } - entry = new EntryType(header, extended, global) - entry.meta = meta - - // only proxy data events of normal files. - if (!meta) { - entry.on("data", function (c) { - me.emit("data", c) - }) + // return false if there's a queue, or if the current entry isn't flowing + const ret = + this[QUEUE].length ? false : + this[READENTRY] ? this[READENTRY].flowing : + true + + // if we have no queue, then that means a clogged READENTRY + if (!ret && !this[QUEUE].length) + this[READENTRY].once('drain', _ => this.emit('drain')) + + return ret } - if (onend) entry.on("end", onend) + [BUFFERCONCAT] (c) { + if (c && !this[ABORTED]) + this[BUFFER] = this[BUFFER] ? Buffer.concat([this[BUFFER], c]) : c + } - this._entry = entry - var me = this + [MAYBEEND] () { + if (this[ENDED] && !this[EMITTEDEND] && !this[ABORTED]) { + this[EMITTEDEND] = true + const entry = this[WRITEENTRY] + if (entry && entry.blockRemain) { + const have = this[BUFFER] ? 
this[BUFFER].length : 0 + this.warn('Truncated input (needed ' + entry.blockRemain + + ' more bytes, only ' + have + ' available)', entry) + if (this[BUFFER]) + entry.write(this[BUFFER]) + entry.end() + } + this[EMIT](DONE) + } + } - entry.on("pause", function () { - me.pause() - }) + [CONSUMECHUNK] (chunk) { + if (this[CONSUMING]) { + this[BUFFERCONCAT](chunk) + } else if (!chunk && !this[BUFFER]) { + this[MAYBEEND]() + } else { + this[CONSUMING] = true + if (this[BUFFER]) { + this[BUFFERCONCAT](chunk) + const c = this[BUFFER] + this[BUFFER] = null + this[CONSUMECHUNKSUB](c) + } else { + this[CONSUMECHUNKSUB](chunk) + } - entry.on("resume", function () { - me.resume() - }) + while (this[BUFFER] && this[BUFFER].length >= 512 && !this[ABORTED]) { + const c = this[BUFFER] + this[BUFFER] = null + this[CONSUMECHUNKSUB](c) + } + this[CONSUMING] = false + } - if (this.listeners("*").length) { - this.emit("*", ev, entry) + if (!this[BUFFER] || this[ENDED]) + this[MAYBEEND]() } - this.emit(ev, entry) + [CONSUMECHUNKSUB] (chunk) { + // we know that we are in CONSUMING mode, so anything written goes into + // the buffer. Advance the position and put any remainder in the buffer. + let position = 0 + let length = chunk.length + while (position + 512 <= length && !this[ABORTED]) { + switch (this[STATE]) { + case 'begin': + this[CONSUMEHEADER](chunk, position) + position += 512 + break + + case 'ignore': + case 'body': + position += this[CONSUMEBODY](chunk, position) + break + + case 'meta': + position += this[CONSUMEMETA](chunk, position) + break + + /* istanbul ignore next */ + default: + throw new Error('invalid state: ' + this[STATE]) + } + } - // Zero-byte entry. End immediately. - if (entry.props.size === 0) { - entry.end() - this._entry = null + if (position < length) { + if (this[BUFFER]) + this[BUFFER] = Buffer.concat([chunk.slice(position), this[BUFFER]]) + else + this[BUFFER] = chunk.slice(position) + } + } + + end (chunk) { + if (!this[ABORTED]) { + if (this[UNZIP]) + this[UNZIP].end(chunk) + else { + this[ENDED] = true + this.write(chunk) + } + } } -} +}) diff --git a/node_modules/pacote/node_modules/tar/lib/pax.js b/node_modules/tar/lib/pax.js similarity index 100% rename from node_modules/pacote/node_modules/tar/lib/pax.js rename to node_modules/tar/lib/pax.js diff --git a/node_modules/pacote/node_modules/tar/lib/read-entry.js b/node_modules/tar/lib/read-entry.js similarity index 100% rename from node_modules/pacote/node_modules/tar/lib/read-entry.js rename to node_modules/tar/lib/read-entry.js diff --git a/node_modules/pacote/node_modules/tar/lib/replace.js b/node_modules/tar/lib/replace.js similarity index 100% rename from node_modules/pacote/node_modules/tar/lib/replace.js rename to node_modules/tar/lib/replace.js diff --git a/node_modules/pacote/node_modules/tar/lib/types.js b/node_modules/tar/lib/types.js similarity index 100% rename from node_modules/pacote/node_modules/tar/lib/types.js rename to node_modules/tar/lib/types.js diff --git a/node_modules/pacote/node_modules/tar/lib/unpack.js b/node_modules/tar/lib/unpack.js similarity index 100% rename from node_modules/pacote/node_modules/tar/lib/unpack.js rename to node_modules/tar/lib/unpack.js diff --git a/node_modules/pacote/node_modules/tar/lib/update.js b/node_modules/tar/lib/update.js similarity index 100% rename from node_modules/pacote/node_modules/tar/lib/update.js rename to node_modules/tar/lib/update.js diff --git a/node_modules/pacote/node_modules/tar/lib/warn-mixin.js b/node_modules/tar/lib/warn-mixin.js similarity index 
100% rename from node_modules/pacote/node_modules/tar/lib/warn-mixin.js rename to node_modules/tar/lib/warn-mixin.js diff --git a/node_modules/pacote/node_modules/tar/lib/winchars.js b/node_modules/tar/lib/winchars.js similarity index 100% rename from node_modules/pacote/node_modules/tar/lib/winchars.js rename to node_modules/tar/lib/winchars.js diff --git a/node_modules/pacote/node_modules/tar/lib/write-entry.js b/node_modules/tar/lib/write-entry.js similarity index 100% rename from node_modules/pacote/node_modules/tar/lib/write-entry.js rename to node_modules/tar/lib/write-entry.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/.npmignore b/node_modules/tar/node_modules/minipass/.npmignore similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/.npmignore rename to node_modules/tar/node_modules/minipass/.npmignore diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/.travis.yml b/node_modules/tar/node_modules/minipass/.travis.yml similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/.travis.yml rename to node_modules/tar/node_modules/minipass/.travis.yml diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/README.md b/node_modules/tar/node_modules/minipass/README.md similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/README.md rename to node_modules/tar/node_modules/minipass/README.md diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/b.js b/node_modules/tar/node_modules/minipass/b.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/b.js rename to node_modules/tar/node_modules/minipass/b.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/bench/lib/extend-minipass.js b/node_modules/tar/node_modules/minipass/bench/lib/extend-minipass.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/bench/lib/extend-minipass.js rename to node_modules/tar/node_modules/minipass/bench/lib/extend-minipass.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/bench/lib/extend-through2.js b/node_modules/tar/node_modules/minipass/bench/lib/extend-through2.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/bench/lib/extend-through2.js rename to node_modules/tar/node_modules/minipass/bench/lib/extend-through2.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/bench/lib/extend-transform.js b/node_modules/tar/node_modules/minipass/bench/lib/extend-transform.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/bench/lib/extend-transform.js rename to node_modules/tar/node_modules/minipass/bench/lib/extend-transform.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/bench/lib/nullsink.js b/node_modules/tar/node_modules/minipass/bench/lib/nullsink.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/bench/lib/nullsink.js rename to node_modules/tar/node_modules/minipass/bench/lib/nullsink.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/bench/lib/numbers.js b/node_modules/tar/node_modules/minipass/bench/lib/numbers.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/bench/lib/numbers.js rename to 
node_modules/tar/node_modules/minipass/bench/lib/numbers.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/bench/lib/timer.js b/node_modules/tar/node_modules/minipass/bench/lib/timer.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/bench/lib/timer.js rename to node_modules/tar/node_modules/minipass/bench/lib/timer.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/bench/test.js b/node_modules/tar/node_modules/minipass/bench/test.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/bench/test.js rename to node_modules/tar/node_modules/minipass/bench/test.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/d.js b/node_modules/tar/node_modules/minipass/d.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/d.js rename to node_modules/tar/node_modules/minipass/d.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/e.js b/node_modules/tar/node_modules/minipass/e.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/e.js rename to node_modules/tar/node_modules/minipass/e.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/eos.js b/node_modules/tar/node_modules/minipass/eos.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/eos.js rename to node_modules/tar/node_modules/minipass/eos.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/foo b/node_modules/tar/node_modules/minipass/foo similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/foo rename to node_modules/tar/node_modules/minipass/foo diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/index.js b/node_modules/tar/node_modules/minipass/index.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/index.js rename to node_modules/tar/node_modules/minipass/index.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/minipass-benchmarks.xlsx b/node_modules/tar/node_modules/minipass/minipass-benchmarks.xlsx similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/minipass-benchmarks.xlsx rename to node_modules/tar/node_modules/minipass/minipass-benchmarks.xlsx diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/package.json b/node_modules/tar/node_modules/minipass/package.json similarity index 90% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/package.json rename to node_modules/tar/node_modules/minipass/package.json index 5cdc6970d15..52856521fcf 100644 --- a/node_modules/pacote/node_modules/tar/node_modules/minipass/package.json +++ b/node_modules/tar/node_modules/minipass/package.json @@ -3,7 +3,7 @@ "_id": "minipass@2.2.1", "_inBundle": false, "_integrity": "sha512-u1aUllxPJUI07cOqzR7reGmQxmCqlH88uIIsf6XZFEWgw7gXKpJdR+5R9Y3KEDmWYkdIz9wXZs3C0jOPxejk/Q==", - "_location": "/pacote/tar/minipass", + "_location": "/tar/minipass", "_phantomChildren": {}, "_requested": { "type": "range", @@ -16,13 +16,13 @@ "fetchSpec": "^2.0.2" }, "_requiredBy": [ - "/pacote/tar", - "/pacote/tar/minizlib" + "/tar", + "/tar/minizlib" ], "_resolved": "https://registry.npmjs.org/minipass/-/minipass-2.2.1.tgz", "_shasum": "5ada97538b1027b4cf7213432428578cb564011f", "_spec": "minipass@^2.0.2", - "_where": 
"/Users/rebecca/code/npm/node_modules/pacote/node_modules/tar", + "_where": "/Users/rebecca/code/npm/node_modules/tar", "author": { "name": "Isaac Z. Schlueter", "email": "i@izs.me", diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/test/basic.js b/node_modules/tar/node_modules/minipass/test/basic.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/test/basic.js rename to node_modules/tar/node_modules/minipass/test/basic.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minipass/test/empty-end.js b/node_modules/tar/node_modules/minipass/test/empty-end.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minipass/test/empty-end.js rename to node_modules/tar/node_modules/minipass/test/empty-end.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minizlib/LICENSE b/node_modules/tar/node_modules/minizlib/LICENSE similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minizlib/LICENSE rename to node_modules/tar/node_modules/minizlib/LICENSE diff --git a/node_modules/pacote/node_modules/tar/node_modules/minizlib/README.md b/node_modules/tar/node_modules/minizlib/README.md similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minizlib/README.md rename to node_modules/tar/node_modules/minizlib/README.md diff --git a/node_modules/pacote/node_modules/tar/node_modules/minizlib/constants.js b/node_modules/tar/node_modules/minizlib/constants.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minizlib/constants.js rename to node_modules/tar/node_modules/minizlib/constants.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minizlib/index.js b/node_modules/tar/node_modules/minizlib/index.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/minizlib/index.js rename to node_modules/tar/node_modules/minizlib/index.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/minizlib/package.json b/node_modules/tar/node_modules/minizlib/package.json similarity index 92% rename from node_modules/pacote/node_modules/tar/node_modules/minizlib/package.json rename to node_modules/tar/node_modules/minizlib/package.json index a58da806404..ae7fb898d46 100644 --- a/node_modules/pacote/node_modules/tar/node_modules/minizlib/package.json +++ b/node_modules/tar/node_modules/minizlib/package.json @@ -3,7 +3,7 @@ "_id": "minizlib@1.0.3", "_inBundle": false, "_integrity": "sha1-1cGr93vhVGGZUuJTM27Mq5sqMvU=", - "_location": "/pacote/tar/minizlib", + "_location": "/tar/minizlib", "_phantomChildren": {}, "_requested": { "type": "range", @@ -16,12 +16,12 @@ "fetchSpec": "^1.0.3" }, "_requiredBy": [ - "/pacote/tar" + "/tar" ], "_resolved": "https://registry.npmjs.org/minizlib/-/minizlib-1.0.3.tgz", "_shasum": "d5c1abf77be154619952e253336eccab9b2a32f5", "_spec": "minizlib@^1.0.3", - "_where": "/Users/rebecca/code/npm/node_modules/pacote/node_modules/tar", + "_where": "/Users/rebecca/code/npm/node_modules/tar", "author": { "name": "Isaac Z. 
Schlueter", "email": "i@izs.me", diff --git a/node_modules/pacote/node_modules/tar/node_modules/yallist/LICENSE b/node_modules/tar/node_modules/yallist/LICENSE similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/yallist/LICENSE rename to node_modules/tar/node_modules/yallist/LICENSE diff --git a/node_modules/pacote/node_modules/tar/node_modules/yallist/README.md b/node_modules/tar/node_modules/yallist/README.md similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/yallist/README.md rename to node_modules/tar/node_modules/yallist/README.md diff --git a/node_modules/pacote/node_modules/tar/node_modules/yallist/iterator.js b/node_modules/tar/node_modules/yallist/iterator.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/yallist/iterator.js rename to node_modules/tar/node_modules/yallist/iterator.js diff --git a/node_modules/pacote/node_modules/tar/node_modules/yallist/package.json b/node_modules/tar/node_modules/yallist/package.json similarity index 89% rename from node_modules/pacote/node_modules/tar/node_modules/yallist/package.json rename to node_modules/tar/node_modules/yallist/package.json index 65dfe325138..c2a8e0d3995 100644 --- a/node_modules/pacote/node_modules/tar/node_modules/yallist/package.json +++ b/node_modules/tar/node_modules/yallist/package.json @@ -3,7 +3,7 @@ "_id": "yallist@3.0.2", "_inBundle": false, "_integrity": "sha1-hFK0u36Dx8GI2AQcGoN8dz1ti7k=", - "_location": "/pacote/tar/yallist", + "_location": "/tar/yallist", "_phantomChildren": {}, "_requested": { "type": "range", @@ -16,13 +16,13 @@ "fetchSpec": "^3.0.2" }, "_requiredBy": [ - "/pacote/tar", - "/pacote/tar/minipass" + "/tar", + "/tar/minipass" ], "_resolved": "https://registry.npmjs.org/yallist/-/yallist-3.0.2.tgz", "_shasum": "8452b4bb7e83c7c188d8041c1a837c773d6d8bb9", "_spec": "yallist@^3.0.2", - "_where": "/Users/rebecca/code/npm/node_modules/pacote/node_modules/tar", + "_where": "/Users/rebecca/code/npm/node_modules/tar", "author": { "name": "Isaac Z. 
Schlueter", "email": "i@izs.me", diff --git a/node_modules/pacote/node_modules/tar/node_modules/yallist/yallist.js b/node_modules/tar/node_modules/yallist/yallist.js similarity index 100% rename from node_modules/pacote/node_modules/tar/node_modules/yallist/yallist.js rename to node_modules/tar/node_modules/yallist/yallist.js diff --git a/node_modules/tar/package.json b/node_modules/tar/package.json index 4dc0c151cbc..4d6683463a6 100644 --- a/node_modules/tar/package.json +++ b/node_modules/tar/package.json @@ -1,65 +1,78 @@ { - "_from": "tar@~2.2.1", - "_id": "tar@2.2.1", - "_integrity": "sha1-jk0qJWwOIYXGsYrWlK7JaLg8sdE=", + "_from": "tar@latest", + "_id": "tar@4.0.1", + "_inBundle": false, + "_integrity": "sha512-XBpU+/azPOMvE5m2Tn7Sl6U1ahpGfe77LkdrAlFilwrgHZsR+2iy0l8klQtfJNM+DACZO2Xrw10MTyQRB4du5A==", "_location": "/tar", - "_phantomChildren": { - "inherits": "2.0.3" - }, + "_phantomChildren": {}, "_requested": { - "type": "range", + "type": "tag", "registry": true, - "raw": "tar@~2.2.1", + "raw": "tar@latest", "name": "tar", "escapedName": "tar", - "rawSpec": "~2.2.1", + "rawSpec": "latest", "saveSpec": null, - "fetchSpec": "~2.2.1" + "fetchSpec": "latest" }, "_requiredBy": [ + "#USER", "/", - "/node-gyp" + "/pacote" ], - "_resolved": "https://registry.npmjs.org/tar/-/tar-2.2.1.tgz", - "_shasum": "8e4d2a256c0e2185c6b18ad694aec968b83cb1d1", - "_shrinkwrap": null, - "_spec": "tar@~2.2.1", - "_where": "/Users/zkat/Documents/code/npm", + "_resolved": "https://registry.npmjs.org/tar/-/tar-4.0.1.tgz", + "_shasum": "3f5b2e5289db30c2abe4c960f43d0d9fff96aaf0", + "_spec": "tar@latest", + "_where": "/Users/rebecca/code/npm", "author": { "name": "Isaac Z. Schlueter", "email": "i@izs.me", "url": "http://blog.izs.me/" }, - "bin": null, "bugs": { - "url": "https://github.com/isaacs/node-tar/issues" + "url": "https://github.com/npm/node-tar/issues" }, "bundleDependencies": false, "dependencies": { - "block-stream": "*", - "fstream": "^1.0.2", - "inherits": "2" + "chownr": "^1.0.1", + "minipass": "^2.0.2", + "minizlib": "^1.0.3", + "mkdirp": "^0.5.0", + "yallist": "^3.0.2" }, "deprecated": false, "description": "tar for node", "devDependencies": { - "graceful-fs": "^4.1.2", - "mkdirp": "^0.5.0", + "chmodr": "^1.0.2", + "end-of-stream": "^1.4.0", + "events-to-array": "^1.1.2", + "mutate-fs": "^1.1.0", "rimraf": "1.x", - "tap": "0.x" + "tap": "^10.3.3", + "tar-fs": "^1.15.2", + "tar-stream": "^1.5.2" }, - "homepage": "https://github.com/isaacs/node-tar#readme", + "engines": { + "node": ">=4.5" + }, + "files": [ + "index.js", + "lib/" + ], + "homepage": "https://github.com/npm/node-tar#readme", "license": "ISC", - "main": "tar.js", "name": "tar", - "optionalDependencies": {}, - "peerDependencies": {}, "repository": { "type": "git", - "url": "git://github.com/isaacs/node-tar.git" + "url": "git+https://github.com/npm/node-tar.git" }, "scripts": { - "test": "tap test/*.js" + "bench": "for i in benchmarks/*/*.js; do echo $i; for j in {1..5}; do node $i || break; done; done", + "genparse": "node scripts/generate-parse-fixtures.js", + "postpublish": "git push origin --all; git push origin --tags", + "postversion": "npm publish", + "preversion": "npm test", + "test": "tap test/*.js --100 -J --coverage-report=text" }, - "version": "2.2.1" + "version": "4.0.1" } diff --git a/package-lock.json b/package-lock.json index 2955e743729..cb4a1e5adbb 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1275,6 +1275,26 @@ "version": "5.3.0", "resolved": "https://registry.npmjs.org/semver/-/semver-5.3.0.tgz", 
"integrity": "sha1-myzl094C0XxgEq0yaqa00M9U+U8=" + }, + "tar": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-2.2.1.tgz", + "integrity": "sha1-jk0qJWwOIYXGsYrWlK7JaLg8sdE=", + "requires": { + "block-stream": "0.0.9", + "fstream": "1.0.11", + "inherits": "2.0.3" + }, + "dependencies": { + "block-stream": { + "version": "0.0.9", + "resolved": "https://registry.npmjs.org/block-stream/-/block-stream-0.0.9.tgz", + "integrity": "sha1-E+v+d4oDIFz+A3UUgeu0szAMEmo=", + "requires": { + "inherits": "2.0.3" + } + } + } } } }, @@ -2645,41 +2665,6 @@ "integrity": "sha1-7RAEHy5KfxsKOEZtF6XD4n3x38E=" } } - }, - "tar": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/tar/-/tar-4.0.1.tgz", - "integrity": "sha512-XBpU+/azPOMvE5m2Tn7Sl6U1ahpGfe77LkdrAlFilwrgHZsR+2iy0l8klQtfJNM+DACZO2Xrw10MTyQRB4du5A==", - "requires": { - "chownr": "1.0.1", - "minipass": "2.2.1", - "minizlib": "1.0.3", - "mkdirp": "0.5.1", - "yallist": "3.0.2" - }, - "dependencies": { - "minipass": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-2.2.1.tgz", - "integrity": "sha512-u1aUllxPJUI07cOqzR7reGmQxmCqlH88uIIsf6XZFEWgw7gXKpJdR+5R9Y3KEDmWYkdIz9wXZs3C0jOPxejk/Q==", - "requires": { - "yallist": "3.0.2" - } - }, - "minizlib": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-1.0.3.tgz", - "integrity": "sha1-1cGr93vhVGGZUuJTM27Mq5sqMvU=", - "requires": { - "minipass": "2.2.1" - } - }, - "yallist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.0.2.tgz", - "integrity": "sha1-hFK0u36Dx8GI2AQcGoN8dz1ti7k=" - } - } } } }, @@ -7440,22 +7425,37 @@ } }, "tar": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/tar/-/tar-2.2.1.tgz", - "integrity": "sha1-jk0qJWwOIYXGsYrWlK7JaLg8sdE=", + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-4.0.1.tgz", + "integrity": "sha512-XBpU+/azPOMvE5m2Tn7Sl6U1ahpGfe77LkdrAlFilwrgHZsR+2iy0l8klQtfJNM+DACZO2Xrw10MTyQRB4du5A==", "requires": { - "block-stream": "0.0.9", - "fstream": "1.0.11", - "inherits": "2.0.3" + "chownr": "1.0.1", + "minipass": "2.2.1", + "minizlib": "1.0.3", + "mkdirp": "0.5.1", + "yallist": "3.0.2" }, "dependencies": { - "block-stream": { - "version": "0.0.9", - "resolved": "https://registry.npmjs.org/block-stream/-/block-stream-0.0.9.tgz", - "integrity": "sha1-E+v+d4oDIFz+A3UUgeu0szAMEmo=", + "minipass": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-2.2.1.tgz", + "integrity": "sha512-u1aUllxPJUI07cOqzR7reGmQxmCqlH88uIIsf6XZFEWgw7gXKpJdR+5R9Y3KEDmWYkdIz9wXZs3C0jOPxejk/Q==", "requires": { - "inherits": "2.0.3" + "yallist": "3.0.2" } + }, + "minizlib": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-1.0.3.tgz", + "integrity": "sha1-1cGr93vhVGGZUuJTM27Mq5sqMvU=", + "requires": { + "minipass": "2.2.1" + } + }, + "yallist": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.0.2.tgz", + "integrity": "sha1-hFK0u36Dx8GI2AQcGoN8dz1ti7k=" } } }, diff --git a/package.json b/package.json index 406d7fab849..9e776a6e897 100644 --- a/package.json +++ b/package.json @@ -109,7 +109,7 @@ "sorted-union-stream": "~2.1.3", "ssri": "~4.1.6", "strip-ansi": "~4.0.0", - "tar": "~2.2.1", + "tar": "~4.0.1", "text-table": "~0.2.0", "uid-number": "0.0.6", "umask": "~1.1.0",