Diffstat (limited to 'deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib')
-rw-r--r--  deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/path.js | 26
-rw-r--r--  deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/read.js | 125
-rw-r--r--  deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/rm.js | 21
-rw-r--r--  deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/write.js | 162
-rw-r--r--  deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/entry-index.js | 225
-rw-r--r--  deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/memoization.js | 69
-rw-r--r--  deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/fix-owner.js | 44
-rw-r--r--  deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/hash-to-segments.js | 11
-rw-r--r--  deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/move-file.js | 51
-rw-r--r--  deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/tmp.js | 32
-rw-r--r--  deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/y.js | 25
-rw-r--r--  deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/verify.js | 213
12 files changed, 1004 insertions, 0 deletions
diff --git a/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/path.js b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/path.js
new file mode 100644
index 0000000000..fa6491ba6f
--- /dev/null
+++ b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/path.js
@@ -0,0 +1,26 @@
+'use strict'
+
+const contentVer = require('../../package.json')['cache-version'].content
+const hashToSegments = require('../util/hash-to-segments')
+const path = require('path')
+const ssri = require('ssri')
+
+// Current format of content file path:
+//
+// sha512-BaSE64Hex= ->
+// ~/.my-cache/content-v2/sha512/ba/da/55deadbeefc0ffee
+//
+module.exports = contentPath
+function contentPath (cache, integrity) {
+ const sri = ssri.parse(integrity, {single: true})
+ // contentPath is the *strongest* algo given
+ return path.join.apply(path, [
+ contentDir(cache),
+ sri.algorithm
+ ].concat(hashToSegments(sri.hexDigest())))
+}
+
+module.exports._contentDir = contentDir
+function contentDir (cache) {
+ return path.join(cache, `content-v${contentVer}`)
+}
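As a quick illustration of the path layout encoded above (not part of the diff; the cache directory is made up, and the snippet assumes it is run from the cacache package root), ssri.fromData can produce a real integrity value to feed into contentPath:

// Illustrative sketch only.
const ssri = require('ssri')
const contentPath = require('./lib/content/path')

const integrity = ssri.fromData('hello world')        // defaults to sha512
console.log(contentPath('/tmp/my-cache', integrity))
// -> /tmp/my-cache/content-v2/sha512/<first two hex chars>/<next two>/<rest of the hex digest>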
diff --git a/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/read.js b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/read.js
new file mode 100644
index 0000000000..7a4da3beb8
--- /dev/null
+++ b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/read.js
@@ -0,0 +1,125 @@
+'use strict'
+
+const BB = require('bluebird')
+
+const contentPath = require('./path')
+const fs = require('graceful-fs')
+const PassThrough = require('stream').PassThrough
+const pipe = BB.promisify(require('mississippi').pipe)
+const ssri = require('ssri')
+const Y = require('../util/y.js')
+
+BB.promisifyAll(fs)
+
+module.exports = read
+function read (cache, integrity, opts) {
+ opts = opts || {}
+ return pickContentSri(cache, integrity).then(content => {
+ const sri = content.sri
+ const cpath = contentPath(cache, sri)
+ return fs.readFileAsync(cpath, null).then(data => {
+ if (typeof opts.size === 'number' && opts.size !== data.length) {
+ throw sizeError(opts.size, data.length)
+ } else if (ssri.checkData(data, sri)) {
+ return data
+ } else {
+ throw integrityError(sri, cpath)
+ }
+ })
+ })
+}
+
+module.exports.stream = readStream
+module.exports.readStream = readStream
+function readStream (cache, integrity, opts) {
+ opts = opts || {}
+ const stream = new PassThrough()
+ pickContentSri(
+ cache, integrity
+ ).then(content => {
+ const sri = content.sri
+ return pipe(
+ fs.createReadStream(contentPath(cache, sri)),
+ ssri.integrityStream({
+ integrity: sri,
+ size: opts.size
+ }),
+ stream
+ )
+ }).catch(err => {
+ stream.emit('error', err)
+ })
+ return stream
+}
+
+if (fs.copyFile) {
+ module.exports.copy = copy
+}
+function copy (cache, integrity, dest, opts) {
+ opts = opts || {}
+ return pickContentSri(cache, integrity).then(content => {
+ const sri = content.sri
+ const cpath = contentPath(cache, sri)
+ return fs.copyFileAsync(cpath, dest).then(() => content.size)
+ })
+}
+
+module.exports.hasContent = hasContent
+function hasContent (cache, integrity) {
+ if (!integrity) { return BB.resolve(false) }
+ return pickContentSri(cache, integrity)
+ .catch({code: 'ENOENT'}, () => false)
+ .catch({code: 'EPERM'}, err => {
+ if (process.platform !== 'win32') {
+ throw err
+ } else {
+ return false
+ }
+ }).then(content => {
+ if (!content.sri) return false
+ return ({ sri: content.sri, size: content.stat.size })
+ })
+}
+
+module.exports._pickContentSri = pickContentSri
+function pickContentSri (cache, integrity) {
+ const sri = ssri.parse(integrity)
+ // If `integrity` has multiple entries, pick the first digest
+ // with available local data.
+ const algo = sri.pickAlgorithm()
+ const digests = sri[algo]
+ if (digests.length <= 1) {
+ const cpath = contentPath(cache, digests[0])
+ return fs.lstatAsync(cpath).then(stat => ({ sri: digests[0], stat }))
+ } else {
+ return BB.any(sri[sri.pickAlgorithm()].map(meta => {
+ return pickContentSri(cache, meta)
+ }))
+ .catch(err => {
+ if ([].some.call(err, e => e.code === 'ENOENT')) {
+ throw Object.assign(
+ new Error('No matching content found for ' + sri.toString()),
+ {code: 'ENOENT'}
+ )
+ } else {
+ throw err[0]
+ }
+ })
+ }
+}
+
+function sizeError (expected, found) {
+ var err = new Error(Y`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`)
+ err.expected = expected
+ err.found = found
+ err.code = 'EBADSIZE'
+ return err
+}
+
+function integrityError (sri, path) {
+ var err = new Error(Y`Integrity verification failed for ${sri} (${path})`)
+ err.code = 'EINTEGRITY'
+ err.sri = sri
+ err.path = path
+ return err
+}
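A minimal usage sketch for the read API above (the cache path is hypothetical; it leans on content/write.js, added later in this diff, to put readable content in place first):

const write = require('./lib/content/write')
const read = require('./lib/content/read')

write('/tmp/my-cache', Buffer.from('hello world')).then(res => {
  // read verifies opts.size and the integrity before resolving with a Buffer;
  // mismatches reject with EBADSIZE or EINTEGRITY.
  return read('/tmp/my-cache', res.integrity, { size: 11 })
}).then(data => console.log(data.toString()))          // 'hello world'

// Streaming variant: integrity is checked by ssri.integrityStream as data flows.
// read.stream('/tmp/my-cache', someIntegrity).pipe(process.stdout)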
diff --git a/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/rm.js b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/rm.js
new file mode 100644
index 0000000000..12cf158235
--- /dev/null
+++ b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/rm.js
@@ -0,0 +1,21 @@
+'use strict'
+
+const BB = require('bluebird')
+
+const contentPath = require('./path')
+const hasContent = require('./read').hasContent
+const rimraf = BB.promisify(require('rimraf'))
+
+module.exports = rm
+function rm (cache, integrity) {
+ return hasContent(cache, integrity).then(content => {
+ if (content) {
+ const sri = content.sri
+ if (sri) {
+ return rimraf(contentPath(cache, sri)).then(() => true)
+ }
+ } else {
+ return false
+ }
+ })
+}
diff --git a/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/write.js b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/write.js
new file mode 100644
index 0000000000..a79ae92902
--- /dev/null
+++ b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/content/write.js
@@ -0,0 +1,162 @@
+'use strict'
+
+const BB = require('bluebird')
+
+const contentPath = require('./path')
+const fixOwner = require('../util/fix-owner')
+const fs = require('graceful-fs')
+const moveFile = require('../util/move-file')
+const PassThrough = require('stream').PassThrough
+const path = require('path')
+const pipe = BB.promisify(require('mississippi').pipe)
+const rimraf = BB.promisify(require('rimraf'))
+const ssri = require('ssri')
+const to = require('mississippi').to
+const uniqueFilename = require('unique-filename')
+const Y = require('../util/y.js')
+
+const writeFileAsync = BB.promisify(fs.writeFile)
+
+module.exports = write
+function write (cache, data, opts) {
+ opts = opts || {}
+ if (opts.algorithms && opts.algorithms.length > 1) {
+ throw new Error(
+ Y`opts.algorithms only supports a single algorithm for now`
+ )
+ }
+ if (typeof opts.size === 'number' && data.length !== opts.size) {
+ return BB.reject(sizeError(opts.size, data.length))
+ }
+ const sri = ssri.fromData(data, opts)
+ if (opts.integrity && !ssri.checkData(data, opts.integrity, opts)) {
+ return BB.reject(checksumError(opts.integrity, sri))
+ }
+ return BB.using(makeTmp(cache, opts), tmp => (
+ writeFileAsync(
+ tmp.target, data, {flag: 'wx'}
+ ).then(() => (
+ moveToDestination(tmp, cache, sri, opts)
+ ))
+ )).then(() => ({integrity: sri, size: data.length}))
+}
+
+module.exports.stream = writeStream
+function writeStream (cache, opts) {
+ opts = opts || {}
+ const inputStream = new PassThrough()
+ let inputErr = false
+ function errCheck () {
+ if (inputErr) { throw inputErr }
+ }
+
+ let allDone
+ const ret = to((c, n, cb) => {
+ if (!allDone) {
+ allDone = handleContent(inputStream, cache, opts, errCheck)
+ }
+ inputStream.write(c, n, cb)
+ }, cb => {
+ inputStream.end(() => {
+ if (!allDone) {
+ const e = new Error(Y`Cache input stream was empty`)
+ e.code = 'ENODATA'
+ return ret.emit('error', e)
+ }
+ allDone.then(res => {
+ res.integrity && ret.emit('integrity', res.integrity)
+ res.size !== null && ret.emit('size', res.size)
+ cb()
+ }, e => {
+ ret.emit('error', e)
+ })
+ })
+ })
+ ret.once('error', e => {
+ inputErr = e
+ })
+ return ret
+}
+
+function handleContent (inputStream, cache, opts, errCheck) {
+ return BB.using(makeTmp(cache, opts), tmp => {
+ errCheck()
+ return pipeToTmp(
+ inputStream, cache, tmp.target, opts, errCheck
+ ).then(res => {
+ return moveToDestination(
+ tmp, cache, res.integrity, opts, errCheck
+ ).then(() => res)
+ })
+ })
+}
+
+function pipeToTmp (inputStream, cache, tmpTarget, opts, errCheck) {
+ return BB.resolve().then(() => {
+ let integrity
+ let size
+ const hashStream = ssri.integrityStream({
+ integrity: opts.integrity,
+ algorithms: opts.algorithms,
+ size: opts.size
+ }).on('integrity', s => {
+ integrity = s
+ }).on('size', s => {
+ size = s
+ })
+ const outStream = fs.createWriteStream(tmpTarget, {
+ flags: 'wx'
+ })
+ errCheck()
+ return pipe(inputStream, hashStream, outStream).then(() => {
+ return {integrity, size}
+ }, err => {
+ return rimraf(tmpTarget).then(() => { throw err })
+ })
+ })
+}
+
+function makeTmp (cache, opts) {
+ const tmpTarget = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix)
+ return fixOwner.mkdirfix(
+ path.dirname(tmpTarget), opts.uid, opts.gid
+ ).then(() => ({
+ target: tmpTarget,
+ moved: false
+ })).disposer(tmp => (!tmp.moved && rimraf(tmp.target)))
+}
+
+function moveToDestination (tmp, cache, sri, opts, errCheck) {
+ errCheck && errCheck()
+ const destination = contentPath(cache, sri)
+ const destDir = path.dirname(destination)
+
+ return fixOwner.mkdirfix(
+ destDir, opts.uid, opts.gid
+ ).then(() => {
+ errCheck && errCheck()
+ return moveFile(tmp.target, destination)
+ }).then(() => {
+ errCheck && errCheck()
+ tmp.moved = true
+ return fixOwner.chownr(destination, opts.uid, opts.gid)
+ })
+}
+
+function sizeError (expected, found) {
+ var err = new Error(Y`Bad data size: expected inserted data to be ${expected} bytes, but got ${found} instead`)
+ err.expected = expected
+ err.found = found
+ err.code = 'EBADSIZE'
+ return err
+}
+
+function checksumError (expected, found) {
+ var err = new Error(Y`Integrity check failed:
+ Wanted: ${expected}
+ Found: ${found}`)
+ err.code = 'EINTEGRITY'
+ err.expected = expected
+ err.found = found
+ return err
+}
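A hedged sketch of the streaming writer defined above (the input filename is made up); the returned stream emits 'integrity' and 'size' once the content has been hashed and moved into place:

const fs = require('fs')
const writeStream = require('./lib/content/write').stream

fs.createReadStream('some-input-file')
  .pipe(writeStream('/tmp/my-cache', { algorithms: ['sha512'] }))
  .on('integrity', sri => console.log('stored as', sri.toString()))
  .on('size', size => console.log('bytes written:', size))
  .on('error', err => console.error('write failed:', err.code))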
diff --git a/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/entry-index.js b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/entry-index.js
new file mode 100644
index 0000000000..fe1cd06457
--- /dev/null
+++ b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/entry-index.js
@@ -0,0 +1,225 @@
+'use strict'
+
+const BB = require('bluebird')
+
+const contentPath = require('./content/path')
+const crypto = require('crypto')
+const fixOwner = require('./util/fix-owner')
+const fs = require('graceful-fs')
+const hashToSegments = require('./util/hash-to-segments')
+const ms = require('mississippi')
+const path = require('path')
+const ssri = require('ssri')
+const Y = require('./util/y.js')
+
+const indexV = require('../package.json')['cache-version'].index
+
+const appendFileAsync = BB.promisify(fs.appendFile)
+const readFileAsync = BB.promisify(fs.readFile)
+const readdirAsync = BB.promisify(fs.readdir)
+const concat = ms.concat
+const from = ms.from
+
+module.exports.NotFoundError = class NotFoundError extends Error {
+ constructor (cache, key) {
+ super(Y`No cache entry for \`${key}\` found in \`${cache}\``)
+ this.code = 'ENOENT'
+ this.cache = cache
+ this.key = key
+ }
+}
+
+module.exports.insert = insert
+function insert (cache, key, integrity, opts) {
+ opts = opts || {}
+ const bucket = bucketPath(cache, key)
+ const entry = {
+ key,
+ integrity: integrity && ssri.stringify(integrity),
+ time: Date.now(),
+ size: opts.size,
+ metadata: opts.metadata
+ }
+ return fixOwner.mkdirfix(
+ path.dirname(bucket), opts.uid, opts.gid
+ ).then(() => {
+ const stringified = JSON.stringify(entry)
+ // NOTE - Cleverness ahoy!
+ //
+ // This works because it's tremendously unlikely for an entry to corrupt
+ // another while still matching the hash of the JSON in
+ // question. So, we just slap a hash of the entry in there and verify it on read.
+ //
+ // Thanks to @isaacs for the whiteboarding session that ended up with this.
+ return appendFileAsync(
+ bucket, `\n${hashEntry(stringified)}\t${stringified}`
+ )
+ }).then(
+ () => fixOwner.chownr(bucket, opts.uid, opts.gid)
+ ).catch({code: 'ENOENT'}, () => {
+ // There's a class of race conditions that happen when things get deleted
+ // during fixOwner, or between the two mkdirfix/chownr calls.
+ //
+ // It's perfectly fine to just not bother in those cases and lie
+ // that the index entry was written. Because it's a cache.
+ }).then(() => {
+ return formatEntry(cache, entry)
+ })
+}
+
+module.exports.find = find
+function find (cache, key) {
+ const bucket = bucketPath(cache, key)
+ return bucketEntries(cache, bucket).then(entries => {
+ return entries.reduce((latest, next) => {
+ if (next && next.key === key) {
+ return formatEntry(cache, next)
+ } else {
+ return latest
+ }
+ }, null)
+ }).catch(err => {
+ if (err.code === 'ENOENT') {
+ return null
+ } else {
+ throw err
+ }
+ })
+}
+
+module.exports.delete = del
+function del (cache, key, opts) {
+ return insert(cache, key, null, opts)
+}
+
+module.exports.lsStream = lsStream
+function lsStream (cache) {
+ const indexDir = bucketDir(cache)
+ const stream = from.obj()
+
+ // "/cachename/*"
+ readdirOrEmpty(indexDir).map(bucket => {
+ const bucketPath = path.join(indexDir, bucket)
+
+ // "/cachename/<bucket 0xFF>/*"
+ return readdirOrEmpty(bucketPath).map(subbucket => {
+ const subbucketPath = path.join(bucketPath, subbucket)
+
+ // "/cachename/<bucket 0xFF>/<bucket 0xFF>/*"
+ return readdirOrEmpty(subbucketPath).map(entry => {
+ const getKeyToEntry = bucketEntries(
+ cache,
+ path.join(subbucketPath, entry)
+ ).reduce((acc, entry) => {
+ acc.set(entry.key, entry)
+ return acc
+ }, new Map())
+
+ return getKeyToEntry.then(reduced => {
+ for (let entry of reduced.values()) {
+ const formatted = formatEntry(cache, entry)
+ formatted && stream.push(formatted)
+ }
+ }).catch({code: 'ENOENT'}, nop)
+ })
+ })
+ }).then(() => {
+ stream.push(null)
+ }, err => {
+ stream.emit('error', err)
+ })
+
+ return stream
+}
+
+module.exports.ls = ls
+function ls (cache) {
+ return BB.fromNode(cb => {
+ lsStream(cache).on('error', cb).pipe(concat(entries => {
+ cb(null, entries.reduce((acc, xs) => {
+ acc[xs.key] = xs
+ return acc
+ }, {}))
+ }))
+ })
+}
+
+function bucketEntries (cache, bucket, filter) {
+ return readFileAsync(
+ bucket, 'utf8'
+ ).then(data => {
+ let entries = []
+ data.split('\n').forEach(entry => {
+ if (!entry) { return }
+ const pieces = entry.split('\t')
+ if (!pieces[1] || hashEntry(pieces[1]) !== pieces[0]) {
+ // Hash is no good! Corruption or malice? Doesn't matter!
+ // EJECT EJECT
+ return
+ }
+ let obj
+ try {
+ obj = JSON.parse(pieces[1])
+ } catch (e) {
+ // Entry is corrupted!
+ return
+ }
+ if (obj) {
+ entries.push(obj)
+ }
+ })
+ return entries
+ })
+}
+
+module.exports._bucketDir = bucketDir
+function bucketDir (cache) {
+ return path.join(cache, `index-v${indexV}`)
+}
+
+module.exports._bucketPath = bucketPath
+function bucketPath (cache, key) {
+ const hashed = hashKey(key)
+ return path.join.apply(path, [bucketDir(cache)].concat(
+ hashToSegments(hashed)
+ ))
+}
+
+module.exports._hashKey = hashKey
+function hashKey (key) {
+ return hash(key, 'sha256')
+}
+
+module.exports._hashEntry = hashEntry
+function hashEntry (str) {
+ return hash(str, 'sha1')
+}
+
+function hash (str, digest) {
+ return crypto
+ .createHash(digest)
+ .update(str)
+ .digest('hex')
+}
+
+function formatEntry (cache, entry) {
+ // Treat null digests as deletions. They'll shadow any previous entries.
+ if (!entry.integrity) { return null }
+ return {
+ key: entry.key,
+ integrity: entry.integrity,
+ path: contentPath(cache, entry.integrity),
+ size: entry.size,
+ time: entry.time,
+ metadata: entry.metadata
+ }
+}
+
+function readdirOrEmpty (dir) {
+ return readdirAsync(dir)
+ .catch({code: 'ENOENT'}, () => [])
+ .catch({code: 'ENOTDIR'}, () => [])
+}
+
+function nop () {
+}
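To make the bucket format above concrete: each appended line is the sha1 of the JSON payload, a tab, then the payload itself, and keys are bucketed under index-v<N>/<2 chars>/<2 chars>/<rest> of sha256(key). A small sketch (cache path, key, and metadata are made up):

const ssri = require('ssri')
const index = require('./lib/entry-index')

const integrity = ssri.fromData('hello world')
index.insert('/tmp/my-cache', 'registry:example', integrity, { metadata: { note: 'demo' } })
  .then(() => index.find('/tmp/my-cache', 'registry:example'))
  .then(entry => console.log(entry.path, entry.metadata))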
diff --git a/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/memoization.js b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/memoization.js
new file mode 100644
index 0000000000..92179c7ac6
--- /dev/null
+++ b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/memoization.js
@@ -0,0 +1,69 @@
+'use strict'
+
+const LRU = require('lru-cache')
+
+const MAX_SIZE = 50 * 1024 * 1024 // 50MB
+const MAX_AGE = 3 * 60 * 1000
+
+let MEMOIZED = new LRU({
+ max: MAX_SIZE,
+ maxAge: MAX_AGE,
+ length: (entry, key) => {
+ if (key.startsWith('key:')) {
+ return entry.data.length
+ } else if (key.startsWith('digest:')) {
+ return entry.length
+ }
+ }
+})
+
+module.exports.clearMemoized = clearMemoized
+function clearMemoized () {
+ const old = {}
+ MEMOIZED.forEach((v, k) => {
+ old[k] = v
+ })
+ MEMOIZED.reset()
+ return old
+}
+
+module.exports.put = put
+function put (cache, entry, data, opts) {
+ pickMem(opts).set(`key:${cache}:${entry.key}`, { entry, data })
+ putDigest(cache, entry.integrity, data, opts)
+}
+
+module.exports.put.byDigest = putDigest
+function putDigest (cache, integrity, data, opts) {
+ pickMem(opts).set(`digest:${cache}:${integrity}`, data)
+}
+
+module.exports.get = get
+function get (cache, key, opts) {
+ return pickMem(opts).get(`key:${cache}:${key}`)
+}
+
+module.exports.get.byDigest = getDigest
+function getDigest (cache, integrity, opts) {
+ return pickMem(opts).get(`digest:${cache}:${integrity}`)
+}
+
+class ObjProxy {
+ constructor (obj) {
+ this.obj = obj
+ }
+ get (key) { return this.obj[key] }
+ set (key, val) { this.obj[key] = val }
+}
+
+function pickMem (opts) {
+ if (!opts || !opts.memoize) {
+ return MEMOIZED
+ } else if (opts.memoize.get && opts.memoize.set) {
+ return opts.memoize
+ } else if (typeof opts.memoize === 'object') {
+ return new ObjProxy(opts.memoize)
+ } else {
+ return MEMOIZED
+ }
+}
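For illustration, pickMem above means opts.memoize may be anything with get/set methods, or a plain object (wrapped in ObjProxy), so callers can scope memoization to themselves; the integrity string below is a made-up placeholder:

const memo = require('./lib/memoization')

const scratch = {}                                   // plain object -> ObjProxy
memo.put.byDigest('/tmp/my-cache', 'sha512-deadbeef', Buffer.from('data'), { memoize: scratch })
console.log(Object.keys(scratch))                    // [ 'digest:/tmp/my-cache:sha512-deadbeef' ]
console.log(memo.get.byDigest('/tmp/my-cache', 'sha512-deadbeef', { memoize: scratch }))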
diff --git a/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/fix-owner.js b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/fix-owner.js
new file mode 100644
index 0000000000..7000bff048
--- /dev/null
+++ b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/fix-owner.js
@@ -0,0 +1,44 @@
+'use strict'
+
+const BB = require('bluebird')
+
+const chownr = BB.promisify(require('chownr'))
+const mkdirp = BB.promisify(require('mkdirp'))
+const inflight = require('promise-inflight')
+
+module.exports.chownr = fixOwner
+function fixOwner (filepath, uid, gid) {
+ if (!process.getuid) {
+ // This platform doesn't need ownership fixing
+ return BB.resolve()
+ }
+ if (typeof uid !== 'number' && typeof gid !== 'number') {
+ // There's no permissions override. Nothing to do here.
+ return BB.resolve()
+ }
+ if ((typeof uid === 'number' && process.getuid() === uid) &&
+ (typeof gid === 'number' && process.getgid() === gid)) {
+ // No need to override if it's already what we used.
+ return BB.resolve()
+ }
+ return inflight(
+ 'fixOwner: fixing ownership on ' + filepath,
+ () => chownr(
+ filepath,
+ typeof uid === 'number' ? uid : process.getuid(),
+ typeof gid === 'number' ? gid : process.getgid()
+ ).catch({code: 'ENOENT'}, () => null)
+ )
+}
+
+module.exports.mkdirfix = mkdirfix
+function mkdirfix (p, uid, gid, cb) {
+ return mkdirp(p).then(made => {
+ if (made) {
+ return fixOwner(made, uid, gid).then(() => made)
+ }
+ }).catch({code: 'EEXIST'}, () => {
+ // There's a race in mkdirp!
+ return fixOwner(p, uid, gid).then(() => null)
+ })
+}
diff --git a/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/hash-to-segments.js b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/hash-to-segments.js
new file mode 100644
index 0000000000..192be2a6d6
--- /dev/null
+++ b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/hash-to-segments.js
@@ -0,0 +1,11 @@
+'use strict'
+
+module.exports = hashToSegments
+
+function hashToSegments (hash) {
+ return [
+ hash.slice(0, 2),
+ hash.slice(2, 4),
+ hash.slice(4)
+ ]
+}
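The segmenting keeps directory fan-out shallow: two characters, two more, then the remainder. For example:

const hashToSegments = require('./lib/util/hash-to-segments')
console.log(hashToSegments('badc0ffee0ddf00d'))
// -> [ 'ba', 'dc', '0ffee0ddf00d' ]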
diff --git a/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/move-file.js b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/move-file.js
new file mode 100644
index 0000000000..b43744b3da
--- /dev/null
+++ b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/move-file.js
@@ -0,0 +1,51 @@
+'use strict'
+
+const fs = require('graceful-fs')
+const BB = require('bluebird')
+const chmod = BB.promisify(fs.chmod)
+const unlink = BB.promisify(fs.unlink)
+let move
+let pinflight
+
+module.exports = moveFile
+function moveFile (src, dest) {
+ // This isn't quite an fs.rename -- the assumption is that
+ // if `dest` already exists, and we get certain errors while
+ // trying to move it, we should just not bother.
+ //
+ // In the case of cache corruption, users will receive an
+ // EINTEGRITY error elsewhere, and can remove the offending
+ // content their own way.
+ //
+ // Note that, as the name suggests, this strictly only supports file moves.
+ return BB.fromNode(cb => {
+ fs.link(src, dest, err => {
+ if (err) {
+ if (err.code === 'EEXIST' || err.code === 'EBUSY') {
+ // file already exists, so whatever
+ } else if (err.code === 'EPERM' && process.platform === 'win32') {
+ // file handle stayed open even past graceful-fs limits
+ } else {
+ return cb(err)
+ }
+ }
+ return cb()
+ })
+ }).then(() => {
+ // content should never change for any reason, so make it read-only
+ return BB.join(unlink(src), process.platform !== 'win32' && chmod(dest, '0444'))
+ }).catch(() => {
+ if (!pinflight) { pinflight = require('promise-inflight') }
+ return pinflight('cacache-move-file:' + dest, () => {
+ return BB.promisify(fs.stat)(dest).catch(err => {
+ if (err.code !== 'ENOENT') {
+ // Something else is wrong here. Bail bail bail
+ throw err
+ }
+ // file doesn't already exist! let's try a rename -> copy fallback
+ if (!move) { move = require('move-concurrently') }
+ return move(src, dest, { BB, fs })
+ })
+ })
+ })
+}
diff --git a/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/tmp.js b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/tmp.js
new file mode 100644
index 0000000000..4fc4512cc8
--- /dev/null
+++ b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/tmp.js
@@ -0,0 +1,32 @@
+'use strict'
+
+const BB = require('bluebird')
+
+const fixOwner = require('./fix-owner')
+const path = require('path')
+const rimraf = BB.promisify(require('rimraf'))
+const uniqueFilename = require('unique-filename')
+
+module.exports.mkdir = mktmpdir
+function mktmpdir (cache, opts) {
+ opts = opts || {}
+ const tmpTarget = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix)
+ return fixOwner.mkdirfix(tmpTarget, opts.uid, opts.gid).then(() => {
+ return tmpTarget
+ })
+}
+
+module.exports.withTmp = withTmp
+function withTmp (cache, opts, cb) {
+ if (!cb) {
+ cb = opts
+ opts = null
+ }
+ opts = opts || {}
+ return BB.using(mktmpdir(cache, opts).disposer(rimraf), cb)
+}
+
+module.exports.fix = fixtmpdir
+function fixtmpdir (cache, opts) {
+ return fixOwner(path.join(cache, 'tmp'), opts.uid, opts.gid)
+}
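A small sketch of withTmp above (cache path and prefix are hypothetical): the directory is created under <cache>/tmp and removed once the callback's promise settles, via bluebird's using/disposer pair:

const tmp = require('./lib/util/tmp')

tmp.withTmp('/tmp/my-cache', { tmpPrefix: 'demo' }, dir => {
  // do work inside `dir`; it is rimraf'd automatically once this resolves
  console.log('scratch space at', dir)
})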
diff --git a/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/y.js b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/y.js
new file mode 100644
index 0000000000..d62bedacb3
--- /dev/null
+++ b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/util/y.js
@@ -0,0 +1,25 @@
+'use strict'
+
+const path = require('path')
+const y18n = require('y18n')({
+ directory: path.join(__dirname, '../../locales'),
+ locale: 'en',
+ updateFiles: process.env.CACACHE_UPDATE_LOCALE_FILES === 'true'
+})
+
+module.exports = yTag
+function yTag (parts) {
+ let str = ''
+ parts.forEach((part, i) => {
+ const arg = arguments[i + 1]
+ str += part
+ if (arg) {
+ str += '%s'
+ }
+ })
+ return y18n.__.apply(null, [str].concat([].slice.call(arguments, 1)))
+}
+
+module.exports.setLocale = locale => {
+ y18n.setLocale(locale)
+}
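The Y tag above turns each interpolation into a %s placeholder and routes the assembled string through y18n, which is why error messages throughout this diff stay localizable. A rough illustration (the key is made up):

const Y = require('./lib/util/y.js')
const key = 'my-key'
console.log(Y`No cache entry for \`${key}\` found`)   // interpolations become %s under the hood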
diff --git a/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/verify.js b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/verify.js
new file mode 100644
index 0000000000..6a01004c97
--- /dev/null
+++ b/deps/npm/node_modules/npm-registry-fetch/node_modules/cacache/lib/verify.js
@@ -0,0 +1,213 @@
+'use strict'
+
+const BB = require('bluebird')
+
+const contentPath = require('./content/path')
+const finished = BB.promisify(require('mississippi').finished)
+const fixOwner = require('./util/fix-owner')
+const fs = require('graceful-fs')
+const glob = BB.promisify(require('glob'))
+const index = require('./entry-index')
+const path = require('path')
+const rimraf = BB.promisify(require('rimraf'))
+const ssri = require('ssri')
+
+BB.promisifyAll(fs)
+
+module.exports = verify
+function verify (cache, opts) {
+ opts = opts || {}
+ opts.log && opts.log.silly('verify', 'verifying cache at', cache)
+ return BB.reduce([
+ markStartTime,
+ fixPerms,
+ garbageCollect,
+ rebuildIndex,
+ cleanTmp,
+ writeVerifile,
+ markEndTime
+ ], (stats, step, i) => {
+ const label = step.name || `step #${i}`
+ const start = new Date()
+ return BB.resolve(step(cache, opts)).then(s => {
+ s && Object.keys(s).forEach(k => {
+ stats[k] = s[k]
+ })
+ const end = new Date()
+ if (!stats.runTime) { stats.runTime = {} }
+ stats.runTime[label] = end - start
+ return stats
+ })
+ }, {}).tap(stats => {
+ stats.runTime.total = stats.endTime - stats.startTime
+ opts.log && opts.log.silly('verify', 'verification finished for', cache, 'in', `${stats.runTime.total}ms`)
+ })
+}
+
+function markStartTime (cache, opts) {
+ return { startTime: new Date() }
+}
+
+function markEndTime (cache, opts) {
+ return { endTime: new Date() }
+}
+
+function fixPerms (cache, opts) {
+ opts.log && opts.log.silly('verify', 'fixing cache permissions')
+ return fixOwner.mkdirfix(cache, opts.uid, opts.gid).then(() => {
+ // TODO - fix file permissions too
+ return fixOwner.chownr(cache, opts.uid, opts.gid)
+ }).then(() => null)
+}
+
+// Implements a naive mark-and-sweep tracing garbage collector.
+//
+// The algorithm is basically as follows:
+// 1. Read (and filter) all index entries ("pointers")
+// 2. Mark each integrity value as "live"
+// 3. Read entire filesystem tree in `content-vX/` dir
+// 4. If content is live, verify its checksum and delete it if it fails
+// 5. If content is not marked as live, rimraf it.
+//
+function garbageCollect (cache, opts) {
+ opts.log && opts.log.silly('verify', 'garbage collecting content')
+ const indexStream = index.lsStream(cache)
+ const liveContent = new Set()
+ indexStream.on('data', entry => {
+ if (opts && opts.filter && !opts.filter(entry)) { return }
+ liveContent.add(entry.integrity.toString())
+ })
+ return finished(indexStream).then(() => {
+ const contentDir = contentPath._contentDir(cache)
+ return glob(path.join(contentDir, '**'), {
+ follow: false,
+ nodir: true,
+ nosort: true
+ }).then(files => {
+ return BB.resolve({
+ verifiedContent: 0,
+ reclaimedCount: 0,
+ reclaimedSize: 0,
+ badContentCount: 0,
+ keptSize: 0
+ }).tap((stats) => BB.map(files, (f) => {
+ const split = f.split(/[/\\]/)
+ const digest = split.slice(split.length - 3).join('')
+ const algo = split[split.length - 4]
+ const integrity = ssri.fromHex(digest, algo)
+ if (liveContent.has(integrity.toString())) {
+ return verifyContent(f, integrity).then(info => {
+ if (!info.valid) {
+ stats.reclaimedCount++
+ stats.badContentCount++
+ stats.reclaimedSize += info.size
+ } else {
+ stats.verifiedContent++
+ stats.keptSize += info.size
+ }
+ return stats
+ })
+ } else {
+ // No entries refer to this content. We can delete.
+ stats.reclaimedCount++
+ return fs.statAsync(f).then(s => {
+ return rimraf(f).then(() => {
+ stats.reclaimedSize += s.size
+ return stats
+ })
+ })
+ }
+ }, {concurrency: opts.concurrency || 20}))
+ })
+ })
+}
+
+function verifyContent (filepath, sri) {
+ return fs.statAsync(filepath).then(stat => {
+ const contentInfo = {
+ size: stat.size,
+ valid: true
+ }
+ return ssri.checkStream(
+ fs.createReadStream(filepath),
+ sri
+ ).catch(err => {
+ if (err.code !== 'EINTEGRITY') { throw err }
+ return rimraf(filepath).then(() => {
+ contentInfo.valid = false
+ })
+ }).then(() => contentInfo)
+ }).catch({code: 'ENOENT'}, () => ({size: 0, valid: false}))
+}
+
+function rebuildIndex (cache, opts) {
+ opts.log && opts.log.silly('verify', 'rebuilding index')
+ return index.ls(cache).then(entries => {
+ const stats = {
+ missingContent: 0,
+ rejectedEntries: 0,
+ totalEntries: 0
+ }
+ const buckets = {}
+ for (let k in entries) {
+ if (entries.hasOwnProperty(k)) {
+ const hashed = index._hashKey(k)
+ const entry = entries[k]
+ const excluded = opts && opts.filter && !opts.filter(entry)
+ excluded && stats.rejectedEntries++
+ if (buckets[hashed] && !excluded) {
+ buckets[hashed].push(entry)
+ } else if (buckets[hashed] && excluded) {
+ // skip
+ } else if (excluded) {
+ buckets[hashed] = []
+ buckets[hashed]._path = index._bucketPath(cache, k)
+ } else {
+ buckets[hashed] = [entry]
+ buckets[hashed]._path = index._bucketPath(cache, k)
+ }
+ }
+ }
+ return BB.map(Object.keys(buckets), key => {
+ return rebuildBucket(cache, buckets[key], stats, opts)
+ }, {concurrency: opts.concurrency || 20}).then(() => stats)
+ })
+}
+
+function rebuildBucket (cache, bucket, stats, opts) {
+ return fs.truncateAsync(bucket._path).then(() => {
+ // This needs to be serialized because cacache explicitly
+ // lets very racy bucket conflicts clobber each other.
+ return BB.mapSeries(bucket, entry => {
+ const content = contentPath(cache, entry.integrity)
+ return fs.statAsync(content).then(() => {
+ return index.insert(cache, entry.key, entry.integrity, {
+ uid: opts.uid,
+ gid: opts.gid,
+ metadata: entry.metadata
+ }).then(() => { stats.totalEntries++ })
+ }).catch({code: 'ENOENT'}, () => {
+ stats.rejectedEntries++
+ stats.missingContent++
+ })
+ })
+ })
+}
+
+function cleanTmp (cache, opts) {
+ opts.log && opts.log.silly('verify', 'cleaning tmp directory')
+ return rimraf(path.join(cache, 'tmp'))
+}
+
+function writeVerifile (cache, opts) {
+ const verifile = path.join(cache, '_lastverified')
+ opts.log && opts.log.silly('verify', 'writing verifile to ' + verifile)
+ return fs.writeFileAsync(verifile, '' + (+(new Date())))
+}
+
+module.exports.lastRun = lastRun
+function lastRun (cache) {
+ return fs.readFileAsync(
+ path.join(cache, '_lastverified'), 'utf8'
+ ).then(data => new Date(+data))
+}
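Finally, a hedged sketch of driving the verify pipeline above end to end (the cache path and filter prefix are made up):

const verify = require('./lib/verify')

verify('/tmp/my-cache', { filter: entry => entry.key.startsWith('keep:') })
  .then(stats => {
    // stats merges the per-step results: verifiedContent, reclaimedCount,
    // reclaimedSize, rejectedEntries, plus a runTime entry per step.
    console.log('verified', stats.verifiedContent, 'content files in', stats.runTime.total, 'ms')
  })

verify.lastRun('/tmp/my-cache').then(when => console.log('last verified:', when))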