 'use strict'

-var asyncMap = require('slide/lib/async-map')
-var contentPath = require('./content/path')
-var fixOwner = require('./util/fix-owner')
-var fs = require('graceful-fs')
-var lockfile = require('lockfile')
-var path = require('path')
-var pipe = require('mississippi').pipe
-var split = require('split')
-var through = require('mississippi').through
+const asyncMap = require('slide/lib/async-map')
+const contentPath = require('./content/path')
+const fixOwner = require('./util/fix-owner')
+const fs = require('graceful-fs')
+const lockfile = require('lockfile')
+const path = require('path')
+const pipe = require('mississippi').pipe
+const Promise = require('bluebird')
+const split = require('split')
+const through = require('mississippi').through

 module.exports.insert = insert
-function insert (cache, key, digest, opts, _cb) {
-  if (!_cb) {
-    _cb = opts
-    opts = null
-  }
+function insert (cache, key, digest, opts) {
   opts = opts || {}
-  var bucket = indexPath(cache, key)
-  var lock = bucket + '.lock'
-  var cb = function (err, entry) {
-    lockfile.unlock(lock, function (er) {
-      _cb(er || err, entry)
-    })
-  }
-  fixOwner.mkdirfix(path.dirname(bucket), opts.uid, opts.gid, function (err) {
-    if (err) { return _cb(err) }
-    lockfile.lock(lock, {
-      stale: 60000,
-      retries: 10,
-      wait: 10000
-    }, function (err) {
-      if (err) { return _cb(err) }
-      fs.stat(bucket, function (err, existing) {
-        if (err && err.code !== 'ENOENT' && err.code !== 'EPERM') { cb(err) }
-        var entry = {
-          key: key,
-          digest: digest,
-          time: +(new Date()),
-          metadata: opts.metadata
-        }
-        // Because of the way these entries work,
-        // the index is safe from fs.appendFile stopping
-        // mid-write so long as newlines are *prepended*
-        //
-        // That is, if a write fails, it will be ignored
-        // by `find`, and the next successful one will be
-        // used.
-        //
-        // This should be -very rare-, since `fs.appendFile`
-        // will often be atomic on most platforms unless
-        // very large metadata has been included, but caches
-        // like this one tend to last a long time. :)
-        // Most corrupted reads are likely to be from attempting
-        // to read the index while it's being written to --
-        // which is safe, but not guaranteed to be atomic.
-        var e = (existing ? '\n' : '') + JSON.stringify(entry)
-        fs.appendFile(bucket, e, function (err) {
-          if (err) { return cb(err) }
-          fixOwner.chownr(bucket, opts.uid, opts.gid, function (err) {
+  const bucket = indexPath(cache, key)
+  const lock = bucket + '.lock'
+  return fixOwner.mkdirfix(
+    path.dirname(bucket), opts.uid, opts.gid
+  ).then(() => (
+    Promise.fromNode(_cb => {
+      const cb = (err, entry) => {
+        lockfile.unlock(lock, er => {
+          _cb(err || er, entry)
+        })
+      }
+      lockfile.lock(lock, {
+        stale: 60000,
+        retries: 10,
+        wait: 10000
+      }, function (err) {
+        if (err) { return _cb(err) }
+        fs.stat(bucket, function (err, existing) {
+          if (err && err.code !== 'ENOENT' && err.code !== 'EPERM') {
+            return cb(err)
+          }
+          const entry = {
+            key: key,
+            digest: digest,
+            time: +(new Date()),
+            metadata: opts.metadata
+          }
+          // Because of the way these entries work,
+          // the index is safe from fs.appendFile stopping
+          // mid-write so long as newlines are *prepended*
+          //
+          // That is, if a write fails, it will be ignored
+          // by `find`, and the next successful one will be
+          // used.
+          //
+          // This should be -very rare-, since `fs.appendFile`
+          // will often be atomic on most platforms unless
+          // very large metadata has been included, but caches
+          // like this one tend to last a long time. :)
+          // Most corrupted reads are likely to be from attempting
+          // to read the index while it's being written to --
+          // which is safe, but not guaranteed to be atomic.
+          const e = (existing ? '\n' : '') + JSON.stringify(entry)
+          fs.appendFile(bucket, e, function (err) {
             cb(err, entry)
           })
         })
       })
     })
-  })
+  )).then(() => fixOwner.chownr(bucket, opts.uid, opts.gid))
 }
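
The comment block above is the crux of the append-only index design. As a standalone sketch (not part of the diff; the keys and digests are made up), this is the failure mode it describes: a torn trailing write fails JSON.parse, gets skipped, and the last complete entry wins.

// Standalone sketch: a torn append only corrupts the final line, and
// line-by-line parsing skips it, falling back to the last complete entry.
const lines = [
  JSON.stringify({key: 'foo', digest: 'aaa', time: 1}),
  JSON.stringify({key: 'foo', digest: 'bbb', time: 2}),
  '{"key": "foo", "dig'  // simulated write cut off mid-entry
].join('\n')

let latest
lines.split('\n').forEach(function (line) {
  let parsed
  try { parsed = JSON.parse(line) } catch (e) { return }
  if (parsed && parsed.key === 'foo') { latest = parsed }
})
console.log(latest.digest)  // => 'bbb'; the torn line was ignored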

 module.exports.find = find
-function find (cache, key, cb) {
-  var bucket = indexPath(cache, key)
-  var stream = fs.createReadStream(bucket)
-  var ret
-  pipe(stream, split('\n', null, {trailing: true}).on('data', function (l) {
-    try {
-      var obj = JSON.parse(l)
-    } catch (e) {
-      return
-    }
-    if (obj && (obj.key === key)) {
-      ret = formatEntry(cache, obj)
-    }
-  }), function (err) {
-    if (err && err.code === 'ENOENT') {
-      cb(null, null)
-    } else {
-      cb(err, ret)
-    }
+function find (cache, key) {
+  const bucket = indexPath(cache, key)
+  const stream = fs.createReadStream(bucket)
+  let ret
+  return Promise.fromNode(cb => {
+    pipe(stream, split('\n', null, {trailing: true}).on('data', function (l) {
+      let obj
+      try {
+        obj = JSON.parse(l)
+      } catch (e) {
+        return
+      }
+      if (obj && (obj.key === key)) {
+        ret = formatEntry(cache, obj)
+      }
+    }), function (err) {
+      if (err && err.code === 'ENOENT') {
+        cb(null, null)
+      } else {
+        cb(err, ret)
+      }
+    })
   })
 }
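
Each converted function leans on bluebird's `Promise.fromNode`, which calls the supplied function with a node-style callback and settles the returned promise from its `(err, value)` pair. A minimal standalone sketch of the adapter (not part of the diff; `fs.stat` is just a stand-in):

const Promise = require('bluebird')
const fs = require('graceful-fs')

// Promise.fromNode hands our function a (err, value) callback and
// resolves or rejects the returned promise accordingly.
function statAsync (p) {
  return Promise.fromNode(cb => fs.stat(p, cb))
}

statAsync('package.json')
  .then(st => console.log('size:', st.size))
  .catch(err => console.error('stat failed:', err.code))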

 module.exports.delete = del
-function del (cache, key, cb) {
-  insert(cache, key, null, cb)
+function del (cache, key) {
+  return insert(cache, key, null)
 }

 module.exports.lsStream = lsStream
 function lsStream (cache) {
-  var indexPath = path.join(cache, 'index')
-  var stream = through.obj()
+  const indexPath = path.join(cache, 'index')
+  const stream = through.obj()
   fs.readdir(indexPath, function (err, files) {
     if (err && err.code === 'ENOENT') {
       return stream.end()
@@ -108,10 +110,11 @@ function lsStream (cache) {
     asyncMap(files, function (f, cb) {
       fs.readFile(path.join(indexPath, f), 'utf8', function (err, data) {
         if (err) { return cb(err) }
-        var entries = {}
+        const entries = {}
         data.split('\n').forEach(function (entry) {
+          let parsed
           try {
-            var parsed = JSON.parse(entry)
+            parsed = JSON.parse(entry)
           } catch (e) {
           }
           // NOTE - it's possible for an entry to be
@@ -136,18 +139,20 @@ function lsStream (cache) {
 }

 module.exports.ls = ls
-function ls (cache, cb) {
-  var entries = {}
-  lsStream(cache).on('finish', function () {
-    cb(null, entries)
-  }).on('data', function (d) {
-    entries[d.key] = d
-  }).on('error', cb)
+function ls (cache) {
+  const entries = {}
+  return Promise.fromNode(cb => {
+    lsStream(cache).on('finish', function () {
+      cb(null, entries)
+    }).on('data', function (d) {
+      entries[d.key] = d
+    }).on('error', cb)
+  })
 }

 module.exports.notFoundError = notFoundError
 function notFoundError (cache, key) {
-  var err = new Error('content not found')
+  const err = new Error('content not found')
   err.code = 'ENOENT'
   err.cache = cache
   err.key = key
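
Taken together, the diff drops the trailing callback from `insert`, `find`, `del`, and `ls`, so callers consume returned promises instead. A hypothetical usage sketch (the cache path, key, digest, and require path are all illustrative, not from this changeset):

// Hypothetical caller -- all paths and values here are illustrative only.
const index = require('./entry-index')
const cache = '/tmp/example-cache'

index.insert(cache, 'my-key', 'deadbeef', {metadata: {from: 'registry'}})
  .then(() => index.find(cache, 'my-key'))  // resolves to the entry, or null
  .then(entry => console.log(entry && entry.digest))
  .catch(err => console.error(err))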