/* eslint-env serviceworker */
// monotonically increasing counter backing generateUniqueId
let numIds = 0

// Return an id that hasn't been handed out before by this thread.
// Ids start at 1 and increase by one per call.
function generateUniqueId () {
  numIds += 1
  return numIds
}
// Resolve with the `data` payload of the next 'message' event this worker
// receives. `{ once: true }` removes the listener after it fires, so each
// call leaves no listener behind once a message arrives.
// Note: dropped the redundant `async` keyword — the function only returns an
// explicitly constructed Promise, and `async` merely wrapped it in a second
// promise layer. Callers still receive a Promise, so the interface is unchanged.
function nextPostMessage () {
  return new Promise((resolve) => {
    self.addEventListener(
      'message',
      e => resolve(e.data),
      { once: true }
    )
  })
}
// Resolve with the next postMessage payload that satisfies `predicate`.
// When no predicate is given, resolves with the very next message.
// Messages that fail the predicate are consumed and discarded.
// TODO: does this infinite loop cause a performance problem or memory leak?
async function nextMessage (predicate = () => true) {
  for (;;) {
    const candidate = await nextPostMessage()
    if (predicate(candidate)) {
      return candidate
    }
  }
}
// Intercept fetches whose path matches shouldIntercept() and answer them by
// proxying the request to an open window via postMessage, streaming the
// window's reply back as the Response body.
self.addEventListener('fetch', (e) => {
  const request = e.request
  // this function is set with the cli — the build tool presumably rewrites
  // the span between the CLI REPLACE markers; verify against the CLI code
  const shouldIntercept =
    // CLI REPLACE START
    // never intercept by default
    (path, request) => false
    // CLI REPLACE END
  let path = request.url.replace(self.origin, '') // trim protocol and host
  path = path.replace(/#.*$/, '') // trim hash
  path = path.replace(/\?.*$/, '') // trim query params
  path = path.replace(/\/$/, '') // trim trailing slash if present
  if (!shouldIntercept(path, request)) {
    return // let request be handled by normal fetch
  }
  // respondWith must be called synchronously inside the fetch handler;
  // the async work happens inside getResponse()
  e.respondWith(getResponse())
  async function getResponse () {
    // get the first client that responds to a ping
    async function getClient () {
      // ping the client to see if it can receive and send postmessages;
      // resolves with the client only after its matching pong arrives
      async function ping (client) {
        // use a unique id to differentiate messages corresponding to different pings
        // each request makes a ping to all windows
        const pingId = generateUniqueId()
        client.postMessage({
          type: 'ping',
          id: pingId
        })
        // TODO: does this cause a leak? for windows closed during a ping?
        await nextMessage(message => message.type === 'pong' && message.id === pingId)
        return client
      }
      // TODO: should the request timeout if no window responds?
      // right now requests wait until registerStreamToSw() is called
      // wait until a window responds to a ping to ensure that
      // the request can be processed by the thread and responded to using postmessage
      while (true) {
        const clients = await self.clients.matchAll({ type: 'window' })
        // wait until there are actually windows to ping
        // NOTE(review): this `continue` re-polls matchAll with no delay — a hot
        // loop while no window is open; consider a small backoff
        if (!clients || clients.length === 0) {
          continue
        }
        // postMessage latency is less than a millisecond https://hacks.mozilla.org/2015/07/how-fast-are-web-workers/
        const PING_TIMEOUT = 5 // milliseconds before the race resolves false
        const client = await Promise.race([
          ...clients.map(client => ping(client)),
          new Promise(resolve => setTimeout(() => resolve(false), PING_TIMEOUT))
        ])
        // false if timed out; retry the whole matchAll + ping round
        if (client) {
          return client
        }
      }
    }
    const client = await getClient()
    // use a unique ID to differentiate postMessages corresponding to different requests
    const responseId = generateUniqueId()
    // tag every outgoing message with this request's id so the window can
    // route its replies (note: mutates the passed object)
    function sendMessageToWindow (obj) {
      obj.id = responseId
      client.postMessage(obj)
    }
    // request objects can't be sent via postMessage, so construct an object literal representation
    const props = [
      'method',
      'mode',
      'url',
      'credentials',
      'cache',
      'context',
      'destination',
      'redirect',
      'integrity',
      'referrer',
      'referrerPolicy',
      'keepalive',
      'isHistoryNavigation'
    ]
    const plainRequest = {}
    for (const key of props) {
      plainRequest[key] = request[key]
    }
    plainRequest.headers = {}
    // convert headers object into plain associative array
    for (const [key, value] of request.headers) {
      plainRequest.headers[key] = value
    }
    // send body as blob, which minimizes postMessage overhead (only a pointer to data will be sent)
    plainRequest.body = await request.blob()
    // include trimmed path
    plainRequest.path = path
    // send request data to window
    // this is the req object in registerStreamToSw('/worker.js', (req, res) => {})
    sendMessageToWindow({
      type: 'request',
      plainRequest
    })
    // the first message sent for a response is always metadata
    // (status / statusText / headers); body chunks only follow after this
    const metadata = await nextMessage(message => message.id === responseId)
    // pull-based bridge: each pull asks the window for one more message,
    // which is either a body chunk or a 'done' terminator
    const responseStream = new self.ReadableStream({
      async pull (controller) {
        sendMessageToWindow({ type: 'pull' })
        // after metadata is sent, post messages will only be either chunks or 'done'
        const message = await nextMessage(message => message.id === responseId)
        if (message.type === 'done') {
          controller.close()
        } else {
          controller.enqueue(message.chunk)
        }
      },
      cancel () {
        // consumer abandoned the body; tell the window to stop producing
        sendMessageToWindow({ type: 'cancel' })
      }
    })
    return new self.Response(
      responseStream,
      {
        status: metadata.status,
        headers: metadata.headers,
        statusText: metadata.statusText
      }
    )
  }
})
// Take control immediately instead of waiting until the next reload:
// skip the waiting phase on install, then claim all open clients on activate.
self.addEventListener('install', function (event) {
  return self.skipWaiting()
})
self.addEventListener('activate', function (event) {
  return self.clients.claim()
})