Return 406 if Accept-Encoding doesn't match store Content-Encoding #264
The changes below are to the Artifacts test suite. Several hunks are whitespace-only cleanups, which show up as `-`/`+` pairs with identical visible text.

```diff
@@ -23,6 +23,7 @@ suite('Artifacts', function() {
 var urllib = require('url');
 var http = require('http');
 var https = require('https');
+var zlib = require('zlib');

 // Static URL from which ip-ranges from AWS services can be fetched
 const AWS_IP_RANGES_URL = 'https://ip-ranges.amazonaws.com/ip-ranges.json';
@@ -176,7 +177,7 @@ suite('Artifacts', function() {

   test('S3 single part complete flow', async () => {
     let taskId = slugid.v4();

     debug('### Creating task');
     await helper.queue.createTask(taskId, taskDef);
@@ -212,11 +213,11 @@ suite('Artifacts', function() {
     let uploadOutcome = await client.runUpload(response.requests, uploadInfo);

     response = await helper.queue.completeArtifact(taskId, 0, 'public/singlepart.dat', {
-      etags: uploadOutcome.etags,
+      etags: uploadOutcome.etags,
     });

     let secondResponse = await helper.queue.completeArtifact(taskId, 0, 'public/singlepart.dat', {
-      etags: uploadOutcome.etags,
+      etags: uploadOutcome.etags,
     });
     assume(response).deeply.equals(secondResponse);
```
```diff
@@ -227,18 +228,75 @@ suite('Artifacts', function() {
     debug('Fetching artifact from: %s', artifactUrl);
     let artifact = await getWithoutRedirecting(artifactUrl);

-    let expectedUrl =
+    let expectedUrl =
       `https://test-bucket-for-any-garbage.s3-us-west-2.amazonaws.com/${taskId}/0/public/singlepart.dat`;
     assume(artifact.headers).has.property('location', expectedUrl);

     await verifyDownload(artifact.headers.location, bigfilehash, bigfilesize);
   });

+  test('S3 single part complete flow, content-encoding: gzip', async () => {
+    const taskId = slugid.v4();
+    const data = crypto.randomBytes(12 * 1024 * 1024 + 21);
+    const gzipped = zlib.gzipSync(data);
+
+    debug('### Creating task');
+    await helper.queue.createTask(taskId, taskDef);
+
+    debug('### Claiming task');
+    await helper.queue.claimTask(taskId, 0, {
+      workerGroup: 'my-worker-group',
+      workerId: 'my-worker',
+    });
+
+    debug('### Create artifact');
+    const {
+      requests,
+    } = await helper.queue.createArtifact(taskId, 0, 'public/singlepart.dat', {
+      storageType: 'blob',
+      expires: taskcluster.fromNowJSON('1 day'),
+      contentType: 'binary/octet-stream',
+      contentEncoding: 'gzip',
+      contentLength: data.length,
+      contentSha256: crypto.createHash('sha256').update(data).digest('hex'),
+      transferLength: gzipped.length,
+      transferSha256: crypto.createHash('sha256').update(gzipped).digest('hex'),
+    });
+
+    debug('### Put first and only part of artifact');
+    const {method, url, headers} = requests[0];
+    const res = await request(method, url).set(headers).send(gzipped);
+    const etag = res.headers['etag'];
+
+    debug('### Complete artifact upload');
+    await helper.queue.completeArtifact(taskId, 0, 'public/singlepart.dat', {
+      etags: [etag],
+    });
+
+    const artifactUrl = helper.queue.buildUrl(
+      helper.queue.getArtifact,
+      taskId, 0, 'public/singlepart.dat',
+    );
+    debug('### Fetching artifact from: %s', artifactUrl);
+    const res2 = await request.get(artifactUrl).responseType('blob');
```
**Contributor:** There's already code for doing this: `await verifyDownload(artifact.headers.location, bigfilehash, bigfilesize);`. Please use it, as it's what we do for all the other tests and it handles many more cases than are duplicated here.

**Author:** I tried adding it; that won't work because of the content-encoding.
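For context, a gzip-aware variant of the helper would have to account for the bytes on the wire being the encoded payload. Below is only a sketch under assumptions: the name `verifyEncodedDownload` is hypothetical, `request` stands in for the superagent instance the test file already uses, and since whether an HTTP client transparently decompresses the response depends on the client, the sketch accepts a match against either hash.

```js
// Hypothetical sketch, not the existing verifyDownload helper: verify a
// download whose stored Content-Encoding is gzip. Depending on whether the
// HTTP client transparently decompressed the response, the bytes we see are
// either the decoded content or the gzip-encoded transfer payload, so we
// accept a match against either expected hash.
const assert = require('assert');
const crypto = require('crypto');
const request = require('superagent'); // the test file already has this

async function verifyEncodedDownload(url, contentSha256, transferSha256) {
  const res = await request.get(url).responseType('blob');
  const hash = crypto.createHash('sha256').update(res.body).digest('hex');
  assert(hash === contentSha256 || hash === transferSha256,
    'downloaded body should match the content hash or the transfer hash');
}
```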
```diff
+    debug('Downloaded artifact, statusCode: %s', res2.status);
+    const res2Hash = crypto.createHash('sha256').update(res2.body).digest('hex');
+    assert(res2Hash === crypto.createHash('sha256').update(data).digest('hex'));
+    assert(res2Hash === res2.headers['x-amz-meta-content-sha256']);
+    debug('Response headers: %j', res2.headers);
+
+    debug('### Downloading artifact with incorrect "Accept-Encoding"');
+    let e;
+    await request.get(artifactUrl).set('Accept-Encoding', 'identity').catch(err => e = err);
```
**Contributor:** Let's also test for no `Accept-Encoding` header.

**Contributor:** But it should also be just one extra line, to account for us doing something incorrectly and handling the two different options differently.
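The extra case the thread asks for might look like the sketch below, written as if it sat inside the gzip test above and reused its `artifactUrl`. Assumptions: `request` is superagent, whose `.unset()` removes a previously set header; and since RFC 7231 treats a missing Accept-Encoding as "any encoding is acceptable", the sketch expects this request to succeed, which is precisely how it could diverge from the `identity` case.

```js
// Sketch of the additional case (hypothetical): fetch the artifact with no
// Accept-Encoding header at all. A missing header means any encoding is
// acceptable per RFC 7231, so this should succeed even though
// 'Accept-Encoding: identity' is rejected with a 406; asserting both cases
// catches the server handling the two options inconsistently.
debug('### Downloading artifact with no "Accept-Encoding" header');
const res3 = await request.get(artifactUrl)
  .unset('Accept-Encoding')
  .responseType('blob');
assert(res3.status === 200, 'expected download to succeed without the header');
```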
```diff
+    assert(e, 'expected an error');
+    assert(e.status === 406, 'expected 406');
+  });
+
   test('S3 multi part complete flow', async () => {
     let name = 'public/multipart.dat';
     let taskId = slugid.v4();

     debug('### Creating task');
     await helper.queue.createTask(taskId, taskDef);
```
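The server-side check the new test exercises is not part of this diff. As a rough illustration only, with all names assumed rather than taken from the queue's actual code, the logic amounts to something like:

```js
// Rough sketch of the idea behind the 406 (hypothetical names throughout;
// this is not the queue's implementation). If the artifact was stored with
// a non-identity Content-Encoding that the request's Accept-Encoding does
// not allow, serving the bytes would let the client misinterpret them, so
// the request is rejected instead.
function checkAcceptEncoding(req, res, artifact) {
  const stored = artifact.contentEncoding || 'identity';
  // Parse "gzip;q=1.0, identity; q=0.5" down to a list of codings.
  const accepted = (req.headers['accept-encoding'] || '*')
    .split(',')
    .map(coding => coding.split(';')[0].trim().toLowerCase());
  if (stored !== 'identity' && !accepted.includes(stored) && !accepted.includes('*')) {
    res.status(406).json({
      message: `Artifact is stored with Content-Encoding '${stored}', ` +
        'which the Accept-Encoding header does not accept',
    });
    return false;
  }
  return true; // encoding acceptable; continue to the usual redirect
}
```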
```diff
@@ -279,12 +337,12 @@ suite('Artifacts', function() {
     let uploadOutcome = await client.runUpload(response.requests, uploadInfo);

     response = await helper.queue.completeArtifact(taskId, 0, name, {
-      etags: uploadOutcome.etags,
+      etags: uploadOutcome.etags,
     });

     // Ensure idempotency for completion of artifacts
     let secondResponse = await helper.queue.completeArtifact(taskId, 0, name, {
-      etags: uploadOutcome.etags,
+      etags: uploadOutcome.etags,
     });
     assume(response).deeply.equals(secondResponse);
```
```diff
@@ -301,11 +359,11 @@ suite('Artifacts', function() {

     await verifyDownload(artifact.headers.location, bigfilehash, bigfilesize);
   });

   test('S3 multi part idempotency', async () => {
     let name = 'public/multipart.dat';
     let taskId = slugid.v4();

     debug('### Creating task');
     await helper.queue.createTask(taskId, taskDef);
```
```diff
@@ -348,7 +406,7 @@ suite('Artifacts', function() {
       return {sha256: x.sha256, size: x.size};
     }),
   });

   let firstUploadId = qs.parse(urllib.parse(firstResponse.requests[0].url).query).uploadId;
   let secondUploadId = qs.parse(urllib.parse(secondResponse.requests[0].url).query).uploadId;
   assume(firstUploadId).equals(secondUploadId);
```
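The uploadId comparison is what proves idempotency: an S3 multipart upload is identified by its uploadId, so the second createArtifact call returning the same id shows it reused the in-progress upload rather than starting a new one. For reference, the same extraction can be written with the WHATWG URL API instead of the legacy url and querystring modules (an equivalent sketch, not a suggested change; `require('url').URL` is needed on older Node versions where URL is not a global):

```js
// Equivalent uploadId extraction using the WHATWG URL API instead of
// the urllib.parse + qs.parse combination shown above.
const {URL} = require('url');

const firstUploadId = new URL(firstResponse.requests[0].url).searchParams.get('uploadId');
const secondUploadId = new URL(secondResponse.requests[0].url).searchParams.get('uploadId');
assume(firstUploadId).equals(secondUploadId);
```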
```diff
@@ -382,7 +440,7 @@ suite('Artifacts', function() {
     let uploadOutcome = await client.runUpload(secondResponse.requests, uploadInfo);

     let response = await helper.queue.completeArtifact(taskId, 0, name, {
-      etags: uploadOutcome.etags,
+      etags: uploadOutcome.etags,
     });
   });
 });
```
**Comment:** I thought that if it's not present, we would store 'identity' there. Am I wrong?

**Reply:** If I look at the code, yes... Maybe we have a default in the schema and I missed that, but then we have unnecessary special cases in the code.
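If the default does belong in the schema, it would presumably look something like the fragment below (a sketch, not the actual create-artifact request schema). With a default applied at validation time, the handler never sees a missing contentEncoding, so the special cases go away.

```js
// Hypothetical fragment of a JSON-schema default for contentEncoding;
// the real schema's property names and allowed values may differ.
const blobArtifactRequestSchemaFragment = {
  properties: {
    contentEncoding: {
      type: 'string',
      enum: ['identity', 'gzip'],
      // If the client omits contentEncoding, validation fills in
      // 'identity', so downstream code needs no "not present" branch.
      default: 'identity',
    },
  },
};
```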