Skip to content

Commit

Permalink
Merge 2a259fa into 87ddee0
Browse files Browse the repository at this point in the history
  • Loading branch information
Nathan Gillett committed Sep 18, 2020
2 parents 87ddee0 + 2a259fa commit 64ee3f8
Show file tree
Hide file tree
Showing 7 changed files with 112 additions and 41 deletions.
2 changes: 1 addition & 1 deletion docs/api-guide.rst
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ Usage:
config=Config(client_cert=('client.crt', 'client.key')))
# Basic APIs such as upload_file now work as usual
bucket = s3.Bucket('exodus-cdn-dev')
bucket = s3.Bucket('dev')
bucket.upload_file('/tmp/hello.txt',
'aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f')
Expand Down
19 changes: 19 additions & 0 deletions exodus-gw-config.ini
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Configuration for the exodus-gw application


# qa currently shares storage with dev
[qa]
bucket = exodus-cdn-dev
table = exodus-cdn-dev

[dev]
bucket = exodus-cdn-dev
table = exodus-cdn-dev

[stage]
bucket = exodus-cdn-stage
table = exodus-cdn-stage

[prod]
bucket = exodus-cdn-prod
table = exodus-cdn-prod
48 changes: 23 additions & 25 deletions exodus_gw/s3/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@

from ..app import app

from .util import extract_mpu_parts, xml_response, RequestReader
from .util import extract_mpu_parts, xml_response, RequestReader, bucket_name


LOG = logging.getLogger("s3")
Expand All @@ -47,10 +47,6 @@
# - format of 'key' should be enforced (sha256sum)
# - a way to check if object is already uploaded, e.g. HEAD
# - limits on chunk sizes during multipart upload should be decided and enforced
# - should support configuration of the target S3 environment rather than using
# whatever's the boto default
# - {bucket} shouldn't be a parameter, as the target bucket for each exodus env
# is predefined and not controlled by the caller.
# - requests should be authenticated


Expand All @@ -64,14 +60,14 @@ def s3_client():


@app.post(
"/upload/{bucket}/{key}",
"/upload/{env}/{key}",
tags=["upload"],
summary="Create/complete multipart upload",
response_class=Response,
)
async def multipart_upload(
request: Request,
bucket: str = Path(..., description="S3 bucket name"),
env: str = Path(..., description="Target CDN environment"),
key: str = Path(..., description="S3 object key"),
uploadId: Optional[str] = Query(
None,
Expand Down Expand Up @@ -99,7 +95,7 @@ async def multipart_upload(
"""Create or complete a multi-part upload.
To create a multi-part upload:
- include ``uploads`` in query string, with no value (e.g. ``POST /upload/{bucket}/{key}?uploads``)
- include ``uploads`` in query string, with no value (e.g. ``POST /upload/{env}/{key}?uploads``)
- see also: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
To complete a multi-part upload:
Expand All @@ -110,11 +106,11 @@ async def multipart_upload(

if uploads == "":
# Means a new upload is requested
return await create_multipart_upload(bucket, key)
return await create_multipart_upload(env, key)

elif uploads is None and uploadId:
# Given an existing upload to complete
return await complete_multipart_upload(bucket, key, uploadId, request)
return await complete_multipart_upload(env, key, uploadId, request)

# Caller did something wrong
raise HTTPException(
Expand All @@ -125,14 +121,14 @@ async def multipart_upload(


@app.put(
"/upload/{bucket}/{key}",
"/upload/{env}/{key}",
tags=["upload"],
summary="Upload bytes",
response_class=Response,
)
async def upload(
request: Request,
bucket: str = Path(..., description="S3 bucket name"),
env: str = Path(..., description="Target CDN environment"),
key: str = Path(..., description="S3 object key"),
uploadId: Optional[str] = Query(
None, description="ID of an existing multi-part upload."
Expand All @@ -157,19 +153,19 @@ async def upload(

if uploadId is None and partNumber is None:
# Single-part upload
return await object_put(bucket, key, request)
return await object_put(env, key, request)

# Multipart upload
return await multipart_put(bucket, key, uploadId, partNumber, request)
return await multipart_put(env, key, uploadId, partNumber, request)


async def object_put(bucket: str, key: str, request: Request):
async def object_put(env: str, key: str, request: Request):
# Single-part upload handler: entire object is written via one PUT.
reader = RequestReader.get_reader(request)

async with s3_client() as s3:
response = await s3.put_object(
Bucket=bucket,
Bucket=bucket_name(env),
Key=key,
Body=reader,
ContentMD5=request.headers["Content-MD5"],
Expand All @@ -180,7 +176,7 @@ async def object_put(bucket: str, key: str, request: Request):


async def complete_multipart_upload(
bucket: str, key: str, uploadId: str, request: Request
env: str, key: str, uploadId: str, request: Request
):
body = await request.body()

Expand All @@ -190,7 +186,7 @@ async def complete_multipart_upload(

async with s3_client() as s3:
response = await s3.complete_multipart_upload(
Bucket=bucket,
Bucket=bucket_name(env),
Key=key,
UploadId=uploadId,
MultipartUpload={"Parts": parts},
Expand All @@ -206,9 +202,11 @@ async def complete_multipart_upload(
)


async def create_multipart_upload(bucket: str, key: str):
async def create_multipart_upload(env: str, key: str):
async with s3_client() as s3:
response = await s3.create_multipart_upload(Bucket=bucket, Key=key)
response = await s3.create_multipart_upload(
Bucket=bucket_name(env), Key=key
)

return xml_response(
"CreateMultipartUploadOutput",
Expand All @@ -219,14 +217,14 @@ async def create_multipart_upload(bucket: str, key: str):


async def multipart_put(
bucket: str, key: str, uploadId: str, partNumber: int, request: Request
env: str, key: str, uploadId: str, partNumber: int, request: Request
):
reader = RequestReader.get_reader(request)

async with s3_client() as s3:
response = await s3.upload_part(
Body=reader,
Bucket=bucket,
Bucket=bucket_name(env),
Key=key,
PartNumber=partNumber,
UploadId=uploadId,
Expand All @@ -238,14 +236,14 @@ async def multipart_put(


@app.delete(
"/upload/{bucket}/{key}",
"/upload/{env}/{key}",
tags=["upload"],
summary="Abort multipart upload",
response_description="Empty response",
response_class=Response,
)
async def abort_multipart_upload(
bucket: str = Path(..., description="S3 bucket name"),
env: str = Path(..., description="Target CDN environment"),
key: str = Path(..., description="S3 object key"),
uploadId: str = Query(..., description="ID of a multipart upload"),
):
Expand All @@ -260,7 +258,7 @@ async def abort_multipart_upload(

async with s3_client() as s3:
await s3.abort_multipart_upload(
Bucket=bucket, Key=key, UploadId=uploadId
Bucket=bucket_name(env), Key=key, UploadId=uploadId
)

return Response()
12 changes: 12 additions & 0 deletions exodus_gw/s3/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

from fastapi import Response
from defusedxml.ElementTree import fromstring
from ..settings import exodus_gw_config


def extract_mpu_parts(
Expand Down Expand Up @@ -94,3 +95,14 @@ def get_reader(cls, request):
# a helper to make tests easier to write.
# tests can patch over this to effectively disable streaming.
return cls(request)


def bucket_name(env: str):
    """Look up the S3 bucket configured for the given environment.

    Arguments:
        env: name of a CDN environment (a section in the exodus-gw
            config file, e.g. "dev", "stage", "prod").

    Returns:
        The configured bucket name for that environment.

    Raises:
        ValueError: if the environment has no section or no "bucket"
            option in the configuration.
    """

    conf = exodus_gw_config()

    # has_option also honors values inherited from [DEFAULT], matching
    # the semantics of config.options(env).
    if conf.has_section(env) and conf.has_option(env, "bucket"):
        return conf.get(env, "bucket")

    raise ValueError("Environment '%s' not supported" % env)
10 changes: 10 additions & 0 deletions exodus_gw/settings.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,16 @@
from functools import lru_cache

from pydantic import BaseSettings
import configparser


def exodus_gw_config(file_name: str = "exodus-gw-config.ini"):
    """Parse and return exodus-gw configuration.

    Arguments:
        file_name: path to an INI-style configuration file; defaults to
            "exodus-gw-config.ini" relative to the working directory.

    Returns:
        A ConfigParser loaded from the file. Note that configparser
        silently skips files which cannot be opened, so a missing file
        yields an empty parser rather than an error.
    """

    parser = configparser.ConfigParser()
    parser.read([file_name])
    return parser


class Settings(BaseSettings):
Expand Down
22 changes: 11 additions & 11 deletions tests/s3/test_manage_mpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,21 +15,21 @@ async def test_create_mpu(mock_s3_client):
"""Creating a multipart upload is delegated correctly to S3."""

mock_s3_client.create_multipart_upload.return_value = {
"Bucket": "my-bucket",
"Bucket": "exodus-cdn-dev",
"Key": TEST_KEY,
"UploadId": "my-great-upload",
}

response = await multipart_upload(
None,
bucket="my-bucket",
env="dev",
key=TEST_KEY,
uploads="",
)

# It should delegate request to real S3
mock_s3_client.create_multipart_upload.assert_called_once_with(
Bucket="my-bucket",
Bucket="exodus-cdn-dev",
Key=TEST_KEY,
)

Expand All @@ -42,7 +42,7 @@ async def test_create_mpu(mock_s3_client):
# It should include the appropriate data
expected = xml_response(
"CreateMultipartUploadOutput",
Bucket="my-bucket",
Bucket="exodus-cdn-dev",
Key=TEST_KEY,
UploadId="my-great-upload",
).body
Expand All @@ -55,7 +55,7 @@ async def test_complete_mpu(mock_s3_client):

mock_s3_client.complete_multipart_upload.return_value = {
"Location": "https://example.com/some-object",
"Bucket": "my-bucket",
"Bucket": "exodus-cdn-dev",
"Key": TEST_KEY,
"ETag": "my-better-etag",
}
Expand All @@ -80,15 +80,15 @@ async def test_complete_mpu(mock_s3_client):

response = await multipart_upload(
request=request,
bucket="my-bucket",
env="dev",
key=TEST_KEY,
uploadId="my-better-upload",
uploads=None,
)

# It should delegate request to real S3
mock_s3_client.complete_multipart_upload.assert_called_once_with(
Bucket="my-bucket",
Bucket="exodus-cdn-dev",
Key=TEST_KEY,
UploadId="my-better-upload",
MultipartUpload={
Expand All @@ -109,7 +109,7 @@ async def test_complete_mpu(mock_s3_client):
expected = xml_response(
"CompleteMultipartUploadOutput",
Location="https://example.com/some-object",
Bucket="my-bucket",
Bucket="exodus-cdn-dev",
Key=TEST_KEY,
ETag="my-better-etag",
).body
Expand All @@ -123,7 +123,7 @@ async def test_bad_mpu_call():
with pytest.raises(HTTPException) as exc_info:
await multipart_upload(
request=None,
bucket="my-bucket",
env="dev",
key=TEST_KEY,
uploadId="oops",
uploads="not valid to mix these args",
Expand All @@ -137,14 +137,14 @@ async def test_abort_mpu(mock_s3_client):
"""Aborting a multipart upload is correctly delegated to S3."""

response = await abort_multipart_upload(
bucket="my-bucket",
env="dev",
key=TEST_KEY,
uploadId="my-lame-upload",
)

# It should delegate the request to real S3
mock_s3_client.abort_multipart_upload.assert_called_once_with(
Bucket="my-bucket",
Bucket="exodus-cdn-dev",
Key=TEST_KEY,
UploadId="my-lame-upload",
)
Expand Down
Loading

0 comments on commit 64ee3f8

Please sign in to comment.