Skip to content

Commit

Permalink
Merge fc34bb2 into aeba1a3
Browse files Browse the repository at this point in the history
  • Loading branch information
mpu-creare committed Apr 17, 2020
2 parents aeba1a3 + fc34bb2 commit 113dd4a
Show file tree
Hide file tree
Showing 145 changed files with 9,623 additions and 7,910 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ jobs:
# check formatting
- stage: formatting
python: "3.7"
script: black --check podpac
script: black --check --diff podpac
# deploy docs to `podpac-docs` repository. This script only pushes the docs on pushes to develop and master.
- stage: docs deploy
python: "3.7"
Expand Down
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,9 @@
# Changelog

## 2.0.0

### Breaking changes
* Renamed 'native_coordinates' to 'coordinates'

## 1.3.0

Expand Down
61 changes: 40 additions & 21 deletions dist/aws/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,40 +1,59 @@
# Pin the base image: `latest` is not reproducible (hadolint DL3007) -- the
# Lambda runtime this mimics is Amazon Linux 2, so pin to that major version.
FROM amazonlinux:2

# Branch, tag, or commit of podpac to build; overridden by build_lambda.sh
# via `docker build --build-arg REF=...`.
ARG REF="master"

RUN yum update -y

# Install apt dependencies
RUN yum install -y gcc gcc-c++ freetype-devel yum-utils findutils openssl-devel

RUN yum -y groupinstall development
# development tools
# NOTE: `groupinstall` is a yum sub-command, not a package name. Listing
# "groupinstall" and "development" under `yum install` tries to install
# packages with those names (and fails / silently skips the dev group), so
# the group must be installed with its own `yum groupinstall` invocation.
# `yum clean all` in the same layer keeps the package cache out of the image.
RUN yum update -y && \
    yum -y groupinstall development && \
    yum -y install \
    gcc \
    gcc-c++ \
    git \
    zip \
    freetype-devel \
    yum-utils \
    findutils \
    openssl-devel \
    && yum clean all

# Mock current AWS Lambda docker image
# Find complete list of package https://gist.github.com/vincentsarago/acb33eb9f0502fcd38e0feadfe098eb7
RUN yum install -y libjpeg-devel libpng-devel libcurl-devel ImageMagick-devel.x86_64 python3-devel.x86_64 which
# NOTE: this is still Py3.7, need to be careful about version management
# Install the Python 3 toolchain used below to vendor dependencies into
# /tmp/vendored/; `yum clean all` runs in the same layer so the package
# cache does not bloat the image.
RUN yum -y install \
python3 \
python3-pip \
python3-devel \
&& yum clean all

ENV LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
# Clone the podpac repository and check out the requested ref (branch/tag/commit).
# For developers looking to create a custom deployment package or dependencies,
# comment this block and un-comment the next block.
# NOTE: pushd/popd are bash builtins and RUN executes via `/bin/sh -c`
# (hadolint DL3003 / ShellCheck SC2039); a plain `cd` is portable, and no
# `popd` is needed because the working directory resets when the layer ends.
RUN git clone https://github.com/creare-com/podpac.git /podpac/ && \
    cd /podpac/ && \
    git fetch --all && \
    git checkout "$REF"

ADD . /podpac/
# # uncomment this block to create a custom deployment package or dependencies archive
# # based on your local copy of the PODPAC repository
# # this command assumes you are building the Dockerfile using `build_lambda.sh` (which runs from the root of the PODPAC repository)
# ADD . /podpac/

# Install core, datatype and aws optional dependencies into /tmp/vendored/,
# which becomes the root of the Lambda deployment package. settings.json is
# copied in first so the packaged function ships with default settings, and
# .git/doc/.github are pruned so they are not vendored into the archive.
# (The old `-r dist/aws/aws_requirements.txt` install is gone: that file was
# deleted; the extras `[datatype]` and `[aws]` now declare those deps.)
RUN mkdir /tmp/vendored/ && \
    cp /podpac/settings.json /tmp/vendored/settings.json && \
    cd /podpac/ && rm -rf .git/ doc/ .github/ && \
    pip3 install . -t /tmp/vendored/ --upgrade && \
    pip3 install .[datatype] -t /tmp/vendored/ --upgrade && \
    pip3 install .[aws] -t /tmp/vendored/ --upgrade

# need to add some __init__ files
# NOTE(review): pydap appears to ship these directories without __init__.py
# (namespace-package style), and the vendored /tmp layout seems to need them
# to be regular packages -- confirm against the pinned pydap version before
# removing this workaround.
RUN cd /tmp/vendored/ && touch pydap/__init__.py && \
touch pydap/responses/__init__.py && \
touch pydap/handlers/__init__.py && \
touch pydap/parsers/__init__.py

RUN cp -r /podpac/ /tmp/vendored/ && \
mv /tmp/vendored/podpac/dist/aws/handler.py /tmp/vendored/handler.py && \
cp tmp/vendored/podpac/dist/aws/_mk_dist.py /tmp/vendored/_mk_dist.py && \
rm -rf /tmp/vendored/podpac/dist/ && \
cp -r /tmp/vendored/podpac/podpac/* /tmp/vendored/podpac/ && \
rm -rf /tmp/vendored/podpac/podpac/*
# copy handler and _mk_dist:
# handler.py is the Lambda entry point (defines handler(event, context));
# both are placed at the archive root so Lambda can import them directly.
RUN cp /podpac/dist/aws/handler.py /tmp/vendored/handler.py && \
cp /podpac/dist/aws/_mk_dist.py /tmp/vendored/_mk_dist.py

# Build podpac_dist.zip from the top-level *files* of /tmp/vendored only
# (`find * -maxdepth 0 -type f` excludes directories), skipping existing
# .zip archives and .pyc files. NOTE(review): presumably the dependency
# directories are packaged separately into podpac_deps.zip (build_lambda.sh
# extracts both) -- confirm that step exists outside this view.
RUN cd /tmp/vendored && \
find * -maxdepth 0 -type f | grep ".zip" -v | grep -v ".pyc" | xargs zip -9 -rqy podpac_dist.zip
Expand Down
15 changes: 1 addition & 14 deletions dist/aws/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -17,19 +17,6 @@ The bucket itself is private, but each directory is made public individually.
The following process is used to create new PODPAC distribution in the `podpac-dist` bucket
when a new version of PODPAC is released.

- Run `build_lambda.sh`. Note this currently requires `settings.json` to copied to the root of the podpac directory.
- Run `build_lambda.sh`
- Run `upload_lambda.sh`
- Navigate to `podpac-dist` (or input bucket) and make the archives public

## Handler

...document handler.py...

## Using Distribution

...document podpac build process...

## Debugging Lambda Function

Use the script `print_logs.sh` to read cloud watch logs from your built lambda function.
This is currently the only way to debug your lambda function.
19 changes: 0 additions & 19 deletions dist/aws/aws_requirements.txt

This file was deleted.

30 changes: 13 additions & 17 deletions dist/aws/build_lambda.sh
Original file line number Diff line number Diff line change
@@ -1,39 +1,35 @@
#!/bin/sh
#
# Build podpac lambda distribution and dependencies
#
# Currently, this builds the function using the local
# podpac repository, including any outstanding changes.
# Build podpac lambda distribution and dependencies.
# Change $REF to specify a specific branch, tag, or commit in podpac to build from.
#
# Usage:
#
# $ bash build_lambda.sh [s3-bucket] [function-name]
# $ bash build_lambda.sh
#
# Requires:
# - Docker
# - `settings.json` to be copied to the root directory of the podpac repository
# This will not be required in the future
#
# Example usage:
#
# $ bash build_lambda.sh


# variables
COMMIT_SHA="$(git rev-parse HEAD)"
TAG="$(git describe --always)"
REF="master"
# REF="tags/1.1.0" # Change $REF to the branch, tag, or commit in podpac you want to use
# REF="develop"

DOCKER_NAME="podpac"
DOCKER_TAG=$TAG
DOCKER_TAG=$REF

echo "Creating docker image from podpac version ${TAG}"
echo "Creating docker image from podpac version ${REF}"
echo "${DOCKER_NAME}:${DOCKER_TAG}"

# Navigate to root, build docker, and extract zips
pushd ../../
docker build -f dist/aws/Dockerfile --no-cache --tag $DOCKER_NAME:$DOCKER_TAG --build-arg COMMIT_SHA="${COMMIT_SHA}" --build-arg TAG="${TAG}" .
docker build -f dist/aws/Dockerfile --no-cache --tag $DOCKER_NAME:$DOCKER_TAG --build-arg REF="${REF}" .
docker run --name "${DOCKER_NAME}" -itd $DOCKER_NAME:$DOCKER_TAG
docker cp "${DOCKER_NAME}":/tmp/vendored/podpac_dist.zip ./dist/aws
docker cp "${DOCKER_NAME}":/tmp/vendored/podpac_deps.zip ./dist/aws
docker stop "${DOCKER_NAME}"
docker rm "${DOCKER_NAME}"
popd

echo "Built podpac deployment package: podpac_dist.zip"
echo "Built podpac dependencies: podpac_deps.zip"
78 changes: 0 additions & 78 deletions dist/aws/configure_lambda.sh

This file was deleted.

18 changes: 0 additions & 18 deletions dist/aws/example.json

This file was deleted.

41 changes: 27 additions & 14 deletions dist/aws/handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,10 @@ def default_pipeline(pipeline=None):
else:
pipeline = defaults

# overwrite certain settings so that the function doesn't fail
pipeline["settings"]["ROOT_PATH"] = "/tmp"
pipeline["settings"]["LOG_FILE_PATH"] = "/tmp"

return pipeline


Expand Down Expand Up @@ -82,15 +86,15 @@ def parse_event(trigger, event):
"""

if trigger == "eval":
print("Triggered by Invoke")
print ("Triggered by Invoke")

# event is the pipeline, provide consistent pipeline defaults
pipeline = default_pipeline(event)

return pipeline

elif trigger == "S3":
print("Triggered from S3")
print ("Triggered from S3")

# get boto s3 client
s3 = boto3.client("s3")
Expand Down Expand Up @@ -129,7 +133,7 @@ def parse_event(trigger, event):
return pipeline

elif trigger == "APIGateway":
print("Triggered from API Gateway")
print ("Triggered from API Gateway")

pipeline = default_pipeline()
pipeline["url"] = event["queryStringParameters"]
Expand All @@ -154,8 +158,8 @@ def parse_event(trigger, event):
# If we get here, the api settings were loaded
pipeline["settings"] = {**pipeline["settings"], **api_settings}
except Exception as e:
print("Got an exception when attempting to load api settings: ", e)
print(pipeline)
print ("Got an exception when attempting to load api settings: ", e)
print (pipeline)

# handle OUTPUT in query parameters
elif param == "output":
Expand Down Expand Up @@ -195,7 +199,7 @@ def handler(event, context):
ret_pipeline : bool, optional
Description
"""
print(event)
print (event)

# Add /tmp/ path to handle python path for dependencies
sys.path.append("/tmp/")
Expand Down Expand Up @@ -227,13 +231,22 @@ def handler(event, context):
os.environ.get("PODPAC_VERSION", pipeline["settings"].get("PODPAC_VERSION"))
) # this should be equivalent to version.semver()

# Download dependencies from specific bucket/object
s3 = boto3.client("s3")
s3.download_file(bucket, dependencies, "/tmp/" + dependencies)
subprocess.call(["unzip", "/tmp/" + dependencies, "-d", "/tmp"])
sys.path.append("/tmp/")
subprocess.call(["rm", "/tmp/" + dependencies])
# -----
# Check to see if this function is "hot", in which case the dependencies have already been downloaded and are
# available for use right away.
if os.path.exists("/tmp/scipy"):
print (
"Scipy has been detected in the /tmp/ directory. Assuming this function is hot, dependencies will"
" not be downloaded."
)
else:
# Download dependencies from specific bucket/object
print ("Downloading and extracting dependencies")
s3 = boto3.client("s3")
s3.download_file(bucket, dependencies, "/tmp/" + dependencies)
subprocess.call(["unzip", "/tmp/" + dependencies, "-d", "/tmp"])
sys.path.append("/tmp/")
subprocess.call(["rm", "/tmp/" + dependencies])
# -----

# Load PODPAC

Expand Down Expand Up @@ -285,7 +298,7 @@ def handler(event, context):
try:
json.dumps(body)
except Exception as e:
print("Output body is not serializable, attempting to decode.")
print ("Output body is not serializable, attempting to decode.")
body = body.decode()

return {
Expand Down
Loading

0 comments on commit 113dd4a

Please sign in to comment.