From a5500f1225e5fed36a1a777b454d44b880cdd59a Mon Sep 17 00:00:00 2001
From: GuoqiaoLi
Date: Sun, 27 Jun 2021 14:29:57 -0700
Subject: [PATCH] add support for spark 3.1.1(emr 6.3.0)

---
 Makefile                                        |   7 +-
 Pipfile                                         |   1 +
 new_images.yml                                  |   4 +-
 .../container-bootstrap-config/bootstrap.sh     |   1 +
 .../processing/3.1/py3/docker/Dockerfile.cpu    | 109 ++++++++++++++++++
 .../3.1/py3/hadoop-config/core-site.xml         |  26 +++++
 .../3.1/py3/hadoop-config/hdfs-site.xml         |  67 +++++++++++
 .../3.1/py3/hadoop-config/spark-defaults.conf   |  10 ++
 .../3.1/py3/hadoop-config/spark-env.sh          |   3 +
 .../3.1/py3/hadoop-config/yarn-site.xml         |  34 ++++++
 .../3.1/py3/nginx-config/default.conf           |  17 +++
 .../3.1/py3/nginx-config/nginx.conf             |  66 +++++++++++
 spark/processing/3.1/py3/yum/emr-apps.repo      |  17 +++
 13 files changed, 357 insertions(+), 5 deletions(-)
 create mode 100644 spark/processing/3.1/py3/container-bootstrap-config/bootstrap.sh
 create mode 100644 spark/processing/3.1/py3/docker/Dockerfile.cpu
 create mode 100644 spark/processing/3.1/py3/hadoop-config/core-site.xml
 create mode 100644 spark/processing/3.1/py3/hadoop-config/hdfs-site.xml
 create mode 100644 spark/processing/3.1/py3/hadoop-config/spark-defaults.conf
 create mode 100644 spark/processing/3.1/py3/hadoop-config/spark-env.sh
 create mode 100644 spark/processing/3.1/py3/hadoop-config/yarn-site.xml
 create mode 100644 spark/processing/3.1/py3/nginx-config/default.conf
 create mode 100644 spark/processing/3.1/py3/nginx-config/nginx.conf
 create mode 100644 spark/processing/3.1/py3/yum/emr-apps.repo

diff --git a/Makefile b/Makefile
index 11f4bd0..ed40909 100644
--- a/Makefile
+++ b/Makefile
@@ -32,6 +32,7 @@ all: build test
 
 init:
 	pip install pipenv --upgrade
+	pipenv run pip install --upgrade pip
 	pipenv install
 	cp {Pipfile,Pipfile.lock,setup.py} ${BUILD_CONTEXT}
 
@@ -82,7 +83,7 @@ test-local: install-container-library build-tests
 test-sagemaker: build-tests
 	# Separate `pytest` invocation without parallelization:
 	# History server tests can't run in parallel since they use the same container name.
-	pipenv run pytest -s -vv test/integration/history \
+	pipenv run pytest --reruns 3 -s -vv test/integration/history \
 		--repo=$(DEST_REPO) --tag=$(VERSION) --durations=0 \
 		--spark-version=$(SPARK_VERSION) \
 		--framework-version=$(FRAMEWORK_VERSION) \
@@ -91,7 +92,7 @@ test-sagemaker: build-tests
 		--region ${REGION} \
 		--domain ${AWS_DOMAIN}
 	# OBJC_DISABLE_INITIALIZE_FORK_SAFETY: https://github.com/ansible/ansible/issues/32499#issuecomment-341578864
-	OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES pipenv run pytest --workers auto -s -vv test/integration/sagemaker \
+	OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES pipenv run pytest --workers auto --reruns 3 -s -vv test/integration/sagemaker \
 		--repo=$(DEST_REPO) --tag=$(VERSION) --durations=0 \
 		--spark-version=$(SPARK_VERSION) \
 		--framework-version=$(FRAMEWORK_VERSION) \
@@ -146,4 +147,4 @@ release:
 
 # Targets that don't create a file with the same name as the target.
-.PHONY: all build test test-all clean clean-all release whitelist build-container-library
+.PHONY: all build test test-all clean clean-all release whitelist build-container-library
\ No newline at end of file
diff --git a/Pipfile b/Pipfile
index a6f64cb..b06500e 100644
--- a/Pipfile
+++ b/Pipfile
@@ -31,6 +31,7 @@ sagemaker = "==2.30.0"
 smspark = {editable = true, path = "."}
 importlib-metadata = "==3.7.3"
 pytest-parallel = "==0.1.0"
+pytest-rerunfailures = "==10.0"
 
 [requires]
 python_version = "3.7"
diff --git a/new_images.yml b/new_images.yml
index 3983444..3deeb0b 100644
--- a/new_images.yml
+++ b/new_images.yml
@@ -1,7 +1,7 @@
 ---
 new_images:
-  - spark: "3.0.0"
+  - spark: "3.1.1"
     use-case: "processing"
     processors: ["cpu"]
     python: ["py37"]
-    sm_version: "1.3"
+    sm_version: "1.0"
diff --git a/spark/processing/3.1/py3/container-bootstrap-config/bootstrap.sh b/spark/processing/3.1/py3/container-bootstrap-config/bootstrap.sh
new file mode 100644
index 0000000..e4e23bb
--- /dev/null
+++ b/spark/processing/3.1/py3/container-bootstrap-config/bootstrap.sh
@@ -0,0 +1 @@
+echo "Not implemented"
\ No newline at end of file
diff --git a/spark/processing/3.1/py3/docker/Dockerfile.cpu b/spark/processing/3.1/py3/docker/Dockerfile.cpu
new file mode 100644
index 0000000..14c9527
--- /dev/null
+++ b/spark/processing/3.1/py3/docker/Dockerfile.cpu
@@ -0,0 +1,109 @@
+FROM 137112412989.dkr.ecr.us-west-2.amazonaws.com/amazonlinux:2
+ARG REGION
+ENV AWS_REGION ${REGION}
+RUN yum clean all
+RUN yum update -y
+RUN yum install -y awscli bigtop-utils curl gcc gzip unzip python3 python3-setuptools python3-pip python-devel python3-devel python-psutil gunzip tar wget liblapack* libblas* libopencv* libopenblas*
+
+# Install nginx. The amazonlinux:2.0.20200304.0 base image does not ship nginx, so install epel-release first.
+RUN wget https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+RUN yum install -y epel-release-latest-7.noarch.rpm
+RUN yum install -y nginx
+
+RUN rm -rf /var/cache/yum
+
+ENV PYTHONDONTWRITEBYTECODE=1
+ENV PYTHONUNBUFFERED=1
+# http://blog.stuart.axelbrooke.com/python-3-on-spark-return-of-the-pythonhashseed
+ENV PYTHONHASHSEED 0
+ENV PYTHONIOENCODING UTF-8
+ENV PIP_DISABLE_PIP_VERSION_CHECK 1
+
+# Install EMR Spark/Hadoop
+ENV HADOOP_HOME /usr/lib/hadoop
+ENV HADOOP_CONF_DIR /usr/lib/hadoop/etc/hadoop
+ENV SPARK_HOME /usr/lib/spark
+
+COPY yum/emr-apps.repo /etc/yum.repos.d/emr-apps.repo
+
+# Install hadoop/spark dependencies from EMR's yum repository for Spark optimizations.
+# Replace the REGION placeholder in the repository URLs with the build region.
+RUN sed -i "s/REGION/${AWS_REGION}/g" /etc/yum.repos.d/emr-apps.repo
+RUN adduser -N hadoop
+
+# These packages are a subset of what EMR installs in a cluster with the
+# "hadoop", "spark", and "hive" applications.
+# They include EMR-optimized libraries and extras.
+RUN yum install -y aws-hm-client \
+    aws-java-sdk \
+    aws-sagemaker-spark-sdk \
+    emr-goodies \
+    emr-ruby \
+    emr-scripts \
+    emr-s3-select \
+    emrfs \
+    hadoop \
+    hadoop-client \
+    hadoop-hdfs \
+    hadoop-hdfs-datanode \
+    hadoop-hdfs-namenode \
+    hadoop-httpfs \
+    hadoop-kms \
+    hadoop-lzo \
+    hadoop-yarn \
+    hadoop-yarn-nodemanager \
+    hadoop-yarn-proxyserver \
+    hadoop-yarn-resourcemanager \
+    hadoop-yarn-timelineserver \
+    hive \
+    hive-hcatalog \
+    hive-hcatalog-server \
+    hive-jdbc \
+    hive-server2 \
+    python37-numpy \
+    s3-dist-cp \
+    spark-core \
+    spark-datanucleus \
+    spark-external \
+    spark-history-server \
+    spark-python
+
+
+# Point Spark at the proper python binary
+ENV PYSPARK_PYTHON=/usr/bin/python3
+
+# Set up the Spark/YARN/HDFS users as root
+ENV PATH="/usr/bin:/opt/program:${PATH}"
+ENV YARN_RESOURCEMANAGER_USER="root"
+ENV YARN_NODEMANAGER_USER="root"
+ENV HDFS_NAMENODE_USER="root"
+ENV HDFS_DATANODE_USER="root"
+ENV HDFS_SECONDARYNAMENODE_USER="root"
+
+# Set up the bootstrapping program and Spark configuration
+COPY *.whl /opt/program/
+RUN /usr/bin/python3 -m pip install pipenv --upgrade
+COPY hadoop-config /opt/hadoop-config
+COPY nginx-config /opt/nginx-config
+COPY aws-config /opt/aws-config
+COPY Pipfile /opt/program/
+COPY Pipfile.lock /opt/program/
+COPY setup.py /opt/program/
+ENV PIPENV_PIPFILE=/opt/program/Pipfile
+# Use the --system flag so all packages are installed into the system python
+# rather than into a virtualenv; docker containers do not need virtualenvs.
+RUN pipenv install --system
+RUN /usr/bin/python3 -m pip install /opt/program/*.whl
+
+# Set up the container bootstrapper
+COPY container-bootstrap-config /opt/container-bootstrap-config
+RUN chmod +x /opt/container-bootstrap-config/bootstrap.sh
+RUN /opt/container-bootstrap-config/bootstrap.sh
+
+# Do not run the Spark history server as a daemon; otherwise no server would be
+# left running in the foreground and the container would terminate immediately.
+ENV SPARK_NO_DAEMONIZE TRUE
+
+WORKDIR $SPARK_HOME
+
+ENTRYPOINT ["smspark-submit"]
diff --git a/spark/processing/3.1/py3/hadoop-config/core-site.xml b/spark/processing/3.1/py3/hadoop-config/core-site.xml
new file mode 100644
index 0000000..52db7b2
--- /dev/null
+++ b/spark/processing/3.1/py3/hadoop-config/core-site.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+    <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://nn_uri/</value>
+        <description>NameNode URI</description>
+    </property>
+    <property>
+        <name>fs.s3a.aws.credentials.provider</name>
+        <value>com.amazonaws.auth.DefaultAWSCredentialsProviderChain</value>
+        <description>AWS S3 credential provider</description>
+    </property>
+    <property>
+        <name>fs.s3.impl</name>
+        <value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
+        <description>s3a filesystem implementation</description>
+    </property>
+    <property>
+        <name>fs.AbstractFileSystem.s3a.impl</name>
+        <value>org.apache.hadoop.fs.s3a.S3A</value>
+        <description>s3a filesystem implementation</description>
+    </property>
+</configuration>
diff --git a/spark/processing/3.1/py3/hadoop-config/hdfs-site.xml b/spark/processing/3.1/py3/hadoop-config/hdfs-site.xml
new file mode 100644
index 0000000..37e0a5b
--- /dev/null
+++ b/spark/processing/3.1/py3/hadoop-config/hdfs-site.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+    <property>
+        <name>dfs.datanode.data.dir</name>
+        <value>file:///opt/amazon/hadoop/hdfs/datanode</value>
+        <description>Comma separated list of paths on the local filesystem of a DataNode where it should store its blocks.</description>
+    </property>
+    <property>
+        <name>dfs.namenode.name.dir</name>
+        <value>file:///opt/amazon/hadoop/hdfs/namenode</value>
+        <description>Path on the local filesystem where the NameNode stores the namespace and transaction logs persistently.</description>
+    </property>
+
+    <property>
+        <name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
+        <value>true</value>
+        <description>
+            If there is a datanode/network failure in the write pipeline,
+            DFSClient will try to remove the failed datanode from the pipeline
+            and then continue writing with the remaining datanodes. As a result,
+            the number of datanodes in the pipeline is decreased. The feature is
+            to add new datanodes to the pipeline.
+
+            This is a site-wide property to enable/disable the feature.
+
+            When the cluster size is extremely small, e.g. 3 nodes or less, cluster
+            administrators may want to set the policy to NEVER in the default
+            configuration file or disable this feature. Otherwise, users may
+            experience an unusually high rate of pipeline failures since it is
+            impossible to find new datanodes for replacement.
+
+            See also dfs.client.block.write.replace-datanode-on-failure.policy
+        </description>
+    </property>
+
+    <property>
+        <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
+        <value>ALWAYS</value>
+        <description>
+            This property is used only if the value of
+            dfs.client.block.write.replace-datanode-on-failure.enable is true.
+
+            ALWAYS: always add a new datanode when an existing datanode is
+            removed.
+
+            NEVER: never add a new datanode.
+
+            DEFAULT:
+            Let r be the replication number.
+            Let n be the number of existing datanodes.
+            Add a new datanode only if r is greater than or equal to 3 and either
+            (1) floor(r/2) is greater than or equal to n; or
+            (2) r is greater than n and the block is hflushed/appended.
+        </description>
+    </property>
+</configuration>
diff --git a/spark/processing/3.1/py3/hadoop-config/spark-defaults.conf b/spark/processing/3.1/py3/hadoop-config/spark-defaults.conf
new file mode 100644
index 0000000..734086a
--- /dev/null
+++ b/spark/processing/3.1/py3/hadoop-config/spark-defaults.conf
@@ -0,0 +1,10 @@
+spark.driver.extraClassPath /usr/lib/hadoop-lzo/lib/*:/usr/lib/hadoop/hadoop-aws.jar:/usr/share/aws/aws-java-sdk/*:/usr/share/aws/emr/emrfs/conf:/usr/share/aws/emr/emrfs/lib/*:/usr/share/aws/emr/emrfs/auxlib/*:/usr/share/aws/emr/goodies/lib/emr-spark-goodies.jar:/usr/share/aws/emr/security/conf:/usr/share/aws/emr/security/lib/*:/usr/share/aws/hmclient/lib/aws-glue-datacatalog-spark-client.jar:/usr/share/java/Hive-JSON-Serde/hive-openx-serde.jar:/usr/share/aws/sagemaker-spark-sdk/lib/sagemaker-spark-sdk.jar:/usr/share/aws/emr/s3select/lib/emr-s3-select-spark-connector.jar
+spark.driver.extraLibraryPath /usr/lib/hadoop/lib/native:/usr/lib/hadoop-lzo/lib/native
+spark.executor.extraClassPath /usr/lib/hadoop-lzo/lib/*:/usr/lib/hadoop/hadoop-aws.jar:/usr/share/aws/aws-java-sdk/*:/usr/share/aws/emr/emrfs/conf:/usr/share/aws/emr/emrfs/lib/*:/usr/share/aws/emr/emrfs/auxlib/*:/usr/share/aws/emr/goodies/lib/emr-spark-goodies.jar:/usr/share/aws/emr/security/conf:/usr/share/aws/emr/security/lib/*:/usr/share/aws/hmclient/lib/aws-glue-datacatalog-spark-client.jar:/usr/share/java/Hive-JSON-Serde/hive-openx-serde.jar:/usr/share/aws/sagemaker-spark-sdk/lib/sagemaker-spark-sdk.jar:/usr/share/aws/emr/s3select/lib/emr-s3-select-spark-connector.jar
+spark.executor.extraLibraryPath /usr/lib/hadoop/lib/native:/usr/lib/hadoop-lzo/lib/native
+spark.driver.host=sd_host
+spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version=2
+
+# Fix for "Uncaught exception: org.apache.spark.rpc.RpcTimeoutException: Cannot
+# receive any reply from 10.0.109.30:35219 in 120 seconds."
+spark.rpc.askTimeout=300s
diff --git a/spark/processing/3.1/py3/hadoop-config/spark-env.sh b/spark/processing/3.1/py3/hadoop-config/spark-env.sh
new file mode 100644
index 0000000..1b58aa1
--- /dev/null
+++ b/spark/processing/3.1/py3/hadoop-config/spark-env.sh
@@ -0,0 +1,3 @@
+# EMPTY FILE TO AVOID OVERRIDING ENV VARS
+# Specifically, without copying this empty file, SPARK_HISTORY_OPTS would be overridden,
+# spark.history.ui.port would default to 18082, and spark.eventLog.dir would default to the local fs.
diff --git a/spark/processing/3.1/py3/hadoop-config/yarn-site.xml b/spark/processing/3.1/py3/hadoop-config/yarn-site.xml
new file mode 100644
index 0000000..3790582
--- /dev/null
+++ b/spark/processing/3.1/py3/hadoop-config/yarn-site.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<configuration>
+    <property>
+        <name>yarn.resourcemanager.hostname</name>
+        <value>rm_hostname</value>
+        <description>The hostname of the RM.</description>
+    </property>
+    <property>
+        <name>yarn.nodemanager.hostname</name>
+        <value>nm_hostname</value>
+        <description>The hostname of the NM.</description>
+    </property>
+    <property>
+        <name>yarn.nodemanager.webapp.address</name>
+        <value>nm_webapp_address</value>
+    </property>
+    <property>
+        <name>yarn.nodemanager.vmem-pmem-ratio</name>
+        <value>5</value>
+        <description>Ratio of virtual memory to physical memory.</description>
+    </property>
+    <property>
+        <name>yarn.resourcemanager.am.max-attempts</name>
+        <value>1</value>
+        <description>The maximum number of application attempts.</description>
+    </property>
+    <property>
+        <name>yarn.nodemanager.env-whitelist</name>
+        <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME,AWS_CONTAINER_CREDENTIALS_RELATIVE_URI</value>
+        <description>Environment variable whitelist</description>
+    </property>
+</configuration>
diff --git a/spark/processing/3.1/py3/nginx-config/default.conf b/spark/processing/3.1/py3/nginx-config/default.conf
new file mode 100644
index 0000000..a8a50a5
--- /dev/null
+++ b/spark/processing/3.1/py3/nginx-config/default.conf
@@ -0,0 +1,17 @@
+server {
+    listen 15050;
+    server_name localhost;
+    client_header_buffer_size 128k;
+    large_client_header_buffers 4 128k;
+
+    location ~ ^/history/(.*)/(.*)/jobs/$ {
+        proxy_pass http://localhost:18080/history/$1/jobs/;
+        proxy_redirect http://localhost:18080/history/$1/jobs/ $domain_name/proxy/15050/history/$1/jobs/;
+        expires off;
+    }
+
+    location / {
+        proxy_pass http://localhost:18080;
+        expires off;
+    }
+}
\ No newline at end of file
diff --git a/spark/processing/3.1/py3/nginx-config/nginx.conf b/spark/processing/3.1/py3/nginx-config/nginx.conf
new file mode 100644
index 0000000..1e3a51c
--- /dev/null
+++ b/spark/processing/3.1/py3/nginx-config/nginx.conf
@@ -0,0 +1,66 @@
+# For more information on configuration, see:
+#   * Official English Documentation: http://nginx.org/en/docs/
+#   * Official Russian Documentation: http://nginx.org/ru/docs/
+
+user nginx;
+worker_processes auto;
+error_log /var/log/nginx/error.log;
+pid /run/nginx.pid;
+
+# Load dynamic modules. See /usr/share/doc/nginx/README.dynamic.
+include /usr/share/nginx/modules/*.conf;
+
+events {
+    worker_connections 1024;
+}
+
+http {
+    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+                    '$status $body_bytes_sent "$http_referer" '
+                    '"$http_user_agent" "$http_x_forwarded_for"';
+
+    access_log /var/log/nginx/access.log main;
+
+    sendfile on;
+    tcp_nopush on;
+    tcp_nodelay on;
+    keepalive_timeout 65;
+    types_hash_max_size 2048;
+
+    include /etc/nginx/mime.types;
+    default_type application/octet-stream;
+
+    # Load modular configuration files from the /etc/nginx/conf.d directory.
+    # See http://nginx.org/en/docs/ngx_core_module.html#include
+    # for more information.
+    include /etc/nginx/conf.d/*.conf;
+
+    server {
+        listen 80 default_server;
+        listen [::]:80 default_server;
+        server_name _;
+        root /usr/share/nginx/html;
+
+        # Load configuration files for the default server block.
+        include /etc/nginx/default.d/*.conf;
+
+        location /proxy/15050 {
+            proxy_pass http://localhost:15050/;
+        }
+
+        location ~ ^/proxy/15050/(.*) {
+            proxy_pass http://localhost:15050/$1;
+        }
+
+        location / {
+        }
+
+        error_page 404 /404.html;
+        location = /40x.html {
+        }
+
+        error_page 500 502 503 504 /50x.html;
+        location = /50x.html {
+        }
+    }
+}
\ No newline at end of file
diff --git a/spark/processing/3.1/py3/yum/emr-apps.repo b/spark/processing/3.1/py3/yum/emr-apps.repo
new file mode 100644
index 0000000..5ac1b03
--- /dev/null
+++ b/spark/processing/3.1/py3/yum/emr-apps.repo
@@ -0,0 +1,17 @@
+[emr-apps]
+name = EMR Application Repository
+gpgkey = https://s3-REGION.amazonaws.com/repo.REGION.emr.amazonaws.com/apps-repository/emr-6.3.0/184d7755-d3a2-4c5c-9e1f-c72d4f2b33f1/repoPublicKey.txt
+enabled = 1
+baseurl = https://s3-REGION.amazonaws.com/repo.REGION.emr.amazonaws.com/apps-repository/emr-6.3.0/184d7755-d3a2-4c5c-9e1f-c72d4f2b33f1
+priority = 5
+gpgcheck = 0
+
+[emr-puppet]
+mirrorlist: http://amazonlinux.$awsregion.$awsdomain/$releasever/extras/emr-puppet/latest/$basearch/mirror.list
+enabled: 1
+gpgcheck: 1
+name: Amazon Extras repo for emr-puppet
+gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-amazon-linux-2
+priority: 10
+skip_if_unavailable: 1
+report_instanceid: yes
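
Note (not part of the patch): a minimal sketch of how the REGION placeholder in emr-apps.repo gets filled in. The Dockerfile's `sed -i "s/REGION/${AWS_REGION}/g"` step rewrites the repo URLs with the build region at image-build time; the region value below is an assumption for illustration only.

# Hypothetical check, run from the repo root with an assumed region of us-west-2:
sed "s/REGION/us-west-2/g" spark/processing/3.1/py3/yum/emr-apps.repo | grep "^baseurl"
# Expected output (derived from the repo file above):
# baseurl = https://s3-us-west-2.amazonaws.com/repo.us-west-2.emr.amazonaws.com/apps-repository/emr-6.3.0/184d7755-d3a2-4c5c-9e1f-c72d4f2b33f1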