Skip to content

Commit

Permalink
Update on "[GPU] Make permuteWeights inline"
Browse files Browse the repository at this point in the history
Follow-up on @dust's diff - D24710102. Make the function inline in order to suppress the compiler warning triggered by `-Werror,-Wunused-function`.

Differential Revision: [D24824637](https://our.internmc.facebook.com/intern/diff/D24824637/)

[ghstack-poisoned]
  • Loading branch information
xta0 committed Nov 13, 2020
2 parents 4293d9d + a97c7e2 commit 1711d57
Show file tree
Hide file tree
Showing 325 changed files with 8,517 additions and 5,899 deletions.
4 changes: 3 additions & 1 deletion .circleci/config.yml
Expand Up @@ -1205,6 +1205,7 @@ jobs:
export COMMAND='((echo "sudo chown -R jenkins workspace && cd workspace && . ./.circleci/scripts/python_doc_push_script.sh docs/'$target' master site") | docker exec -u jenkins -i "$id" bash) 2>&1'
export CIRCLE_SHA1="$CIRCLE_SHA1"
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
mkdir -p ~/workspace/build_artifacts
Expand Down Expand Up @@ -1250,6 +1251,7 @@ jobs:
export COMMAND='((echo "sudo chown -R jenkins workspace && cd workspace && . ./.circleci/scripts/cpp_doc_push_script.sh docs/"$target" master") | docker exec -u jenkins -i "$id" bash) 2>&1'
export CIRCLE_SHA1="$CIRCLE_SHA1"
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
mkdir -p ~/workspace/build_artifacts
Expand Down Expand Up @@ -1579,7 +1581,7 @@ jobs:
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
}
retry conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing requests --yes
retry conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi requests --yes
# sync submodules
cd ${PROJ_ROOT}
Expand Down
5 changes: 2 additions & 3 deletions .circleci/docker/common/install_conda.sh
Expand Up @@ -72,14 +72,13 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
# DO NOT install cmake here as it would install a version newer than 3.5, but
# we want to pin to version 3.5.
if [ "$ANACONDA_PYTHON_VERSION" = "3.8" ]; then
# DO NOT install typing if installing python-3.8, since it's part of python-3.8 core packages
# Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
conda_install numpy=1.18.5 pyyaml mkl mkl-include setuptools cffi future six llvmdev=8.0.0
elif [ "$ANACONDA_PYTHON_VERSION" = "3.7" ]; then
# DO NOT install dataclasses if installing python-3.7, since it's part of python-3.7 core packages
conda_install numpy=1.18.5 pyyaml mkl mkl-include setuptools cffi typing future six
conda_install numpy=1.18.5 pyyaml mkl mkl-include setuptools cffi future six
else
conda_install numpy=1.18.5 pyyaml mkl mkl-include setuptools cffi typing future six dataclasses
conda_install numpy=1.18.5 pyyaml mkl mkl-include setuptools cffi future six dataclasses
fi
if [[ "$CUDA_VERSION" == 9.2* ]]; then
conda_install magma-cuda92 -c pytorch
Expand Down
2 changes: 1 addition & 1 deletion .circleci/scripts/binary_ios_build.sh
Expand Up @@ -15,7 +15,7 @@ export PATH="~/anaconda/bin:${PATH}"
source ~/anaconda/bin/activate

# Install dependencies
conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing requests --yes
conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi requests --yes
conda install -c conda-forge valgrind --yes
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}

Expand Down
2 changes: 1 addition & 1 deletion .circleci/scripts/cpp_doc_push_script.sh
Expand Up @@ -88,7 +88,7 @@ git status
git config user.email "soumith+bot@pytorch.org"
git config user.name "pytorchbot"
# If there aren't changes, don't make a commit; push is no-op
git commit -m "Automatic sync on $(date)" || true
git commit -m "Generate C++ docs from pytorch/pytorch@$CIRCLE_SHA1" || true
git status

popd
Expand Down
2 changes: 1 addition & 1 deletion .circleci/scripts/python_doc_push_script.sh
Expand Up @@ -107,7 +107,7 @@ git status
git config user.email "soumith+bot@pytorch.org"
git config user.name "pytorchbot"
# If there aren't changes, don't make a commit; push is no-op
git commit -m "auto-generating sphinx docs" || true
git commit -m "Generate Python docs from pytorch/pytorch@$CIRCLE_SHA1" || true
git status

popd
Expand Down
4 changes: 3 additions & 1 deletion .circleci/verbatim-sources/job-specs/job-specs-custom.yml
Expand Up @@ -51,6 +51,7 @@
export COMMAND='((echo "sudo chown -R jenkins workspace && cd workspace && . ./.circleci/scripts/python_doc_push_script.sh docs/'$target' master site") | docker exec -u jenkins -i "$id" bash) 2>&1'
export CIRCLE_SHA1="$CIRCLE_SHA1"
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
mkdir -p ~/workspace/build_artifacts
Expand Down Expand Up @@ -96,6 +97,7 @@
export COMMAND='((echo "sudo chown -R jenkins workspace && cd workspace && . ./.circleci/scripts/cpp_doc_push_script.sh docs/"$target" master") | docker exec -u jenkins -i "$id" bash) 2>&1'
export CIRCLE_SHA1="$CIRCLE_SHA1"
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
mkdir -p ~/workspace/build_artifacts
Expand Down Expand Up @@ -425,7 +427,7 @@
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
}
retry conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing requests --yes
retry conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi requests --yes
# sync submodules
cd ${PROJ_ROOT}
Expand Down
7 changes: 6 additions & 1 deletion .github/workflows/lint.yml
Expand Up @@ -22,8 +22,13 @@ jobs:
pip install -r requirements.txt
cd .circleci && ./ensure-consistency.py
- name: Shellcheck Jenkins scripts
# https://github.com/koalaman/shellcheck#installing-a-pre-compiled-binary
run: |
sudo apt-get install -y shellcheck
scversion="stable"
wget -qO- "https://github.com/koalaman/shellcheck/releases/download/${scversion?}/shellcheck-${scversion?}.linux.x86_64.tar.xz" | tar -xJv
sudo cp "shellcheck-${scversion}/shellcheck" /usr/bin/
rm -r "shellcheck-${scversion}"
shellcheck --version
.jenkins/run-shellcheck.sh
- name: Ensure no tabs
run: |
Expand Down
6 changes: 6 additions & 0 deletions .jenkins/pytorch/.shellcheckrc
@@ -0,0 +1,6 @@
disable=SC2086
disable=SC1091
disable=SC2155
disable=SC1090
disable=SC2164
disable=SC1003
1 change: 1 addition & 0 deletions .jenkins/pytorch/build-mobile-code-analysis.sh
Expand Up @@ -5,6 +5,7 @@ set -eu -o pipefail
# This script builds and runs code analyzer tool to generate aten op dependency
# graph for custom mobile build.

# shellcheck disable=SC2034
COMPACT_JOB_NAME="${BUILD_ENVIRONMENT}"

source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
Expand Down
1 change: 1 addition & 0 deletions .jenkins/pytorch/build-mobile.sh
Expand Up @@ -6,6 +6,7 @@ set -eu -o pipefail
# build & test mobile libtorch without having to setup Android/iOS
# toolchain/simulator.

# shellcheck disable=SC2034
COMPACT_JOB_NAME="${BUILD_ENVIRONMENT}"

source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
Expand Down
2 changes: 1 addition & 1 deletion .jenkins/pytorch/build.sh
Expand Up @@ -165,7 +165,7 @@ fi
# sccache will fail for CUDA builds if all cores are used for compiling
# gcc 7 with sccache seems to have intermittent OOM issue if all cores are used
if [ -z "$MAX_JOBS" ]; then
if ([[ "$BUILD_ENVIRONMENT" == *cuda* ]] || [[ "$BUILD_ENVIRONMENT" == *gcc7* ]]) && which sccache > /dev/null; then
if { [[ "$BUILD_ENVIRONMENT" == *cuda* ]] || [[ "$BUILD_ENVIRONMENT" == *gcc7* ]]; } && which sccache > /dev/null; then
export MAX_JOBS=$(($(nproc) - 1))
fi
fi
Expand Down
1 change: 1 addition & 0 deletions .jenkins/pytorch/codegen-test.sh
Expand Up @@ -12,6 +12,7 @@
set -eu -o pipefail

if [ "$#" -eq 0 ]; then
# shellcheck disable=SC2034
COMPACT_JOB_NAME="${BUILD_ENVIRONMENT}"
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
OUT="$(dirname "${BASH_SOURCE[0]}")/../../codegen_result"
Expand Down
7 changes: 4 additions & 3 deletions .jenkins/pytorch/common.sh
Expand Up @@ -18,7 +18,7 @@ if [[ "${BUILD_ENVIRONMENT}" == *rocm* ]] && [[ "${BUILD_ENVIRONMENT}" =~ py((2|
# non-interactive bashs do not expand aliases by default
shopt -s expand_aliases
export PYTORCH_TEST_WITH_ROCM=1
alias python="$PYTHON"
alias python='$PYTHON'
# temporary to locate some kernel issues on the CI nodes
export HSAKMT_DEBUG_LEVEL=4
fi
Expand All @@ -45,7 +45,7 @@ fatal() { error "$@"; exit 1; }
# - remaining args: names of traps to modify
#
trap_add() {
trap_add_cmd=$1; shift || fatal "${FUNCNAME} usage error"
trap_add_cmd=$1; shift || fatal "${FUNCNAME[0]} usage error"
for trap_add_name in "$@"; do
trap -- "$(
# helper fn to get existing trap command from output
Expand Down Expand Up @@ -116,6 +116,7 @@ if [[ "$BUILD_ENVIRONMENT" == *pytorch-linux-xenial-cuda10.1-cudnn7-py3* ]] || \
[[ "$BUILD_ENVIRONMENT" == *pytorch_macos* ]]; then
BUILD_TEST_LIBTORCH=1
else
# shellcheck disable=SC2034
BUILD_TEST_LIBTORCH=0
fi
Expand All @@ -138,5 +139,5 @@ if [[ "$BUILD_ENVIRONMENT" == *pytorch-xla-linux-bionic* ]] || \
fi
retry () {
$* || (sleep 1 && $*) || (sleep 2 && $*)
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
}
2 changes: 1 addition & 1 deletion .jenkins/pytorch/common_utils.sh
Expand Up @@ -18,7 +18,7 @@ function cleanup {
function assert_git_not_dirty() {
# TODO: we should add an option to `build_amd.py` that reverts the repo to
# an unmodified state.
if ([[ "$BUILD_ENVIRONMENT" != *rocm* ]] && [[ "$BUILD_ENVIRONMENT" != *xla* ]]) ; then
if [[ "$BUILD_ENVIRONMENT" != *rocm* ]] && [[ "$BUILD_ENVIRONMENT" != *xla* ]] ; then
git_status=$(git status --porcelain)
if [[ $git_status ]]; then
echo "Build left local git repository checkout dirty"
Expand Down
4 changes: 2 additions & 2 deletions .jenkins/pytorch/macos-build.sh
Expand Up @@ -13,10 +13,10 @@ if [ -z "${IN_CI}" ]; then
fi

if which sccache > /dev/null; then
printf "#!/bin/sh\nexec sccache $(which clang++) \$*" > "${WORKSPACE_DIR}/clang++"
printf "#!/bin/sh\nexec sccache %s \$*" "$(which clang++)" > "${WORKSPACE_DIR}/clang++"
chmod a+x "${WORKSPACE_DIR}/clang++"

printf "#!/bin/sh\nexec sccache $(which clang) \$*" > "${WORKSPACE_DIR}/clang"
printf "#!/bin/sh\nexec sccache %s \$*" "$(which clang)" > "${WORKSPACE_DIR}/clang"
chmod a+x "${WORKSPACE_DIR}/clang"

export PATH="${WORKSPACE_DIR}:$PATH"
Expand Down
Expand Up @@ -21,7 +21,7 @@ test_cpu_speed_mini_sequence_labeler () {

for (( i=1; i<=NUM_RUNS; i++ )) do
runtime=$(get_runtime_of_command python main.py)
SAMPLE_ARRAY+=(${runtime})
SAMPLE_ARRAY+=("${runtime}")
done

cd ../../..
Expand Down
2 changes: 1 addition & 1 deletion .jenkins/pytorch/perf_test/test_cpu_speed_mnist.sh
Expand Up @@ -23,7 +23,7 @@ test_cpu_speed_mnist () {
for (( i=1; i<=NUM_RUNS; i++ )) do
runtime=$(get_runtime_of_command python main.py --epochs 1 --no-log)
echo $runtime
SAMPLE_ARRAY+=(${runtime})
SAMPLE_ARRAY+=("${runtime}")
done

cd ../..
Expand Down
2 changes: 1 addition & 1 deletion .jenkins/pytorch/perf_test/test_gpu_speed_cudnn_lstm.sh
Expand Up @@ -22,7 +22,7 @@ test_gpu_speed_cudnn_lstm () {
for (( i=1; i<=NUM_RUNS; i++ )) do
runtime=$(get_runtime_of_command python cudnn_lstm.py --skip-cpu-governor-check)
echo $runtime
SAMPLE_ARRAY+=(${runtime})
SAMPLE_ARRAY+=("${runtime}")
done

cd ../..
Expand Down
2 changes: 1 addition & 1 deletion .jenkins/pytorch/perf_test/test_gpu_speed_lstm.sh
Expand Up @@ -22,7 +22,7 @@ test_gpu_speed_lstm () {
for (( i=1; i<=NUM_RUNS; i++ )) do
runtime=$(get_runtime_of_command python lstm.py --skip-cpu-governor-check)
echo $runtime
SAMPLE_ARRAY+=(${runtime})
SAMPLE_ARRAY+=("${runtime}")
done

cd ../..
Expand Down
2 changes: 1 addition & 1 deletion .jenkins/pytorch/perf_test/test_gpu_speed_mlstm.sh
Expand Up @@ -22,7 +22,7 @@ test_gpu_speed_mlstm () {
for (( i=1; i<=NUM_RUNS; i++ )) do
runtime=$(get_runtime_of_command python mlstm.py --skip-cpu-governor-check)
echo $runtime
SAMPLE_ARRAY+=(${runtime})
SAMPLE_ARRAY+=("${runtime}")
done

cd ../..
Expand Down
2 changes: 1 addition & 1 deletion .jenkins/pytorch/perf_test/test_gpu_speed_mnist.sh
Expand Up @@ -26,7 +26,7 @@ test_gpu_speed_mnist () {
for (( i=1; i<=NUM_RUNS; i++ )) do
runtime=$(get_runtime_of_command python main.py --epochs 1 --no-log)
echo $runtime
SAMPLE_ARRAY+=(${runtime})
SAMPLE_ARRAY+=("${runtime}")
done

cd ../..
Expand Down
Expand Up @@ -31,7 +31,7 @@ test_gpu_speed_word_language_model () {
for (( i=1; i<=NUM_RUNS; i++ )) do
runtime=$(get_runtime_of_command python main.py --cuda --epochs 1)
echo $runtime
SAMPLE_ARRAY+=(${runtime})
SAMPLE_ARRAY+=("${runtime}")
done

cd ../..
Expand Down
5 changes: 2 additions & 3 deletions .jenkins/pytorch/short-perf-test-cpu.sh
Expand Up @@ -27,13 +27,12 @@ fi
git remote add upstream https://github.com/pytorch/pytorch.git
git fetch upstream
IFS=$'\n'
master_commit_ids=($(git rev-list upstream/master))
for commit_id in "${master_commit_ids[@]}"; do
while IFS='' read -r commit_id; do
if aws s3 ls s3://ossci-perf-test/pytorch/cpu_runtime/${commit_id}.json; then
LATEST_TESTED_COMMIT=${commit_id}
break
fi
done
done < <(git rev-list upstream/master)
aws s3 cp s3://ossci-perf-test/pytorch/cpu_runtime/${LATEST_TESTED_COMMIT}.json cpu_runtime.json

if [[ "$COMMIT_SOURCE" == master ]]; then
Expand Down
5 changes: 2 additions & 3 deletions .jenkins/pytorch/short-perf-test-gpu.sh
Expand Up @@ -26,13 +26,12 @@ fi
git remote add upstream https://github.com/pytorch/pytorch.git
git fetch upstream
IFS=$'\n'
master_commit_ids=($(git rev-list upstream/master))
for commit_id in "${master_commit_ids[@]}"; do
while IFS='' read -r commit_id; do
if aws s3 ls s3://ossci-perf-test/pytorch/gpu_runtime/${commit_id}.json; then
LATEST_TESTED_COMMIT=${commit_id}
break
fi
done
done < <(git rev-list upstream/master)
aws s3 cp s3://ossci-perf-test/pytorch/gpu_runtime/${LATEST_TESTED_COMMIT}.json gpu_runtime.json

if [[ "$COMMIT_SOURCE" == master ]]; then
Expand Down
8 changes: 4 additions & 4 deletions .jenkins/pytorch/test.sh
Expand Up @@ -22,7 +22,7 @@ fi

if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
# Print GPU info
rocminfo | egrep 'Name:.*\sgfx|Marketing'
rocminfo | grep -E 'Name:.*\sgfx|Marketing'
fi

# --user breaks ppc64le builds and these packages are already in ppc64le docker
Expand Down Expand Up @@ -93,7 +93,7 @@ elif [[ "${BUILD_ENVIRONMENT}" == *-NO_AVX2-* ]]; then
export ATEN_CPU_CAPABILITY=avx
fi

if ([ -n "$CIRCLE_PULL_REQUEST" ] && [[ "$BUILD_ENVIRONMENT" != *coverage* ]]); then
if [ -n "$CIRCLE_PULL_REQUEST" ] && [[ "$BUILD_ENVIRONMENT" != *coverage* ]]; then
DETERMINE_FROM=$(mktemp)
file_diff_from_base "$DETERMINE_FROM"
fi
Expand All @@ -117,7 +117,7 @@ test_aten() {
# Test ATen
# The following test(s) of ATen have already been skipped by caffe2 in rocm environment:
# scalar_tensor_test, basic, native_test
if ([[ "$BUILD_ENVIRONMENT" != *asan* ]] && [[ "$BUILD_ENVIRONMENT" != *rocm* ]]); then
if [[ "$BUILD_ENVIRONMENT" != *asan* ]] && [[ "$BUILD_ENVIRONMENT" != *rocm* ]]; then
echo "Running ATen tests with pytorch lib"
TORCH_LIB_PATH=$(python -c "import site; print(site.getsitepackages()[0])")/torch/lib
# NB: the ATen test binaries don't have RPATH set, so it's necessary to
Expand Down Expand Up @@ -255,7 +255,7 @@ test_torch_function_benchmark() {
test_xla() {
export XLA_USE_XRT=1 XRT_DEVICE_MAP="CPU:0;/job:localservice/replica:0/task:0/device:XLA_CPU:0"
# Issue #30717: randomize the port of XLA/gRPC workers is listening on to reduce flaky tests.
XLA_PORT=`shuf -i 40701-40999 -n 1`
XLA_PORT=$(shuf -i 40701-40999 -n 1)
export XRT_WORKERS="localservice:0;grpc://localhost:$XLA_PORT"
pushd xla
echo "Running Python Tests"
Expand Down
2 changes: 1 addition & 1 deletion .jenkins/pytorch/win-build.sh
Expand Up @@ -15,7 +15,7 @@ COMPACT_JOB_NAME=pytorch-win-ws2019-cuda10-cudnn7-py3-build
SCRIPT_PARENT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source "$SCRIPT_PARENT_DIR/common.sh"

export IMAGE_COMMIT_ID=`git rev-parse HEAD`
export IMAGE_COMMIT_ID=$(git rev-parse HEAD)
export IMAGE_COMMIT_TAG=${BUILD_ENVIRONMENT}-${IMAGE_COMMIT_ID}
if [[ ${JOB_NAME} == *"develop"* ]]; then
export IMAGE_COMMIT_TAG=develop-${IMAGE_COMMIT_TAG}
Expand Down
2 changes: 1 addition & 1 deletion .jenkins/pytorch/win-test.sh
Expand Up @@ -6,7 +6,7 @@ COMPACT_JOB_NAME=pytorch-win-ws2019-cuda10-cudnn7-py3-test
SCRIPT_PARENT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source "$SCRIPT_PARENT_DIR/common.sh"

export IMAGE_COMMIT_ID=`git rev-parse HEAD`
export IMAGE_COMMIT_ID=$(git rev-parse HEAD)
export IMAGE_COMMIT_TAG=${BUILD_ENVIRONMENT}-${IMAGE_COMMIT_ID}
if [[ ${JOB_NAME} == *"develop"* ]]; then
export IMAGE_COMMIT_TAG=develop-${IMAGE_COMMIT_TAG}
Expand Down
4 changes: 1 addition & 3 deletions .jenkins/run-shellcheck.sh
Expand Up @@ -5,6 +5,4 @@
# .jenkins/run-shellcheck.sh --color=always | less -R


EXCLUSIONS=SC2086,SC1091,SC2155,SC1090,SC2164,SC1003

find .jenkins/pytorch -name *.sh | xargs shellcheck --exclude=$EXCLUSIONS --external-sources "$@" || true
find .jenkins/pytorch -name *.sh | xargs shellcheck --external-sources "$@"
15 changes: 8 additions & 7 deletions BUILD.bazel
Expand Up @@ -125,15 +125,17 @@ genrule(
] + glob(["aten/src/ATen/templates/**"]),
outs = [
"aten/src/ATen/Declarations.yaml",
"aten/src/ATen/BackendSelectRegister.cpp",
"aten/src/ATen/CPUType.cpp",
"aten/src/ATen/RegisterBackendSelect.cpp",
"aten/src/ATen/RegisterCPU.cpp",
"aten/src/ATen/RegisterMkldnnCPU.cpp",
"aten/src/ATen/RegisterQuantizedCPU.cpp",
"aten/src/ATen/RegisterSparseCPU.cpp",
"aten/src/ATen/RegisterMath.cpp",
"aten/src/ATen/RegisterDefaultBackend.cpp",
"aten/src/ATen/RegisterSchema.cpp",
"aten/src/ATen/Functions.h",
"aten/src/ATen/Functions.cpp",
"aten/src/ATen/NativeFunctions.h",
"aten/src/ATen/MkldnnCPUType.cpp",
"aten/src/ATen/QuantizedCPUType.cpp",
"aten/src/ATen/SparseCPUType.cpp",
"aten/src/ATen/TypeDefault.cpp",
"aten/src/ATen/core/TensorBody.h",
"aten/src/ATen/core/TensorMethods.cpp",
"aten/src/ATen/core/ATenOpList.cpp",
Expand Down Expand Up @@ -378,7 +380,6 @@ filegroup(
"aten/src/THC/THCTensorCopy.cu.cc",
"aten/src/THC/THCTensorIndex.cu.cc",
"aten/src/THC/THCTensorMath.cu.cc",
"aten/src/THC/THCTensorMathBlas.cu.cc",
"aten/src/THC/THCTensorMathMagma.cu.cc",
"aten/src/THC/THCTensorMathPairwise.cu.cc",
"aten/src/THC/THCTensorMathReduce.cu.cc",
Expand Down

0 comments on commit 1711d57

Please sign in to comment.