From 3dba33f81d85d1852f76bd78227d5c965aad545f Mon Sep 17 00:00:00 2001 From: Chris Jowett <421501+cryptk@users.noreply.github.com> Date: Fri, 10 May 2024 21:24:56 -0500 Subject: [PATCH 1/5] feat: create bash library to handle install/run/test of python backends Signed-off-by: Chris Jowett <421501+cryptk@users.noreply.github.com> --- backend/python/autogptq/Makefile | 2 +- backend/python/autogptq/autogptq.yml | 93 -------- .../autogptq/{autogptq.py => backend.py} | 0 backend/python/autogptq/install.sh | 34 +-- backend/python/autogptq/run.sh | 10 +- backend/python/autogptq/test.sh | 16 +- backend/python/bark/Makefile | 2 +- .../python/bark/{ttsbark.py => backend.py} | 0 backend/python/bark/install.sh | 34 +-- backend/python/bark/run.sh | 10 +- backend/python/bark/test.py | 2 +- backend/python/bark/test.sh | 16 +- .../python/common-env/transformers/Makefile | 21 -- .../python/common-env/transformers/install.sh | 44 ---- .../transformers/transformers-nvidia.yml | 125 ----------- .../transformers/transformers-rocm.yml | 113 ---------- .../common-env/transformers/transformers.yml | 118 ---------- backend/python/common/libbackend.sh | 201 ++++++++++++++++++ backend/python/common/template/Makefile | 17 ++ backend/python/common/template/backend.py | 4 + backend/python/common/template/install.sh | 6 + .../python/common/template/requirements.txt | 2 + backend/python/common/template/run.sh | 4 + backend/python/common/template/test.sh | 6 + backend/python/coqui/Makefile | 2 +- .../coqui/{coqui_server.py => backend.py} | 0 backend/python/coqui/install.sh | 34 +-- backend/python/coqui/run.sh | 10 +- backend/python/coqui/test.py | 2 +- backend/python/coqui/test.sh | 16 +- backend/python/diffusers/Makefile | 2 +- .../{backend_diffusers.py => backend.py} | 0 backend/python/diffusers/install.sh | 34 +-- .../python/diffusers/requirements-intel.txt | 1 + backend/python/diffusers/run.sh | 10 +- backend/python/diffusers/test.py | 2 +- backend/python/diffusers/test.sh | 16 +- backend/python/exllama/Makefile | 2 +- .../python/exllama/{exllama.py => backend.py} | 6 +- backend/python/exllama/exllama.yml | 56 ----- backend/python/exllama/install.sh | 34 +-- backend/python/exllama/run.sh | 11 +- backend/python/exllama/test.sh | 16 +- backend/python/exllama2/Makefile | 2 +- .../{exllama2_backend.py => backend.py} | 0 backend/python/exllama2/install.sh | 40 +--- backend/python/exllama2/run.sh | 10 +- backend/python/exllama2/test.sh | 16 +- backend/python/mamba/Makefile | 2 +- .../mamba/{backend_mamba.py => backend.py} | 0 backend/python/mamba/install.sh | 40 +--- backend/python/mamba/run.sh | 10 +- backend/python/mamba/test.py | 2 +- backend/python/mamba/test.sh | 16 +- backend/python/parler-tts/Makefile | 2 +- .../{parler_tts_server.py => backend.py} | 0 backend/python/parler-tts/install.sh | 38 +--- backend/python/parler-tts/parler-nvidia.yml | 48 ----- backend/python/parler-tts/parler.yml | 36 ---- backend/python/parler-tts/run.sh | 10 +- backend/python/parler-tts/test.py | 2 +- backend/python/parler-tts/test.sh | 16 +- backend/python/petals/Makefile | 2 +- .../petals/{backend_petals.py => backend.py} | 0 backend/python/petals/install.sh | 34 +-- backend/python/petals/petals.yml | 30 --- backend/python/petals/run.sh | 10 +- backend/python/petals/test.py | 2 +- backend/python/petals/test.sh | 16 +- backend/python/rerankers/Makefile | 2 +- .../rerankers/{reranker.py => backend.py} | 0 backend/python/rerankers/install.sh | 34 +-- backend/python/rerankers/run.sh | 10 +- backend/python/rerankers/test.py | 2 +- 
backend/python/rerankers/test.sh | 16 +- backend/python/sentencetransformers/Makefile | 2 +- .../{sentencetransformers.py => backend.py} | 0 .../python/sentencetransformers/install.sh | 34 +-- backend/python/sentencetransformers/run.sh | 10 +- backend/python/sentencetransformers/test.py | 2 +- backend/python/sentencetransformers/test.sh | 16 +- backend/python/transformers-musicgen/Makefile | 2 +- .../{transformers_server.py => backend.py} | 0 .../python/transformers-musicgen/install.sh | 34 +-- backend/python/transformers-musicgen/run.sh | 10 +- backend/python/transformers-musicgen/test.py | 2 +- backend/python/transformers-musicgen/test.sh | 16 +- backend/python/transformers/Makefile | 2 +- .../{transformers_server.py => backend.py} | 0 backend/python/transformers/install.sh | 34 +-- backend/python/transformers/run.sh | 17 +- backend/python/transformers/test.py | 2 +- backend/python/transformers/test.sh | 16 +- backend/python/vall-e-x/Makefile | 2 +- backend/python/vall-e-x/__init__.py | 0 .../vall-e-x/{ttsvalle.py => backend.py} | 4 +- backend/python/vall-e-x/install.sh | 40 +--- backend/python/vall-e-x/run.sh | 10 +- backend/python/vall-e-x/test.py | 2 +- backend/python/vall-e-x/test.sh | 16 +- backend/python/vall-e-x/ttsvalle.yml | 101 --------- backend/python/vllm/Makefile | 2 +- .../vllm/{backend_vllm.py => backend.py} | 0 backend/python/vllm/install.sh | 34 +-- backend/python/vllm/run.sh | 10 +- backend/python/vllm/test.py | 2 +- backend/python/vllm/test.sh | 16 +- 107 files changed, 413 insertions(+), 1607 deletions(-) delete mode 100644 backend/python/autogptq/autogptq.yml rename backend/python/autogptq/{autogptq.py => backend.py} (100%) rename backend/python/bark/{ttsbark.py => backend.py} (100%) delete mode 100644 backend/python/common-env/transformers/Makefile delete mode 100755 backend/python/common-env/transformers/install.sh delete mode 100644 backend/python/common-env/transformers/transformers-nvidia.yml delete mode 100644 backend/python/common-env/transformers/transformers-rocm.yml delete mode 100644 backend/python/common-env/transformers/transformers.yml create mode 100644 backend/python/common/libbackend.sh create mode 100644 backend/python/common/template/Makefile create mode 100755 backend/python/common/template/backend.py create mode 100755 backend/python/common/template/install.sh create mode 100644 backend/python/common/template/requirements.txt create mode 100755 backend/python/common/template/run.sh create mode 100755 backend/python/common/template/test.sh rename backend/python/coqui/{coqui_server.py => backend.py} (100%) rename backend/python/diffusers/{backend_diffusers.py => backend.py} (100%) rename backend/python/exllama/{exllama.py => backend.py} (97%) delete mode 100644 backend/python/exllama/exllama.yml rename backend/python/exllama2/{exllama2_backend.py => backend.py} (100%) rename backend/python/mamba/{backend_mamba.py => backend.py} (100%) rename backend/python/parler-tts/{parler_tts_server.py => backend.py} (100%) delete mode 100644 backend/python/parler-tts/parler-nvidia.yml delete mode 100644 backend/python/parler-tts/parler.yml rename backend/python/petals/{backend_petals.py => backend.py} (100%) delete mode 100644 backend/python/petals/petals.yml rename backend/python/rerankers/{reranker.py => backend.py} (100%) rename backend/python/sentencetransformers/{sentencetransformers.py => backend.py} (100%) rename backend/python/transformers-musicgen/{transformers_server.py => backend.py} (100%) rename backend/python/transformers/{transformers_server.py 
=> backend.py} (100%) create mode 100644 backend/python/vall-e-x/__init__.py rename backend/python/vall-e-x/{ttsvalle.py => backend.py} (97%) delete mode 100644 backend/python/vall-e-x/ttsvalle.yml rename backend/python/vllm/{backend_vllm.py => backend.py} (100%) diff --git a/backend/python/autogptq/Makefile b/backend/python/autogptq/Makefile index 5e912a2cc8c..e2662b7a33e 100644 --- a/backend/python/autogptq/Makefile +++ b/backend/python/autogptq/Makefile @@ -14,4 +14,4 @@ backend_pb2_grpc.py backend_pb2.py: .PHONY: clean clean: protogen-clean - rm -rf venv \ No newline at end of file + rm -rf venv __pycache__ \ No newline at end of file diff --git a/backend/python/autogptq/autogptq.yml b/backend/python/autogptq/autogptq.yml deleted file mode 100644 index 1d11c998814..00000000000 --- a/backend/python/autogptq/autogptq.yml +++ /dev/null @@ -1,93 +0,0 @@ -#### -# Attention! This file is abandoned. -# Please use the ../common-env/transformers/transformers.yml file to manage dependencies. -### -name: autogptq -channels: - - defaults -dependencies: - - _libgcc_mutex=0.1=main - - _openmp_mutex=5.1=1_gnu - - bzip2=1.0.8=h7b6447c_0 - - ca-certificates=2023.08.22=h06a4308_0 - - ld_impl_linux-64=2.38=h1181459_1 - - libffi=3.4.4=h6a678d5_0 - - libgcc-ng=11.2.0=h1234567_1 - - libgomp=11.2.0=h1234567_1 - - libstdcxx-ng=11.2.0=h1234567_1 - - libuuid=1.41.5=h5eee18b_0 - - ncurses=6.4=h6a678d5_0 - - openssl=3.0.11=h7f8727e_2 - - pip=23.2.1=py311h06a4308_0 - - python=3.11.5=h955ad1f_0 - - readline=8.2=h5eee18b_0 - - setuptools=68.0.0=py311h06a4308_0 - - sqlite=3.41.2=h5eee18b_0 - - tk=8.6.12=h1ccaba5_0 - - wheel=0.41.2=py311h06a4308_0 - - xz=5.4.2=h5eee18b_0 - - zlib=1.2.13=h5eee18b_0 - - pip: - - accelerate==0.27.0 - - aiohttp==3.8.5 - - aiosignal==1.3.1 - - async-timeout==4.0.3 - - attrs==23.1.0 - - auto-gptq==0.7.1 - - certifi==2023.7.22 - - charset-normalizer==3.3.0 - - datasets==2.14.5 - - dill==0.3.7 - - filelock==3.12.4 - - frozenlist==1.4.0 - - fsspec==2023.6.0 - - grpcio==1.63.0 - - huggingface-hub==0.16.4 - - idna==3.4 - - jinja2==3.1.2 - - markupsafe==2.1.3 - - mpmath==1.3.0 - - multidict==6.0.4 - - multiprocess==0.70.15 - - networkx==3.1 - - numpy==1.26.0 - - nvidia-cublas-cu12==12.1.3.1 - - nvidia-cuda-cupti-cu12==12.1.105 - - nvidia-cuda-nvrtc-cu12==12.1.105 - - nvidia-cuda-runtime-cu12==12.1.105 - - nvidia-cudnn-cu12==8.9.2.26 - - nvidia-cufft-cu12==11.0.2.54 - - nvidia-curand-cu12==10.3.2.106 - - nvidia-cusolver-cu12==11.4.5.107 - - nvidia-cusparse-cu12==12.1.0.106 - - nvidia-nccl-cu12==2.18.1 - - nvidia-nvjitlink-cu12==12.2.140 - - nvidia-nvtx-cu12==12.1.105 - - optimum==1.17.1 - - packaging==23.2 - - pandas==2.1.1 - - peft==0.5.0 - - protobuf==4.24.4 - - psutil==5.9.5 - - pyarrow==13.0.0 - - python-dateutil==2.8.2 - - pytz==2023.3.post1 - - pyyaml==6.0.1 - - regex==2023.10.3 - - requests==2.31.0 - - rouge==1.0.1 - - safetensors>=0.3.3 - - six==1.16.0 - - sympy==1.12 - - tokenizers==0.14.0 - - tqdm==4.66.1 - - torch==2.2.1 - - torchvision==0.17.1 - - transformers==4.34.0 - - transformers_stream_generator==0.0.5 - - triton==2.1.0 - - typing-extensions==4.8.0 - - tzdata==2023.3 - - urllib3==2.0.6 - - xxhash==3.4.1 - - yarl==1.9.2 diff --git a/backend/python/autogptq/autogptq.py b/backend/python/autogptq/backend.py similarity index 100% rename from backend/python/autogptq/autogptq.py rename to backend/python/autogptq/backend.py diff --git a/backend/python/autogptq/install.sh b/backend/python/autogptq/install.sh index 311203ca602..2829b927e3b 100755 --- a/backend/python/autogptq/install.sh 
+++ b/backend/python/autogptq/install.sh @@ -1,34 +1,6 @@ #!/bin/bash -set -ex +set -e -BUILD_ISOLATION_FLAG="" +source $(dirname $0)/../common/libbackend.sh -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -uv venv ${MY_DIR}/venv -source ${MY_DIR}/venv/bin/activate - -if [ -f "requirements-install.txt" ]; then - # If we have a requirements-install.txt, it means that a package does not properly declare it's build time - # dependencies per PEP-517, so we have to set up the proper build environment ourselves, and then install - # the package without build isolation - BUILD_ISOLATION_FLAG="--no-build-isolation" - uv pip install --requirement ${MY_DIR}/requirements-install.txt -fi -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements.txt - -if [ -f "requirements-${BUILD_TYPE}.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements-${BUILD_TYPE}.txt -fi - -if [ -d "/opt/intel" ]; then - # Intel GPU: If the directory exists, we assume we are using the Intel image - # https://github.com/intel/intel-extension-for-pytorch/issues/538 - if [ -f "requirements-intel.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --requirement ${MY_DIR}/requirements-intel.txt - fi -fi - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file +installRequirements diff --git a/backend/python/autogptq/run.sh b/backend/python/autogptq/run.sh index ba599ddfd4c..375c07e5f42 100755 --- a/backend/python/autogptq/run.sh +++ b/backend/python/autogptq/run.sh @@ -1,10 +1,4 @@ #!/bin/bash +source $(dirname $0)/../common/libbackend.sh -## -## A bash script wrapper that runs the autogptq server - -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -source $MY_DIR/venv/bin/activate - -python $MY_DIR/autogptq.py $@ \ No newline at end of file +startBackend $@ \ No newline at end of file diff --git a/backend/python/autogptq/test.sh b/backend/python/autogptq/test.sh index 4b742b3f25e..6940b0661df 100755 --- a/backend/python/autogptq/test.sh +++ b/backend/python/autogptq/test.sh @@ -1,16 +1,6 @@ #!/bin/bash -## -## A bash script wrapper that runs python unittests +set -e -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -source $MY_DIR/venv/bin/activate - -if [ -f "${MY_DIR}/test.py" ]; then - pushd ${MY_DIR} - python -m unittest test.py - popd -else - echo "ERROR: No tests defined for backend!" 
- exit 1 -fi \ No newline at end of file +runUnittests diff --git a/backend/python/bark/Makefile b/backend/python/bark/Makefile index a6ba88cefab..031efa32bfd 100644 --- a/backend/python/bark/Makefile +++ b/backend/python/bark/Makefile @@ -26,4 +26,4 @@ backend_pb2_grpc.py backend_pb2.py: .PHONY: clean clean: protogen-clean - rm -rf venv \ No newline at end of file + rm -rf venv __pycache__ \ No newline at end of file diff --git a/backend/python/bark/ttsbark.py b/backend/python/bark/backend.py similarity index 100% rename from backend/python/bark/ttsbark.py rename to backend/python/bark/backend.py diff --git a/backend/python/bark/install.sh b/backend/python/bark/install.sh index 311203ca602..2829b927e3b 100755 --- a/backend/python/bark/install.sh +++ b/backend/python/bark/install.sh @@ -1,34 +1,6 @@ #!/bin/bash -set -ex +set -e -BUILD_ISOLATION_FLAG="" +source $(dirname $0)/../common/libbackend.sh -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -uv venv ${MY_DIR}/venv -source ${MY_DIR}/venv/bin/activate - -if [ -f "requirements-install.txt" ]; then - # If we have a requirements-install.txt, it means that a package does not properly declare it's build time - # dependencies per PEP-517, so we have to set up the proper build environment ourselves, and then install - # the package without build isolation - BUILD_ISOLATION_FLAG="--no-build-isolation" - uv pip install --requirement ${MY_DIR}/requirements-install.txt -fi -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements.txt - -if [ -f "requirements-${BUILD_TYPE}.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements-${BUILD_TYPE}.txt -fi - -if [ -d "/opt/intel" ]; then - # Intel GPU: If the directory exists, we assume we are using the Intel image - # https://github.com/intel/intel-extension-for-pytorch/issues/538 - if [ -f "requirements-intel.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --requirement ${MY_DIR}/requirements-intel.txt - fi -fi - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file +installRequirements diff --git a/backend/python/bark/run.sh b/backend/python/bark/run.sh index 76149f370ae..375c07e5f42 100755 --- a/backend/python/bark/run.sh +++ b/backend/python/bark/run.sh @@ -1,10 +1,4 @@ #!/bin/bash +source $(dirname $0)/../common/libbackend.sh -## -## A bash script wrapper that runs the ttsbark server - -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -source $MY_DIR/venv/bin/activate - -python $MY_DIR/ttsbark.py $@ \ No newline at end of file +startBackend $@ \ No newline at end of file diff --git a/backend/python/bark/test.py b/backend/python/bark/test.py index 3a79dd00e83..4c9f3cf6b0a 100644 --- a/backend/python/bark/test.py +++ b/backend/python/bark/test.py @@ -18,7 +18,7 @@ def setUp(self): """ This method sets up the gRPC service by starting the server """ - self.service = subprocess.Popen(["python3", "ttsbark.py", "--addr", "localhost:50051"]) + self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"]) time.sleep(10) def tearDown(self) -> None: diff --git a/backend/python/bark/test.sh b/backend/python/bark/test.sh index 4b742b3f25e..6940b0661df 100755 --- a/backend/python/bark/test.sh +++ b/backend/python/bark/test.sh @@ -1,16 +1,6 @@ #!/bin/bash -## -## A bash script wrapper that runs python unittests +set -e -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -source 
$MY_DIR/venv/bin/activate - -if [ -f "${MY_DIR}/test.py" ]; then - pushd ${MY_DIR} - python -m unittest test.py - popd -else - echo "ERROR: No tests defined for backend!" - exit 1 -fi \ No newline at end of file +runUnittests diff --git a/backend/python/common-env/transformers/Makefile b/backend/python/common-env/transformers/Makefile deleted file mode 100644 index 797af0832ef..00000000000 --- a/backend/python/common-env/transformers/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -CONDA_ENV_PATH = "transformers.yml" - -ifeq ($(BUILD_TYPE), cublas) - CONDA_ENV_PATH = "transformers-nvidia.yml" -endif - -ifeq ($(BUILD_TYPE), hipblas) - CONDA_ENV_PATH = "transformers-rocm.yml" -endif - -# Intel GPU are supposed to have dependencies installed in the main python -# environment, so we skip conda installation for SYCL builds. -# https://github.com/intel/intel-extension-for-pytorch/issues/538 -ifneq (,$(findstring sycl,$(BUILD_TYPE))) -export SKIP_CONDA=1 -endif - -.PHONY: transformers -transformers: - @echo "Installing $(CONDA_ENV_PATH)..." - bash install.sh $(CONDA_ENV_PATH) diff --git a/backend/python/common-env/transformers/install.sh b/backend/python/common-env/transformers/install.sh deleted file mode 100755 index ef768bc7986..00000000000 --- a/backend/python/common-env/transformers/install.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -set -ex - -SKIP_CONDA=${SKIP_CONDA:-0} -REQUIREMENTS_FILE=$1 - -# Check if environment exist -conda_env_exists(){ - ! conda list --name "${@}" >/dev/null 2>/dev/null -} - -if [ $SKIP_CONDA -eq 1 ]; then - echo "Skipping conda environment installation" -else - export PATH=$PATH:/opt/conda/bin - if conda_env_exists "transformers" ; then - echo "Creating virtual environment..." - conda env create --name transformers --file $REQUIREMENTS_FILE - echo "Virtual environment created." - else - echo "Virtual environment already exists." 
- fi -fi - -if [ -d "/opt/intel" ]; then - # Intel GPU: If the directory exists, we assume we are using the intel image - # (no conda env) - # https://github.com/intel/intel-extension-for-pytorch/issues/538 - pip install torch==2.1.0.post0 torchvision==0.16.0.post0 torchaudio==2.1.0.post0 intel-extension-for-pytorch==2.1.20+xpu oneccl_bind_pt==2.1.200+xpu intel-extension-for-transformers datasets sentencepiece tiktoken neural_speed optimum[openvino] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ -fi - -# If we didn't skip conda, activate the environment -# to install FlashAttention -if [ $SKIP_CONDA -eq 0 ]; then - source activate transformers -fi -if [[ $REQUIREMENTS_FILE =~ -nvidia.yml$ ]]; then - #TODO: FlashAttention is supported on nvidia and ROCm, but ROCm install can't be done this easily - pip install flash-attn --no-build-isolation -fi - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file diff --git a/backend/python/common-env/transformers/transformers-nvidia.yml b/backend/python/common-env/transformers/transformers-nvidia.yml deleted file mode 100644 index cf9f2eab98f..00000000000 --- a/backend/python/common-env/transformers/transformers-nvidia.yml +++ /dev/null @@ -1,125 +0,0 @@ -name: transformers -channels: - - defaults -dependencies: - - _libgcc_mutex=0.1=main - - _openmp_mutex=5.1=1_gnu - - bzip2=1.0.8=h7b6447c_0 - - ca-certificates=2023.08.22=h06a4308_0 - - ld_impl_linux-64=2.38=h1181459_1 - - libffi=3.4.4=h6a678d5_0 - - libgcc-ng=11.2.0=h1234567_1 - - libgomp=11.2.0=h1234567_1 - - libstdcxx-ng=11.2.0=h1234567_1 - - libuuid=1.41.5=h5eee18b_0 - - ncurses=6.4=h6a678d5_0 - - openssl=3.0.11=h7f8727e_2 - - pip=23.2.1=py311h06a4308_0 - - python=3.11.5=h955ad1f_0 - - readline=8.2=h5eee18b_0 - - setuptools=68.0.0=py311h06a4308_0 - - sqlite=3.41.2=h5eee18b_0 - - tk=8.6.12=h1ccaba5_0 - - wheel=0.41.2=py311h06a4308_0 - - xz=5.4.2=h5eee18b_0 - - zlib=1.2.13=h5eee18b_0 - - pip: - - accelerate==0.27.0 - - aiohttp==3.8.5 - - aiosignal==1.3.1 - - async-timeout==4.0.3 - - auto-gptq==0.7.1 - - attrs==23.1.0 - - bark==0.1.5 - - bitsandbytes==0.43.0 - - boto3==1.28.61 - - botocore==1.31.61 - - certifi==2023.7.22 - - TTS==0.22.0 - - charset-normalizer==3.3.0 - - datasets==2.14.5 - - sentence-transformers==2.5.1 # Updated Version - - sentencepiece==0.1.99 - - dill==0.3.7 - - einops==0.7.0 - - encodec==0.1.1 - - filelock==3.12.4 - - frozenlist==1.4.0 - - fsspec==2023.6.0 - - funcy==2.0 - - grpcio==1.63.0 - - huggingface-hub - - idna==3.4 - - jinja2==3.1.2 - - jmespath==1.0.1 - - markupsafe==2.1.3 - - mpmath==1.3.0 - - multidict==6.0.4 - - multiprocess==0.70.15 - - networkx - - numpy==1.26.0 - - nvidia-cublas-cu12==12.1.3.1 - - nvidia-cuda-cupti-cu12==12.1.105 - - nvidia-cuda-nvrtc-cu12==12.1.105 - - nvidia-cuda-runtime-cu12==12.1.105 - - nvidia-cudnn-cu12==8.9.2.26 - - nvidia-cufft-cu12==11.0.2.54 - - nvidia-curand-cu12==10.3.2.106 - - nvidia-cusolver-cu12==11.4.5.107 - - nvidia-cusparse-cu12==12.1.0.106 - - nvidia-nccl-cu12==2.18.1 - - nvidia-nvjitlink-cu12==12.2.140 - - nvidia-nvtx-cu12==12.1.105 - - optimum==1.17.1 - - packaging==23.2 - - pandas - - peft==0.5.0 - - protobuf==4.24.4 - - psutil==5.9.5 - - pyarrow==13.0.0 - - python-dateutil==2.8.2 - - pytz==2023.3.post1 - - pyyaml==6.0.1 - - regex==2023.10.3 - - requests==2.31.0 - - rouge==1.0.1 - - s3transfer==0.7.0 - - safetensors>=0.4.1 - - scipy==1.12.0 # Updated Version - - six==1.16.0 - - sympy==1.12 - - tokenizers - - torch==2.1.2 - - torchvision==0.16.2 - - 
torchaudio==2.1.2 - - tqdm==4.66.1 - - triton==2.1.0 - - typing-extensions==4.8.0 - - tzdata==2023.3 - - urllib3==1.26.17 - - xxhash==3.4.1 - - yarl==1.9.2 - - soundfile - - langid - - wget - - unidecode - - pyopenjtalk-prebuilt - - pypinyin - - inflect - - cn2an - - jieba - - eng_to_ipa - - openai-whisper - - matplotlib - - gradio==3.41.2 - - nltk - - sudachipy - - sudachidict_core - - vocos - - vllm>=0.4.0 - - transformers>=4.38.2 # Updated Version - - transformers_stream_generator==0.0.5 - - xformers==0.0.23.post1 - - rerankers[transformers] - - pydantic -prefix: /opt/conda/envs/transformers diff --git a/backend/python/common-env/transformers/transformers-rocm.yml b/backend/python/common-env/transformers/transformers-rocm.yml deleted file mode 100644 index 3fcc407dd02..00000000000 --- a/backend/python/common-env/transformers/transformers-rocm.yml +++ /dev/null @@ -1,113 +0,0 @@ -name: transformers -channels: - - defaults -dependencies: - - _libgcc_mutex=0.1=main - - _openmp_mutex=5.1=1_gnu - - bzip2=1.0.8=h7b6447c_0 - - ca-certificates=2023.08.22=h06a4308_0 - - ld_impl_linux-64=2.38=h1181459_1 - - libffi=3.4.4=h6a678d5_0 - - libgcc-ng=11.2.0=h1234567_1 - - libgomp=11.2.0=h1234567_1 - - libstdcxx-ng=11.2.0=h1234567_1 - - libuuid=1.41.5=h5eee18b_0 - - ncurses=6.4=h6a678d5_0 - - openssl=3.0.11=h7f8727e_2 - - pip=23.2.1=py311h06a4308_0 - - python=3.11.5=h955ad1f_0 - - readline=8.2=h5eee18b_0 - - setuptools=68.0.0=py311h06a4308_0 - - sqlite=3.41.2=h5eee18b_0 - - tk=8.6.12=h1ccaba5_0 - - wheel=0.41.2=py311h06a4308_0 - - xz=5.4.2=h5eee18b_0 - - zlib=1.2.13=h5eee18b_0 - - pip: - - --pre - - --extra-index-url https://download.pytorch.org/whl/nightly/ - - accelerate==0.27.0 - - auto-gptq==0.7.1 - - aiohttp==3.8.5 - - aiosignal==1.3.1 - - async-timeout==4.0.3 - - attrs==23.1.0 - - bark==0.1.5 - - boto3==1.28.61 - - botocore==1.31.61 - - certifi==2023.7.22 - - TTS==0.22.0 - - charset-normalizer==3.3.0 - - datasets==2.14.5 - - sentence-transformers==2.5.1 # Updated Version - - sentencepiece==0.1.99 - - dill==0.3.7 - - einops==0.7.0 - - encodec==0.1.1 - - filelock==3.12.4 - - frozenlist==1.4.0 - - fsspec==2023.6.0 - - funcy==2.0 - - grpcio==1.63.0 - - huggingface-hub - - idna==3.4 - - jinja2==3.1.2 - - jmespath==1.0.1 - - markupsafe==2.1.3 - - mpmath==1.3.0 - - multidict==6.0.4 - - multiprocess==0.70.15 - - networkx - - numpy==1.26.0 - - packaging==23.2 - - pandas - - peft==0.5.0 - - protobuf==4.24.4 - - psutil==5.9.5 - - pyarrow==13.0.0 - - python-dateutil==2.8.2 - - pytz==2023.3.post1 - - pyyaml==6.0.1 - - regex==2023.10.3 - - requests==2.31.0 - - rouge==1.0.1 - - s3transfer==0.7.0 - - safetensors>=0.4.1 - - scipy==1.12.0 # Updated Version - - six==1.16.0 - - sympy==1.12 - - tokenizers - - torch - - torchaudio - - tqdm==4.66.1 - - triton==2.1.0 - - typing-extensions==4.8.0 - - tzdata==2023.3 - - urllib3==1.26.17 - - xxhash==3.4.1 - - yarl==1.9.2 - - soundfile - - langid - - wget - - unidecode - - optimum==1.17.1 - - pyopenjtalk-prebuilt - - pypinyin - - inflect - - cn2an - - jieba - - eng_to_ipa - - openai-whisper - - matplotlib - - gradio==3.41.2 - - nltk - - sudachipy - - sudachidict_core - - vocos - - vllm>=0.4.0 - - transformers>=4.38.2 # Updated Version - - transformers_stream_generator==0.0.5 - - xformers==0.0.23.post1 - - rerankers[transformers] - - pydantic -prefix: /opt/conda/envs/transformers diff --git a/backend/python/common-env/transformers/transformers.yml b/backend/python/common-env/transformers/transformers.yml deleted file mode 100644 index 4cc66b11619..00000000000 --- 
a/backend/python/common-env/transformers/transformers.yml +++ /dev/null @@ -1,118 +0,0 @@ -name: transformers -channels: - - defaults -dependencies: - - _libgcc_mutex=0.1=main - - _openmp_mutex=5.1=1_gnu - - bzip2=1.0.8=h7b6447c_0 - - ca-certificates=2023.08.22=h06a4308_0 - - ld_impl_linux-64=2.38=h1181459_1 - - libffi=3.4.4=h6a678d5_0 - - libgcc-ng=11.2.0=h1234567_1 - - libgomp=11.2.0=h1234567_1 - - libstdcxx-ng=11.2.0=h1234567_1 - - libuuid=1.41.5=h5eee18b_0 - - ncurses=6.4=h6a678d5_0 - - openssl=3.0.11=h7f8727e_2 - - pip=23.2.1=py311h06a4308_0 - - python=3.11.5=h955ad1f_0 - - readline=8.2=h5eee18b_0 - - setuptools=68.0.0=py311h06a4308_0 - - sqlite=3.41.2=h5eee18b_0 - - tk=8.6.12=h1ccaba5_0 - - wheel=0.41.2=py311h06a4308_0 - - xz=5.4.2=h5eee18b_0 - - zlib=1.2.13=h5eee18b_0 - - pip: - - accelerate==0.27.0 - - aiohttp==3.8.5 - - aiosignal==1.3.1 - - auto-gptq==0.7.1 - - async-timeout==4.0.3 - - attrs==23.1.0 - - bark==0.1.5 - - boto3==1.28.61 - - botocore==1.31.61 - - certifi==2023.7.22 - - coloredlogs==15.0.1 - - TTS==0.22.0 - - charset-normalizer==3.3.0 - - datasets==2.14.5 - - sentence-transformers==2.5.1 # Updated Version - - sentencepiece==0.1.99 - - dill==0.3.7 - - einops==0.7.0 - - encodec==0.1.1 - - filelock==3.12.4 - - frozenlist==1.4.0 - - fsspec==2023.6.0 - - funcy==2.0 - - grpcio==1.63.0 - - huggingface-hub - - humanfriendly==10.0 - - idna==3.4 - - jinja2==3.1.2 - - jmespath==1.0.1 - - markupsafe==2.1.3 - - mpmath==1.3.0 - - multidict==6.0.4 - - multiprocess==0.70.15 - - networkx - - numpy==1.26.0 - - onnx==1.15.0 - - openvino==2024.1.0 - - openvino-telemetry==2024.1.0 - - optimum[openvino]==1.19.1 - - optimum-intel==1.16.1 - - packaging==23.2 - - pandas - - peft==0.5.0 - - protobuf==4.24.4 - - psutil==5.9.5 - - pyarrow==13.0.0 - - python-dateutil==2.8.2 - - pytz==2023.3.post1 - - pyyaml==6.0.1 - - regex==2023.10.3 - - requests==2.31.0 - - rouge==1.0.1 - - s3transfer==0.7.0 - - safetensors>=0.4.1 - - scipy==1.12.0 # Updated Version - - six==1.16.0 - - sympy==1.12 - - tokenizers - - torch==2.1.2 - - torchvision==0.16.2 - - torchaudio==2.1.2 - - tqdm==4.66.1 - - triton==2.1.0 - - typing-extensions==4.8.0 - - tzdata==2023.3 - - urllib3==1.26.17 - - xxhash==3.4.1 - - yarl==1.9.2 - - soundfile - - langid - - wget - - unidecode - - pyopenjtalk-prebuilt - - pypinyin - - inflect - - cn2an - - jieba - - eng_to_ipa - - openai-whisper - - matplotlib - - gradio==3.41.2 - - nltk - - sudachipy - - sudachidict_core - - vocos - - vllm>=0.4.0 - - transformers>=4.38.2 # Updated Version - - transformers_stream_generator==0.0.5 - - xformers==0.0.23.post1 - - rerankers[transformers] - - pydantic -prefix: /opt/conda/envs/transformers diff --git a/backend/python/common/libbackend.sh b/backend/python/common/libbackend.sh new file mode 100644 index 00000000000..b11378bd015 --- /dev/null +++ b/backend/python/common/libbackend.sh @@ -0,0 +1,201 @@ + + +# init handles the setup of the library +# +# use the library by adding the following line to a script: +# source $(dirname $0)/../common/libbackend.sh +# +# If you want to limit what targets a backend can be used on, set the variable LIMIT_TARGETS to a +# space separated list of valid targets BEFORE sourcing the library, for example to only allow a backend +# to be used on CUDA and CPU backends: +# +# LIMIT_TARGETS="cublas cpu" +# source $(dirname $0)/../common/libbackend.sh +# +# You can use any valid BUILD_TYPE or BUILD_PROFILE, if you need to limit a backend to CUDA 12 only: +# +# LIMIT_TARGETS="cublas12" +# source $(dirname $0)/../common/libbackend.sh 
+#
+function init() {
+    BACKEND_NAME=${PWD##*/}
+    MY_DIR=$(realpath `dirname $0`)
+    BUILD_PROFILE=$(getBuildProfile)
+
+    # If a backend has defined a list of valid build profiles...
+    if [ ! -z "${LIMIT_TARGETS}" ]; then
+        isValidTarget=$(checkTargets ${LIMIT_TARGETS})
+        if [ ${isValidTarget} != true ]; then
+            echo "${BACKEND_NAME} can only be used on the following targets: ${LIMIT_TARGETS}"
+            exit 0
+        fi
+    fi
+
+    echo "Initializing libbackend for ${BACKEND_NAME}"
+}
+
+# getBuildProfile will inspect the system to determine which build profile is appropriate:
+# returns one of the following:
+# - cublas11
+# - cublas12
+# - hipblas
+# - intel
+function getBuildProfile() {
+    # First check if we are a cublas build, and if so report the correct build profile
+    if [ x"${BUILD_TYPE}" == "xcublas" ]; then
+        if [ ! -z ${CUDA_MAJOR_VERSION} ]; then
+            # If we have been given a CUDA version, we trust it
+            echo ${BUILD_TYPE}${CUDA_MAJOR_VERSION}
+        else
+            # We don't know what version of cuda we are, so we report ourselves as a generic cublas
+            echo ${BUILD_TYPE}
+        fi
+        return 0
+    fi
+
+    # If /opt/intel exists, then we are doing an intel/ARC build
+    if [ -d "/opt/intel" ]; then
+        echo "intel"
+        return 0
+    fi
+
+    # For any other value of BUILD_TYPE, we don't need any special handling/discovery
+    if [ ! -z ${BUILD_TYPE} ]; then
+        echo ${BUILD_TYPE}
+        return 0
+    fi
+
+    # If there is no BUILD_TYPE set at all, report a build profile of cpu, since we aren't building for any GPU targets
+    echo "cpu"
+}
+
+# ensureVenv makes sure that the venv for the backend both exists and is activated.
+#
+# This function is idempotent, so you can call it as many times as you want and it will
+# always result in an activated virtual environment
+function ensureVenv() {
+    if [ ! -d "${MY_DIR}/venv" ]; then
+        uv venv ${MY_DIR}/venv
+        echo "virtualenv created"
+    fi
+
+    if [ "x${VIRTUAL_ENV}" != "x${MY_DIR}/venv" ]; then
+        source ${MY_DIR}/venv/bin/activate
+        echo "virtualenv activated"
+    fi
+
+    echo "activated virtualenv has been ensured"
+}
+
+# installRequirements looks for several requirements files and, if they exist, runs the install for them in order:
+#
+# - requirements-install.txt
+# - requirements.txt
+# - requirements-${BUILD_TYPE}.txt
+# - requirements-${BUILD_PROFILE}.txt
+#
+# BUILD_PROFILE is a more specific version of BUILD_TYPE, ex: cublas11 or cublas12
+# it can also include some options that we do not have BUILD_TYPES for, ex: intel
+#
+# NOTE: for BUILD_PROFILE==intel, this function does NOT automatically use the Intel python package index.
+# you may want to add the following line to a requirements-intel.txt if you use one:
+#
+# --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+#
+# If you need to add extra flags into the pip install command you can do so by setting the variable EXTRA_PIP_INSTALL_FLAGS
+# before calling installRequirements.
+# For example:
+#
+# source $(dirname $0)/../common/libbackend.sh
+# EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation"
+# installRequirements
+function installRequirements() {
+    ensureVenv
+
+    # These are the requirements files we will attempt to install, in order
+    declare -a requirementFiles=(
+        "${MY_DIR}/requirements-install.txt"
+        "${MY_DIR}/requirements.txt"
+        "${MY_DIR}/requirements-${BUILD_TYPE}.txt"
+    )
+
+    if [ "x${BUILD_TYPE}" != "x${BUILD_PROFILE}" ]; then
+        requirementFiles+=("${MY_DIR}/requirements-${BUILD_PROFILE}.txt")
+    fi
+
+    for reqFile in ${requirementFiles[@]}; do
+        if [ -f ${reqFile} ]; then
+            echo "starting requirements install for ${reqFile}"
+            uv pip install ${EXTRA_PIP_INSTALL_FLAGS} --requirement ${reqFile}
+            echo "finished requirements install for ${reqFile}"
+        fi
+    done
+}
+
+# startBackend discovers and runs the backend GRPC server
+#
+# You can specify a specific backend file to execute by setting BACKEND_FILE before calling startBackend.
+# example:
+#
+# source ../common/libbackend.sh
+# BACKEND_FILE="${MY_DIR}/source/backend.py"
+# startBackend $@
+#
+# valid filenames for autodiscovered backend servers are:
+# - server.py
+# - backend.py
+# - ${BACKEND_NAME}.py
+function startBackend() {
+    ensureVenv
+
+    if [ ! -z ${BACKEND_FILE} ]; then
+        python ${BACKEND_FILE} $@
+    elif [ -e "${MY_DIR}/server.py" ]; then
+        python ${MY_DIR}/server.py $@
+    elif [ -e "${MY_DIR}/backend.py" ]; then
+        python ${MY_DIR}/backend.py $@
+    elif [ -e "${MY_DIR}/${BACKEND_NAME}.py" ]; then
+        python ${MY_DIR}/${BACKEND_NAME}.py $@
+    fi
+}
+
+function limitTarget() {
+    target=$1
+    echo $target
+}
+
+function runUnittests() {
+    ensureVenv
+    if [ -f "${MY_DIR}/test.py" ]; then
+        pushd ${MY_DIR}
+        python -m unittest test.py
+        popd
+    else
+        echo "no tests defined for ${BACKEND_NAME}"
+    fi
+}
+
+##################################################################################
+# Below here are helper functions not intended to be used outside of the library #
+##################################################################################
+
+# checkTargets determines if the current BUILD_TYPE or BUILD_PROFILE is in a list of valid targets
+function checkTargets() {
+    # Collect all provided targets into a variable and...
+    targets=$@
+    # ...convert it into an array
+    declare -a targets=($targets)
+
+    for target in ${targets[@]}; do
+        if [ "x${BUILD_TYPE}" == "x${target}" ]; then
+            echo true
+            return 0
+        fi
+        if [ "x${BUILD_PROFILE}" == "x${target}" ]; then
+            echo true
+            return 0
+        fi
+    done
+    echo false
+}
+
+init
\ No newline at end of file
diff --git a/backend/python/common/template/Makefile b/backend/python/common/template/Makefile
new file mode 100644
index 00000000000..13a74c5347e
--- /dev/null
+++ b/backend/python/common/template/Makefile
@@ -0,0 +1,17 @@
+.PHONY: template
+template: protogen
+	bash install.sh
+
+.PHONY: protogen
+protogen: backend_pb2_grpc.py backend_pb2.py
+
+.PHONY: protogen-clean
+protogen-clean:
+	$(RM) backend_pb2_grpc.py backend_pb2.py
+
+backend_pb2_grpc.py backend_pb2.py:
+	python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=.
backend.proto + +.PHONY: clean +clean: protogen-clean + rm -rf venv __pycache__ \ No newline at end of file diff --git a/backend/python/common/template/backend.py b/backend/python/common/template/backend.py new file mode 100755 index 00000000000..7592d3a5ade --- /dev/null +++ b/backend/python/common/template/backend.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python3 +import grpc +import backend_pb2 +import backend_pb2_grpc diff --git a/backend/python/common/template/install.sh b/backend/python/common/template/install.sh new file mode 100755 index 00000000000..2829b927e3b --- /dev/null +++ b/backend/python/common/template/install.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -e + +source $(dirname $0)/../common/libbackend.sh + +installRequirements diff --git a/backend/python/common/template/requirements.txt b/backend/python/common/template/requirements.txt new file mode 100644 index 00000000000..8859595c3a4 --- /dev/null +++ b/backend/python/common/template/requirements.txt @@ -0,0 +1,2 @@ +grpcio==1.63.0 +protobuf \ No newline at end of file diff --git a/backend/python/common/template/run.sh b/backend/python/common/template/run.sh new file mode 100755 index 00000000000..375c07e5f42 --- /dev/null +++ b/backend/python/common/template/run.sh @@ -0,0 +1,4 @@ +#!/bin/bash +source $(dirname $0)/../common/libbackend.sh + +startBackend $@ \ No newline at end of file diff --git a/backend/python/common/template/test.sh b/backend/python/common/template/test.sh new file mode 100755 index 00000000000..6940b0661df --- /dev/null +++ b/backend/python/common/template/test.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -e + +source $(dirname $0)/../common/libbackend.sh + +runUnittests diff --git a/backend/python/coqui/Makefile b/backend/python/coqui/Makefile index 4196d26d501..26999ef2751 100644 --- a/backend/python/coqui/Makefile +++ b/backend/python/coqui/Makefile @@ -26,4 +26,4 @@ backend_pb2_grpc.py backend_pb2.py: .PHONY: clean clean: protogen-clean - rm -rf venv \ No newline at end of file + rm -rf venv __pycache__ \ No newline at end of file diff --git a/backend/python/coqui/coqui_server.py b/backend/python/coqui/backend.py similarity index 100% rename from backend/python/coqui/coqui_server.py rename to backend/python/coqui/backend.py diff --git a/backend/python/coqui/install.sh b/backend/python/coqui/install.sh index 311203ca602..2829b927e3b 100755 --- a/backend/python/coqui/install.sh +++ b/backend/python/coqui/install.sh @@ -1,34 +1,6 @@ #!/bin/bash -set -ex +set -e -BUILD_ISOLATION_FLAG="" +source $(dirname $0)/../common/libbackend.sh -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -uv venv ${MY_DIR}/venv -source ${MY_DIR}/venv/bin/activate - -if [ -f "requirements-install.txt" ]; then - # If we have a requirements-install.txt, it means that a package does not properly declare it's build time - # dependencies per PEP-517, so we have to set up the proper build environment ourselves, and then install - # the package without build isolation - BUILD_ISOLATION_FLAG="--no-build-isolation" - uv pip install --requirement ${MY_DIR}/requirements-install.txt -fi -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements.txt - -if [ -f "requirements-${BUILD_TYPE}.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements-${BUILD_TYPE}.txt -fi - -if [ -d "/opt/intel" ]; then - # Intel GPU: If the directory exists, we assume we are using the Intel image - # https://github.com/intel/intel-extension-for-pytorch/issues/538 - if [ -f "requirements-intel.txt" ]; then - uv pip install 
${BUILD_ISOLATION_FLAG} --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --requirement ${MY_DIR}/requirements-intel.txt - fi -fi - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file +installRequirements diff --git a/backend/python/coqui/run.sh b/backend/python/coqui/run.sh index eb0b31261e3..375c07e5f42 100755 --- a/backend/python/coqui/run.sh +++ b/backend/python/coqui/run.sh @@ -1,10 +1,4 @@ #!/bin/bash +source $(dirname $0)/../common/libbackend.sh -## -## A bash script wrapper that runs the coqui server - -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -source $MY_DIR/venv/bin/activate - -python $MY_DIR/coqui_server.py $@ \ No newline at end of file +startBackend $@ \ No newline at end of file diff --git a/backend/python/coqui/test.py b/backend/python/coqui/test.py index 13bbc8c385c..d1418fa3c80 100644 --- a/backend/python/coqui/test.py +++ b/backend/python/coqui/test.py @@ -18,7 +18,7 @@ def setUp(self): """ This method sets up the gRPC service by starting the server """ - self.service = subprocess.Popen(["python3", "coqui_server.py", "--addr", "localhost:50051"]) + self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"]) time.sleep(10) def tearDown(self) -> None: diff --git a/backend/python/coqui/test.sh b/backend/python/coqui/test.sh index 4b742b3f25e..6940b0661df 100755 --- a/backend/python/coqui/test.sh +++ b/backend/python/coqui/test.sh @@ -1,16 +1,6 @@ #!/bin/bash -## -## A bash script wrapper that runs python unittests +set -e -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -source $MY_DIR/venv/bin/activate - -if [ -f "${MY_DIR}/test.py" ]; then - pushd ${MY_DIR} - python -m unittest test.py - popd -else - echo "ERROR: No tests defined for backend!" 
- exit 1 -fi \ No newline at end of file +runUnittests diff --git a/backend/python/diffusers/Makefile b/backend/python/diffusers/Makefile index beec821dd54..66bf03e81d4 100644 --- a/backend/python/diffusers/Makefile +++ b/backend/python/diffusers/Makefile @@ -36,4 +36,4 @@ backend_pb2_grpc.py backend_pb2.py: .PHONY: clean clean: protogen-clean - rm -rf venv \ No newline at end of file + rm -rf venv __pycache__ \ No newline at end of file diff --git a/backend/python/diffusers/backend_diffusers.py b/backend/python/diffusers/backend.py similarity index 100% rename from backend/python/diffusers/backend_diffusers.py rename to backend/python/diffusers/backend.py diff --git a/backend/python/diffusers/install.sh b/backend/python/diffusers/install.sh index 311203ca602..2829b927e3b 100755 --- a/backend/python/diffusers/install.sh +++ b/backend/python/diffusers/install.sh @@ -1,34 +1,6 @@ #!/bin/bash -set -ex +set -e -BUILD_ISOLATION_FLAG="" +source $(dirname $0)/../common/libbackend.sh -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -uv venv ${MY_DIR}/venv -source ${MY_DIR}/venv/bin/activate - -if [ -f "requirements-install.txt" ]; then - # If we have a requirements-install.txt, it means that a package does not properly declare it's build time - # dependencies per PEP-517, so we have to set up the proper build environment ourselves, and then install - # the package without build isolation - BUILD_ISOLATION_FLAG="--no-build-isolation" - uv pip install --requirement ${MY_DIR}/requirements-install.txt -fi -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements.txt - -if [ -f "requirements-${BUILD_TYPE}.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements-${BUILD_TYPE}.txt -fi - -if [ -d "/opt/intel" ]; then - # Intel GPU: If the directory exists, we assume we are using the Intel image - # https://github.com/intel/intel-extension-for-pytorch/issues/538 - if [ -f "requirements-intel.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --requirement ${MY_DIR}/requirements-intel.txt - fi -fi - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file +installRequirements diff --git a/backend/python/diffusers/requirements-intel.txt b/backend/python/diffusers/requirements-intel.txt index 979c5c18082..490c68b0b91 100644 --- a/backend/python/diffusers/requirements-intel.txt +++ b/backend/python/diffusers/requirements-intel.txt @@ -1,3 +1,4 @@ +--index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ intel-extension-for-pytorch torchaudio torchvision \ No newline at end of file diff --git a/backend/python/diffusers/run.sh b/backend/python/diffusers/run.sh index 03d9c500658..375c07e5f42 100755 --- a/backend/python/diffusers/run.sh +++ b/backend/python/diffusers/run.sh @@ -1,10 +1,4 @@ #!/bin/bash +source $(dirname $0)/../common/libbackend.sh -## -## A bash script wrapper that runs the GRPC backend - -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -source $MY_DIR/venv/bin/activate - -python $MY_DIR/backend_diffusers.py $@ \ No newline at end of file +startBackend $@ \ No newline at end of file diff --git a/backend/python/diffusers/test.py b/backend/python/diffusers/test.py index 7d685f7810c..b5e381ba86b 100644 --- a/backend/python/diffusers/test.py +++ b/backend/python/diffusers/test.py @@ -18,7 +18,7 @@ def setUp(self): """ This method sets up the gRPC service by starting the server """ - self.service = subprocess.Popen(["python3", 
"backend_diffusers.py", "--addr", "localhost:50051"]) + self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"]) def tearDown(self) -> None: """ diff --git a/backend/python/diffusers/test.sh b/backend/python/diffusers/test.sh index 4b742b3f25e..6940b0661df 100755 --- a/backend/python/diffusers/test.sh +++ b/backend/python/diffusers/test.sh @@ -1,16 +1,6 @@ #!/bin/bash -## -## A bash script wrapper that runs python unittests +set -e -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -source $MY_DIR/venv/bin/activate - -if [ -f "${MY_DIR}/test.py" ]; then - pushd ${MY_DIR} - python -m unittest test.py - popd -else - echo "ERROR: No tests defined for backend!" - exit 1 -fi \ No newline at end of file +runUnittests diff --git a/backend/python/exllama/Makefile b/backend/python/exllama/Makefile index 82001861a4b..e6a678810c3 100644 --- a/backend/python/exllama/Makefile +++ b/backend/python/exllama/Makefile @@ -22,4 +22,4 @@ backend_pb2_grpc.py backend_pb2.py: .PHONY: clean clean: protogen-clean - $(RM) -r venv source \ No newline at end of file + $(RM) -r venv source __pycache__ \ No newline at end of file diff --git a/backend/python/exllama/exllama.py b/backend/python/exllama/backend.py similarity index 97% rename from backend/python/exllama/exllama.py rename to backend/python/exllama/backend.py index c49e627a34d..58d1392c5ee 100755 --- a/backend/python/exllama/exllama.py +++ b/backend/python/exllama/backend.py @@ -14,9 +14,9 @@ import torch.nn.functional as F from torch import version as torch_version -from tokenizer import ExLlamaTokenizer -from generator import ExLlamaGenerator -from model import ExLlama, ExLlamaCache, ExLlamaConfig +from source.tokenizer import ExLlamaTokenizer +from source.generator import ExLlamaGenerator +from source.model import ExLlama, ExLlamaCache, ExLlamaConfig _ONE_DAY_IN_SECONDS = 60 * 60 * 24 diff --git a/backend/python/exllama/exllama.yml b/backend/python/exllama/exllama.yml deleted file mode 100644 index 80f52af5ecf..00000000000 --- a/backend/python/exllama/exllama.yml +++ /dev/null @@ -1,56 +0,0 @@ -name: exllama -channels: - - defaults -dependencies: - - _libgcc_mutex=0.1=main - - _openmp_mutex=5.1=1_gnu - - bzip2=1.0.8=h7b6447c_0 - - ca-certificates=2023.08.22=h06a4308_0 - - ld_impl_linux-64=2.38=h1181459_1 - - libffi=3.4.4=h6a678d5_0 - - libgcc-ng=11.2.0=h1234567_1 - - libgomp=11.2.0=h1234567_1 - - libstdcxx-ng=11.2.0=h1234567_1 - - libuuid=1.41.5=h5eee18b_0 - - ncurses=6.4=h6a678d5_0 - - openssl=3.0.11=h7f8727e_2 - - pip=23.2.1=py311h06a4308_0 - - python=3.11.5=h955ad1f_0 - - readline=8.2=h5eee18b_0 - - setuptools=68.0.0=py311h06a4308_0 - - sqlite=3.41.2=h5eee18b_0 - - tk=8.6.12=h1ccaba5_0 - - tzdata=2023c=h04d1e81_0 - - wheel=0.41.2=py311h06a4308_0 - - xz=5.4.2=h5eee18b_0 - - zlib=1.2.13=h5eee18b_0 - - pip: - - filelock==3.12.4 - - fsspec==2023.9.2 - - grpcio==1.63.0 - - jinja2==3.1.2 - - markupsafe==2.1.3 - - mpmath==1.3.0 - - networkx==3.1 - - ninja==1.11.1 - - protobuf==4.24.4 - - nvidia-cublas-cu12==12.1.3.1 - - nvidia-cuda-cupti-cu12==12.1.105 - - nvidia-cuda-nvrtc-cu12==12.1.105 - - nvidia-cuda-runtime-cu12==12.1.105 - - nvidia-cudnn-cu12==8.9.2.26 - - nvidia-cufft-cu12==11.0.2.54 - - nvidia-curand-cu12==10.3.2.106 - - nvidia-cusolver-cu12==11.4.5.107 - - nvidia-cusparse-cu12==12.1.0.106 - - nvidia-nccl-cu12==2.18.1 - - nvidia-nvjitlink-cu12==12.2.140 - - nvidia-nvtx-cu12==12.1.105 - - safetensors==0.3.2 - - sentencepiece==0.1.99 - - sympy==1.12 - - torch==2.1.0 - - triton==2.1.0 
- - typing-extensions==4.8.0 - - numpy -prefix: /opt/conda/envs/exllama diff --git a/backend/python/exllama/install.sh b/backend/python/exllama/install.sh index 10b7519d2b0..d33c435600d 100755 --- a/backend/python/exllama/install.sh +++ b/backend/python/exllama/install.sh @@ -1,37 +1,13 @@ #!/bin/bash -set -ex +set -e -BUILD_ISOLATION_FLAG="" +LIMIT_TARGETS="cublas" -if [ "$BUILD_TYPE" != "cublas" ]; then - echo "[exllama] Attention!!! Nvidia GPU is required - skipping installation" - exit 0 -fi +source $(dirname $0)/../common/libbackend.sh -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -uv venv ${MY_DIR}/venv -source ${MY_DIR}/venv/bin/activate - -if [ -f "requirements-install.txt" ]; then - # If we have a requirements-install.txt, it means that a package does not properly declare it's build time - # dependencies per PEP-517, so we have to set up the proper build environment ourselves, and then install - # the package without build isolation - BUILD_ISOLATION_FLAG="--no-build-isolation" - uv pip install --requirement ${MY_DIR}/requirements-install.txt -fi - -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements.txt - -if [ -f "requirements-${BUILD_TYPE}.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements-${BUILD_TYPE}.txt -fi +installRequirements git clone https://github.com/turboderp/exllama $MY_DIR/source uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/source/requirements.txt -cp -rfv ./*py $MY_DIR/source/ - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file +cp -v ./*py $MY_DIR/source/ diff --git a/backend/python/exllama/run.sh b/backend/python/exllama/run.sh index 700247707dd..d6c36080cab 100755 --- a/backend/python/exllama/run.sh +++ b/backend/python/exllama/run.sh @@ -1,10 +1,7 @@ #!/bin/bash +LIMIT_TARGETS="cublas" +BACKEND_FILE="./source/backend.py" -## -## A bash script wrapper that runs the exllama server with uv +source $(dirname $0)/../common/libbackend.sh -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -source $MY_DIR/venv/bin/activate - -python $MY_DIR/source/exllama.py $@ \ No newline at end of file +startBackend $@ \ No newline at end of file diff --git a/backend/python/exllama/test.sh b/backend/python/exllama/test.sh index 4b742b3f25e..6940b0661df 100755 --- a/backend/python/exllama/test.sh +++ b/backend/python/exllama/test.sh @@ -1,16 +1,6 @@ #!/bin/bash -## -## A bash script wrapper that runs python unittests +set -e -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -source $MY_DIR/venv/bin/activate - -if [ -f "${MY_DIR}/test.py" ]; then - pushd ${MY_DIR} - python -m unittest test.py - popd -else - echo "ERROR: No tests defined for backend!" 
- exit 1 -fi \ No newline at end of file +runUnittests diff --git a/backend/python/exllama2/Makefile b/backend/python/exllama2/Makefile index 09db200ddac..cc3e0f0d75d 100644 --- a/backend/python/exllama2/Makefile +++ b/backend/python/exllama2/Makefile @@ -20,4 +20,4 @@ backend_pb2_grpc.py backend_pb2.py: .PHONY: clean clean: protogen-clean - $(RM) -r venv source \ No newline at end of file + $(RM) -r venv source __pycache__ \ No newline at end of file diff --git a/backend/python/exllama2/exllama2_backend.py b/backend/python/exllama2/backend.py similarity index 100% rename from backend/python/exllama2/exllama2_backend.py rename to backend/python/exllama2/backend.py diff --git a/backend/python/exllama2/install.sh b/backend/python/exllama2/install.sh index 3bb030fb639..1a27182d322 100755 --- a/backend/python/exllama2/install.sh +++ b/backend/python/exllama2/install.sh @@ -1,44 +1,16 @@ #!/bin/bash set -e -## -## A bash script installs the required dependencies of VALL-E-X and prepares the environment -EXLLAMA2_VERSION=c0ddebaaaf8ffd1b3529c2bb654e650bce2f790f - -BUILD_ISOLATION_FLAG="" - -if [ "$BUILD_TYPE" != "cublas" ]; then - echo "[exllama] Attention!!! Nvidia GPU is required - skipping installation" - exit 0 -fi - -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" -uv venv ${MY_DIR}/venv -source ${MY_DIR}/venv/bin/activate - -if [ -f "requirements-install.txt" ]; then - # If we have a requirements-install.txt, it means that a package does not properly declare it's build time - # dependencies per PEP-517, so we have to set up the proper build environment ourselves, and then install - # the package without build isolation - BUILD_ISOLATION_FLAG="--no-build-isolation" - uv pip install --requirement ${MY_DIR}/requirements-install.txt -fi +LIMIT_TARGETS="cublas" +EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation" +EXLLAMA2_VERSION=c0ddebaaaf8ffd1b3529c2bb654e650bce2f790f -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements.txt +source $(dirname $0)/../common/libbackend.sh -if [ -f "requirements-${BUILD_TYPE}.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements-${BUILD_TYPE}.txt -fi +installRequirements git clone https://github.com/turboderp/exllamav2 $MY_DIR/source pushd ${MY_DIR}/source && git checkout -b build ${EXLLAMA2_VERSION} && popd -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/source/requirements.txt # This installs exllamav2 in JIT mode so it will compile the appropriate torch extension at runtime -EXLLAMA_NOCOMPILE= uv pip install ${BUILD_ISOLATION_FLAG} ${MY_DIR}/source/ - -cp -rfv ./*py $MY_DIR/source/ - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file +EXLLAMA_NOCOMPILE= uv pip install ${EXTRA_PIP_INSTALL_FLAGS} ${MY_DIR}/source/ diff --git a/backend/python/exllama2/run.sh b/backend/python/exllama2/run.sh index cb390a31fcd..1afc39848c7 100755 --- a/backend/python/exllama2/run.sh +++ b/backend/python/exllama2/run.sh @@ -1,10 +1,6 @@ #!/bin/bash +LIMIT_TARGETS="cublas" -## -## A bash script wrapper that runs the exllama2 server +source $(dirname $0)/../common/libbackend.sh -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -source $MY_DIR/venv/bin/activate - -python $MY_DIR/source/exllama2_backend.py $@ \ No newline at end of file +startBackend $@ \ No newline at end of file diff --git a/backend/python/exllama2/test.sh b/backend/python/exllama2/test.sh index 4b742b3f25e..6940b0661df 100755 --- a/backend/python/exllama2/test.sh +++ b/backend/python/exllama2/test.sh @@ -1,16 +1,6 
@@ #!/bin/bash -## -## A bash script wrapper that runs python unittests +set -e -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -source $MY_DIR/venv/bin/activate - -if [ -f "${MY_DIR}/test.py" ]; then - pushd ${MY_DIR} - python -m unittest test.py - popd -else - echo "ERROR: No tests defined for backend!" - exit 1 -fi \ No newline at end of file +runUnittests diff --git a/backend/python/mamba/Makefile b/backend/python/mamba/Makefile index 47c423692a2..52b1c53a4d2 100644 --- a/backend/python/mamba/Makefile +++ b/backend/python/mamba/Makefile @@ -26,4 +26,4 @@ backend_pb2_grpc.py backend_pb2.py: .PHONY: clean clean: protogen-clean - $(RM) -r venv \ No newline at end of file + $(RM) -r venv __pycache__ \ No newline at end of file diff --git a/backend/python/mamba/backend_mamba.py b/backend/python/mamba/backend.py similarity index 100% rename from backend/python/mamba/backend_mamba.py rename to backend/python/mamba/backend.py diff --git a/backend/python/mamba/install.sh b/backend/python/mamba/install.sh index 84612960aa8..db18eefc9ac 100755 --- a/backend/python/mamba/install.sh +++ b/backend/python/mamba/install.sh @@ -1,39 +1,9 @@ #!/bin/bash -set -ex +set -e -if [ "$BUILD_TYPE" != "cublas" ]; then - echo "[mamba] Attention!!! nvcc is required - skipping installation" - exit 0 -fi +LIMIT_TARGETS="cublas" +EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation" -BUILD_ISOLATION_FLAG="" +source $(dirname $0)/../common/libbackend.sh -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -uv venv ${MY_DIR}/venv -source ${MY_DIR}/venv/bin/activate - -if [ -f "requirements-install.txt" ]; then - # If we have a requirements-install.txt, it means that a package does not properly declare it's build time - # dependencies per PEP-517, so we have to set up the proper build environment ourselves, and then install - # the package without build isolation - BUILD_ISOLATION_FLAG="--no-build-isolation" - uv pip install --requirement ${MY_DIR}/requirements-install.txt -fi -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements.txt - -if [ -f "requirements-${BUILD_TYPE}.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements-${BUILD_TYPE}.txt -fi - -if [ -d "/opt/intel" ]; then - # Intel GPU: If the directory exists, we assume we are using the Intel image - # https://github.com/intel/intel-extension-for-pytorch/issues/538 - if [ -f "requirements-intel.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --requirement ${MY_DIR}/requirements-intel.txt - fi -fi - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file +installRequirements \ No newline at end of file diff --git a/backend/python/mamba/run.sh b/backend/python/mamba/run.sh index 54d19970311..1afc39848c7 100755 --- a/backend/python/mamba/run.sh +++ b/backend/python/mamba/run.sh @@ -1,10 +1,6 @@ #!/bin/bash +LIMIT_TARGETS="cublas" -## -## A bash script wrapper that runs the GRPC server +source $(dirname $0)/../common/libbackend.sh -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -source $MY_DIR/venv/bin/activate - -python $MY_DIR/backend_mamba.py $@ \ No newline at end of file +startBackend $@ \ No newline at end of file diff --git a/backend/python/mamba/test.py b/backend/python/mamba/test.py index 92dde016764..83fb26518e7 100644 --- a/backend/python/mamba/test.py +++ b/backend/python/mamba/test.py @@ -20,7 +20,7 @@ class TestBackendServicer(unittest.TestCase): This 
class contains methods to test the startup and shutdown of the gRPC service. """ def setUp(self): - self.service = subprocess.Popen(["python", "backend_mamba.py", "--addr", "localhost:50051"]) + self.service = subprocess.Popen(["python", "backend.py", "--addr", "localhost:50051"]) time.sleep(10) def tearDown(self) -> None: diff --git a/backend/python/mamba/test.sh b/backend/python/mamba/test.sh index 4b742b3f25e..6940b0661df 100755 --- a/backend/python/mamba/test.sh +++ b/backend/python/mamba/test.sh @@ -1,16 +1,6 @@ #!/bin/bash -## -## A bash script wrapper that runs python unittests +set -e -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -source $MY_DIR/venv/bin/activate - -if [ -f "${MY_DIR}/test.py" ]; then - pushd ${MY_DIR} - python -m unittest test.py - popd -else - echo "ERROR: No tests defined for backend!" - exit 1 -fi \ No newline at end of file +runUnittests diff --git a/backend/python/parler-tts/Makefile b/backend/python/parler-tts/Makefile index 590401f35b7..c25b2af79a7 100644 --- a/backend/python/parler-tts/Makefile +++ b/backend/python/parler-tts/Makefile @@ -40,4 +40,4 @@ backend_pb2_grpc.py backend_pb2.py: .PHONY: clean clean: protogen-clean - $(RM) -r venv \ No newline at end of file + $(RM) -r venv __pycache__ \ No newline at end of file diff --git a/backend/python/parler-tts/parler_tts_server.py b/backend/python/parler-tts/backend.py similarity index 100% rename from backend/python/parler-tts/parler_tts_server.py rename to backend/python/parler-tts/backend.py diff --git a/backend/python/parler-tts/install.sh b/backend/python/parler-tts/install.sh index 2d60fc355f6..967e97d3966 100755 --- a/backend/python/parler-tts/install.sh +++ b/backend/python/parler-tts/install.sh @@ -1,39 +1,13 @@ #!/bin/bash -set -ex +set -e -BUILD_ISOLATION_FLAG="" +LIMIT_TARGETS="cublas" -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -uv venv ${MY_DIR}/venv -source ${MY_DIR}/venv/bin/activate - -if [ -f "requirements-install.txt" ]; then - # If we have a requirements-install.txt, it means that a package does not properly declare it's build time - # dependencies per PEP-517, so we have to set up the proper build environment ourselves, and then install - # the package without build isolation - BUILD_ISOLATION_FLAG="--no-build-isolation" - uv pip install --requirement ${MY_DIR}/requirements-install.txt -fi -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements.txt - -if [ -f "requirements-${BUILD_TYPE}.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements-${BUILD_TYPE}.txt -fi - -if [ -d "/opt/intel" ]; then - # Intel GPU: If the directory exists, we assume we are using the Intel image - # https://github.com/intel/intel-extension-for-pytorch/issues/538 - if [ -f "requirements-intel.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --requirement ${MY_DIR}/requirements-intel.txt - fi -fi +installRequirements # https://github.com/descriptinc/audiotools/issues/101 # incompatible protobuf versions. 
-PYDIR=$(ls $MY_DIR/venv/lib) -curl -L https://raw.githubusercontent.com/protocolbuffers/protobuf/main/python/google/protobuf/internal/builder.py -o $MY_DIR/venv/lib/$PYDIR/site-packages/google/protobuf/internal/builder.py - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file +PYDIR=$(ls ${MY_DIR}/venv/lib) +curl -L https://raw.githubusercontent.com/protocolbuffers/protobuf/main/python/google/protobuf/internal/builder.py -o ${MY_DIR}/venv/lib/${PYDIR}/site-packages/google/protobuf/internal/builder.py diff --git a/backend/python/parler-tts/parler-nvidia.yml b/backend/python/parler-tts/parler-nvidia.yml deleted file mode 100644 index 28ffd14c855..00000000000 --- a/backend/python/parler-tts/parler-nvidia.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: parler -channels: - - defaults -dependencies: - - _libgcc_mutex=0.1=main - - _openmp_mutex=5.1=1_gnu - - bzip2=1.0.8=h7b6447c_0 - - ca-certificates=2023.08.22=h06a4308_0 - - ld_impl_linux-64=2.38=h1181459_1 - - libffi=3.4.4=h6a678d5_0 - - libgcc-ng=11.2.0=h1234567_1 - - libgomp=11.2.0=h1234567_1 - - libstdcxx-ng=11.2.0=h1234567_1 - - libuuid=1.41.5=h5eee18b_0 - - ncurses=6.4=h6a678d5_0 - - openssl=3.0.11=h7f8727e_2 - - pip=23.2.1=py311h06a4308_0 - - python=3.11.5=h955ad1f_0 - - readline=8.2=h5eee18b_0 - - setuptools=68.0.0=py311h06a4308_0 - - sqlite=3.41.2=h5eee18b_0 - - tk=8.6.12=h1ccaba5_0 - - tzdata=2023c=h04d1e81_0 - - wheel=0.41.2=py311h06a4308_0 - - xz=5.4.2=h5eee18b_0 - - zlib=1.2.13=h5eee18b_0 - - pip: - - accelerate>=0.11.0 - - grpcio==1.63.0 - - numpy==1.26.0 - - nvidia-cublas-cu12==12.1.3.1 - - nvidia-cuda-cupti-cu12==12.1.105 - - nvidia-cuda-nvrtc-cu12==12.1.105 - - nvidia-cuda-runtime-cu12==12.1.105 - - nvidia-cudnn-cu12==8.9.2.26 - - nvidia-cufft-cu12==11.0.2.54 - - nvidia-curand-cu12==10.3.2.106 - - nvidia-cusolver-cu12==11.4.5.107 - - nvidia-cusparse-cu12==12.1.0.106 - - nvidia-nccl-cu12==2.18.1 - - nvidia-nvjitlink-cu12==12.2.140 - - nvidia-nvtx-cu12==12.1.105 - - torch==2.1.0 - - transformers>=4.34.0 - - descript-audio-codec - - sentencepiece - - git+https://github.com/huggingface/parler-tts.git@10016fb0300c0dc31a0fb70e26f3affee7b62f16 -prefix: /opt/conda/envs/diffusers diff --git a/backend/python/parler-tts/parler.yml b/backend/python/parler-tts/parler.yml deleted file mode 100644 index a3028fe13e3..00000000000 --- a/backend/python/parler-tts/parler.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: parler -channels: - - defaults -dependencies: - - _libgcc_mutex=0.1=main - - _openmp_mutex=5.1=1_gnu - - bzip2=1.0.8=h7b6447c_0 - - ca-certificates=2023.08.22=h06a4308_0 - - ld_impl_linux-64=2.38=h1181459_1 - - libffi=3.4.4=h6a678d5_0 - - libgcc-ng=11.2.0=h1234567_1 - - libgomp=11.2.0=h1234567_1 - - libstdcxx-ng=11.2.0=h1234567_1 - - libuuid=1.41.5=h5eee18b_0 - - ncurses=6.4=h6a678d5_0 - - openssl=3.0.11=h7f8727e_2 - - pip=23.2.1=py311h06a4308_0 - - python=3.11.5=h955ad1f_0 - - readline=8.2=h5eee18b_0 - - setuptools=68.0.0=py311h06a4308_0 - - sqlite=3.41.2=h5eee18b_0 - - tk=8.6.12=h1ccaba5_0 - - tzdata=2023c=h04d1e81_0 - - wheel=0.41.2=py311h06a4308_0 - - xz=5.4.2=h5eee18b_0 - - zlib=1.2.13=h5eee18b_0 - - pip: - - accelerate>=0.11.0 - - numpy==1.26.0 - - grpcio==1.63.0 - - torch==2.1.0 - - transformers>=4.34.0 - - descript-audio-codec - - sentencepiece - - git+https://github.com/huggingface/parler-tts.git@10016fb0300c0dc31a0fb70e26f3affee7b62f16 -prefix: /opt/conda/envs/parler diff --git a/backend/python/parler-tts/run.sh b/backend/python/parler-tts/run.sh index 76425379181..375c07e5f42 100755 --- 
a/backend/python/parler-tts/run.sh +++ b/backend/python/parler-tts/run.sh @@ -1,10 +1,4 @@ #!/bin/bash +source $(dirname $0)/../common/libbackend.sh -## -## A bash script wrapper that runs the GRPC backend - -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -source $MY_DIR/venv/bin/activate - -python $MY_DIR/parler_tts_server.py $@ \ No newline at end of file +startBackend $@ \ No newline at end of file diff --git a/backend/python/parler-tts/test.py b/backend/python/parler-tts/test.py index ce9b66acaa9..639d43a9101 100644 --- a/backend/python/parler-tts/test.py +++ b/backend/python/parler-tts/test.py @@ -18,7 +18,7 @@ def setUp(self): """ This method sets up the gRPC service by starting the server """ - self.service = subprocess.Popen(["python3", "parler_tts_server.py", "--addr", "localhost:50051"]) + self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"]) time.sleep(10) def tearDown(self) -> None: diff --git a/backend/python/parler-tts/test.sh b/backend/python/parler-tts/test.sh index 4b742b3f25e..6940b0661df 100755 --- a/backend/python/parler-tts/test.sh +++ b/backend/python/parler-tts/test.sh @@ -1,16 +1,6 @@ #!/bin/bash -## -## A bash script wrapper that runs python unittests +set -e -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -source $MY_DIR/venv/bin/activate - -if [ -f "${MY_DIR}/test.py" ]; then - pushd ${MY_DIR} - python -m unittest test.py - popd -else - echo "ERROR: No tests defined for backend!" - exit 1 -fi \ No newline at end of file +runUnittests diff --git a/backend/python/petals/Makefile b/backend/python/petals/Makefile index af32e8453d6..81b06c2984f 100644 --- a/backend/python/petals/Makefile +++ b/backend/python/petals/Makefile @@ -28,4 +28,4 @@ backend_pb2_grpc.py backend_pb2.py: .PHONY: clean clean: protogen-clean - rm -rf venv \ No newline at end of file + rm -rf venv __pycache__ \ No newline at end of file diff --git a/backend/python/petals/backend_petals.py b/backend/python/petals/backend.py similarity index 100% rename from backend/python/petals/backend_petals.py rename to backend/python/petals/backend.py diff --git a/backend/python/petals/install.sh b/backend/python/petals/install.sh index 311203ca602..2829b927e3b 100755 --- a/backend/python/petals/install.sh +++ b/backend/python/petals/install.sh @@ -1,34 +1,6 @@ #!/bin/bash -set -ex +set -e -BUILD_ISOLATION_FLAG="" +source $(dirname $0)/../common/libbackend.sh -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -uv venv ${MY_DIR}/venv -source ${MY_DIR}/venv/bin/activate - -if [ -f "requirements-install.txt" ]; then - # If we have a requirements-install.txt, it means that a package does not properly declare it's build time - # dependencies per PEP-517, so we have to set up the proper build environment ourselves, and then install - # the package without build isolation - BUILD_ISOLATION_FLAG="--no-build-isolation" - uv pip install --requirement ${MY_DIR}/requirements-install.txt -fi -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements.txt - -if [ -f "requirements-${BUILD_TYPE}.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements-${BUILD_TYPE}.txt -fi - -if [ -d "/opt/intel" ]; then - # Intel GPU: If the directory exists, we assume we are using the Intel image - # https://github.com/intel/intel-extension-for-pytorch/issues/538 - if [ -f "requirements-intel.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ 
--requirement ${MY_DIR}/requirements-intel.txt - fi -fi - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file +installRequirements diff --git a/backend/python/petals/petals.yml b/backend/python/petals/petals.yml deleted file mode 100644 index 35eea15ad5d..00000000000 --- a/backend/python/petals/petals.yml +++ /dev/null @@ -1,30 +0,0 @@ -name: petals -channels: - - defaults -dependencies: - # - _libgcc_mutex=0.1=main - # - _openmp_mutex=5.1=1_gnu - # - bzip2=1.0.8=h7b6447c_0 - # - ca-certificates=2023.08.22=h06a4308_0 - # - ld_impl_linux-64=2.38=h1181459_1 - # - libffi=3.4.4=h6a678d5_0 - # - libgcc-ng=11.2.0=h1234567_1 - # - libgomp=11.2.0=h1234567_1 - # - libstdcxx-ng=11.2.0=h1234567_1 - # - libuuid=1.41.5=h5eee18b_0 - # - ncurses=6.4=h6a678d5_0 - # - openssl=3.0.11=h7f8727e_2 - # - pip=23.2.1=py311h06a4308_0 - - python=3.11.5=h955ad1f_0 - # - readline=8.2=h5eee18b_0 - # - setuptools=68.0.0=py311h06a4308_0 - # - sqlite=3.41.2=h5eee18b_0 - # - tk=8.6.12=h1ccaba5_0 - # - tzdata=2023c=h04d1e81_0 - # - wheel=0.41.2=py311h06a4308_0 - # - xz=5.4.2=h5eee18b_0 - # - zlib=1.2.13=h5eee18b_0 - - pip: - - torch==2.1.0 - - git+https://github.com/bigscience-workshop/petals -prefix: /opt/conda/envs/petals diff --git a/backend/python/petals/run.sh b/backend/python/petals/run.sh index 87bf1fa91fd..375c07e5f42 100755 --- a/backend/python/petals/run.sh +++ b/backend/python/petals/run.sh @@ -1,10 +1,4 @@ #!/bin/bash +source $(dirname $0)/../common/libbackend.sh -## -## A bash script wrapper that runs the GRPC backend - -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -source $MY_DIR/venv/bin/activate - -python $MY_DIR/backend_petals.py $@ \ No newline at end of file +startBackend $@ \ No newline at end of file diff --git a/backend/python/petals/test.py b/backend/python/petals/test.py index cd74a7019f7..586d24437e1 100644 --- a/backend/python/petals/test.py +++ b/backend/python/petals/test.py @@ -20,7 +20,7 @@ class TestBackendServicer(unittest.TestCase): This class contains methods to test the startup and shutdown of the gRPC service. """ def setUp(self): - self.service = subprocess.Popen(["python", "backend_petals.py", "--addr", "localhost:50051"]) + self.service = subprocess.Popen(["python", "backend.py", "--addr", "localhost:50051"]) time.sleep(10) def tearDown(self) -> None: diff --git a/backend/python/petals/test.sh b/backend/python/petals/test.sh index 4b742b3f25e..6940b0661df 100755 --- a/backend/python/petals/test.sh +++ b/backend/python/petals/test.sh @@ -1,16 +1,6 @@ #!/bin/bash -## -## A bash script wrapper that runs python unittests +set -e -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -source $MY_DIR/venv/bin/activate - -if [ -f "${MY_DIR}/test.py" ]; then - pushd ${MY_DIR} - python -m unittest test.py - popd -else - echo "ERROR: No tests defined for backend!" 
- exit 1 -fi \ No newline at end of file +runUnittests diff --git a/backend/python/rerankers/Makefile b/backend/python/rerankers/Makefile index 0c812047e3f..6863ff03ddc 100644 --- a/backend/python/rerankers/Makefile +++ b/backend/python/rerankers/Makefile @@ -27,4 +27,4 @@ backend_pb2_grpc.py backend_pb2.py: .PHONY: clean clean: protogen-clean - rm -rf venv \ No newline at end of file + rm -rf venv __pycache__ \ No newline at end of file diff --git a/backend/python/rerankers/reranker.py b/backend/python/rerankers/backend.py similarity index 100% rename from backend/python/rerankers/reranker.py rename to backend/python/rerankers/backend.py diff --git a/backend/python/rerankers/install.sh b/backend/python/rerankers/install.sh index 311203ca602..2829b927e3b 100755 --- a/backend/python/rerankers/install.sh +++ b/backend/python/rerankers/install.sh @@ -1,34 +1,6 @@ #!/bin/bash -set -ex +set -e -BUILD_ISOLATION_FLAG="" +source $(dirname $0)/../common/libbackend.sh -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -uv venv ${MY_DIR}/venv -source ${MY_DIR}/venv/bin/activate - -if [ -f "requirements-install.txt" ]; then - # If we have a requirements-install.txt, it means that a package does not properly declare it's build time - # dependencies per PEP-517, so we have to set up the proper build environment ourselves, and then install - # the package without build isolation - BUILD_ISOLATION_FLAG="--no-build-isolation" - uv pip install --requirement ${MY_DIR}/requirements-install.txt -fi -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements.txt - -if [ -f "requirements-${BUILD_TYPE}.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements-${BUILD_TYPE}.txt -fi - -if [ -d "/opt/intel" ]; then - # Intel GPU: If the directory exists, we assume we are using the Intel image - # https://github.com/intel/intel-extension-for-pytorch/issues/538 - if [ -f "requirements-intel.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --requirement ${MY_DIR}/requirements-intel.txt - fi -fi - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file +installRequirements diff --git a/backend/python/rerankers/run.sh b/backend/python/rerankers/run.sh index b7f6384c2a8..375c07e5f42 100755 --- a/backend/python/rerankers/run.sh +++ b/backend/python/rerankers/run.sh @@ -1,10 +1,4 @@ #!/bin/bash +source $(dirname $0)/../common/libbackend.sh -## -## A bash script wrapper that runs the GRPC backend - -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -source $MY_DIR/venv/bin/activate - -python $MY_DIR/reranker.py $@ \ No newline at end of file +startBackend $@ \ No newline at end of file diff --git a/backend/python/rerankers/test.py b/backend/python/rerankers/test.py index c1cf3d70a19..d3e4e075b8b 100755 --- a/backend/python/rerankers/test.py +++ b/backend/python/rerankers/test.py @@ -18,7 +18,7 @@ def setUp(self): """ This method sets up the gRPC service by starting the server """ - self.service = subprocess.Popen(["python3", "reranker.py", "--addr", "localhost:50051"]) + self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"]) time.sleep(10) def tearDown(self) -> None: diff --git a/backend/python/rerankers/test.sh b/backend/python/rerankers/test.sh index 4b742b3f25e..6940b0661df 100755 --- a/backend/python/rerankers/test.sh +++ b/backend/python/rerankers/test.sh @@ -1,16 +1,6 @@ #!/bin/bash -## -## A bash script wrapper that runs python 
unittests +set -e -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -source $MY_DIR/venv/bin/activate - -if [ -f "${MY_DIR}/test.py" ]; then - pushd ${MY_DIR} - python -m unittest test.py - popd -else - echo "ERROR: No tests defined for backend!" - exit 1 -fi \ No newline at end of file +runUnittests diff --git a/backend/python/sentencetransformers/Makefile b/backend/python/sentencetransformers/Makefile index 5fa6acd73f9..8b18e94338b 100644 --- a/backend/python/sentencetransformers/Makefile +++ b/backend/python/sentencetransformers/Makefile @@ -28,4 +28,4 @@ backend_pb2_grpc.py backend_pb2.py: .PHONY: clean clean: protogen-clean - rm -rf venv \ No newline at end of file + rm -rf venv __pycache__ \ No newline at end of file diff --git a/backend/python/sentencetransformers/sentencetransformers.py b/backend/python/sentencetransformers/backend.py similarity index 100% rename from backend/python/sentencetransformers/sentencetransformers.py rename to backend/python/sentencetransformers/backend.py diff --git a/backend/python/sentencetransformers/install.sh b/backend/python/sentencetransformers/install.sh index 311203ca602..2829b927e3b 100755 --- a/backend/python/sentencetransformers/install.sh +++ b/backend/python/sentencetransformers/install.sh @@ -1,34 +1,6 @@ #!/bin/bash -set -ex +set -e -BUILD_ISOLATION_FLAG="" +source $(dirname $0)/../common/libbackend.sh -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -uv venv ${MY_DIR}/venv -source ${MY_DIR}/venv/bin/activate - -if [ -f "requirements-install.txt" ]; then - # If we have a requirements-install.txt, it means that a package does not properly declare it's build time - # dependencies per PEP-517, so we have to set up the proper build environment ourselves, and then install - # the package without build isolation - BUILD_ISOLATION_FLAG="--no-build-isolation" - uv pip install --requirement ${MY_DIR}/requirements-install.txt -fi -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements.txt - -if [ -f "requirements-${BUILD_TYPE}.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements-${BUILD_TYPE}.txt -fi - -if [ -d "/opt/intel" ]; then - # Intel GPU: If the directory exists, we assume we are using the Intel image - # https://github.com/intel/intel-extension-for-pytorch/issues/538 - if [ -f "requirements-intel.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --requirement ${MY_DIR}/requirements-intel.txt - fi -fi - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file +installRequirements diff --git a/backend/python/sentencetransformers/run.sh b/backend/python/sentencetransformers/run.sh index c2b04d8dea7..375c07e5f42 100755 --- a/backend/python/sentencetransformers/run.sh +++ b/backend/python/sentencetransformers/run.sh @@ -1,10 +1,4 @@ #!/bin/bash +source $(dirname $0)/../common/libbackend.sh -## -## A bash script wrapper that runs the GRPC backend - -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -source $MY_DIR/venv/bin/activate - -python $MY_DIR/sentencetransformers.py $@ \ No newline at end of file +startBackend $@ \ No newline at end of file diff --git a/backend/python/sentencetransformers/test.py b/backend/python/sentencetransformers/test.py index 45ba1e5cf10..9df52b141db 100644 --- a/backend/python/sentencetransformers/test.py +++ b/backend/python/sentencetransformers/test.py @@ -18,7 +18,7 @@ def setUp(self): """ This method sets up the 
gRPC service by starting the server """ - self.service = subprocess.Popen(["python3", "sentencetransformers.py", "--addr", "localhost:50051"]) + self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"]) time.sleep(10) def tearDown(self) -> None: diff --git a/backend/python/sentencetransformers/test.sh b/backend/python/sentencetransformers/test.sh index 4b742b3f25e..6940b0661df 100755 --- a/backend/python/sentencetransformers/test.sh +++ b/backend/python/sentencetransformers/test.sh @@ -1,16 +1,6 @@ #!/bin/bash -## -## A bash script wrapper that runs python unittests +set -e -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -source $MY_DIR/venv/bin/activate - -if [ -f "${MY_DIR}/test.py" ]; then - pushd ${MY_DIR} - python -m unittest test.py - popd -else - echo "ERROR: No tests defined for backend!" - exit 1 -fi \ No newline at end of file +runUnittests diff --git a/backend/python/transformers-musicgen/Makefile b/backend/python/transformers-musicgen/Makefile index d45b4cb4f83..06badf6d1d5 100644 --- a/backend/python/transformers-musicgen/Makefile +++ b/backend/python/transformers-musicgen/Makefile @@ -26,4 +26,4 @@ backend_pb2_grpc.py backend_pb2.py: .PHONY: clean clean: protogen-clean - rm -rf venv \ No newline at end of file + rm -rf venv __pycache__ \ No newline at end of file diff --git a/backend/python/transformers-musicgen/transformers_server.py b/backend/python/transformers-musicgen/backend.py similarity index 100% rename from backend/python/transformers-musicgen/transformers_server.py rename to backend/python/transformers-musicgen/backend.py diff --git a/backend/python/transformers-musicgen/install.sh b/backend/python/transformers-musicgen/install.sh index 311203ca602..2829b927e3b 100755 --- a/backend/python/transformers-musicgen/install.sh +++ b/backend/python/transformers-musicgen/install.sh @@ -1,34 +1,6 @@ #!/bin/bash -set -ex +set -e -BUILD_ISOLATION_FLAG="" +source $(dirname $0)/../common/libbackend.sh -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -uv venv ${MY_DIR}/venv -source ${MY_DIR}/venv/bin/activate - -if [ -f "requirements-install.txt" ]; then - # If we have a requirements-install.txt, it means that a package does not properly declare it's build time - # dependencies per PEP-517, so we have to set up the proper build environment ourselves, and then install - # the package without build isolation - BUILD_ISOLATION_FLAG="--no-build-isolation" - uv pip install --requirement ${MY_DIR}/requirements-install.txt -fi -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements.txt - -if [ -f "requirements-${BUILD_TYPE}.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements-${BUILD_TYPE}.txt -fi - -if [ -d "/opt/intel" ]; then - # Intel GPU: If the directory exists, we assume we are using the Intel image - # https://github.com/intel/intel-extension-for-pytorch/issues/538 - if [ -f "requirements-intel.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --requirement ${MY_DIR}/requirements-intel.txt - fi -fi - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file +installRequirements diff --git a/backend/python/transformers-musicgen/run.sh b/backend/python/transformers-musicgen/run.sh index 2087d6c89d2..375c07e5f42 100755 --- a/backend/python/transformers-musicgen/run.sh +++ b/backend/python/transformers-musicgen/run.sh @@ -1,10 +1,4 @@ #!/bin/bash 
+source $(dirname $0)/../common/libbackend.sh -## -## A bash script wrapper that runs the GRPC backend - -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -source $MY_DIR/venv/bin/activate - -python $MY_DIR/transformers_server.py $@ \ No newline at end of file +startBackend $@ \ No newline at end of file diff --git a/backend/python/transformers-musicgen/test.py b/backend/python/transformers-musicgen/test.py index 46daafbe9eb..777b399abb5 100644 --- a/backend/python/transformers-musicgen/test.py +++ b/backend/python/transformers-musicgen/test.py @@ -18,7 +18,7 @@ def setUp(self): """ This method sets up the gRPC service by starting the server """ - self.service = subprocess.Popen(["python3", "transformers_server.py", "--addr", "localhost:50051"]) + self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"]) time.sleep(10) def tearDown(self) -> None: diff --git a/backend/python/transformers-musicgen/test.sh b/backend/python/transformers-musicgen/test.sh index 4b742b3f25e..6940b0661df 100755 --- a/backend/python/transformers-musicgen/test.sh +++ b/backend/python/transformers-musicgen/test.sh @@ -1,16 +1,6 @@ #!/bin/bash -## -## A bash script wrapper that runs python unittests +set -e -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -source $MY_DIR/venv/bin/activate - -if [ -f "${MY_DIR}/test.py" ]; then - pushd ${MY_DIR} - python -m unittest test.py - popd -else - echo "ERROR: No tests defined for backend!" - exit 1 -fi \ No newline at end of file +runUnittests diff --git a/backend/python/transformers/Makefile b/backend/python/transformers/Makefile index b3593c50e55..e3460a63443 100644 --- a/backend/python/transformers/Makefile +++ b/backend/python/transformers/Makefile @@ -27,4 +27,4 @@ backend_pb2_grpc.py backend_pb2.py: .PHONY: clean clean: protogen-clean - rm -rf venv \ No newline at end of file + rm -rf venv __pycache__ \ No newline at end of file diff --git a/backend/python/transformers/transformers_server.py b/backend/python/transformers/backend.py similarity index 100% rename from backend/python/transformers/transformers_server.py rename to backend/python/transformers/backend.py diff --git a/backend/python/transformers/install.sh b/backend/python/transformers/install.sh index 311203ca602..2829b927e3b 100755 --- a/backend/python/transformers/install.sh +++ b/backend/python/transformers/install.sh @@ -1,34 +1,6 @@ #!/bin/bash -set -ex +set -e -BUILD_ISOLATION_FLAG="" +source $(dirname $0)/../common/libbackend.sh -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -uv venv ${MY_DIR}/venv -source ${MY_DIR}/venv/bin/activate - -if [ -f "requirements-install.txt" ]; then - # If we have a requirements-install.txt, it means that a package does not properly declare it's build time - # dependencies per PEP-517, so we have to set up the proper build environment ourselves, and then install - # the package without build isolation - BUILD_ISOLATION_FLAG="--no-build-isolation" - uv pip install --requirement ${MY_DIR}/requirements-install.txt -fi -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements.txt - -if [ -f "requirements-${BUILD_TYPE}.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements-${BUILD_TYPE}.txt -fi - -if [ -d "/opt/intel" ]; then - # Intel GPU: If the directory exists, we assume we are using the Intel image - # https://github.com/intel/intel-extension-for-pytorch/issues/538 - if [ -f "requirements-intel.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} 
--index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --requirement ${MY_DIR}/requirements-intel.txt - fi -fi - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file +installRequirements diff --git a/backend/python/transformers/run.sh b/backend/python/transformers/run.sh index 23899c0f3ba..375c07e5f42 100755 --- a/backend/python/transformers/run.sh +++ b/backend/python/transformers/run.sh @@ -1,17 +1,4 @@ #!/bin/bash +source $(dirname $0)/../common/libbackend.sh -## -## A bash script wrapper that runs the GRPC backend - - -if [ -d "/opt/intel" ]; then - # Assumes we are using the Intel oneAPI container image - # https://github.com/intel/intel-extension-for-pytorch/issues/538 - export XPU=1 -fi - -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -source $MY_DIR/venv/bin/activate - -python $MY_DIR/transformers_server.py $@ \ No newline at end of file +startBackend $@ \ No newline at end of file diff --git a/backend/python/transformers/test.py b/backend/python/transformers/test.py index 13d753e919d..aab3c05e357 100644 --- a/backend/python/transformers/test.py +++ b/backend/python/transformers/test.py @@ -18,7 +18,7 @@ def setUp(self): """ This method sets up the gRPC service by starting the server """ - self.service = subprocess.Popen(["python3", "transformers_server.py", "--addr", "localhost:50051"]) + self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"]) def tearDown(self) -> None: """ diff --git a/backend/python/transformers/test.sh b/backend/python/transformers/test.sh index 4b742b3f25e..6940b0661df 100755 --- a/backend/python/transformers/test.sh +++ b/backend/python/transformers/test.sh @@ -1,16 +1,6 @@ #!/bin/bash -## -## A bash script wrapper that runs python unittests +set -e -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -source $MY_DIR/venv/bin/activate - -if [ -f "${MY_DIR}/test.py" ]; then - pushd ${MY_DIR} - python -m unittest test.py - popd -else - echo "ERROR: No tests defined for backend!" 
- exit 1 -fi \ No newline at end of file +runUnittests diff --git a/backend/python/vall-e-x/Makefile b/backend/python/vall-e-x/Makefile index 0f6e377fcb0..a3ca32a3fb4 100644 --- a/backend/python/vall-e-x/Makefile +++ b/backend/python/vall-e-x/Makefile @@ -30,4 +30,4 @@ backend_pb2_grpc.py backend_pb2.py: .PHONY: clean clean: protogen-clean - rm -rf source venv \ No newline at end of file + rm -rf source venv __pycache__ \ No newline at end of file diff --git a/backend/python/vall-e-x/__init__.py b/backend/python/vall-e-x/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/backend/python/vall-e-x/ttsvalle.py b/backend/python/vall-e-x/backend.py similarity index 97% rename from backend/python/vall-e-x/ttsvalle.py rename to backend/python/vall-e-x/backend.py index fc9d93bd0fb..a110134f7e5 100644 --- a/backend/python/vall-e-x/ttsvalle.py +++ b/backend/python/vall-e-x/backend.py @@ -11,9 +11,9 @@ import grpc -from utils.generation import SAMPLE_RATE, generate_audio, preload_models +from source.utils.generation import SAMPLE_RATE, generate_audio, preload_models from scipy.io.wavfile import write as write_wav -from utils.prompt_making import make_prompt +from source.utils.prompt_making import make_prompt _ONE_DAY_IN_SECONDS = 60 * 60 * 24 diff --git a/backend/python/vall-e-x/install.sh b/backend/python/vall-e-x/install.sh index 82170be6ccc..ff9003ff039 100755 --- a/backend/python/vall-e-x/install.sh +++ b/backend/python/vall-e-x/install.sh @@ -1,40 +1,14 @@ #!/bin/bash -set -ex +set -e -BUILD_ISOLATION_FLAG="" +VALL_E_X_VERSION=3faaf8ccadb154d63b38070caf518ce9309ea0f4 -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -uv venv ${MY_DIR}/venv -source ${MY_DIR}/venv/bin/activate +installRequirements -if [ -f "requirements-install.txt" ]; then - # If we have a requirements-install.txt, it means that a package does not properly declare it's build time - # dependencies per PEP-517, so we have to set up the proper build environment ourselves, and then install - # the package without build isolation - BUILD_ISOLATION_FLAG="--no-build-isolation" - uv pip install --requirement ${MY_DIR}/requirements-install.txt -fi -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements.txt - -if [ -f "requirements-${BUILD_TYPE}.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements-${BUILD_TYPE}.txt -fi - -if [ -d "/opt/intel" ]; then - # Intel GPU: If the directory exists, we assume we are using the Intel image - # https://github.com/intel/intel-extension-for-pytorch/issues/538 - if [ -f "requirements-intel.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --requirement ${MY_DIR}/requirements-intel.txt - fi -fi - -git clone https://github.com/Plachtaa/VALL-E-X.git $MY_DIR/source -pushd $MY_DIR/source && git checkout -b build $VALL_E_X_VERSION && popd +git clone https://github.com/Plachtaa/VALL-E-X.git ${MY_DIR}/source +pushd ${MY_DIR}/source && git checkout -b build ${VALL_E_X_VERSION} && popd uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/source/requirements.txt -cp -rfv ./*py $MY_DIR/source/ - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file +cp -v ./*py $MY_DIR/source/ diff --git a/backend/python/vall-e-x/run.sh b/backend/python/vall-e-x/run.sh index ce316a120c2..4b0682adf1f 100755 --- a/backend/python/vall-e-x/run.sh +++ b/backend/python/vall-e-x/run.sh @@ -1,10 
+1,6 @@ #!/bin/bash +BACKEND_FILE="${MY_DIR}/source/backend.py" -## -## A bash script wrapper that runs the GRPC backend +source $(dirname $0)/../common/libbackend.sh -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -source $MY_DIR/venv/bin/activate - -pushd $MY_DIR/source && python ttsvalle.py $@ \ No newline at end of file +startBackend $@ \ No newline at end of file diff --git a/backend/python/vall-e-x/test.py b/backend/python/vall-e-x/test.py index 9acc7ec649e..f31a148c1f2 100644 --- a/backend/python/vall-e-x/test.py +++ b/backend/python/vall-e-x/test.py @@ -18,7 +18,7 @@ def setUp(self): """ This method sets up the gRPC service by starting the server """ - self.service = subprocess.Popen(["python3", "ttsvalle.py", "--addr", "localhost:50051"]) + self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"]) time.sleep(10) def tearDown(self) -> None: diff --git a/backend/python/vall-e-x/test.sh b/backend/python/vall-e-x/test.sh index 91c6477d4ba..6940b0661df 100755 --- a/backend/python/vall-e-x/test.sh +++ b/backend/python/vall-e-x/test.sh @@ -1,16 +1,6 @@ #!/bin/bash -## -## A bash script wrapper that runs python unittests +set -e -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -source $MY_DIR/venv/bin/activate - -if [ -f "${MY_DIR}/test.py" ]; then - pushd ${MY_DIR}/source - python -m unittest test.py - popd -else - echo "ERROR: No tests defined for backend!" - exit 1 -fi \ No newline at end of file +runUnittests diff --git a/backend/python/vall-e-x/ttsvalle.yml b/backend/python/vall-e-x/ttsvalle.yml deleted file mode 100644 index 09dbd9466a8..00000000000 --- a/backend/python/vall-e-x/ttsvalle.yml +++ /dev/null @@ -1,101 +0,0 @@ -name: ttsvalle -channels: - - defaults -dependencies: - - _libgcc_mutex=0.1=main - - _openmp_mutex=5.1=1_gnu - - bzip2=1.0.8=h7b6447c_0 - - ca-certificates=2023.08.22=h06a4308_0 - - ld_impl_linux-64=2.38=h1181459_1 - - libffi=3.4.4=h6a678d5_0 - - libgcc-ng=11.2.0=h1234567_1 - - libgomp=11.2.0=h1234567_1 - - libstdcxx-ng=11.2.0=h1234567_1 - - libuuid=1.41.5=h5eee18b_0 - - ncurses=6.4=h6a678d5_0 - - openssl=3.0.11=h7f8727e_2 - - pip=23.2.1=py310h06a4308_0 - - python=3.10.13=h955ad1f_0 - - readline=8.2=h5eee18b_0 - - setuptools=68.0.0=py310h06a4308_0 - - sqlite=3.41.2=h5eee18b_0 - - tk=8.6.12=h1ccaba5_0 - - tzdata=2023c=h04d1e81_0 - - wheel=0.41.2=py310h06a4308_0 - - xz=5.4.2=h5eee18b_0 - - zlib=1.2.13=h5eee18b_0 - - pip: - - aiofiles==23.2.1 - - altair==5.1.2 - - annotated-types==0.6.0 - - anyio==3.7.1 - - click==8.1.7 - - cn2an==0.5.22 - - cython==3.0.3 - - einops==0.7.0 - - encodec==0.1.1 - - eng-to-ipa==0.0.2 - - fastapi==0.103.2 - - ffmpeg-python==0.2.0 - - ffmpy==0.3.1 - - fsspec==2023.9.2 - - future==0.18.3 - - gradio==3.47.1 - - gradio-client==0.6.0 - - grpcio==1.63.0 - - h11==0.14.0 - - httpcore==0.18.0 - - httpx==0.25.0 - - huggingface-hub==0.17.3 - - importlib-resources==6.1.0 - - inflect==7.0.0 - - jieba==0.42.1 - - langid==1.1.6 - - llvmlite==0.41.0 - - more-itertools==10.1.0 - - nltk==3.8.1 - - numba==0.58.0 - - numpy==1.25.2 - - nvidia-cublas-cu12==12.1.3.1 - - nvidia-cuda-cupti-cu12==12.1.105 - - nvidia-cuda-nvrtc-cu12==12.1.105 - - nvidia-cuda-runtime-cu12==12.1.105 - - nvidia-cudnn-cu12==8.9.2.26 - - nvidia-cufft-cu12==11.0.2.54 - - nvidia-curand-cu12==10.3.2.106 - - nvidia-cusolver-cu12==11.4.5.107 - - nvidia-cusparse-cu12==12.1.0.106 - - nvidia-nccl-cu12==2.18.1 - - nvidia-nvjitlink-cu12==12.2.140 - - nvidia-nvtx-cu12==12.1.105 - - openai-whisper==20230306 - - orjson==3.9.7 - - 
proces==0.1.7 - - protobuf==4.24.4 - - pydantic==2.4.2 - - pydantic-core==2.10.1 - - pydub==0.25.1 - - pyopenjtalk-prebuilt==0.3.0 - - pypinyin==0.49.0 - - python-multipart==0.0.6 - - regex==2023.10.3 - - safetensors>=0.4.0 - - semantic-version==2.10.0 - - soundfile==0.12.1 - - starlette==0.27.0 - - sudachidict-core==20230927 - - sudachipy==0.6.7 - - tokenizers==0.14.1 - - toolz==0.12.0 - - torch==2.1.0 - - torchaudio==2.1.0 - - torchvision==0.16.0 - - tqdm==4.66.1 - - transformers==4.34.0 - - triton==2.1.0 - - unidecode==1.3.7 - - uvicorn==0.23.2 - - vocos==0.0.3 - - websockets==11.0.3 - - wget==3.2 -prefix: /opt/conda/envs/ttsvalle diff --git a/backend/python/vllm/Makefile b/backend/python/vllm/Makefile index 79bff60e95f..e2204a43bc4 100644 --- a/backend/python/vllm/Makefile +++ b/backend/python/vllm/Makefile @@ -26,4 +26,4 @@ backend_pb2_grpc.py backend_pb2.py: .PHONY: clean clean: protogen-clean - rm -rf venv \ No newline at end of file + rm -rf venv __pycache__ \ No newline at end of file diff --git a/backend/python/vllm/backend_vllm.py b/backend/python/vllm/backend.py similarity index 100% rename from backend/python/vllm/backend_vllm.py rename to backend/python/vllm/backend.py diff --git a/backend/python/vllm/install.sh b/backend/python/vllm/install.sh index 311203ca602..ccc83f96285 100755 --- a/backend/python/vllm/install.sh +++ b/backend/python/vllm/install.sh @@ -1,34 +1,8 @@ #!/bin/bash -set -ex +set -e -BUILD_ISOLATION_FLAG="" +EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation" -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -uv venv ${MY_DIR}/venv -source ${MY_DIR}/venv/bin/activate - -if [ -f "requirements-install.txt" ]; then - # If we have a requirements-install.txt, it means that a package does not properly declare it's build time - # dependencies per PEP-517, so we have to set up the proper build environment ourselves, and then install - # the package without build isolation - BUILD_ISOLATION_FLAG="--no-build-isolation" - uv pip install --requirement ${MY_DIR}/requirements-install.txt -fi -uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements.txt - -if [ -f "requirements-${BUILD_TYPE}.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --requirement ${MY_DIR}/requirements-${BUILD_TYPE}.txt -fi - -if [ -d "/opt/intel" ]; then - # Intel GPU: If the directory exists, we assume we are using the Intel image - # https://github.com/intel/intel-extension-for-pytorch/issues/538 - if [ -f "requirements-intel.txt" ]; then - uv pip install ${BUILD_ISOLATION_FLAG} --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --requirement ${MY_DIR}/requirements-intel.txt - fi -fi - -if [ "$PIP_CACHE_PURGE" = true ] ; then - pip cache purge -fi \ No newline at end of file +installRequirements diff --git a/backend/python/vllm/run.sh b/backend/python/vllm/run.sh index 34127e89459..375c07e5f42 100755 --- a/backend/python/vllm/run.sh +++ b/backend/python/vllm/run.sh @@ -1,10 +1,4 @@ #!/bin/bash +source $(dirname $0)/../common/libbackend.sh -## -## A bash script wrapper that runs the GRPC backend - -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" - -source $MY_DIR/venv/bin/activate - -python $MY_DIR/backend_vllm.py $@ \ No newline at end of file +startBackend $@ \ No newline at end of file diff --git a/backend/python/vllm/test.py b/backend/python/vllm/test.py index 7760f8163f4..83fb26518e7 100644 --- a/backend/python/vllm/test.py +++ b/backend/python/vllm/test.py @@ -20,7 +20,7 @@ class 
TestBackendServicer(unittest.TestCase): This class contains methods to test the startup and shutdown of the gRPC service. """ def setUp(self): - self.service = subprocess.Popen(["python", "backend_vllm.py", "--addr", "localhost:50051"]) + self.service = subprocess.Popen(["python", "backend.py", "--addr", "localhost:50051"]) time.sleep(10) def tearDown(self) -> None: diff --git a/backend/python/vllm/test.sh b/backend/python/vllm/test.sh index 4b742b3f25e..6940b0661df 100755 --- a/backend/python/vllm/test.sh +++ b/backend/python/vllm/test.sh @@ -1,16 +1,6 @@ #!/bin/bash -## -## A bash script wrapper that runs python unittests +set -e -MY_DIR="$(dirname -- "${BASH_SOURCE[0]}")" +source $(dirname $0)/../common/libbackend.sh -source $MY_DIR/venv/bin/activate - -if [ -f "${MY_DIR}/test.py" ]; then - pushd ${MY_DIR} - python -m unittest test.py - popd -else - echo "ERROR: No tests defined for backend!" - exit 1 -fi \ No newline at end of file +runUnittests From 468903663f51720d0fce43e2b2143c1a59daaff4 Mon Sep 17 00:00:00 2001 From: Chris Jowett <421501+cryptk@users.noreply.github.com> Date: Fri, 10 May 2024 21:51:40 -0500 Subject: [PATCH 2/5] chore: minor cleanup Signed-off-by: Chris Jowett <421501+cryptk@users.noreply.github.com> --- backend/python/common/libbackend.sh | 5 ----- backend/python/common/template/Makefile | 6 ++++-- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/backend/python/common/libbackend.sh b/backend/python/common/libbackend.sh index b11378bd015..5814abeaaae 100644 --- a/backend/python/common/libbackend.sh +++ b/backend/python/common/libbackend.sh @@ -158,11 +158,6 @@ function startBackend() { fi } -function limitTarget() { - target=$1 - echo $target -} - function runUnittests() { ensureVenv if [ -f "${MY_DIR}/test.py" ]; then diff --git a/backend/python/common/template/Makefile b/backend/python/common/template/Makefile index 13a74c5347e..6cc45707c64 100644 --- a/backend/python/common/template/Makefile +++ b/backend/python/common/template/Makefile @@ -1,5 +1,7 @@ -.PHONY: template -template: protogen +.DEFAULT_GOAL := install + +.PHONY: install +install: protogen bash install.sh .PHONY: protogen From fc21eccb352e8bcdfcf3edc4f13a0c9a182dcbca Mon Sep 17 00:00:00 2001 From: Chris Jowett <421501+cryptk@users.noreply.github.com> Date: Fri, 10 May 2024 21:51:57 -0500 Subject: [PATCH 3/5] fix: remove incorrect LIMIT_TARGETS from parler-tts Signed-off-by: Chris Jowett <421501+cryptk@users.noreply.github.com> --- backend/python/parler-tts/install.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/backend/python/parler-tts/install.sh b/backend/python/parler-tts/install.sh index 967e97d3966..41f3abfe26a 100755 --- a/backend/python/parler-tts/install.sh +++ b/backend/python/parler-tts/install.sh @@ -1,8 +1,6 @@ #!/bin/bash set -e -LIMIT_TARGETS="cublas" - source $(dirname $0)/../common/libbackend.sh installRequirements From 0e9aa0c2236548186ece6981ef176ef6a61fe42f Mon Sep 17 00:00:00 2001 From: Chris Jowett <421501+cryptk@users.noreply.github.com> Date: Fri, 10 May 2024 22:36:10 -0500 Subject: [PATCH 4/5] fix: update runUnitests to handle running tests from a custom test file Signed-off-by: Chris Jowett <421501+cryptk@users.noreply.github.com> --- backend/python/common/libbackend.sh | 9 ++++++++- backend/python/vall-e-x/__init__.py | 0 backend/python/vall-e-x/backend.py | 4 ++-- backend/python/vall-e-x/test.sh | 1 + 4 files changed, 11 insertions(+), 3 deletions(-) delete mode 100644 backend/python/vall-e-x/__init__.py diff --git a/backend/python/common/libbackend.sh 
b/backend/python/common/libbackend.sh index 5814abeaaae..00d2958cf5b 100644 --- a/backend/python/common/libbackend.sh +++ b/backend/python/common/libbackend.sh @@ -160,7 +160,14 @@ function startBackend() { function runUnittests() { ensureVenv - if [ -f "${MY_DIR}/test.py" ]; then + + if [ ! -z ${TEST_FILE} ]; then + testDir=$(dirname `realpath ${TEST_FILE}`) + testFile=$(basename ${TEST_FILE}) + pushd ${testDir} + python -m unittest ${testFile} + popd + elif [ -f "${MY_DIR}/test.py" ]; then pushd ${MY_DIR} python -m unittest test.py popd diff --git a/backend/python/vall-e-x/__init__.py b/backend/python/vall-e-x/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/backend/python/vall-e-x/backend.py b/backend/python/vall-e-x/backend.py index a110134f7e5..fc9d93bd0fb 100644 --- a/backend/python/vall-e-x/backend.py +++ b/backend/python/vall-e-x/backend.py @@ -11,9 +11,9 @@ import grpc -from source.utils.generation import SAMPLE_RATE, generate_audio, preload_models +from utils.generation import SAMPLE_RATE, generate_audio, preload_models from scipy.io.wavfile import write as write_wav -from source.utils.prompt_making import make_prompt +from utils.prompt_making import make_prompt _ONE_DAY_IN_SECONDS = 60 * 60 * 24 diff --git a/backend/python/vall-e-x/test.sh b/backend/python/vall-e-x/test.sh index 6940b0661df..57336b39347 100755 --- a/backend/python/vall-e-x/test.sh +++ b/backend/python/vall-e-x/test.sh @@ -1,5 +1,6 @@ #!/bin/bash set -e +TEST_FILE="./source/test.py" source $(dirname $0)/../common/libbackend.sh From f266e7e5578cac9225bc5110f275ee5b5cdadaf6 Mon Sep 17 00:00:00 2001 From: Chris Jowett <421501+cryptk@users.noreply.github.com> Date: Fri, 10 May 2024 22:41:15 -0500 Subject: [PATCH 5/5] chore: document runUnittests Signed-off-by: Chris Jowett <421501+cryptk@users.noreply.github.com> --- backend/python/common/libbackend.sh | 10 ++++++++++ backend/python/exllama/run.sh | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/backend/python/common/libbackend.sh b/backend/python/common/libbackend.sh index 00d2958cf5b..5c5805b9917 100644 --- a/backend/python/common/libbackend.sh +++ b/backend/python/common/libbackend.sh @@ -158,6 +158,16 @@ function startBackend() { fi } +# runUnittests discovers and runs python unittests +# +# You can specify a specific test file to use by setting TEST_FILE before calling runUnittests. +# example: +# +# source ../common/libbackend.sh +# TEST_FILE="${MY_DIR}/source/test.py" +# runUnittests $@ +# +# be default a file named test.py in the backends directory will be used function runUnittests() { ensureVenv diff --git a/backend/python/exllama/run.sh b/backend/python/exllama/run.sh index d6c36080cab..63119689d27 100755 --- a/backend/python/exllama/run.sh +++ b/backend/python/exllama/run.sh @@ -1,6 +1,6 @@ #!/bin/bash LIMIT_TARGETS="cublas" -BACKEND_FILE="./source/backend.py" +BACKEND_FILE="${MY_DIR}/source/backend.py" source $(dirname $0)/../common/libbackend.sh
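
Taken together, these patches collapse every Python backend's install.sh, run.sh and test.sh into thin wrappers around backend/python/common/libbackend.sh. The following is a minimal sketch of what the three scripts of a hypothetical new backend would look like under that convention; the backend itself is invented, but the variables (LIMIT_TARGETS, EXTRA_PIP_INSTALL_FLAGS, BACKEND_FILE, TEST_FILE) and functions (installRequirements, startBackend, runUnittests) are the ones the library provides in the diffs above.

    #!/bin/bash
    # install.sh (sketch): declare optional limits/flags, then let the library
    # create the venv and install the backend's requirements files.
    set -e
    LIMIT_TARGETS="cublas"                            # optional: only act for these build targets
    EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation"    # optional: extra flags for the pip install calls
    source $(dirname $0)/../common/libbackend.sh
    installRequirements

    #!/bin/bash
    # run.sh (sketch): start the gRPC server; the backend's backend.py is used
    # unless BACKEND_FILE points somewhere else (e.g. a checked-out source tree).
    source $(dirname $0)/../common/libbackend.sh
    startBackend $@

    #!/bin/bash
    # test.sh (sketch): run the unittests; test.py in the backend directory is
    # used unless TEST_FILE points somewhere else.
    set -e
    source $(dirname $0)/../common/libbackend.sh
    runUnittests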