From 9f574d9d2128e1298fd754e3af1a839cd39bd52f Mon Sep 17 00:00:00 2001
From: Matthew Meyer
Date: Fri, 10 Mar 2023 13:35:45 -0600
Subject: [PATCH 1/4] Support AMD for auto1111

---
 docker-compose.yml                | 14 ++++++--------
 services/AUTOMATIC1111/Dockerfile | 11 ++---------
 2 files changed, 8 insertions(+), 17 deletions(-)

diff --git a/docker-compose.yml b/docker-compose.yml
index e780c8da7..8f1835952 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -7,13 +7,11 @@ x-base_service: &base_service
     - &v1 ./data:/data
     - &v2 ./output:/output
   stop_signal: SIGINT
-  deploy:
-    resources:
-      reservations:
-        devices:
-          - driver: nvidia
-            device_ids: ['0']
-            capabilities: [gpu]
+  group_add:
+    - video
+  devices:
+    - "/dev/dri"
+    - "/dev/kfd"
 
 name: webui-docker
 
@@ -30,7 +28,7 @@ services:
     build: ./services/AUTOMATIC1111
     image: sd-auto:47
     environment:
-      - CLI_ARGS=--allow-code --medvram --xformers --enable-insecure-extension-access --api
+      - CLI_ARGS=--allow-code --medvram --enable-insecure-extension-access --api
 
   auto-cpu:
     <<: *automatic
diff --git a/services/AUTOMATIC1111/Dockerfile b/services/AUTOMATIC1111/Dockerfile
index 22b254b16..196f8c479 100644
--- a/services/AUTOMATIC1111/Dockerfile
+++ b/services/AUTOMATIC1111/Dockerfile
@@ -24,17 +24,13 @@ RUN . /clone.sh k-diffusion https://github.com/crowsonkb/k-diffusion.git 5b3af03
 RUN . /clone.sh clip-interrogator https://github.com/pharmapsychotic/clip-interrogator 2486589f24165c8e3b303f84e9dbbea318df83e8
 
 
-FROM alpine:3.17 as xformers
-RUN apk add --no-cache aria2
-RUN aria2c -x 5 --dir / --out wheel.whl 'https://github.com/AbdBarho/stable-diffusion-webui-docker/releases/download/5.0.0/xformers-0.0.17.dev449-cp310-cp310-manylinux2014_x86_64.whl'
-
 FROM python:3.10.9-slim
 
 SHELL ["/bin/bash", "-ceuxo", "pipefail"]
 
 ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1
 
-RUN PIP_NO_CACHE_DIR=1 pip install torch==1.13.1+cu117 torchvision --extra-index-url https://download.pytorch.org/whl/cu117
+RUN PIP_NO_CACHE_DIR=1 pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2
 
 RUN apt-get update && apt install fonts-dejavu-core rsync git jq moreutils -y && apt-get clean
 
@@ -46,10 +42,6 @@ git reset --hard d7aec59c4eb02f723b3d55c6f927a42e97acd679
 pip install -r requirements_versions.txt
 EOF
 
-RUN --mount=type=cache,target=/root/.cache/pip \
-  --mount=type=bind,from=xformers,source=/wheel.whl,target=/xformers-0.0.15-cp310-cp310-linux_x86_64.whl \
-  pip install triton /xformers-0.0.15-cp310-cp310-linux_x86_64.whl
-
 ENV ROOT=/stable-diffusion-webui
 
@@ -94,4 +86,5 @@ WORKDIR ${ROOT}
 ENV CLI_ARGS=""
 EXPOSE 7860
 ENTRYPOINT ["/docker/entrypoint.sh"]
+ENV HSA_OVERRIDE_GFX_VERSION=10.3.0
 CMD python -u webui.py --listen --port 7860 ${CLI_ARGS}

From 40b376fa7bc9dda22692d5c9b6550514dcd0f39d Mon Sep 17 00:00:00 2001
From: Matthew Meyer
Date: Sat, 11 Mar 2023 16:41:05 -0600
Subject: [PATCH 2/4] New service for AUTOMATIC1111-AMD

---
 docker-compose.nvidia.yml                | 76 ++++++++++++++++++++
 services/AUTOMATIC1111-AMD/Dockerfile    | 90 ++++++++++++++++++++++++
 services/AUTOMATIC1111-AMD/config.json   | 10 +++
 services/AUTOMATIC1111-AMD/entrypoint.sh | 65 +++++++++++++++++
 services/AUTOMATIC1111-AMD/info.py       | 14 ++++
 services/AUTOMATIC1111/Dockerfile        | 15 ++--
 6 files changed, 266 insertions(+), 4 deletions(-)
 create mode 100644 docker-compose.nvidia.yml
 create mode 100644 services/AUTOMATIC1111-AMD/Dockerfile
 create mode 100644 services/AUTOMATIC1111-AMD/config.json
 create mode 100755 services/AUTOMATIC1111-AMD/entrypoint.sh
 create mode 100644 services/AUTOMATIC1111-AMD/info.py

diff --git a/docker-compose.nvidia.yml b/docker-compose.nvidia.yml
new file mode 100644
index 000000000..9e57403fe
--- /dev/null
+++ b/docker-compose.nvidia.yml
@@ -0,0 +1,76 @@
+version: '3.9'
+
+x-base_service:
+  &base_service
+  ports:
+    - "7860:7860"
+  volumes:
+    - &v1 ./data:/data
+    - &v2 ./output:/output
+  stop_signal: SIGINT
+  deploy:
+    resources:
+      reservations:
+        devices:
+          - driver: nvidia
+            device_ids: [ '0' ]
+            capabilities: [ gpu ]
+
+name: webui-docker
+
+services:
+  download:
+    build: ./services/download/
+    profiles: [ "download" ]
+    volumes:
+      - *v1
+
+  auto:
+    &automatic
+    <<: *base_service
+    profiles: [ "auto" ]
+    build: ./services/AUTOMATIC1111
+    image: sd-auto:47
+    environment:
+      - CLI_ARGS=--allow-code --medvram --xformers --enable-insecure-extension-access --api
+
+  auto-amd:
+    &automatic_amd
+    <<: *base_service
+    profiles: [ "auto-amd" ]
+    build: ./services/AUTOMATIC1111-AMD
+    image: sd-auto:47
+    environment:
+      - CLI_ARGS=--allow-code --medvram --enable-insecure-extension-access --api --no-half --precision full --opt-sub-quad-attention
+
+  auto-cpu:
+    <<: *automatic
+    profiles: [ "auto-cpu" ]
+    deploy: {}
+    environment:
+      - CLI_ARGS=--no-half --precision full --allow-code --enable-insecure-extension-access --api
+
+  invoke:
+    <<: *base_service
+    profiles: [ "invoke" ]
+    build: ./services/invoke/
+    image: sd-invoke:26
+    environment:
+      - PRELOAD=true
+      - CLI_ARGS=
+
+  sygil:
+    &sygil
+    <<: *base_service
+    profiles: [ "sygil" ]
+    build: ./services/sygil/
+    image: sd-sygil:16
+    environment:
+      - CLI_ARGS=--optimized-turbo
+      - USE_STREAMLIT=0
+
+  sygil-sl:
+    <<: *sygil
+    profiles: [ "sygil-sl" ]
+    environment:
+      - USE_STREAMLIT=1
diff --git a/services/AUTOMATIC1111-AMD/Dockerfile b/services/AUTOMATIC1111-AMD/Dockerfile
new file mode 100644
index 000000000..716ec805d
--- /dev/null
+++ b/services/AUTOMATIC1111-AMD/Dockerfile
@@ -0,0 +1,90 @@
+# syntax=docker/dockerfile:1
+
+FROM alpine/git:2.36.2 as download
+
+SHELL ["/bin/sh", "-ceuxo", "pipefail"]
+
+RUN <<EOF
+cat <<'EOE' > /clone.sh
+mkdir -p repositories/"$1" && cd repositories/"$1" && git init && git remote add origin "$2" && git fetch origin "$3" --depth=1 && git reset --hard "$3" && rm -rf .git
+EOE
+EOF
+
+RUN . /clone.sh taming-transformers https://github.com/CompVis/taming-transformers.git 24268930bf1dce879235a7fddd0b2355b84d7ea6 \
+  && rm -rf data assets **/*.ipynb
+
+RUN . /clone.sh stable-diffusion-stability-ai https://github.com/Stability-AI/stablediffusion.git 47b6b607fdd31875c9279cd2f4f16b92e4ea958e \
+  && rm -rf assets data/**/*.png data/**/*.jpg data/**/*.gif
+
+RUN . /clone.sh CodeFormer https://github.com/sczhou/CodeFormer.git c5b4593074ba6214284d6acd5f1719b6c5d739af \
+  && rm -rf assets inputs
+
+RUN . /clone.sh BLIP https://github.com/salesforce/BLIP.git 48211a1594f1321b00f14c9f7a5b4813144b2fb9
+RUN . /clone.sh k-diffusion https://github.com/crowsonkb/k-diffusion.git 5b3af030dd83e0297272d861c19477735d0317ec
+RUN . /clone.sh clip-interrogator https://github.com/pharmapsychotic/clip-interrogator 2486589f24165c8e3b303f84e9dbbea318df83e8
+
+
+FROM python:3.10.9-slim
+
+SHELL ["/bin/bash", "-ceuxo", "pipefail"]
+
+ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1
+
+RUN PIP_NO_CACHE_DIR=1 pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2
+
+RUN apt-get update && apt install fonts-dejavu-core rsync git jq moreutils bash -y && apt-get clean
+
+
+RUN --mount=type=cache,target=/root/.cache/pip </data/config/auto/ui-config.json
+fi
+
+declare -A MOUNTS
+
+MOUNTS["/root/.cache"]="/data/.cache"
+
+# main
+MOUNTS["${ROOT}/models/Stable-diffusion"]="/data/StableDiffusion"
+MOUNTS["${ROOT}/models/VAE"]="/data/VAE"
+MOUNTS["${ROOT}/models/Codeformer"]="/data/Codeformer"
+MOUNTS["${ROOT}/models/GFPGAN"]="/data/GFPGAN"
+MOUNTS["${ROOT}/models/ESRGAN"]="/data/ESRGAN"
+MOUNTS["${ROOT}/models/BSRGAN"]="/data/BSRGAN"
+MOUNTS["${ROOT}/models/RealESRGAN"]="/data/RealESRGAN"
+MOUNTS["${ROOT}/models/SwinIR"]="/data/SwinIR"
+MOUNTS["${ROOT}/models/ScuNET"]="/data/ScuNET"
+MOUNTS["${ROOT}/models/LDSR"]="/data/LDSR"
+MOUNTS["${ROOT}/models/hypernetworks"]="/data/Hypernetworks"
+MOUNTS["${ROOT}/models/torch_deepdanbooru"]="/data/Deepdanbooru"
+MOUNTS["${ROOT}/models/BLIP"]="/data/BLIP"
+MOUNTS["${ROOT}/models/midas"]="/data/MiDaS"
+MOUNTS["${ROOT}/models/Lora"]="/data/Lora"
+
+MOUNTS["${ROOT}/embeddings"]="/data/embeddings"
+MOUNTS["${ROOT}/config.json"]="/data/config/auto/config.json"
+MOUNTS["${ROOT}/ui-config.json"]="/data/config/auto/ui-config.json"
+MOUNTS["${ROOT}/extensions"]="/data/config/auto/extensions"
+
+# extra hacks
+MOUNTS["${ROOT}/repositories/CodeFormer/weights/facelib"]="/data/.cache"
+
+for to_path in "${!MOUNTS[@]}"; do
+  set -Eeuo pipefail
+  from_path="${MOUNTS[${to_path}]}"
+  rm -rf "${to_path}"
+  if [ ! -f "$from_path" ]; then
+    mkdir -vp "$from_path"
+  fi
+  mkdir -vp "$(dirname "${to_path}")"
+  ln -sT "${from_path}" "${to_path}"
+  echo Mounted $(basename "${from_path}")
+done
+
+if [ -f "/data/config/auto/startup.sh" ]; then
+  pushd ${ROOT}
+  . /data/config/auto/startup.sh
+  popd
+fi
+
+exec "$@"
diff --git a/services/AUTOMATIC1111-AMD/info.py b/services/AUTOMATIC1111-AMD/info.py
new file mode 100644
index 000000000..edfa7b004
--- /dev/null
+++ b/services/AUTOMATIC1111-AMD/info.py
@@ -0,0 +1,14 @@
+import sys
+from pathlib import Path
+
+file = Path(sys.argv[1])
+file.write_text(
+    file.read_text()\
+    .replace('        return demo', """
+        with demo:
+            gr.Markdown(
+                'Created by [AUTOMATIC1111 / stable-diffusion-webui-docker](https://github.com/AbdBarho/stable-diffusion-webui-docker/)'
+            )
+        return demo
+""", 1)
+)
diff --git a/services/AUTOMATIC1111/Dockerfile b/services/AUTOMATIC1111/Dockerfile
index 196f8c479..3cd919ff9 100644
--- a/services/AUTOMATIC1111/Dockerfile
+++ b/services/AUTOMATIC1111/Dockerfile
@@ -24,13 +24,17 @@ RUN . /clone.sh k-diffusion https://github.com/crowsonkb/k-diffusion.git 5b3af03
 RUN . /clone.sh clip-interrogator https://github.com/pharmapsychotic/clip-interrogator 2486589f24165c8e3b303f84e9dbbea318df83e8
 
 
+FROM alpine:3.17 as xformers
+RUN apk add --no-cache aria2
+RUN aria2c -x 5 --dir / --out wheel.whl 'https://github.com/AbdBarho/stable-diffusion-webui-docker/releases/download/5.0.0/xformers-0.0.17.dev449-cp310-cp310-manylinux2014_x86_64.whl'
+
 FROM python:3.10.9-slim
 
 SHELL ["/bin/bash", "-ceuxo", "pipefail"]
 
 ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1
 
-RUN PIP_NO_CACHE_DIR=1 pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2
+RUN PIP_NO_CACHE_DIR=1 pip install torch==1.13.1+cu117 torchvision --extra-index-url https://download.pytorch.org/whl/cu117
 
 RUN apt-get update && apt install fonts-dejavu-core rsync git jq moreutils -y && apt-get clean
 
@@ -42,6 +46,10 @@ git reset --hard d7aec59c4eb02f723b3d55c6f927a42e97acd679
 pip install -r requirements_versions.txt
 EOF
 
+RUN --mount=type=cache,target=/root/.cache/pip \
+  --mount=type=bind,from=xformers,source=/wheel.whl,target=/xformers-0.0.15-cp310-cp310-linux_x86_64.whl \
+  pip install triton /xformers-0.0.15-cp310-cp310-linux_x86_64.whl
+
 
 ENV ROOT=/stable-diffusion-webui
 
@@ -63,7 +71,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
 RUN apt-get -y install libgoogle-perftools-dev && apt-get clean
 ENV LD_PRELOAD=libtcmalloc.so
 
-ARG SHA=0cc0ee1bcb4c24a8c9715f66cede06601bfc00c8
+ARG SHA=27e319dc4f09a2f040043948e5c52965976f8491
 RUN --mount=type=cache,target=/root/.cache/pip <

From: Matthew Meyer
Date: Sat, 11 Mar 2023 17:21:42 -0600
Subject: [PATCH 3/4] Added services for invoke and sygil AMD

---
 docker-compose.nvidia.yml                    | 76 ------------------
 docker-compose.yml                           | 86 +++++++++++++++-----
 services/AUTOMATIC1111-AMD/Dockerfile        |  5 +-
 services/invoke-AMD/Dockerfile               | 71 ++++++++++++++++
 services/invoke-AMD/entrypoint.sh            | 46 +++++++++++
 services/invoke-AMD/models.yaml              | 23 ++++++
 services/sygil-AMD/Dockerfile                | 49 ++++++++++++
 services/sygil-AMD/info.py                   | 13 +++
 services/sygil-AMD/mount.sh                  | 32 ++++++++
 services/sygil-AMD/run.sh                    | 10 +++
 services/sygil-AMD/userconfig_streamlit.yaml | 11 +++
 11 files changed, 325 insertions(+), 97 deletions(-)
 delete mode 100644 docker-compose.nvidia.yml
 create mode 100644 services/invoke-AMD/Dockerfile
 create mode 100755 services/invoke-AMD/entrypoint.sh
 create mode 100644 services/invoke-AMD/models.yaml
 create mode 100644 services/sygil-AMD/Dockerfile
 create mode 100644 services/sygil-AMD/info.py
 create mode 100755 services/sygil-AMD/mount.sh
 create mode 100755 services/sygil-AMD/run.sh
 create mode 100644 services/sygil-AMD/userconfig_streamlit.yaml

diff --git a/docker-compose.nvidia.yml b/docker-compose.nvidia.yml
deleted file mode 100644
index 9e57403fe..000000000
--- a/docker-compose.nvidia.yml
+++ /dev/null
@@ -1,76 +0,0 @@
-version: '3.9'
-
-x-base_service:
-  &base_service
-  ports:
-    - "7860:7860"
-  volumes:
-    - &v1 ./data:/data
-    - &v2 ./output:/output
-  stop_signal: SIGINT
-  deploy:
-    resources:
-      reservations:
-        devices:
-          - driver: nvidia
-            device_ids: [ '0' ]
-            capabilities: [ gpu ]
-
-name: webui-docker
-
-services:
-  download:
-    build: ./services/download/
-    profiles: [ "download" ]
-    volumes:
-      - *v1
-
-  auto:
-    &automatic
-    <<: *base_service
-    profiles: [ "auto" ]
-    build: ./services/AUTOMATIC1111
-    image: sd-auto:47
-    environment:
-      - CLI_ARGS=--allow-code --medvram --xformers --enable-insecure-extension-access --api
-
-  auto-amd:
-    &automatic_amd
-    <<: *base_service
-    profiles: [ "auto-amd" ]
-    build: ./services/AUTOMATIC1111-AMD
-    image: sd-auto:47
-    environment:
-      - CLI_ARGS=--allow-code --medvram --enable-insecure-extension-access --api --no-half --precision full --opt-sub-quad-attention
-
-  auto-cpu:
-    <<: *automatic
-    profiles: [ "auto-cpu" ]
-    deploy: {}
-    environment:
-      - CLI_ARGS=--no-half --precision full --allow-code --enable-insecure-extension-access --api
-
-  invoke:
-    <<: *base_service
-    profiles: [ "invoke" ]
-    build: ./services/invoke/
-    image: sd-invoke:26
-    environment:
-      - PRELOAD=true
-      - CLI_ARGS=
-
-  sygil:
-    &sygil
-    <<: *base_service
-    profiles: [ "sygil" ]
-    build: ./services/sygil/
-    image: sd-sygil:16
-    environment:
-      - CLI_ARGS=--optimized-turbo
-      - USE_STREAMLIT=0
-
-  sygil-sl:
-    <<: *sygil
-    profiles: [ "sygil-sl" ]
-    environment:
-      - USE_STREAMLIT=1
diff --git a/docker-compose.yml b/docker-compose.yml
index 683dc09ed..f1b5164da 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,63 +1,109 @@
 version: '3.9'
 
-x-base_service: &base_service
-  ports:
-    - "7860:7860"
-  volumes:
-    - &v1 ./data:/data
-    - &v2 ./output:/output
-  stop_signal: SIGINT
-  group_add:
-    - video
-  devices:
-    - "/dev/dri"
-    - "/dev/kfd"
+x-base_service:
+  &base_service
+  ports:
+    - "7860:7860"
+  volumes:
+    - &v1 ./data:/data
+    - &v2 ./output:/output
+  stop_signal: SIGINT
+  deploy:
+    resources:
+      reservations:
+        devices:
+          - driver: nvidia
+            device_ids: [ '0' ]
+            capabilities: [ gpu ]
+
+x-base_service_amd:
+  &base_service_amd
+  ports:
+    - "7860:7860"
+  volumes:
+    - &v1 ./data:/data
+    - &v2 ./output:/output
+  stop_signal: SIGINT
+  group_add:
+    - video
+  devices:
+    - "/dev/dri"
+    - "/dev/kfd"
 
 name: webui-docker
 
 services:
   download:
     build: ./services/download/
-    profiles: ["download"]
+    profiles: [ "download" ]
     volumes:
       - *v1
 
-  auto: &automatic
+  auto:
+    &automatic
     <<: *base_service
-    profiles: ["auto"]
+    profiles: [ "auto" ]
     build: ./services/AUTOMATIC1111
     image: sd-auto:48
     environment:
       - CLI_ARGS=--allow-code --medvram --enable-insecure-extension-access --api
 
+  auto-amd:
+    &automatic_amd
+    <<: *base_service_amd
+    profiles: [ "auto-amd" ]
+    build: ./services/AUTOMATIC1111-AMD
+    image: sd-auto:48
+    environment:
+      - CLI_ARGS=--allow-code --medvram --no-half --precision full --enable-insecure-extension-access --api
+
   auto-cpu:
     <<: *automatic
-    profiles: ["auto-cpu"]
+    profiles: [ "auto-cpu" ]
     deploy: {}
     environment:
       - CLI_ARGS=--no-half --precision full --allow-code --enable-insecure-extension-access --api
 
   invoke:
     <<: *base_service
-    profiles: ["invoke"]
+    profiles: [ "invoke" ]
    build: ./services/invoke/
     image: sd-invoke:26
     environment:
       - PRELOAD=true
       - CLI_ARGS=
 
+  invoke-amd:
+    <<: *base_service_amd
+    profiles: [ "invoke-amd" ]
+    build: ./services/invoke-AMD/
+    image: sd-invoke:26
+    environment:
+      - PRELOAD=true
+      - CLI_ARGS=
+
-  sygil: &sygil
+  sygil:
+    &sygil
     <<: *base_service
-    profiles: ["sygil"]
+    profiles: [ "sygil" ]
     build: ./services/sygil/
     image: sd-sygil:16
     environment:
       - CLI_ARGS=--optimized-turbo
       - USE_STREAMLIT=0
 
+  sygil-amd:
+    &sygil_amd
+    <<: *base_service_amd
+    profiles: [ "sygil-amd" ]
+    build: ./services/sygil-AMD/
+    image: sd-sygil:16
+    environment:
+      - CLI_ARGS=--optimized-turbo
+      - USE_STREAMLIT=0
+
   sygil-sl:
     <<: *sygil
-    profiles: ["sygil-sl"]
+    profiles: [ "sygil-sl" ]
     environment:
       - USE_STREAMLIT=1
diff --git a/services/AUTOMATIC1111-AMD/Dockerfile b/services/AUTOMATIC1111-AMD/Dockerfile
index 716ec805d..5903c44d4 100644
--- a/services/AUTOMATIC1111-AMD/Dockerfile
+++ b/services/AUTOMATIC1111-AMD/Dockerfile
@@ -30,7 +30,7 @@ SHELL ["/bin/bash", "-ceuxo", "pipefail"]
 
 ENV DEBIAN_FRONTEND=noninteractive PIP_PREFER_BINARY=1
 
-RUN PIP_NO_CACHE_DIR=1 pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2 
+RUN PIP_NO_CACHE_DIR=1 pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2
 
 RUN apt-get update && apt install fonts-dejavu-core rsync git jq moreutils bash -y && apt-get clean
 
@@ -86,5 +86,8 @@ WORKDIR ${ROOT}
 ENV CLI_ARGS=""
 EXPOSE 7860
 ENTRYPOINT ["/docker/entrypoint.sh"]
+
+# Depending on your actual GPU you may want to comment this out.
+# Without this you may get the error "hipErrorNoBinaryForGpu: Unable to find code object for all current devices!"
 ENV HSA_OVERRIDE_GFX_VERSION=10.3.0
 CMD python -u webui.py --listen --port 7860 ${CLI_ARGS}
diff --git a/services/invoke-AMD/Dockerfile b/services/invoke-AMD/Dockerfile
new file mode 100644
index 000000000..073582fb9
--- /dev/null
+++ b/services/invoke-AMD/Dockerfile
@@ -0,0 +1,71 @@
+# syntax=docker/dockerfile:1
+
+FROM alpine:3.17 as xformers
+RUN apk add --no-cache aria2
+RUN aria2c -x 5 --dir / --out wheel.whl 'https://github.com/AbdBarho/stable-diffusion-webui-docker/releases/download/5.0.0/xformers-0.0.17.dev449-cp310-cp310-manylinux2014_x86_64.whl'
+
+
+
+FROM python:3.10-slim
+SHELL ["/bin/bash", "-ceuxo", "pipefail"]
+
+ENV DEBIAN_FRONTEND=noninteractive PIP_EXISTS_ACTION=w PIP_PREFER_BINARY=1
+
+
+RUN --mount=type=cache,target=/root/.cache/pip pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2
+
+RUN apt-get update && apt-get install git -y && apt-get clean
+
+RUN git clone https://github.com/invoke-ai/InvokeAI.git /stable-diffusion
+
+WORKDIR /stable-diffusion
+
+RUN --mount=type=cache,target=/root/.cache/pip < req.txt
+pip install -r req.txt
+rm req.txt
+EOF
+
+
+# patch match:
+# https://github.com/invoke-ai/InvokeAI/blob/main/docs/installation/INSTALL_PATCHMATCH.md
+RUN <=4.24'
+
+# add info
+COPY . /docker/
+RUN <
+    .replace('<p>For help and advanced usage guides,', """
+<p>
+    Created using stable-diffusion-webui-docker.
+</p>
+<p>For help and advanced usage guides,
+""", 1)
+)
diff --git a/services/sygil-AMD/mount.sh b/services/sygil-AMD/mount.sh
new file mode 100755
index 000000000..cc0dc9dca
--- /dev/null
+++ b/services/sygil-AMD/mount.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+set -Eeuo pipefail
+
+declare -A MOUNTS
+
+ROOT=/stable-diffusion/src
+
+# cache
+MOUNTS["/root/.cache"]=/data/.cache
+# ui specific
+MOUNTS["${PWD}/models/realesrgan"]=/data/RealESRGAN
+MOUNTS["${PWD}/models/ldsr"]=/data/LDSR
+MOUNTS["${PWD}/models/custom"]=/data/StableDiffusion
+
+# hack
+MOUNTS["${PWD}/models/gfpgan/GFPGANv1.3.pth"]=/data/GFPGAN/GFPGANv1.4.pth
+MOUNTS["${PWD}/models/gfpgan/GFPGANv1.4.pth"]=/data/GFPGAN/GFPGANv1.4.pth
+MOUNTS["${PWD}/gfpgan/weights"]=/data/.cache
+
+
+for to_path in "${!MOUNTS[@]}"; do
+  set -Eeuo pipefail
+  from_path="${MOUNTS[${to_path}]}"
+  rm -rf "${to_path}"
+  mkdir -p "$(dirname "${to_path}")"
+  ln -sT "${from_path}" "${to_path}"
+  echo Mounted $(basename "${from_path}")
+done
+
+# streamlit config
+ln -sf /docker/userconfig_streamlit.yaml /stable-diffusion/configs/webui/userconfig_streamlit.yaml
diff --git a/services/sygil-AMD/run.sh b/services/sygil-AMD/run.sh
new file mode 100755
index 000000000..89f795992
--- /dev/null
+++ b/services/sygil-AMD/run.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -Eeuo pipefail
+
+echo "USE_STREAMLIT = ${USE_STREAMLIT}"
+if [ "${USE_STREAMLIT}" == "1" ]; then
+  python -u -m streamlit run scripts/webui_streamlit.py
+else
+  python3 -u scripts/webui.py --outdir /output --ckpt /data/StableDiffusion/v1-5-pruned-emaonly.ckpt ${CLI_ARGS}
+fi
diff --git a/services/sygil-AMD/userconfig_streamlit.yaml b/services/sygil-AMD/userconfig_streamlit.yaml
new file mode 100644
index 000000000..07a20afc7
--- /dev/null
+++ b/services/sygil-AMD/userconfig_streamlit.yaml
@@ -0,0 +1,11 @@
+# https://github.com/Sygil-Dev/sygil-webui/blob/master/configs/webui/webui_streamlit.yaml
+general:
+  version: 1.24.6
+  outdir: /output
+  default_model: "Stable Diffusion v1.5"
+  default_model_path: /data/StableDiffusion/v1-5-pruned-emaonly.ckpt
+  outdir_txt2img: /output/txt2img
+  outdir_img2img: /output/img2img
+  outdir_img2txt: /output/img2txt
+  optimized: True
+  optimized_turbo: True

From bf1c9e3332fb2d3c29cc735cf455434e93456368 Mon Sep 17 00:00:00 2001
From: Matthew Meyer
Date: Sat, 11 Mar 2023 20:51:09 -0600
Subject: [PATCH 4/4] Restored newline

---
 services/AUTOMATIC1111/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/services/AUTOMATIC1111/Dockerfile b/services/AUTOMATIC1111/Dockerfile
index 3cd919ff9..1fa178e44 100644
--- a/services/AUTOMATIC1111/Dockerfile
+++ b/services/AUTOMATIC1111/Dockerfile
@@ -94,4 +94,4 @@ WORKDIR ${ROOT}
 ENV CLI_ARGS=""
 EXPOSE 7860
 ENTRYPOINT ["/docker/entrypoint.sh"]
-CMD python -u webui.py --listen --port 7860 ${CLI_ARGS}
\ No newline at end of file
+CMD python -u webui.py --listen --port 7860 ${CLI_ARGS}
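
Usage sketch, not taken from the patches above: assuming Docker Compose v2, a ROCm-capable GPU exposed through /dev/kfd and /dev/dri, and the profile names this series adds to docker-compose.yml, the new AMD services would be started the same way the existing profiles are:

    # assumes the auto-amd / invoke-amd / sygil-amd profiles from this series are present
    docker compose --profile auto-amd up --build
    docker compose --profile invoke-amd up --build
    docker compose --profile sygil-amd up --build

If the card is not in the gfx10.3 family, HSA_OVERRIDE_GFX_VERSION=10.3.0 in services/AUTOMATIC1111-AMD/Dockerfile may need to be adjusted or removed, as the comment added in PATCH 3/4 points out.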