Skip to content

Commit

Permalink
Merge pull request #7 from cloud-py-api/appapi-2.5
Browse files Browse the repository at this point in the history
AMD 7900XTX support, smaller Docker Image size - [AppAPI 2.5]
  • Loading branch information
bigcat88 authored Apr 25, 2024
2 parents ff36ebb + 6108432 commit 62547ea
Show file tree
Hide file tree
Showing 11 changed files with 189 additions and 25 deletions.
1 change: 1 addition & 0 deletions .dockerignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
.installed_flag
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -93,3 +93,4 @@ MANIFEST
converted/

geckodriver.log
.installed_flag
4 changes: 2 additions & 2 deletions .run/NC (last).run.xml → .run/NC (28).run.xml
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="NC (last)" type="PythonConfigurationType" factoryName="Python">
<configuration default="false" name="NC (28)" type="PythonConfigurationType" factoryName="Python">
<module name="ai_image_generator_bot" />
<option name="ENV_FILES" value="" />
<option name="INTERPRETER_OPTIONS" value="" />
Expand All @@ -10,7 +10,7 @@
<env name="APP_PORT" value="9080" />
<env name="APP_SECRET" value="12345" />
<env name="APP_VERSION" value="1.0.0" />
<env name="NEXTCLOUD_URL" value="http://nextcloud.local" />
<env name="NEXTCLOUD_URL" value="http://stable28.local" />
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
Expand Down
32 changes: 32 additions & 0 deletions .run/NC (29).run.xml
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
<component name="ProjectRunConfigurationManager">
<configuration default="false" name="NC (29)" type="PythonConfigurationType" factoryName="Python">
<module name="ai_image_generator_bot" />
<option name="ENV_FILES" value="" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="APP_HOST" value="0.0.0.0" />
<env name="APP_ID" value="ai_image_generator_bot" />
<env name="APP_PORT" value="9080" />
<env name="APP_SECRET" value="12345" />
<env name="APP_VERSION" value="1.0.0" />
<env name="NEXTCLOUD_URL" value="http://stable29.local" />
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="SDK_NAME" value="Python 3.10 (ai_image_generator_bot)" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/lib" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/lib/main.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
</component>
5 changes: 4 additions & 1 deletion Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,11 @@ ADD j[s] /app/js
ADD l10[n] /app/l10n
ADD li[b] /app/lib

COPY --chmod=775 healthcheck.sh /

RUN \
python3 -m pip install -r requirements.txt && rm -rf ~/.cache && rm requirements.txt

WORKDIR /app/lib
ENTRYPOINT ["python3", "main.py"]
ENTRYPOINT ["python3", "hw_install.py", "main.py"]
HEALTHCHECK --interval=2s --timeout=2s --retries=300 CMD /healthcheck.sh
48 changes: 34 additions & 14 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -9,42 +9,62 @@ help:
@echo " "
@echo " build-push build image and upload to ghcr.io"
@echo " "
@echo " run install AIImageGeneratorBot for Nextcloud Last"
@echo " run27 install AIImageGeneratorBot for Nextcloud 27"
@echo " run28 install AIImageGeneratorBot for Nextcloud 28"
@echo " run29 install AIImageGeneratorBot for Nextcloud 29"
@echo " "
@echo " For development of this example use PyCharm run configurations. Development is always set for last Nextcloud."
@echo " First run 'AIImageGeneratorBot' and then 'make registerXX', after that you can use/debug/develop it and easy test."
@echo " "
@echo " register perform registration of running 'AIImageGeneratorBot' into the 'manual_install' deploy daemon."
@echo " register27 perform registration of running 'AIImageGeneratorBot' into the 'manual_install' deploy daemon."
@echo " register28 perform registration of running 'AIImageGeneratorBot' into the 'manual_install' deploy daemon."
@echo " register29 perform registration of running 'AIImageGeneratorBot' into the 'manual_install' deploy daemon."

.PHONY: build-push
build-push:
docker login ghcr.io
docker buildx build --push --platform linux/arm64/v8,linux/amd64 --tag ghcr.io/cloud-py-api/ai_image_generator_bot:2.0.0 --tag ghcr.io/cloud-py-api/ai_image_generator_bot:latest .
docker buildx build --push --platform linux/arm64/v8,linux/amd64 --tag ghcr.io/cloud-py-api/ai_image_generator_bot:2.1.0 .

.PHONY: run
run:
docker exec master-nextcloud-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
docker exec master-nextcloud-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot --force-scopes \
--info-xml https://raw.githubusercontent.com/cloud-py-api/ai_image_generator_bot/main/appinfo/info.xml
.PHONY: build-push-latest
build-push-latest:
docker login ghcr.io
docker buildx build --push --platform linux/arm64/v8,linux/amd64 --tag ghcr.io/cloud-py-api/ai_image_generator_bot:latest .

.PHONY: run27
run27:
docker exec master-stable27-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
docker exec master-stable27-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot --force-scopes \
--info-xml https://raw.githubusercontent.com/cloud-py-api/ai_image_generator_bot/main/appinfo/info.xml

.PHONY: register
register:
docker exec master-nextcloud-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
docker exec master-nextcloud-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot manual_install --json-info \
"{\"id\":\"ai_image_generator_bot\",\"name\":\"AIImageGeneratorBot\",\"daemon_config_name\":\"manual_install\",\"version\":\"1.0.0\",\"secret\":\"12345\",\"port\":9080,\"scopes\":[\"TALK\", \"TALK_BOT\", \"FILES\", \"FILES_SHARING\"],\"system\":1}" \
--force-scopes --wait-finish
.PHONY: run28
run28:
docker exec master-stable28-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
docker exec master-stable28-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot --force-scopes \
--info-xml https://raw.githubusercontent.com/cloud-py-api/ai_image_generator_bot/main/appinfo/info.xml

.PHONY: run29
run29:
docker exec master-stable29-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
docker exec master-stable29-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot --force-scopes \
--info-xml https://raw.githubusercontent.com/cloud-py-api/ai_image_generator_bot/main/appinfo/info.xml

.PHONY: register27
register27:
docker exec master-stable27-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
docker exec master-stable27-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot manual_install --json-info \
"{\"id\":\"ai_image_generator_bot\",\"name\":\"AIImageGeneratorBot\",\"daemon_config_name\":\"manual_install\",\"version\":\"1.0.0\",\"secret\":\"12345\",\"port\":9080,\"scopes\":[\"TALK\", \"TALK_BOT\", \"FILES\", \"FILES_SHARING\"],\"system\":1}" \
--force-scopes --wait-finish

.PHONY: register28
register28:
docker exec master-stable28-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
docker exec master-stable28-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot manual_install --json-info \
"{\"id\":\"ai_image_generator_bot\",\"name\":\"AIImageGeneratorBot\",\"daemon_config_name\":\"manual_install\",\"version\":\"1.0.0\",\"secret\":\"12345\",\"port\":9080,\"scopes\":[\"TALK\", \"TALK_BOT\", \"FILES\", \"FILES_SHARING\"],\"system\":1}" \
--force-scopes --wait-finish

.PHONY: register29
register29:
docker exec master-stable29-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
docker exec master-stable29-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot manual_install --json-info \
"{\"id\":\"ai_image_generator_bot\",\"name\":\"AIImageGeneratorBot\",\"daemon_config_name\":\"manual_install\",\"version\":\"1.0.0\",\"secret\":\"12345\",\"port\":9080,\"scopes\":[\"TALK\", \"TALK_BOT\", \"FILES\", \"FILES_SHARING\"],\"system\":1}" \
--force-scopes --wait-finish
6 changes: 6 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -29,3 +29,9 @@ Uses [SDXL-Turbo](https://huggingface.co/stabilityai/sdxl-turbo) for fast image
## State of support

The project is being developed in personal and free time, any ideas or pull requests are welcome.

*Note: We understand that the model used here is quite old and cannot compare with the new SDXL-Lightning*

*We are currently working on a new much more advanced image generation project which you can find here: [Visionatrix](https://github.com/Visionatrix/Visionatrix)*

*If this is of interest to the Nextcloud community, we can try to adapt Visionatrix either as an Image Provider for Nextcloud 30 or simply as an AppAPI Nextcloud application.*
16 changes: 11 additions & 5 deletions appinfo/info.xml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
<summary>Stable Diffusion Talk Bot</summary>
<description>
<![CDATA[
**Requires [`AppAPI`](https://github.com/cloud-py-api/app_api) to work.**
**Requires [`AppAPI`](https://github.com/cloud-py-api/app_api) version 2.5.0+ to work.**
The AI model used by this application requires **~9 gigabytes** of video memory.
Expand All @@ -15,11 +15,17 @@ If the application is running on a CPU, **14 to 18 gigabytes** of system memory
[`AI Model`](https://huggingface.co/stabilityai/sdxl-turbo) is loaded into memory on the first request and remains in it to quickly process further requests.
This is not an example, this is a ready-to-use application, just enable the bot in the conversation, and type:
After installing, just enable the bot in the conversation, and type:
`@image cinematic portrait of fluffy cat with black eyes`
*Note: We understand that the model used here is quite old and cannot compare with the new SDXL-Lightning*
*We are currently working on a new `much more advanced` image generation project which you can find here: [`Visionatrix`](https://github.com/Visionatrix/Visionatrix)*
*If this is of interest to the Nextcloud community, we can try to adapt Visionatrix either as an Image Provider for Nextcloud 30 or simply as an AppAPI Nextcloud application.*
]]></description>
<version>2.0.1</version>
<version>2.1.0</version>
<licence>MIT</licence>
<author mail="andrey18106x@gmail.com" homepage="https://github.com/andrey18106">Andrey Borysenko</author>
<author mail="bigcat88@icloud.com" homepage="https://github.com/bigcat88">Alexander Piskun</author>
Expand All @@ -35,13 +41,13 @@ This is not an example, this is a ready-to-use application, just enable the bot
<bugs>https://github.com/cloud-py-api/ai_image_generator_bot/issues</bugs>
<repository type="git">https://github.com/cloud-py-api/ai_image_generator_bot</repository>
<dependencies>
<nextcloud min-version="27" max-version="29"/>
<nextcloud min-version="27" max-version="30"/>
</dependencies>
<external-app>
<docker-install>
<registry>ghcr.io</registry>
<image>cloud-py-api/ai_image_generator_bot</image>
<image-tag>2.0.0</image-tag>
<image-tag>2.1.0</image-tag>
</docker-install>
<scopes>
<value>TALK</value>
Expand Down
7 changes: 7 additions & 0 deletions healthcheck.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#!/bin/bash
# Container health check: reports healthy only after hw_install.py has
# finished the PyTorch installation and created the /.installed_flag marker.
# The script's exit status is that of the test itself: 0 when the flag
# exists, 1 otherwise.
test -f /.installed_flag
87 changes: 87 additions & 0 deletions lib/hw_install.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
"""Script to install PyTorch based on "COMPUTE_DEVICE" environment variable.
Possible values: "cuda", "rocm", "cpu"
Advice: the "pciutils" package should be installed inside the container;
in rare cases it is needed to perform hardware autodetection.
If an additional argument is specified, the script treats it as the file name of the ExApp entry point.
Remember to adjust this script with anything else your ExApp needs, either here or separately.
Copyright (c) 2024 Alexander Piskun, Nextcloud
"""

# pylint: disable=consider-using-with

import os
import subprocess
import sys
import typing
from pathlib import Path


def hw_autodetect() -> typing.Literal["cuda", "rocm", "cpu"]:
    """Detect the GPU vendor by scanning ``lspci`` output for VGA devices.

    Returns:
        "cuda" if an NVIDIA VGA device is listed, "rocm" for an AMD one,
        and "cpu" when no supported GPU is found or ``lspci`` is
        unavailable or fails.
    """
    try:
        result = subprocess.run(
            ["lspci"],  # noqa: S603 S607 - fixed command, no user input, no shell needed
            capture_output=True,
            text=True,
            check=False,
        )
    except OSError:
        # "lspci" binary is missing ("pciutils" not installed) - fall back to CPU,
        # matching the old shell=True behavior where the shell exited non-zero.
        print("hw_install: Error running lspci:", flush=True)
        return "cpu"
    if result.returncode != 0:
        print("hw_install: Error running lspci:", flush=True)
        print(result.stderr, flush=True)
        return "cpu"
    for line in result.stdout.splitlines():
        # Only display controllers are relevant; the first recognized vendor wins.
        if "VGA" in line:
            if "NVIDIA" in line:
                return "cuda"
            if "AMD" in line:
                return "rocm"
    return "cpu"


def hw_install():
    """Install PyTorch wheels matching the compute device.

    The device comes from the ``COMPUTE_DEVICE`` environment variable
    ("cuda", "rocm" or "cpu"); when unset, it is autodetected via
    :func:`hw_autodetect`. Installation output is streamed to the
    container's stdout/stderr; failures are not raised here.
    """
    accelerator = os.environ.get("COMPUTE_DEVICE", "") or hw_autodetect()

    # Non-default wheel indexes; "cuda" (and anything unrecognized) uses PyPI defaults.
    index_urls = {
        "cpu": "https://download.pytorch.org/whl/cpu",
        "rocm": "https://download.pytorch.org/whl/rocm6.0",
    }
    packages = ["torch", "torchvision", "torchaudio"]
    if accelerator in index_urls:
        packages += ["--index-url", index_urls[accelerator]]

    subprocess.run(
        [sys.executable, "-m", "pip", "install", "--force-reinstall", *packages],  # noqa: S603
        check=False,
        stdin=sys.stdin,
        stdout=sys.stdout,
        stderr=sys.stderr,
    )


if __name__ == "__main__":
    # Install PyTorch only once per container lifetime: the marker file on the
    # root filesystem records that installation already ran.
    flag_file = Path("/.installed_flag")
    if not flag_file.exists():
        print("hw_install: perform installation", flush=True)
        hw_install()
        # NOTE(review): the flag is created even if pip failed above, so a
        # broken install will not be retried on restart - confirm intended.
        flag_file.touch()
    if len(sys.argv) > 1:
        # Chain into the ExApp entry point passed as the first CLI argument,
        # forwarding the standard streams and propagating its exit code.
        print(f"hw_install: executing additional script: {sys.argv[1]}", flush=True)
        result = subprocess.run(
            [sys.executable, sys.argv[1]],  # noqa: S603
            stdin=sys.stdin,
            stdout=sys.stdout,
            stderr=sys.stderr,
            check=False,
        )
        sys.exit(result.returncode)
    print("hw_install: exit", flush=True)
    sys.exit(0)
7 changes: 4 additions & 3 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
nc_py_api[app]>=0.10.0
--extra-index-url https://download.pytorch.org/whl/cpu
torch
torchvision
nc_py_api[app]>=0.12.0
diffusers>=0.23.1
transformers>=4.36.1
accelerate
huggingface_hub
torch
torchvision

0 comments on commit 62547ea

Please sign in to comment.