Merged
Changes from all commits
Commits
250 commits
6564775
Initial commit
arisha07 Jul 22, 2022
c4cf702
Initial commit of gimp-ml with semantic seg powered by OV
arisha07 Jul 22, 2022
03d67f5
Added OV based deblur plugin
arisha07 Jul 26, 2022
5f4cd3b
Added super-resolution OV based plugin supports esrgan & sr_1033
arisha07 Aug 2, 2022
30029f5
Initial version for OV Inpainting
arisha07 Aug 16, 2022
fbee0f6
Replaced checkboxes with drop down list & added edsr model support to…
arisha07 Aug 24, 2022
a08e2ce
Added ov style transfer filter
arisha07 Sep 2, 2022
7f77444
Super-res changes
arisha07 Sep 19, 2022
8eff08e
Adding weights folder
arisha07 Sep 19, 2022
27bd67b
Added initial version of stable diffusion
arisha07 Oct 11, 2022
bddcecb
automated stable diffusion model paths
arisha07 Oct 12, 2022
1fcb7e3
Initial commit of gimp-ov
arisha07 Oct 13, 2022
d56e6a8
Removed unwanted files
arisha07 Oct 13, 2022
6f0f96b
Removed unwanted files
arisha07 Oct 13, 2022
ff8bd71
Fixed semseg and removed unwanted code
arisha07 Oct 13, 2022
cccaa83
Removed unwanted dependencies
arisha07 Oct 19, 2022
4e62710
Added screenshot and removed unwanted files
arisha07 Oct 20, 2022
a46ec14
Updated Readme
arisha07 Oct 20, 2022
ada92dd
Updated Readme
arisha07 Oct 20, 2022
00a9f13
Updated Readme
arisha07 Oct 27, 2022
104fde4
Updated Readme
arisha07 Oct 27, 2022
15ca69a
Updated Readme
arisha07 Oct 27, 2022
786f26f
Updated Readme
arisha07 Oct 28, 2022
97254a0
Updated Readme
arisha07 Oct 28, 2022
8f56a83
Delete LICENSE
arisha07 Nov 7, 2022
ca41082
Added thirdparty license and copyrights notices
arisha07 Nov 7, 2022
7e59c76
Create LICENSE.md
arisha07 Nov 7, 2022
96a480a
Updated Readme
arisha07 Nov 9, 2022
e035fa7
Delete testscases/.ipynb_checkpoints directory
arisha07 Nov 9, 2022
60777df
Updated Readme
arisha07 Nov 9, 2022
61d6d2b
Added missing gdown during install
arisha07 Nov 10, 2022
7633328
Added missing gdown during install
arisha07 Nov 10, 2022
b51220c
Changed license to Apache
arisha07 Nov 17, 2022
17d35ea
Merge pull request #1 from intel-sandbox/fix_branch
arisha07 Nov 17, 2022
2e57c78
Removed VPU device from mainline and deleted deblur plugin
arisha07 Nov 21, 2022
a9e3615
Merge pull request #2 from intel-sandbox/fix_branch
arisha07 Nov 22, 2022
115ea74
Merge pull request #3 from intel-sandbox/fix_branch
arisha07 Nov 22, 2022
364a1bf
Removed unused function and files after checkmarx scan
arisha07 Nov 22, 2022
b72361e
Commented out the exc_info to fix the Information_Exposure issue in …
arisha07 Nov 28, 2022
c91bac7
Merge pull request #4 from intel-sandbox/fix_branch
arisha07 Nov 28, 2022
3403b9c
Replaced gimpov from the entire code base
arisha07 Dec 1, 2022
81c125b
Updated the GUI screen shot for readme
arisha07 Dec 1, 2022
20ea3b1
Renamed gimpov folder
arisha07 Dec 1, 2022
d2123fc
Replaced GIMP-OV to GIMP-OpenVINO & gimpopenvino
arisha07 Dec 1, 2022
a298a7b
Removed md5 and html related blacklisted items for bandit
arisha07 Dec 8, 2022
187b901
Removed use of pickle from the plugin infrastructure
arisha07 Dec 9, 2022
0b3d9d7
Replace readlines from text with json format for semantic seg
arisha07 Dec 9, 2022
c20dda9
Replaced readlines method with json
arisha07 Dec 9, 2022
79cb7de
Updated OpenVINO to 2022.3.0
arisha07 Jan 23, 2023
90be45c
Updated transformer version
arisha07 Jan 24, 2023
95634e6
Added copyright header to all source files
arisha07 Feb 1, 2023
22bd8e6
staging for final upstream part1
arisha07 Feb 8, 2023
484c6f7
final staging part2
arisha07 Feb 8, 2023
8f2a84d
Modify readme
arisha07 Feb 8, 2023
8d53a41
modify readme
arisha07 Feb 8, 2023
37a1747
Added more gifs
arisha07 Feb 8, 2023
a697415
replaced the links
arisha07 Feb 8, 2023
7455b69
updated readme
arisha07 Feb 8, 2023
f8e96e5
updated readme
arisha07 Feb 8, 2023
98941ee
updated readme
arisha07 Feb 8, 2023
ed60d95
updated sem-seg gif
arisha07 Feb 9, 2023
3c128f1
updated readme
arisha07 Feb 9, 2023
a323588
updated readme
arisha07 Feb 9, 2023
5330064
updated readme
arisha07 Feb 9, 2023
2e0d61e
updated readme
arisha07 Feb 9, 2023
9d18c6d
Added new web
arisha07 Feb 9, 2023
c9aa2fe
Final push
arisha07 Feb 9, 2023
af65e3d
Merge pull request #5 from intel-sandbox/openvino-gimp
RyanMetcalfeInt8 Feb 10, 2023
8cd2b4b
Added in-painting to main branch of open-source
arisha07 Jun 23, 2023
2a14a41
Added model conversion support for SD1.5 Inpainting
arisha07 Jun 23, 2023
98b8e1a
Added initial image from canvas support
arisha07 Jun 24, 2023
7332898
Modified mask creation step, fixed bug in load model, updated readme
arisha07 Jun 27, 2023
1b8acdf
small readme update
arisha07 Jun 27, 2023
4565e0e
small readme update
arisha07 Jun 27, 2023
e193974
small readme update
arisha07 Jun 27, 2023
fd62ae6
small readme update
arisha07 Jun 27, 2023
c8b1f5a
Added controlnet-openpose
arisha07 Aug 3, 2023
dac5db7
First version of updated internal version with external & internal co…
gblong1 Aug 11, 2023
ce68ee4
Optimized pipeline - moving infer_request.wait()
gblong1 Aug 15, 2023
fc95446
updated pipeline to N=2 controlnet-pose IR
gblong1 Aug 15, 2023
85491bc
Added Super-Res workflow in SD for acer
gblong1 Aug 15, 2023
120d858
Latest optimized pipeline with share_outputs = True etc.
gblong1 Aug 16, 2023
5aac575
updated pipeline to use esrgan and set SR resolution to landscape 2240…
gblong1 Aug 18, 2023
98764a7
keeping final cache.png image
gblong1 Aug 18, 2023
59ec9f2
updated to run esrgan on GPU
gblong1 Aug 21, 2023
2976f3a
Added inpainting internal version and updated device list to show NPU…
gblong1 Aug 28, 2023
ffb235f
updated transformer version and removed unwanted debug prints
gblong1 Aug 29, 2023
823a15f
Merge pull request #7 from intel-sandbox/main_temp
arisha07 Aug 29, 2023
416ffa8
Update stable-diffusion-ov-server.py
luke-lin-vmc Sep 4, 2023
a05eb7f
Merge pull request #9 from luke-lin-vmc/innovation_v2_John
arisha07 Sep 5, 2023
858d43e
Added model caching for SR model to reduce the load time
gblong1 Sep 5, 2023
da444ba
Added model caching for SR model to reduce the load time
gblong1 Sep 6, 2023
d7cad78
Added padding as input image is square and output landscape
gblong1 Sep 12, 2023
2d702e1
fixed a bug in SR standalone plugin
gblong1 Sep 13, 2023
90b31ac
Fixed a bug in style transfer and semseg
arisha07 Sep 27, 2023
a3a382a
fixed progress bar update in controlnet-pose, added model caching for…
arisha07 Sep 27, 2023
468b0a8
Merge pull request #10 from intel-sandbox/main_temp
arisha07 Sep 27, 2023
1c6bbc4
Updates for VPUX/VPU to NPU transition
gblong1 Oct 2, 2023
db30a37
Removed direct SR support from SD gui for release
arisha07 Oct 9, 2023
6dd27d5
Added controlnet-canny internal blob support
arisha07 Oct 10, 2023
d8e6cf6
Updates to the path specification to be cross platform compatible
gblong1 Oct 18, 2023
5bb2489
Updating paths to be cross platform compatible
gblong1 Oct 18, 2023
97798d4
Update to OV 2023.0.2
gblong1 Oct 18, 2023
d4e02b4
Cleanup of _NEW and _org files
gblong1 Oct 18, 2023
10ac1a1
Combined StableDiffusion engines into one module
gblong1 Oct 18, 2023
d5b5b84
Run Inference becomes Generate, Internal becomes Advanced.
gblong1 Oct 24, 2023
9265f90
Replaced model conversion with model download from hugging face, some…
arisha07 Nov 8, 2023
27cea49
fixed a bug in repo-id for HF download
arisha07 Nov 8, 2023
af37fa1
bug fix around shutil.copytree
arisha07 Nov 8, 2023
8a82800
uncomment the delete HF model cache
arisha07 Nov 8, 2023
d66a804
Fixed few more bugs
arisha07 Nov 8, 2023
4a40275
Fixed a typo
arisha07 Nov 8, 2023
709280b
Removed UniPCM scheduler from local file for all batch-size 2 controlnet
arisha07 Nov 8, 2023
6179810
Fixed corrupt image issue with landscape and portrait models:
arisha07 Nov 9, 2023
6df53d5
Moved NPU to INT8 and added LCM
gblong1 Nov 9, 2023
6e40f50
Merge branch 'release_v1' of https://github.com/intel-sandbox/GIMP-ML…
gblong1 Nov 9, 2023
5219c92
Updated import of diffusion pipeline for diffusers 0.23.0
gblong1 Nov 14, 2023
76fe008
Initial checkin of working LCM
gblong1 Nov 14, 2023
ec79c25
Updated to OV 2023.1.0 and diffusers 0.23.0. Added accelerate
gblong1 Nov 14, 2023
e429ef5
removed some debug statements.
gblong1 Nov 15, 2023
00250fb
Moving install to use requirements.txt to ease linux/windows parity
gblong1 Nov 18, 2023
6f67661
Updates for the UI to hide non-needed elements for LCM
gblong1 Nov 18, 2023
4ffd387
Removed old class, other LCM updates
gblong1 Nov 18, 2023
f0e7e5f
Adding LCM Scheduler and removing unneeded prints.
gblong1 Nov 20, 2023
bcd5ec0
Updated Latent_Consistency to SD_1.5_lcm
gblong1 Nov 20, 2023
3f6cb85
Removed HF pull of scheduler for LCM. Commented out scheduler log print
gblong1 Nov 21, 2023
4820688
updated LCM model dir to be in square_lcm
gblong1 Nov 21, 2023
ecfaf57
added lcm in model download
arisha07 Nov 21, 2023
57800f4
Merge branch 'main' into release_v1
arisha07 Nov 21, 2023
2db7bd9
Merge pull request #12 from intel-sandbox/release_v1
arisha07 Nov 21, 2023
ab3a542
update tensorflow version for scan issue
arisha07 Nov 21, 2023
157279f
Merge pull request #13 from intel-sandbox/release_v1
arisha07 Nov 21, 2023
6afbbf3
update onnx version for scan issue
arisha07 Nov 21, 2023
f88a005
Merge pull request #14 from intel-sandbox/release_v1
arisha07 Nov 21, 2023
108439c
update README
arisha07 Nov 21, 2023
ea7fef3
Fixed tabs v spaces
gblong1 Nov 21, 2023
9a460a0
Added model_setup to Linux install script.
gblong1 Nov 21, 2023
8913240
Updated to be linux compatible. added functions, and put back the "al…
gblong1 Nov 22, 2023
a8df1af
Merge pull request #15 from intel-sandbox/model_setup_fixes_for_linux
arisha07 Nov 22, 2023
22c6ee6
Merge pull request #16 from intel-sandbox/release_v1
arisha07 Nov 28, 2023
caee6ac
Update README.md
gblong1 Nov 29, 2023
3be765f
Merge pull request #17 from intel-sandbox/update_readme_for_Linux
RyanMetcalfeInt8 Nov 30, 2023
ad1d079
Update model_setup.py to remove token
gblong1 Nov 30, 2023
a61437d
Merge pull request #18 from intel-sandbox/remove_HF_Token
RyanMetcalfeInt8 Nov 30, 2023
ec97adc
Fixed device list parsing due to bug found in systems with dGPU
gblong1 Dec 5, 2023
6d8c39e
Merge pull request #19 from intel-sandbox/device_list_fix
RyanMetcalfeInt8 Dec 5, 2023
0678351
updated to match public repo
arisha07 Mar 4, 2024
70f498f
updated to match public repo
arisha07 Mar 4, 2024
e3d8815
Initial commit
arisha07 Jul 22, 2022
8182fe1
Rebase to http://github.com/intel/openvino-ai-plugins-gimp
gblong1 May 2, 2024
97abc98
removed gimpml dir, not needed.
gblong1 May 3, 2024
e29f439
Merge branch 'rebase_with_opensource'
gblong1 May 3, 2024
3eadd60
Merge branch 'main' into rebase_with_opensource
gblong1 May 3, 2024
440a23c
Merge pull request #20 from intel-sandbox/rebase_with_opensource
arisha07 May 3, 2024
7431dac
Fixed installer location bug.
gblong1 May 4, 2024
c30d867
initial set of changes to support config files for models to determine
gblong1 May 4, 2024
8021d8b
Adding driver detection for NPU
gblong1 May 6, 2024
26d4a53
more updates for powermodes
gblong1 May 6, 2024
be8421c
fixed few issues, including revision
gblong1 May 6, 2024
9f78b4f
Adding LCM under my idsid
gblong1 May 8, 2024
5268dde
Updating LCM to use config.json and load blobs for NPU
gblong1 May 8, 2024
b91b87a
Updating setup to point to my private repo for testing.
gblong1 May 8, 2024
3179fab
Removed blob and swap flags, as they are no longer needed.
gblong1 May 8, 2024
d10e44f
Removed blob and swap flags, as they are no longer needed.
gblong1 May 9, 2024
c699c2d
Updated links in README.md
gblong1 May 9, 2024
32b3148
Removing current directory echo as not needed.
gblong1 May 13, 2024
b7d833a
Initial checkin of SD3 support
gblong1 May 13, 2024
8abb07a
Update power mode support checking. Still needs work, though...
gblong1 May 14, 2024
395a307
Fixing typo in MTL NPU arch naming
gblong1 May 22, 2024
100734b
Adding config file and code to download models from the revision
ymangukiya May 22, 2024
889caa1
Merge pull request #21 from intel-sandbox/updates_for_lnl_from_linux
gblong1 May 24, 2024
dad3f26
Updating model setup for LNL.
gblong1 May 24, 2024
c6c3ede
SAI Logo adding for SD3.0
gblong1 May 31, 2024
54e35d4
Merge pull request #22 from intel-sandbox/config_file_change_copy
gblong1 May 31, 2024
7ab626d
Merge branch 'main' into updates_for_lnl
gblong1 Jun 3, 2024
c680307
Bug fixes for Linux.
gblong1 Jun 4, 2024
10d199f
Merge pull request #23 from intel-sandbox/linux_config_bugfix
gblong1 Jun 4, 2024
250b129
Added default for config.json, and better dGPU handling
gblong1 Jun 4, 2024
cde12f4
trimming down plugin requirements
gblong1 Jun 5, 2024
2432f3c
Merge pull request #24 from intel-sandbox/json_config_update
arisha07 Jun 5, 2024
1224320
Pulling updates from opensource.
gblong1 Jun 6, 2024
65ee041
Rolled back SAI logo addition
gblong1 Jun 6, 2024
19ebb02
Moved spinner to avoid horizontal shifts in the UI. Moved the license…
gblong1 Jun 6, 2024
8ed5c02
Merge branch 'main' into updates_for_lnl
gblong1 Jun 6, 2024
7090c85
Update model_setup.py with spectral comment
gblong1 Jun 6, 2024
8c79db1
Update model_setup.py
gblong1 Jun 6, 2024
8a0ae77
Update model_setup.py
gblong1 Jun 6, 2024
759e7e8
Update model_setup.py
gblong1 Jun 6, 2024
c289fd1
Update model_setup.py
gblong1 Jun 6, 2024
e56b838
Update model_setup_config.json
gblong1 Jun 6, 2024
e05eb3a
fixed typo
gblong1 Jun 6, 2024
bce9a92
Removed repeated code.
gblong1 Jun 10, 2024
5df7d98
Merge pull request #26 from intel-sandbox/updates_for_lnl
gblong1 Jun 10, 2024
77d7320
Update for testing configs
gblong1 Jun 10, 2024
b20eec9
Fixed issues with platform detection.
gblong1 Jun 13, 2024
31cf094
fixing tab alignment
gblong1 Jun 13, 2024
85d798c
Fixing case issue with SD which was root cause of neg prompt still sh…
gblong1 Jun 13, 2024
fe783fe
update for merge to main.
gblong1 Jun 13, 2024
2c20ad3
Updating testcases
gblong1 Jun 13, 2024
4b4d77d
Merge pull request #27 from intel-sandbox/ww24_sync_to_main
gblong1 Jun 13, 2024
03870a5
removing unneeded files
gblong1 Jun 13, 2024
3f04cbf
Updated test case with all SD models.
gblong1 Jun 14, 2024
ab9ca61
Reduced redundant redundancies.
gblong1 Jun 14, 2024
5b05b22
Aligned around image instead of init_image to be consistent with othe…
gblong1 Jun 14, 2024
7a80509
Added more engine support
gblong1 Jun 14, 2024
ee1de8f
Added FP16 support for NPU
gblong1 Jun 15, 2024
6d165a4
fixing powermode ui logic
gblong1 Jun 15, 2024
56c4352
Merge pull request #28 from intel-sandbox/tc_update
gblong1 Jun 20, 2024
0bdaa97
Added Decoder info for LCM model.
gblong1 Jun 21, 2024
4bb464d
updating result naming to make comparing the same seed images easier.
gblong1 Jun 21, 2024
7e193af
Adding blob compilation to model setup
gblong1 Jun 23, 2024
7e6d226
remove key from model setup
gblong1 Jun 23, 2024
e0ebd6f
Removing chmod call for deprecated features.
gblong1 Jun 24, 2024
a385ec4
Merge pull request #30 from intel-sandbox/remove_deprecated_funcs_linux
gblong1 Jun 26, 2024
ee1b3d0
Merge pull request #29 from intel-sandbox/npu_support_for_sd_1.5_square
gblong1 Jun 26, 2024
3a13f17
update model_setup to compile 2T for Unet INT8
gblong1 Jun 26, 2024
8515ff4
Merge pull request #31 from intel-sandbox/npu_2t_config
gblong1 Jun 27, 2024
74970aa
add CLI options for model weights directory location
nschaper-intel Jun 28, 2024
35791c1
Merge pull request #34 from intel-sandbox/test_case_update
gblong1 Jun 29, 2024
e75a2f2
removed reliance on driver versions.
gblong1 Jul 8, 2024
c643db4
removed win32com which is no longer needed.
gblong1 Jul 8, 2024
a1e4feb
Merge pull request #35 from intel-sandbox/major_model_setup_update
gblong1 Jul 10, 2024
0c01582
Create README.md
gblong1 Jul 10, 2024
c67dfe9
Merge pull request #36 from intel-sandbox/add_sd_testcase_documenation
gblong1 Jul 10, 2024
5bb0565
Update README.md
gblong1 Jul 10, 2024
4e822f0
Model Loading and Compiling are multithreaded, mainly on SD1.5 INT8
spdumie Jul 22, 2024
c80f3c5
Adding multithreading for compile and load
gblong1 Jul 23, 2024
188183e
Merge pull request #37 from siwooparkintel/multithreaded
gblong1 Jul 23, 2024
5e85296
tweaks for perf
gblong1 Jul 23, 2024
f70d7b0
adding back in exit
gblong1 Jul 23, 2024
3c58e90
Fixing naming
gblong1 Jul 23, 2024
324dcdc
Syncing with internal snapshot
gblong1 Jul 23, 2024
7522e94
Syncing with internal snapshot
gblong1 Jul 24, 2024
0276c0f
Merge WW30 Snapshot into Main (#38)
gblong1 Jul 25, 2024
6654909
Correcting capitalization
gblong1 Jul 25, 2024
4583b0b
Npu turbo feature (#40)
gblong1 Aug 13, 2024
15dae78
Fixing default steps and min guidance.
gblong1 Aug 13, 2024
7040ca6
fixing NPU turbo to not fail if not enabled in the plugin
gblong1 Aug 13, 2024
1e02c9d
Adding flash SD3 and hardcoding dev version of OV (for now).
gblong1 Aug 13, 2024
ca53bca
Merge pull request #43 from intel-sandbox/issues_fix_41_42
gblong1 Aug 14, 2024
53dd559
Fix sys.path extend calls for openvino_utils/tools folder
RyanMetcalfeInt8 Aug 14, 2024
cffad13
Updating sd3 implementation from Alpha to sd3-medium
gblong1 Aug 15, 2024
f498763
Merge pull request #44 from intel-sandbox/fix_sys_paths
gblong1 Aug 15, 2024
83fe2d0
Merge branch 'main' into sd3_medium
gblong1 Aug 15, 2024
bf483d6
Merging updates to release staging branch
gblong1 Aug 15, 2024
044e9df
update default prompt
gblong1 Aug 19, 2024
b97ab1c
Set openvino release
gblong1 Aug 19, 2024
fe92879
Removing unused files.
gblong1 Aug 19, 2024
85ece51
freezing requirements.txt
gblong1 Aug 19, 2024
0d366e1
Merge branch 'v2.99-R3-staging' into v2.99-R3-staging
gblong1 Aug 20, 2024
@@ -11,6 +11,7 @@
# tokenizer
from transformers import CLIPTokenizer
import torch
import random

from diffusers import DiffusionPipeline
from diffusers.schedulers import (DDIMScheduler,
@@ -101,7 +102,17 @@ def __init__(self, model="runwayml/stable-diffusion-v1-5",

self.core = Core()
self.core.set_property({'CACHE_DIR': os.path.join(model, 'cache')})
if "NPU" in device and "3720" not in self.core.get_property('NPU', 'DEVICE_ARCHITECTURE'):
try:
self.core.set_property(properties={'NPU_TURBO': 'YES'},device_name='NPU')
print_npu_turbo_art()
except:
# do nothing. Turbo not enabled.
print("")

print("Loading models... ")



with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
futures = {
@@ -435,14 +446,25 @@ def __init__(

self.core = Core()
self.core.set_property({'CACHE_DIR': os.path.join(model, 'cache')})

self.batch_size = 2 if device[1] == device[2] and device[1] == "GPU" else 1

if "NPU" in device and "3720" not in self.core.get_property('NPU', 'DEVICE_ARCHITECTURE'):
try:
self.core.set_property(properties={'NPU_TURBO': 'YES'},device_name='NPU')
print_npu_turbo_art()
except:
# do nothing. Turbo not enabled.
print("")

try:
self.tokenizer = CLIPTokenizer.from_pretrained(model, local_files_only=True)
except Exception as e:
print("Local tokenizer not found. Attempting to download...")
self.tokenizer = self.download_tokenizer(tokenizer, model)


print("Loading models... ")

with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
text_future = executor.submit(self.load_model, model, "text_encoder", device[0])
vae_de_future = executor.submit(self.load_model, model, "vae_decoder", device[3])
@@ -452,6 +474,7 @@ def __init__(
unet_future = executor.submit(self.load_model, model, "unet_bs1", device[1])
unet_neg_future = executor.submit(self.load_model, model, "unet_bs1", device[2]) if device[1] != device[2] else None
else:
print("Loading BS2 model")
unet_future = executor.submit(self.load_model, model, "unet", device[1])
unet_neg_future = None

@@ -470,6 +493,8 @@ def __init__(
self._vae_d_output = self.vae_decoder.output(0)
self._vae_e_output = self.vae_encoder.output(0) if self.vae_encoder else None

self.unet_input_tensor_name = "sample" if 'sample' in self.unet.input(0).names else "latent_model_input"

if self.batch_size == 1:
self.infer_request = self.unet.create_infer_request()
self.infer_request_neg = self.unet_neg.create_infer_request()
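The constructors above compile several sub-models in parallel instead of one after another. A minimal sketch of that pattern, assuming a load_model helper that wraps core.compile_model as in the hunk just below (the function and dictionary names here are illustrative, not the plugin's exact code):

import concurrent.futures

def load_models_in_parallel(load_model, model_dir, device):
    # Compile the text encoder, UNet and VAE decoder concurrently; each
    # future resolves to a compiled model once its compilation finishes.
    with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
        futures = {
            "text_encoder": executor.submit(load_model, model_dir, "text_encoder", device[0]),
            "unet": executor.submit(load_model, model_dir, "unet", device[1]),
            "vae_decoder": executor.submit(load_model, model_dir, "vae_decoder", device[3]),
        }
        return {name: future.result() for name, future in futures.items()}

Because future.result() blocks until the corresponding compile is done, the constructor still returns fully loaded engines; only the wall-clock time spent compiling shrinks.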
@@ -490,7 +515,7 @@ def load_model(self, model, model_name, device):
return self.core.compile_model(os.path.join(model, f"{model_name}.xml"), device)

def set_dimensions(self):
latent_shape = self.unet.input("latent_model_input").shape
latent_shape = self.unet.input(self.unet_input_tensor_name).shape
if latent_shape[1] == 4:
self.height = latent_shape[2] * 8
self.width = latent_shape[3] * 8
@@ -583,21 +608,25 @@ def __call__(
latent_model_input_pos = latent_model_input
latent_model_input_neg = latent_model_input

if self.unet.input("latent_model_input").shape[1] != 4:
if self.unet.input(self.unet_input_tensor_name).shape[1] != 4:
try:
latent_model_input_pos = latent_model_input_pos.permute(0,2,3,1)
except:
latent_model_input_pos = latent_model_input_pos.transpose(0,2,3,1)

if self.unet_neg.input("latent_model_input").shape[1] != 4:
if self.unet_neg.input(self.unet_input_tensor_name).shape[1] != 4:
try:
latent_model_input_neg = latent_model_input_neg.permute(0,2,3,1)
except:
latent_model_input_neg = latent_model_input_neg.transpose(0,2,3,1)

input_tens_neg_dict = {"latent_model_input":latent_model_input_neg, "encoder_hidden_states": np.expand_dims(text_embeddings[0], axis=0), "t": np.expand_dims(np.float32(t), axis=0)}
input_tens_pos_dict = {"latent_model_input":latent_model_input_pos, "encoder_hidden_states": np.expand_dims(text_embeddings[1], axis=0), "t": np.expand_dims(np.float32(t), axis=0)}


if "sample" in self.unet_input_tensor_name:
input_tens_neg_dict = {"sample" : latent_model_input_neg, "encoder_hidden_states": np.expand_dims(text_embeddings[0], axis=0), "timestep": np.expand_dims(np.float32(t), axis=0)}
input_tens_pos_dict = {"sample" : latent_model_input_pos, "encoder_hidden_states": np.expand_dims(text_embeddings[1], axis=0), "timestep": np.expand_dims(np.float32(t), axis=0)}
else:
input_tens_neg_dict = {"latent_model_input" : latent_model_input_neg, "encoder_hidden_states": np.expand_dims(text_embeddings[0], axis=0), "t": np.expand_dims(np.float32(t), axis=0)}
input_tens_pos_dict = {"latent_model_input" : latent_model_input_pos, "encoder_hidden_states": np.expand_dims(text_embeddings[1], axis=0), "t": np.expand_dims(np.float32(t), axis=0)}

self.infer_request_neg.start_async(input_tens_neg_dict)
self.infer_request.start_async(input_tens_pos_dict)

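The hunk above makes the UNet feed dictionary tolerant of both IR naming conventions. A short, hypothetical helper showing the same idea in isolation (it assumes a compiled OpenVINO UNet whose first input exposes its tensor names, as used above):

import numpy as np

def build_unet_inputs(unet, latent_model_input, encoder_hidden_states, t):
    # Newer UNet IRs name their inputs "sample"/"timestep"; older exports
    # use "latent_model_input"/"t". Pick whichever the compiled model exposes.
    input_name = "sample" if "sample" in unet.input(0).names else "latent_model_input"
    timestep_name = "timestep" if input_name == "sample" else "t"
    return {
        input_name: latent_model_input,
        "encoder_hidden_states": np.expand_dims(encoder_hidden_states, axis=0),
        timestep_name: np.expand_dims(np.float32(t), axis=0),
    }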
@@ -681,8 +710,6 @@ def prepare_latents(self, image: PIL.Image.Image = None, latent_timestep: torch.

latents = scheduler.add_noise(torch.from_numpy(latents), torch.from_numpy(noise), latent_timestep).numpy()
return latents, meta




def postprocess_image(self, image: np.ndarray, meta: Dict):
@@ -765,6 +792,16 @@ def __init__(

self.core = Core()
self.core.set_property({'CACHE_DIR': os.path.join(model, 'cache')}) # adding caching to reduce init time

if "NPU" in device and "3720" not in self.core.get_property('NPU', 'DEVICE_ARCHITECTURE'):
try:
self.core.set_property(properties={'NPU_TURBO': 'YES'},device_name='NPU')
print_npu_turbo_art()
except:
# do nothing. Turbo not enabled.
print("")


with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
text_future = executor.submit(self.load_model, model, "text_encoder", device[0])
unet_future = executor.submit(self.load_model, model, "unet", device[1])
@@ -1401,7 +1438,41 @@ def prepare_image(

return image


def print_npu_turbo_art():
random_number = random.randint(1, 3)

if random_number == 1:
print(" ")
print(" ___ ___ ___ ___ ___ ___ ")
print(" /\ \ /\ \ /\ \ /\ \ /\ \ _____ /\ \ ")
print(" \:\ \ /::\ \ \:\ \ ___ \:\ \ /::\ \ /::\ \ /::\ \ ")
print(" \:\ \ /:/\:\__\ \:\ \ /\__\ \:\ \ /:/\:\__\ /:/\:\ \ /:/\:\ \ ")
print(" _____\:\ \ /:/ /:/ / ___ \:\ \ /:/ / ___ \:\ \ /:/ /:/ / /:/ /::\__\ /:/ \:\ \ ")
print(" /::::::::\__\ /:/_/:/ / /\ \ \:\__\ /:/__/ /\ \ \:\__\ /:/_/:/__/___ /:/_/:/\:|__| /:/__/ \:\__\ ")
print(" \:\~~\~~\/__/ \:\/:/ / \:\ \ /:/ / /::\ \ \:\ \ /:/ / \:\/:::::/ / \:\/:/ /:/ / \:\ \ /:/ / ")
print(" \:\ \ \::/__/ \:\ /:/ / /:/\:\ \ \:\ /:/ / \::/~~/~~~~ \::/_/:/ / \:\ /:/ / ")
print(" \:\ \ \:\ \ \:\/:/ / \/__\:\ \ \:\/:/ / \:\~~\ \:\/:/ / \:\/:/ / ")
print(" \:\__\ \:\__\ \::/ / \:\__\ \::/ / \:\__\ \::/ / \::/ / ")
print(" \/__/ \/__/ \/__/ \/__/ \/__/ \/__/ \/__/ \/__/ ")
print(" ")
elif random_number == 2:
print(" _ _ ____ _ _ _____ _ _ ____ ____ ___ ")
print("| \ | | | _ \ | | | | |_ _| | | | | | _ \ | __ ) / _ \ ")
print("| \| | | |_) | | | | | | | | | | | | |_) | | _ \ | | | |")
print("| |\ | | __/ | |_| | | | | |_| | | _ < | |_) | | |_| |")
print("|_| \_| |_| \___/ |_| \___/ |_| \_\ |____/ \___/ ")
print(" ")
else:
print("")
print(" ) ( ( ) ")
print(" ( /( )\ ) * ) )\ ) ( ( /( ")
print(" )\()) (()/( ( ` ) /( ( (()/( ( )\ )\()) ")
print("((_)\ /(_)) )\ ( )(_)) )\ /(_)) )((_) ((_)\ ")
print(" _((_) (_)) _ ((_) (_(_()) _ ((_) (_)) ((_)_ ((_) ")
print("| \| | | _ \ | | | | |_ _| | | | | | _ \ | _ ) / _ \ ")
print("| .` | | _/ | |_| | | | | |_| | | / | _ \ | (_) | ")
print("|_|\_| |_| \___/ |_| \___/ |_|_\ |___/ \___/ ")
print(" ")



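The same NPU turbo guard is added to each engine constructor above. A minimal sketch of that pattern on its own, reusing the Core property calls from the hunks above (the helper name is illustrative, and the openvino.runtime import is assumed):

from openvino.runtime import Core

def try_enable_npu_turbo(core, device):
    # Only attempt NPU_TURBO on NPUs newer than the 3720 architecture; if the
    # driver or runtime rejects the property, turbo simply stays off.
    if "NPU" in device and "3720" not in core.get_property("NPU", "DEVICE_ARCHITECTURE"):
        try:
            core.set_property(properties={"NPU_TURBO": "YES"}, device_name="NPU")
            return True
        except Exception:
            return False
    return False

In the diff this check runs once per constructor, right after the cache directory is set, and a successful attempt triggers print_npu_turbo_art().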
@@ -28,7 +28,7 @@
# utils
import cv2
import numpy as np
sys.path.extend([os.path.join(os.path.dirname(os.path.realpath(__file__)), "..","openvino_utils","tools")])
sys.path.extend([os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "tools")])
from tools_utils import get_weight_path


@@ -6,7 +6,7 @@
import sys

sys.path.extend([os.path.join(os.path.dirname(os.path.realpath(__file__)), "openvino_common")])
sys.path.extend([os.path.join(os.path.dirname(os.path.realpath(__file__)), "..","openvino_utils","tools")])
sys.path.extend([os.path.join(os.path.dirname(os.path.realpath(__file__)), "..","tools")])

import cv2
from stable_diffusion_run_ov import run
@@ -13,12 +13,14 @@
from pathlib import Path
import time
import random
import torch

from PIL import Image
import numpy as np
import psutil
import threading
sys.path.extend([os.path.join(os.path.dirname(os.path.realpath(__file__)), "openvino_common")])
sys.path.extend([os.path.join(os.path.dirname(os.path.realpath(__file__)), "..","openvino_utils","tools")])
sys.path.extend([os.path.join(os.path.dirname(os.path.realpath(__file__)), "..","tools")])
from tools_utils import get_weight_path


@@ -33,6 +35,19 @@
from models_ov.controlnet_openpose_advanced import ControlNetOpenPoseAdvanced
from models_ov.controlnet_cannyedge_advanced import ControlNetCannyEdgeAdvanced

from models_ov import (
stable_diffusion_engine,
stable_diffusion_engine_inpainting,
stable_diffusion_engine_inpainting_advanced,
stable_diffusion_3,
controlnet_openpose,
controlnet_canny_edge,
controlnet_scribble,
controlnet_openpose_advanced,
controlnet_cannyedge_advanced
)


HOST = "127.0.0.1" # Standard loopback interface address (localhost)
PORT = 65432 # Port to listen on (non-privileged ports are > 1023)

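For context, the GIMP plugins talk to this backend over the loopback socket defined above, and every payload ends up in handle_client_data. A stripped-down, hypothetical sketch of that accept loop (the real run() also resolves the model path and builds the engine and scheduler before serving):

import socket

HOST = "127.0.0.1"  # loopback only; the GIMP plugin connects from the same machine
PORT = 65432        # non-privileged port, must match the plugin side

def serve(handle_client_data):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((HOST, PORT))
        s.listen()
        conn, _addr = s.accept()
        with conn:
            while True:
                data = conn.recv(1024)
                if not data:
                    break
                handle_client_data(data, conn)

In the actual server the handler also receives the engine, model name, model path and scheduler, and a literal "kill" payload shuts the loop down.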
@@ -65,6 +80,7 @@ def run(model_name, available_devices, power_mode):
"sd_1.5_inpainting_int8": ["stable-diffusion-ov", "stable-diffusion-1.5", "inpainting_int8"],
"sd_2.1_square_base": ["stable-diffusion-ov", "stable-diffusion-2.1", "square_base"],
"sd_2.1_square": ["stable-diffusion-ov", "stable-diffusion-2.1", "square"],
"sd_3.0_square": ["stable-diffusion-ov", "stable-diffusion-3.0"],
"controlnet_referenceonly": ["stable-diffusion-ov", "controlnet-referenceonly"],
"controlnet_openpose": ["stable-diffusion-ov", "controlnet-openpose"],
"controlnet_canny": ["stable-diffusion-ov", "controlnet-canny"],
@@ -140,35 +156,73 @@ def run(model_name, available_devices, power_mode):
break
handle_client_data(data, conn, engine, model_name, model_path, scheduler)

# def initialize_engine(model_name, model_path, device_list):
# if model_name == "sd_3.0_square":
# device_list = ["GPU"]
# log.info('Device list: %s', device_list)
# return stable_diffusion_3.StableDiffusionThreeEngine(model=model_path, device=device_list)
# if model_name == "sd_1.5_square_int8":
# log.info('Device list: %s', device_list)
# return StableDiffusionEngineAdvanced(model=model_path, device=device_list)
# if model_name == "sd_1.5_inpainting":
# return StableDiffusionEngineInpainting(model=model_path, device=device_list)
# if model_name == "sd_1.5_square_lcm":
# return LatentConsistencyEngine(model=model_path, device=device_list)
# if model_name == "sd_1.5_inpainting_int8":
# log.info('Advanced Inpainting Device list: %s', device_list)
# return StableDiffusionEngineInpaintingAdvanced(model=model_path, device=device_list)
# if model_name == "controlnet_openpose_int8":
# log.info('Device list: %s', device_list)
# return ControlNetOpenPoseAdvanced(model=model_path, device=device_list)
# if model_name == "controlnet_canny_int8":
# log.info('Device list: %s', device_list)
# return ControlNetCannyEdgeAdvanced(model=model_path, device=device_list)
# if model_name == "controlnet_scribble_int8":
# log.info('Device list: %s', device_list)
# return ControlNetScribbleAdvanced(model=model_path, device=device_list)
# if model_name == "controlnet_canny":
# return ControlNetCannyEdge(model=model_path, device=device_list)
# if model_name == "controlnet_scribble":
# return ControlNetScribble(model=model_path, device=device_list)
# if model_name == "controlnet_openpose":
# return ControlNetOpenPose(model=model_path, device=device_list)
# if model_name == "controlnet_referenceonly":
# return StableDiffusionEngineReferenceOnly(model=model_path, device=device_list)
# return StableDiffusionEngine(model=model_path, device=device_list)

def initialize_engine(model_name, model_path, device_list):
if model_name == "sd_1.5_square_int8":
log.info('Device list: %s', device_list)
return StableDiffusionEngineAdvanced(model=model_path, device=device_list)
return stable_diffusion_engine.StableDiffusionEngineAdvanced(model=model_path, device=device_list)
if model_name == "sd_3.0_square":
device_list = ["GPU"]
log.info('Device list: %s', device_list)
return stable_diffusion_3.StableDiffusionThreeEngine(model=model_path, device=device_list)
if model_name == "sd_1.5_inpainting":
return StableDiffusionEngineInpainting(model=model_path, device=device_list)
return stable_diffusion_engine_inpainting.StableDiffusionEngineInpainting(model=model_path, device=device_list)
if model_name == "sd_1.5_square_lcm":
return LatentConsistencyEngine(model=model_path, device=device_list)
return stable_diffusion_engine.LatentConsistencyEngine(model=model_path, device=device_list)
if model_name == "sd_1.5_inpainting_int8":
log.info('Advanced Inpainting Device list: %s', device_list)
return StableDiffusionEngineInpaintingAdvanced(model=model_path, device=device_list)
return stable_diffusion_engine_inpainting_advanced.StableDiffusionEngineInpaintingAdvanced(model=model_path, device=device_list)
if model_name == "controlnet_openpose_int8":
log.info('Device list: %s', device_list)
return ControlNetOpenPoseAdvanced(model=model_path, device=device_list)
return controlnet_openpose_advanced.ControlNetOpenPoseAdvanced(model=model_path, device=device_list)
if model_name == "controlnet_canny_int8":
log.info('Device list: %s', device_list)
return ControlNetCannyEdgeAdvanced(model=model_path, device=device_list)
return controlnet_canny_edge_advanced.ControlNetCannyEdgeAdvanced(model=model_path, device=device_list)
if model_name == "controlnet_scribble_int8":
log.info('Device list: %s', device_list)
return ControlNetScribbleAdvanced(model=model_path, device=device_list)
return controlnet_scribble.ControlNetScribbleAdvanced(model=model_path, device=device_list)
if model_name == "controlnet_canny":
return ControlNetCannyEdge(model=model_path, device=device_list)
return controlnet_canny_edge.ControlNetCannyEdge(model=model_path, device=device_list)
if model_name == "controlnet_scribble":
return ControlNetScribble(model=model_path, device=device_list)
return controlnet_scribble.ControlNetScribble(model=model_path, device=device_list)
if model_name == "controlnet_openpose":
return ControlNetOpenPose(model=model_path, device=device_list)
return controlnet_openpose.ControlNetOpenPose(model=model_path, device=device_list)
if model_name == "controlnet_referenceonly":
return StableDiffusionEngineReferenceOnly(model=model_path, device=device_list)
return StableDiffusionEngine(model=model_path, device=device_list)
return stable_diffusion_engine.StableDiffusionEngineReferenceOnly(model=model_path, device=device_list)
return stable_diffusion_engine.StableDiffusionEngine(model=model_path, device=device_list)

def handle_client_data(data, conn, engine, model_name, model_path, scheduler):
if data.decode() == "kill":
@@ -280,6 +334,18 @@ def handle_client_data(data, conn, engine, model_name, model_path, scheduler):
callback_userdata=conn,
seed=seed
)
elif "sd_3.0" in model_name:
output = engine(
prompt = prompt,
negative_prompt = negative_prompt,
num_inference_steps = num_infer_steps,
guidance_scale = 0,
generator=torch.Generator().manual_seed(seed),
callback=progress_callback,
callback_userdata=conn
#callback_on_step_end_tensor_inputs = conn,

).images[0]

else:
if model_name == "sd_2.1_square":
@@ -313,7 +379,7 @@ def handle_client_data(data, conn, engine, model_name, model_path, scheduler):

image = "sd_cache.png"

if "controlnet" in model_name or model_name == "sd_1.5_square_lcm":
if "controlnet" in model_name or model_name == "sd_1.5_square_lcm" or "sd_3.0" in model_name:
output.save(os.path.join(weight_path, "..", image))
src_width, src_height = output.size
else:
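The sd_3.0 branch added above drives a diffusers-style pipeline and seeds it through a torch.Generator, which is what makes a given seed reproducible. A small, hedged example of that calling convention outside the server (the model path, prompt, and step count are placeholders, not what the plugin ships):

import torch
from diffusers import DiffusionPipeline

model_path = "path/to/sd3/pipeline"  # hypothetical; the server resolves its own model_path

pipe = DiffusionPipeline.from_pretrained(model_path)
image = pipe(
    prompt="a watercolor fox",
    negative_prompt="",
    num_inference_steps=4,
    guidance_scale=0,                             # as in the sd_3.0 branch above
    generator=torch.Generator().manual_seed(42),  # a fixed seed gives repeatable output
).images[0]
image.save("sd_cache.png")                        # the server also caches its result to sd_cache.png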
@@ -7,7 +7,7 @@
import sys

sys.path.extend([os.path.join(os.path.dirname(os.path.realpath(__file__)), "openvino_common")])
sys.path.extend([os.path.join(os.path.dirname(os.path.realpath(__file__)), "..","openvino_utils","tools")])
sys.path.extend([os.path.join(os.path.dirname(os.path.realpath(__file__)), "..","tools")])


import cv2