Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
31 commits
Select commit Hold shift + click to select a range
3c80e4c
Update scripts to support onnx pipeline
saikrishna2893 Feb 22, 2023
af4fde7
Update inference pipelines
saikrishna2893 Mar 21, 2023
5aa54ff
Update inference scripts with latest version
saikrishna2893 Mar 23, 2023
cd779c1
Add OVRT installation requirements
saikrishna2893 Mar 23, 2023
3a1e987
Update files for inference
saikrishna2893 Mar 24, 2023
73c0c2b
Update files based on conversion scripts
saikrishna2893 Mar 29, 2023
c8a9f28
Update invoke-optimized files
saikrishna2893 Mar 29, 2023
03479b9
Update files with package imports
jenetscaria-mcw Mar 29, 2023
696b498
Update files to pure onnx
saikrishna2893 Apr 3, 2023
faeb5e3
Update files to fix model configuration
saikrishna2893 Apr 3, 2023
7c74094
Update inference files to run with generate function
saikrishna2893 Apr 5, 2023
c3086ab
Update files to fix naming conventions
saikrishna2893 Apr 6, 2023
f8a57e0
Clean up files and add fix for model name
saikrishna2893 Apr 6, 2023
46259ae
Update files based on comments
saikrishna2893 Apr 6, 2023
9bde7ac
Remove ldm/args.py file
saikrishna2893 Apr 7, 2023
3e4eecb
Update pipeline files and naming convention
saikrishna2893 Apr 19, 2023
d1f9c84
Modularize the pipeline
saikrishna2893 Apr 19, 2023
3054386
Update files based on code structuring
saikrishna2893 Apr 19, 2023
ac172fb
Update base class and args
saikrishna2893 Apr 19, 2023
1124ca1
Update files as part of modularization
saikrishna2893 Apr 20, 2023
ffc7d43
Update files to fix errors
saikrishna2893 Apr 20, 2023
9714102
Update files based on review comments
saikrishna2893 Apr 24, 2023
1b813af
Cleanup files based on comments
saikrishna2893 Apr 24, 2023
d7a8df6
Update files to create output directory
saikrishna2893 Apr 24, 2023
87ab2d5
Update files to fix pytorch execution and comments
saikrishna2893 Apr 26, 2023
441391e
Update CLI file
saikrishna2893 Apr 26, 2023
ca40cce
Cleanup of code
saikrishna2893 Apr 26, 2023
6faa09a
Update files based on review comments
saikrishna2893 Apr 27, 2023
fb07635
Update changes to readme file
saikrishna2893 May 5, 2023
d1b371b
Update changes to cross_attention based on diffusers package
saikrishna2893 May 10, 2023
d060dc9
Update version to fix the issues with compel module
saikrishna2893 May 10, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -175,6 +175,13 @@ not supported.

8. Point your browser to http://localhost:9090 to bring up the web interface.
9. Type `banana sushi` in the box on the top left and click `Invoke`.
10. To run the ONNX pipeline you need to:
    i. Edit `utils.py` in the installed OpenVINO package: at line 33, add the line `os.environ["PATH"] = os.path.abspath(lib_path) + ";" + os.environ["PATH"]`.
    ii. Copy the model weight files into the root directory of InvokeAI.
    iii. Invoke the ONNX pipeline with the following flag:
```terminal
invokeai --modeltype "Onnx"
```

Be sure to activate the virtual environment each time before re-launching InvokeAI,
using `source .venv/bin/activate` or `.venv\Scripts\activate`.
Expand Down
3 changes: 2 additions & 1 deletion invokeai/backend/__init__.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
"""
Initialization file for invokeai.backend
"""
from .generate import Generate
from .pytorch import Pytorch
from .onnx import ONNX
from .generator import (
InvokeAIGeneratorBasicParams,
InvokeAIGenerator,
Expand Down
7 changes: 7 additions & 0 deletions invokeai/backend/args.py
Original file line number Diff line number Diff line change
Expand Up @@ -462,6 +462,13 @@ def _create_arg_parser(self):
general_group.add_argument(
"--version", "-V", action="store_true", help="Print InvokeAI version number"
)
model_group.add_argument(
"--modeltype",
dest="modelType",
default="Pytorch",
choices=['Pytorch','Onnx'],
help="Forces to use Pytorch inference by default. Choose Onnx for onnx pipeline",
)
model_group.add_argument(
"--root_dir",
default=None,
Expand Down
18 changes: 18 additions & 0 deletions invokeai/backend/inferencePipeline.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
"""
This file implements base class for Inference pipeline
Implements abstract methods for inference related operations
"""
from abc import ABC, abstractmethod
class inferencePipeline(ABC):
    """Abstract base class for inference pipelines.

    Concrete back ends (e.g. the PyTorch and ONNX pipelines) must
    implement the image-generation entry point and the factory that
    supplies the interactive CLI completer.
    """

    @abstractmethod
    def prompt2image(self):
        """Generate image(s) from a text prompt."""
        ...

    @abstractmethod
    def getCompleter(self):
        """Return the readline completer used by the interactive CLI."""
        ...
102 changes: 102 additions & 0 deletions invokeai/backend/onnx.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
"""
This class is derived from Inference Model class
Implements ONNX inference pipeline
"""
import traceback
import time
import sys
import os
import importlib

from diffusers import OnnxStableDiffusionPipeline

# Make the OpenVINO runtime libraries discoverable on PATH before
# diffusers/onnxruntime try to load the OpenVINO execution provider.
# Imported dynamically so the module name is resolved at runtime against
# whichever openvino package is installed.
utils = importlib.import_module('openvino.utils')
utils.add_openvino_libs_to_path()

from .inferencePipeline import inferencePipeline
from ..frontend.CLI.readline import Completer, get_completer

class ONNX(inferencePipeline):
    """ONNX inference pipeline.

    Derived from ``inferencePipeline``; generates images with
    ``diffusers.OnnxStableDiffusionPipeline`` using either the CPU or the
    OpenVINO ONNX Runtime execution provider.
    """

    def __init__(
        self,
        model=None,
        sampler_name="k_lms",
        precision="auto",
        outdir="outputs/",
        num_images=1,
        steps=50,
    ):
        """Configure the pipeline defaults.

        :param model: model repo id or path; falls back to SD 1.5 if None
        :param sampler_name: default sampler label (not forwarded to the
            ONNX pipeline; kept for CLI compatibility)
        :param precision: requested precision; ONNX inference always runs
            in float32, so this value is recorded but overridden below
        :param outdir: default directory for generated images
        :param num_images: default number of images per prompt
        :param steps: default number of inference steps
        """
        self.height = 512
        self.width = 512
        self.iterations = 1
        # Fix: honor the caller-supplied step count instead of hard-coding 50.
        self.steps = steps
        self.sampler_name = sampler_name
        self.model_type = "Onnx"
        self.outdir = outdir
        # ONNX inference runs in float32 regardless of the requested precision.
        self.precision = "float32"
        fallback = "runwayml/stable-diffusion-v1-5"
        self.model = model or fallback
        self.model_name = model or fallback
        self.num_images_per_prompt = num_images
        self.num_inference_steps = steps

    def prompt2image(
        self,
        prompt,
        iterations=None,
        steps=None,
        image_callback=None,
        step_callback=None,
        outdir=None,
        width=None,
        height=None,
        sampler_name=None,
        model=None,
        precision=None,
        catch_interrupts=False,
        **args,
    ):
        """Generate image(s) for *prompt* and save them under *outdir*.

        Per-call arguments override the instance defaults set in
        ``__init__``. ``precision == "cpu"`` selects the CPU execution
        provider; anything else selects the OpenVINO provider.

        :raises KeyboardInterrupt: re-raised unless *catch_interrupts* is True
        """
        steps = steps or self.steps
        width = width or self.width
        height = height or self.height
        iterations = iterations or self.iterations
        outdir = outdir or self.outdir
        # Fix: the call argument takes precedence over the instance default,
        # consistent with every other parameter above. (NOTE(review): the
        # resulting value is not forwarded to the ONNX pipeline.)
        sampler_name = sampler_name or self.sampler_name
        # exist_ok avoids the check-then-create race of the old exists() test.
        os.makedirs(outdir, exist_ok=True)
        print("Output directory: ", outdir)
        tic = time.time()
        try:
            if precision == "cpu":
                onnxPipeline = OnnxStableDiffusionPipeline.from_pretrained(
                    self.model, revision="onnx", provider="CPUExecutionProvider"
                )
            else:
                onnxPipeline = OnnxStableDiffusionPipeline.from_pretrained(
                    self.model, revision="onnx", provider="OpenVINOExecutionProvider"
                )

            image = onnxPipeline(
                prompt,
                height=height,
                width=width,
                num_images_per_prompt=iterations,
                num_inference_steps=steps,
            ).images[0]
            timestamp = int(time.time())
            image.save(f"{outdir}/Inference_{timestamp}.png")

        except KeyboardInterrupt:
            if catch_interrupts:
                print("**Interrupted** Partial results will be returned.")
            else:
                # Bare raise preserves the original traceback instead of
                # creating a fresh KeyboardInterrupt.
                raise
        except RuntimeError:
            print(traceback.format_exc(), file=sys.stderr)
            print(">> Could not generate image.")
        toc = time.time()
        print("\n>> Usage stats:")
        # Single f-string replaces the old mix of a no-placeholder f-string
        # and %-formatting.
        print(f">> image(s) generated in {toc - tic:4.2f}s")

    def getCompleter(self, opt):
        """Return the CLI completer for *opt* (no model list for ONNX)."""
        return get_completer(opt, models=[])
Loading