
Stability SDK for #405 #411

Merged
merged 42 commits on Dec 14, 2022

Commits (42)

9313f25  Initial diffusers backend scaffolding (carson-katri, Nov 20, 2022)
3946ee4  Support scheduler selection (carson-katri, Nov 24, 2022)
eff0fd5  Add more schedulers (carson-katri, Nov 24, 2022)
3e6e92d  Add ocio_transform action (carson-katri, Nov 24, 2022)
2b8e728  Support seamless generation (carson-katri, Nov 24, 2022)
ee41241  Support cancellation (carson-katri, Nov 24, 2022)
b906bb2  Remove old intents, upscaling, actor improvements (carson-katri, Nov 26, 2022)
81830f5  Add tiling to stable diffusion upscaler (carson-katri, Nov 27, 2022)
fb07290  Add image_to_image and inpaint (carson-katri, Nov 28, 2022)
aa0132b  Update render_pass.py (carson-katri, Nov 28, 2022)
1eb8d1a  fix activating when diffusers is not installed (NullSenseStudio, Nov 30, 2022)
b5f273b  load dependencies sooner (NullSenseStudio, Nov 30, 2022)
d5b1efd  re-add other platform requirements (NullSenseStudio, Dec 1, 2022)
c778402  fix can_use() and cancel/stop operators (NullSenseStudio, Dec 2, 2022)
c44b8ac  fix non-square images (NullSenseStudio, Dec 2, 2022)
2af731f  Update optimization options (carson-katri, Dec 4, 2022)
0c61680  Add proper file naming (carson-katri, Dec 4, 2022)
382a209  Merge branch 'main' of github.com:carson-katri/dream-textures into di… (carson-katri, Dec 4, 2022)
e08eaad  Fix data block removal error (carson-katri, Dec 4, 2022)
eb41b22  Add upscale tile blending with the 'tiler' (carson-katri, Dec 4, 2022)
333baf4  Add 'tiler' to other requirements files (carson-katri, Dec 4, 2022)
191e009  Add upscale naming (carson-katri, Dec 5, 2022)
61cd13a  Add progress bar (carson-katri, Dec 5, 2022)
69623ee  Add progress bar to upscaling (carson-katri, Dec 5, 2022)
c01ba6b  Fix image2image (carson-katri, Dec 5, 2022)
5b5f8d3  Fix inpaint (carson-katri, Dec 5, 2022)
5b00151  Raise the error in the exception_callback (carson-katri, Dec 6, 2022)
ca934ee  Support Stability SDK (carson-katri, Dec 6, 2022)
fac7d13  Remove pinned version from requirements (carson-katri, Dec 6, 2022)
b5bb947  Pipeline switching (carson-katri, Dec 6, 2022)
5e24cb6  Merge branch 'diffusers' of github.com:carson-katri/dream-textures in… (carson-katri, Dec 6, 2022)
4b6a84d  fix upscale (NullSenseStudio, Dec 6, 2022)
ad2c976  Merge branch 'diffusers' of https://github.com/carson-katri/dream-tex… (NullSenseStudio, Dec 6, 2022)
dd19635  no_grad already used in pipe (NullSenseStudio, Dec 6, 2022)
7f93e4e  Re-use the same datablock (carson-katri, Dec 6, 2022)
fe09872  Merge branch 'diffusers' of github.com:carson-katri/dream-textures in… (carson-katri, Dec 6, 2022)
4ecbcd8  Merge branch 'diffusers' into stability-sdk (carson-katri, Dec 6, 2022)
03c2652  Merge branch 'main' of github.com:carson-katri/dream-textures into st… (carson-katri, Dec 6, 2022)
a8fca6c  Add image_to_image and inpaint (carson-katri, Dec 13, 2022)
493b484  Update render pass (carson-katri, Dec 14, 2022)
672489a  Render pass improvements (carson-katri, Dec 14, 2022)
aefbe4f  Remove incorrect stat (carson-katri, Dec 14, 2022)
2 changes: 1 addition & 1 deletion __init__.py
@@ -110,6 +110,6 @@ def unregister():
for tool in TOOLS:
bpy.utils.unregister_tool(tool)

# unregister_render_pass()
unregister_render_pass()

kill_generator()
42 changes: 39 additions & 3 deletions generator_process/actions/image_to_image.py
@@ -36,8 +36,11 @@ def image_to_image(

step_preview_mode: StepPreviewMode,

# Stability SDK
key: str | None = None,

**kwargs
) -> Generator[NDArray, None, None]:
) -> Generator[ImageGenerationResult, None, None]:
match pipeline:
case Pipeline.STABLE_DIFFUSION:
import diffusers
@@ -232,7 +235,40 @@ def __call__(
step_preview_mode=step_preview_mode
)
case Pipeline.STABILITY_SDK:
import stability_sdk
raise NotImplementedError()
import stability_sdk.client
import stability_sdk.interfaces.gooseai.generation.generation_pb2
from PIL import Image, ImageOps
import io

if key is None:
raise ValueError("DreamStudio key not provided. Enter your key in the add-on preferences.")
client = stability_sdk.client.StabilityInference(key=key, engine=model)

if seed is None:
seed = random.randrange(0, np.iinfo(np.uint32).max)

answers = client.generate(
prompt=prompt,
width=width,
height=height,
cfg_scale=cfg_scale,
sampler=scheduler.stability_sdk(),
steps=steps,
seed=seed,
init_image=(Image.open(image) if isinstance(image, str) else Image.fromarray(image)).convert('RGB'),
start_schedule=strength,
)
for answer in answers:
for artifact in answer.artifacts:
if artifact.finish_reason == stability_sdk.interfaces.gooseai.generation.generation_pb2.FILTER:
raise ValueError("Your request activated DreamStudio's safety filter. Please modify your prompt and try again.")
if artifact.type == stability_sdk.interfaces.gooseai.generation.generation_pb2.ARTIFACT_IMAGE:
image = Image.open(io.BytesIO(artifact.binary))
yield ImageGenerationResult(
np.asarray(ImageOps.flip(image).convert('RGBA'), dtype=np.float32) / 255.,
seed,
steps,
True
)
case _:
raise Exception(f"Unsupported pipeline {pipeline}.")
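
All three Stability SDK branches added in this PR share the same request/response pattern from stability-sdk's gRPC client: build a StabilityInference client, call generate(), then walk the answers and their artifacts, treating FILTER as an error and ARTIFACT_IMAGE as output. Below is a minimal standalone sketch of that loop outside the add-on's Generator actor; the key, engine name, prompt, and output path are placeholders, not values taken from the PR.

import io
import random

import numpy as np
from PIL import Image
import stability_sdk.client
import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation

# Placeholders: supply a real DreamStudio API key and engine name (in the add-on
# these come from the preferences UI and the selected model).
client = stability_sdk.client.StabilityInference(key="YOUR_DREAMSTUDIO_KEY", engine="stable-diffusion-v1-5")

seed = random.randrange(0, np.iinfo(np.uint32).max)
answers = client.generate(
    prompt="a watercolor lighthouse at dusk",  # placeholder prompt
    width=512,
    height=512,
    cfg_scale=7.5,
    sampler=generation.SAMPLER_K_LMS,
    steps=25,
    seed=seed,
)

for answer in answers:
    for artifact in answer.artifacts:
        if artifact.finish_reason == generation.FILTER:
            raise ValueError("Request was blocked by DreamStudio's safety filter.")
        if artifact.type == generation.ARTIFACT_IMAGE:
            # The API returns encoded image bytes; decode them with Pillow.
            Image.open(io.BytesIO(artifact.binary)).save("result.png")
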
44 changes: 42 additions & 2 deletions generator_process/actions/inpaint.py
@@ -36,6 +36,9 @@ def inpaint(

step_preview_mode: StepPreviewMode,

# Stability SDK
key: str | None = None,

**kwargs
) -> Generator[NDArray, None, None]:
match pipeline:
@@ -272,7 +275,44 @@ def __call__(
step_preview_mode=step_preview_mode
)
case Pipeline.STABILITY_SDK:
import stability_sdk
raise NotImplementedError()
import stability_sdk.client
import stability_sdk.interfaces.gooseai.generation.generation_pb2
from PIL import Image, ImageOps
import io

if key is None:
raise ValueError("DreamStudio key not provided. Enter your key in the add-on preferences.")
client = stability_sdk.client.StabilityInference(key=key, engine=model)

if seed is None:
seed = random.randrange(0, np.iinfo(np.uint32).max)

init_image = Image.open(image) if isinstance(image, str) else Image.fromarray(image)

answers = client.generate(
prompt=prompt,
width=width,
height=height,
cfg_scale=cfg_scale,
sampler=scheduler.stability_sdk(),
steps=steps,
seed=seed,
init_image=init_image.convert('RGB'),
mask_image=init_image.getchannel('A'),
start_schedule=strength,
)
for answer in answers:
for artifact in answer.artifacts:
if artifact.finish_reason == stability_sdk.interfaces.gooseai.generation.generation_pb2.FILTER:
raise ValueError("Your request activated DreamStudio's safety filter. Please modify your prompt and try again.")
if artifact.type == stability_sdk.interfaces.gooseai.generation.generation_pb2.ARTIFACT_IMAGE:
image = Image.open(io.BytesIO(artifact.binary))
yield ImageGenerationResult(
np.asarray(ImageOps.flip(image).convert('RGBA'), dtype=np.float32) / 255.,
seed,
steps,
True
)

case _:
raise Exception(f"Unsupported pipeline {pipeline}.")
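
The inpaint branch differs from image_to_image mainly in how it builds the request: the init image's RGB channels are sent as init_image and its alpha channel as mask_image. A small sketch of that split, assuming an RGBA NumPy array shaped like the one the action receives; the array itself is synthetic.

import numpy as np
from PIL import Image

# Synthetic RGBA input standing in for the image the inpaint action receives.
rgba = (np.random.rand(512, 512, 4) * 255).astype(np.uint8)

init_image = Image.fromarray(rgba)
rgb = init_image.convert('RGB')     # colour content, passed to generate() as init_image
mask = init_image.getchannel('A')   # alpha channel, passed to generate() as mask_image

# The Stability SDK request then mirrors the diff above, e.g.:
# client.generate(..., init_image=rgb, mask_image=mask, start_schedule=strength)
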
54 changes: 52 additions & 2 deletions generator_process/actions/prompt_to_image.py
@@ -96,6 +96,22 @@ def scheduler_class():
return scheduler_class().from_pretrained(pretrained['model_path'], subfolder=pretrained['subfolder'])
else:
return scheduler_class().from_config(pipeline.scheduler.config)

def stability_sdk(self):
import stability_sdk.interfaces.gooseai.generation.generation_pb2
match self:
case Scheduler.LMS_DISCRETE:
return stability_sdk.interfaces.gooseai.generation.generation_pb2.SAMPLER_K_LMS
case Scheduler.DDIM:
return stability_sdk.interfaces.gooseai.generation.generation_pb2.SAMPLER_DDIM
case Scheduler.DDPM:
return stability_sdk.interfaces.gooseai.generation.generation_pb2.SAMPLER_DDPM
case Scheduler.EULER_DISCRETE:
return stability_sdk.interfaces.gooseai.generation.generation_pb2.SAMPLER_K_EULER
case Scheduler.EULER_ANCESTRAL_DISCRETE:
return stability_sdk.interfaces.gooseai.generation.generation_pb2.SAMPLER_K_EULER_ANCESTRAL
case _:
raise ValueError(f"{self} cannot be used with DreamStudio.")

@dataclass(eq=True)
class Optimizations:
@@ -230,6 +246,9 @@ def prompt_to_image(

step_preview_mode: StepPreviewMode,

# Stability SDK
key: str | None = None,

**kwargs
) -> Generator[ImageGenerationResult, None, None]:
match pipeline:
@@ -429,8 +448,39 @@ def __call__(
step_preview_mode=step_preview_mode
)
case Pipeline.STABILITY_SDK:
import stability_sdk
raise NotImplementedError()
import stability_sdk.client
import stability_sdk.interfaces.gooseai.generation.generation_pb2
from PIL import Image, ImageOps
import io

if key is None:
raise ValueError("DreamStudio key not provided. Enter your key in the add-on preferences.")
client = stability_sdk.client.StabilityInference(key=key, engine=model)

if seed is None:
seed = random.randrange(0, np.iinfo(np.uint32).max)

answers = client.generate(
prompt=prompt,
width=width,
height=height,
cfg_scale=cfg_scale,
sampler=scheduler.stability_sdk(),
steps=steps,
seed=seed
)
for answer in answers:
for artifact in answer.artifacts:
if artifact.finish_reason == stability_sdk.interfaces.gooseai.generation.generation_pb2.FILTER:
raise ValueError("Your request activated DreamStudio's safety filter. Please modify your prompt and try again.")
if artifact.type == stability_sdk.interfaces.gooseai.generation.generation_pb2.ARTIFACT_IMAGE:
image = Image.open(io.BytesIO(artifact.binary))
yield ImageGenerationResult(
np.asarray(ImageOps.flip(image).convert('RGBA'), dtype=np.float32) / 255.,
seed,
steps,
True
)
case _:
raise Exception(f"Unsupported pipeline {pipeline}.")
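
With this PR the actions yield ImageGenerationResult objects instead of bare arrays, so callers can tell step previews apart from the finished image. A hedged sketch of a consumer for such a generator; the field names (image, seed, step, final) are read off this diff and render_pass.py, and the loop body is illustrative rather than the add-on's actual caller.

def consume(results):
    """Drain a generator of ImageGenerationResult objects and return the final image.

    Assumes each result exposes .image (float32 RGBA array or None), .seed,
    .step, and .final, matching how render_pass.py reads them in this PR.
    """
    final_image = None
    for result in results:
        if result.image is not None:
            print(f"step {result.step}: shape {result.image.shape}, seed {result.seed}")
        if result.final:
            final_image = result.image
    return final_image
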

160 changes: 83 additions & 77 deletions render_pass.py
@@ -2,8 +2,9 @@
import cycles
import numpy as np
import os
from .generator_process.actions.prompt_to_image import Pipeline, StepPreviewMode
from .generator_process.actions.prompt_to_image import Pipeline, StepPreviewMode, ImageGenerationResult
from .generator_process import Generator
import threading

update_render_passes_original = cycles.CyclesRender.update_render_passes
render_original = cycles.CyclesRender.render
@@ -33,91 +34,27 @@ def render(self, depsgraph):
self.report({"ERROR"}, f"Image dimensions must be multiples of 64 (e.x. 512x512, 512x768, ...) closest is {round(size_x/64)*64}x{round(size_y/64)*64}")
return result
render_result = self.begin_result(0, 0, size_x, size_y)
for original_layer in original_result.layers:
layer = None
for layer_i in render_result.layers:
if layer_i.name == original_layer.name:
layer = layer_i
for original_render_pass in original_layer.passes:
render_pass = None
for pass_i in layer.passes:
if pass_i.name == original_render_pass.name:
render_pass = pass_i
for layer in render_result.layers:
for render_pass in layer.passes:
if render_pass.name == "Dream Textures":
self.update_stats("Dream Textures", "Starting")

# step_count = int(scene.dream_textures_render_properties_prompt.strength * scene.dream_textures_render_properties_prompt.steps)

self.update_stats("Dream Textures", "Creating temporary image")
combined_pass_image = bpy.data.images.new("dream_textures_post_processing_temp", width=size_x, height=size_y)

rect = layer.passes["Combined"].rect

combined_pixels = np.empty((size_x * size_y, 4), dtype=np.float32)
rect.foreach_get(combined_pixels)

gen = Generator.shared()
self.update_stats("Dream Textures", "Applying color management transforms")
combined_pixels = gen.ocio_transform(
combined_pixels,
config_path=os.path.join(bpy.utils.resource_path('LOCAL'), 'datafiles/colormanagement/config.ocio'),
exposure=scene.view_settings.exposure,
gamma=scene.view_settings.gamma,
view_transform=scene.view_settings.view_transform,
display_device=scene.display_settings.display_device,
look=scene.view_settings.look,
inverse=False,
_block=True
).result()

self.update_stats("Dream Textures", "Generating...")

generated_args = scene.dream_textures_render_properties_prompt.generate_args()
generated_args['step_preview_mode'] = StepPreviewMode.NONE
generated_args['width'] = size_x
generated_args['height'] = size_y
pixels = gen.image_to_image(
image=(combined_pixels.reshape((size_x, size_y, 4)) * 255).astype(np.uint8),
**generated_args,
_block=True
).result()

# Perform an inverse transform so when Blender applies its transform everything looks correct.
self.update_stats("Dream Textures", "Applying inverse color management transforms")
pixels = gen.ocio_transform(
pixels,
config_path=os.path.join(bpy.utils.resource_path('LOCAL'), 'datafiles/colormanagement/config.ocio'),
exposure=scene.view_settings.exposure,
gamma=scene.view_settings.gamma,
view_transform=scene.view_settings.view_transform,
display_device=scene.display_settings.display_device,
look=scene.view_settings.look,
inverse=True,
_block=True
).result()

reshaped = pixels.reshape((size_x * size_y, 4))
render_pass.rect.foreach_set(reshaped)

# delete pointers before closing shared memory
del pixels
del combined_pixels
del reshaped

def cleanup():
bpy.data.images.remove(combined_pass_image)
bpy.app.timers.register(cleanup)
self.update_stats("Dream Textures", "Finished")
self._render_dream_textures_pass(layer, (size_x, size_y), scene, render_pass, render_result)
else:
pixels = np.empty((len(original_render_pass.rect), len(original_render_pass.rect[0])), dtype=np.float32)
original_render_pass.rect.foreach_get(pixels)
source_pass = None
for original_layer in original_result.layers:
if layer.name == original_layer.name:
for original_pass in original_layer.passes:
if original_pass.name == render_pass.name:
source_pass = original_pass
pixels = np.empty((len(source_pass.rect), len(source_pass.rect[0])), dtype=np.float32)
source_pass.rect.foreach_get(pixels)
render_pass.rect[:] = pixels
self.end_result(render_result)
except Exception as e:
print(e)
return result
return render
cycles.CyclesRender.render = render_decorator(cycles.CyclesRender.render)
cycles.CyclesRender._render_dream_textures_pass = _render_dream_textures_pass

# def del_decorator(original):
# def del_patch(self):
@@ -132,5 +69,74 @@ def unregister_render_pass():
cycles.CyclesRender.update_render_passes = update_render_passes_original
global render_original
cycles.CyclesRender.render = render_original
del cycles.CyclesRender._render_dream_textures_pass
# global del_original
# cycles.CyclesRender.__del__ = del_original

def _render_dream_textures_pass(self, layer, size, scene, render_pass, render_result):
self.update_stats("Dream Textures", "Starting")

rect = layer.passes["Combined"].rect

combined_pixels = np.empty((size[0] * size[1], 4), dtype=np.float32)
rect.foreach_get(combined_pixels)

gen = Generator.shared()
self.update_stats("Dream Textures", "Applying color management transforms")
combined_pixels = gen.ocio_transform(
combined_pixels,
config_path=os.path.join(bpy.utils.resource_path('LOCAL'), 'datafiles/colormanagement/config.ocio'),
exposure=scene.view_settings.exposure,
gamma=scene.view_settings.gamma,
view_transform=scene.view_settings.view_transform,
display_device=scene.display_settings.display_device,
look=scene.view_settings.look,
inverse=False
).result()

self.update_stats("Dream Textures", "Generating...")

generated_args = scene.dream_textures_render_properties_prompt.generate_args()
generated_args['width'] = size[0]
generated_args['height'] = size[1]
f = gen.image_to_image(
image=np.flipud(combined_pixels.reshape((size[1], size[0], 4)) * 255).astype(np.uint8),
**generated_args
)
event = threading.Event()
def on_step(_, step: ImageGenerationResult):
if step.final:
return
self.update_progress(step.step / generated_args['steps'])
if step.image is not None:
combined_pixels = step.image
render_pass.rect.foreach_set(combined_pixels.reshape((size[0] * size[1], 4)))
self.update_result(render_result) # This does not seem to have an effect.
def on_done(future):
nonlocal combined_pixels
result = future.result()
if isinstance(result, list):
result = result[-1]
combined_pixels = result.image
event.set()
f.add_response_callback(on_step)
f.add_done_callback(on_done)
event.wait()

# Perform an inverse transform so when Blender applies its transform everything looks correct.
self.update_stats("Dream Textures", "Applying inverse color management transforms")
combined_pixels = gen.ocio_transform(
combined_pixels.reshape((size[0] * size[1], 4)),
config_path=os.path.join(bpy.utils.resource_path('LOCAL'), 'datafiles/colormanagement/config.ocio'),
exposure=scene.view_settings.exposure,
gamma=scene.view_settings.gamma,
view_transform=scene.view_settings.view_transform,
display_device=scene.display_settings.display_device,
look=scene.view_settings.look,
inverse=True
).result()

combined_pixels = combined_pixels.reshape((size[0] * size[1], 4))
render_pass.rect.foreach_set(combined_pixels)

self.update_stats("Dream Textures", "Finished")
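
The new _render_dream_textures_pass blocks the render thread on a threading.Event while the Generator's future streams step callbacks, then resumes once the done callback fires. A stripped-down sketch of that wait-on-callbacks pattern follows; the future interface (add_response_callback, add_done_callback, result()) mirrors what render_pass.py uses here, and everything else is illustrative.

import threading

def block_until_done(future, total_steps, report_progress):
    """Block the calling thread until `future` finishes, forwarding step progress.

    `future` is assumed to expose add_response_callback, add_done_callback and
    result(), as the Generator futures do in this PR; the rest is a sketch.
    """
    event = threading.Event()
    final_result = None

    def on_step(_, step):
        # Intermediate previews: update progress but keep waiting.
        if not step.final:
            report_progress(step.step / total_steps)

    def on_done(fut):
        nonlocal final_result
        value = fut.result()
        # The future may resolve to a list of results; keep the last one.
        final_result = value[-1] if isinstance(value, list) else value
        event.set()

    future.add_response_callback(on_step)
    future.add_done_callback(on_done)
    event.wait()  # Released by on_done once generation completes.
    return final_result
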
2 changes: 1 addition & 1 deletion requirements/dreamstudio.txt
@@ -1,2 +1,2 @@
stability-sdk==0.2.6
stability-sdk
opencolorio