Skip to content

Commit

Permalink
...
Browse files Browse the repository at this point in the history
  • Loading branch information
adodge committed Mar 7, 2023
1 parent 4447175 commit 85597f8
Show file tree
Hide file tree
Showing 21 changed files with 632 additions and 68 deletions.
4 changes: 2 additions & 2 deletions backend/src/nodes/impl/stable_diffusion/types.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
from comfy import StableDiffusionModel, VAEModel, CLIPModel, LatentImage, Conditioning
from comfy import CLIPModel, Conditioning, LatentImage, StableDiffusionModel, VAEModel

__all__ = [
"StableDiffusionModel",
"VAEModel",
"CLIPModel",
"LatentImage",
"Conditioning",
]
]
2 changes: 1 addition & 1 deletion backend/src/nodes/nodes/builtin_categories.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,10 @@
from .image_filter import category as ImageFilterCategory
from .image_utility import category as ImageUtilityCategory
from .material_textures import category as MaterialTexturesCategory
from .stable_diffusion import category as StableDiffusionCategory
from .ncnn import category as NCNNCategory
from .onnx import category as ONNXCategory
from .pytorch import category as PyTorchCategory
from .stable_diffusion import category as StableDiffusionCategory
from .utility import category as UtilityCategory

builtin_categories = [
Expand Down
13 changes: 7 additions & 6 deletions backend/src/nodes/nodes/stable_diffusion/clip_encode.py
Original file line number Diff line number Diff line change
@@ -1,24 +1,25 @@
from __future__ import annotations

from typing import Tuple, Optional
from typing import Optional

from ...impl.stable_diffusion.types import CLIPModel, Conditioning

from ...node_base import NodeBase
from ...node_factory import NodeFactory
from ...properties.inputs import TextAreaInput
from ...properties.inputs.stable_diffusion_inputs import CLIPModelInput
from . import category as StableDiffusionCategory
from ...properties.outputs.stable_diffusion_outputs import ConditioningOutput
from . import category as StableDiffusionCategory


@NodeFactory.register("chainner:stable_diffusion:clip_encode")
class CLIPEncodeNode(NodeBase):
def __init__(self):
super().__init__()
self.description = ""
self.inputs = [CLIPModelInput(),
TextAreaInput("Prompt").make_optional(),]
self.inputs = [
CLIPModelInput(),
TextAreaInput("Prompt").make_optional(),
]
self.outputs = [
ConditioningOutput(),
]
Expand All @@ -31,4 +32,4 @@ def __init__(self):
def run(self, clip: CLIPModel, prompt: Optional[str]) -> Conditioning:
prompt = prompt or ""
out = clip.encode(prompt)
return out
return out
30 changes: 30 additions & 0 deletions backend/src/nodes/nodes/stable_diffusion/conditioning_compose.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
from __future__ import annotations

from ...impl.stable_diffusion.types import Conditioning
from ...node_base import NodeBase
from ...node_factory import NodeFactory
from ...properties.inputs.stable_diffusion_inputs import ConditioningInput
from ...properties.outputs.stable_diffusion_outputs import ConditioningOutput
from . import category as StableDiffusionCategory


@NodeFactory.register("chainner:stable_diffusion:conditioning_compose")
class ConditinoingComposeNode(NodeBase):  # NOTE(review): name misspells "Conditioning" — kept for compatibility
    """Node that merges two conditionings into one via `Conditioning.combine`."""

    def __init__(self):
        super().__init__()
        self.description = ""
        # Two conditioning inputs, merged into a single conditioning output.
        self.inputs = [ConditioningInput(), ConditioningInput()]
        self.outputs = [ConditioningOutput()]

        self.category = StableDiffusionCategory
        self.name = "Compose"
        self.icon = "PyTorch"
        self.sub = "Conditioning"

    def run(self, a: Conditioning, b: Conditioning) -> Conditioning:
        """Return the combination of conditionings *a* and *b*."""
        merged = Conditioning.combine([a, b])
        return merged
82 changes: 82 additions & 0 deletions backend/src/nodes/nodes/stable_diffusion/conditioning_set_area.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
from __future__ import annotations

from ...impl.stable_diffusion.types import Conditioning
from ...node_base import NodeBase
from ...node_factory import NodeFactory
from ...properties.inputs import SliderInput
from ...properties.inputs.stable_diffusion_inputs import ConditioningInput
from ...properties.outputs.stable_diffusion_outputs import ConditioningOutput
from . import category as StableDiffusionCategory


@NodeFactory.register("chainner:stable_diffusion:conditioning_set_area")
class ConditinoingComposeNode(NodeBase):  # NOTE(review): name copied from the compose node (and misspelled) — kept for compatibility
    """Node that restricts a conditioning to a rectangular area at a given strength."""

    def __init__(self):
        super().__init__()
        self.description = ""

        def px_slider(label: str) -> SliderInput:
            # All four pixel sliders share the same 64..4096 range stepped by 64.
            return SliderInput(
                label,
                unit="px",
                minimum=64,
                maximum=4096,
                default=512,
                slider_step=64,
                controls_step=64,
            )

        self.inputs = [
            ConditioningInput(),
            SliderInput(
                "strength",
                minimum=0,
                maximum=10,
                default=1,
                slider_step=0.01,
                controls_step=0.01,
            ),
            px_slider("width"),
            px_slider("height"),
            # NOTE(review): x/y are position offsets but reuse the size sliders'
            # minimum=64 / default=512 — confirm a 0 minimum/default isn't intended.
            px_slider("x"),
            px_slider("y"),
        ]
        self.outputs = [ConditioningOutput()]

        self.category = StableDiffusionCategory
        self.name = "Set Area"
        self.icon = "PyTorch"
        self.sub = "Conditioning"

    def run(
        self,
        cond: Conditioning,
        strength: float,
        width: int,
        height: int,
        x: int,
        y: int,
    ) -> Conditioning:
        """Apply the area restriction to *cond*.

        Argument order (width, height, x, y, strength) follows the
        `Conditioning.set_area` signature from the comfy wrapper.
        """
        return cond.set_area(width, height, x, y, strength)
13 changes: 4 additions & 9 deletions backend/src/nodes/nodes/stable_diffusion/empty_latent_image.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,11 @@
from __future__ import annotations

from typing import Tuple, Optional


from ...impl.stable_diffusion.types import CLIPModel, Conditioning, LatentImage

from ...impl.stable_diffusion.types import LatentImage
from ...node_base import NodeBase
from ...node_factory import NodeFactory
from ...properties.inputs import TextAreaInput, SliderInput
from ...properties.inputs.stable_diffusion_inputs import CLIPModelInput
from ...properties.inputs import SliderInput
from ...properties.outputs.stable_diffusion_outputs import LatentImageOutput
from . import category as StableDiffusionCategory
from ...properties.outputs.stable_diffusion_outputs import ConditioningOutput, LatentImageOutput


@NodeFactory.register("chainner:stable_diffusion:empty_latent_image")
Expand Down Expand Up @@ -47,4 +42,4 @@ def __init__(self):

def run(self, width: int, height: int) -> LatentImage:
img = LatentImage.empty(width, height)
return img
return img
53 changes: 36 additions & 17 deletions backend/src/nodes/nodes/stable_diffusion/k_sampler.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,22 @@
from __future__ import annotations

from typing import Tuple, Optional

import comfy

from ...impl.stable_diffusion.types import StableDiffusionModel, Conditioning, LatentImage

from ...impl.stable_diffusion.types import (
Conditioning,
LatentImage,
StableDiffusionModel,
)
from ...node_base import NodeBase, group
from ...node_factory import NodeFactory
from ...properties.inputs import TextAreaInput, SliderInput, NumberInput, EnumInput
from ...properties.inputs.stable_diffusion_inputs import CLIPModelInput, LatentImageInput, ConditioningInput, \
StableDiffusionModelInput
from ...properties.inputs import EnumInput, NumberInput, SliderInput
from ...properties.inputs.stable_diffusion_inputs import (
ConditioningInput,
LatentImageInput,
StableDiffusionModelInput,
)
from ...properties.outputs.stable_diffusion_outputs import LatentImageOutput
from . import category as StableDiffusionCategory
from ...properties.outputs.stable_diffusion_outputs import ConditioningOutput, LatentImageOutput


@NodeFactory.register("chainner:stable_diffusion:k_sampler")
Expand All @@ -28,7 +32,7 @@ def __init__(self):
SliderInput(
"Denoising Strength",
minimum=0,
default=0.75,
default=1,
maximum=1,
slider_step=0.01,
controls_step=0.1,
Expand Down Expand Up @@ -60,18 +64,33 @@ def __init__(self):
]

self.category = StableDiffusionCategory
self.name = "K-Sampler"
self.name = "Sample"
self.icon = "PyTorch"
self.sub = "Latent"

def run(self, model: StableDiffusionModel, positive: Conditioning, negative: Conditioning, latent_image: LatentImage,
denoising_strength: float, seed: int, steps: int, sampler: comfy.Sampler, scheduler: comfy.Scheduler,
cfg_scale: float) -> LatentImage:

def run(
self,
model: StableDiffusionModel,
positive: Conditioning,
negative: Conditioning,
latent_image: LatentImage,
denoising_strength: float,
seed: int,
steps: int,
sampler: comfy.Sampler,
scheduler: comfy.Scheduler,
cfg_scale: float,
) -> LatentImage:
img = model.sample(
positive=positive, negative=negative, latent_image=latent_image,
seed=seed, steps=steps, cfg_scale=cfg_scale,
sampler=sampler, scheduler=scheduler, denoise_strength=denoising_strength,
positive=positive,
negative=negative,
latent_image=latent_image,
seed=seed,
steps=steps,
cfg_scale=cfg_scale,
sampler=sampler,
scheduler=scheduler,
denoise_strength=denoising_strength,
)

return img
Loading

0 comments on commit 85597f8

Please sign in to comment.