From 3365008dfe5a7a46cbe76d8ad0d7efb054617733 Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Wed, 11 Mar 2026 18:53:55 +0200
Subject: [PATCH 1/4] feat(api-nodes): add Reve Image nodes (#12848)
---
comfy_api_nodes/apis/reve.py | 68 ++++++
comfy_api_nodes/nodes_reve.py | 395 +++++++++++++++++++++++++++++++++
comfy_api_nodes/util/client.py | 12 +-
3 files changed, 474 insertions(+), 1 deletion(-)
create mode 100644 comfy_api_nodes/apis/reve.py
create mode 100644 comfy_api_nodes/nodes_reve.py
diff --git a/comfy_api_nodes/apis/reve.py b/comfy_api_nodes/apis/reve.py
new file mode 100644
index 000000000000..c6b5a69d86b7
--- /dev/null
+++ b/comfy_api_nodes/apis/reve.py
@@ -0,0 +1,68 @@
+from pydantic import BaseModel, Field
+
+
+class RevePostprocessingOperation(BaseModel):
+ process: str = Field(..., description="The postprocessing operation: upscale or remove_background.")
+ upscale_factor: int | None = Field(
+ None,
+ description="Upscale factor (2, 3, or 4). Only used when process is upscale.",
+ ge=2,
+ le=4,
+ )
+
+
+class ReveImageCreateRequest(BaseModel):
+ prompt: str = Field(...)
+ aspect_ratio: str | None = Field(...)
+ version: str = Field(...)
+ test_time_scaling: int = Field(
+ ...,
+ description="If included, the model will spend more effort making better images. Values between 1 and 15.",
+ ge=1,
+ le=15,
+ )
+ postprocessing: list[RevePostprocessingOperation] | None = Field(
+ None, description="Optional postprocessing operations to apply after generation."
+ )
+
+
+class ReveImageEditRequest(BaseModel):
+ edit_instruction: str = Field(...)
+ reference_image: str = Field(..., description="A base64 encoded image to use as reference for the edit.")
+ aspect_ratio: str | None = Field(...)
+ version: str = Field(...)
+ test_time_scaling: int | None = Field(
+ ...,
+ description="If included, the model will spend more effort making better images. Values between 1 and 15.",
+ ge=1,
+ le=15,
+ )
+ postprocessing: list[RevePostprocessingOperation] | None = Field(
+ None, description="Optional postprocessing operations to apply after generation."
+ )
+
+
+class ReveImageRemixRequest(BaseModel):
+ prompt: str = Field(...)
+ reference_images: list[str] = Field(..., description="A list of 1-6 base64 encoded reference images.")
+ aspect_ratio: str | None = Field(...)
+ version: str = Field(...)
+ test_time_scaling: int | None = Field(
+ ...,
+ description="If included, the model will spend more effort making better images. Values between 1 and 15.",
+ ge=1,
+ le=15,
+ )
+ postprocessing: list[RevePostprocessingOperation] | None = Field(
+ None, description="Optional postprocessing operations to apply after generation."
+ )
+
+
+class ReveImageResponse(BaseModel):
+ image: str | None = Field(None, description="The base64 encoded image data.")
+ request_id: str | None = Field(None, description="A unique id for the request.")
+ credits_used: float | None = Field(None, description="The number of credits used for this request.")
+ version: str | None = Field(None, description="The specific model version used.")
+ content_violation: bool | None = Field(
+ None, description="Indicates whether the generated image violates the content policy."
+ )
diff --git a/comfy_api_nodes/nodes_reve.py b/comfy_api_nodes/nodes_reve.py
new file mode 100644
index 000000000000..608d9f058732
--- /dev/null
+++ b/comfy_api_nodes/nodes_reve.py
@@ -0,0 +1,395 @@
+from io import BytesIO
+
+from typing_extensions import override
+
+from comfy_api.latest import IO, ComfyExtension, Input
+from comfy_api_nodes.apis.reve import (
+ ReveImageCreateRequest,
+ ReveImageEditRequest,
+ ReveImageRemixRequest,
+ RevePostprocessingOperation,
+)
+from comfy_api_nodes.util import (
+ ApiEndpoint,
+ bytesio_to_image_tensor,
+ sync_op_raw,
+ tensor_to_base64_string,
+ validate_string,
+)
+
+
+def _build_postprocessing(upscale: dict, remove_background: bool) -> list[RevePostprocessingOperation] | None:
+ ops = []
+ if upscale["upscale"] == "enabled":
+ ops.append(
+ RevePostprocessingOperation(
+ process="upscale",
+ upscale_factor=upscale["upscale_factor"],
+ )
+ )
+ if remove_background:
+ ops.append(RevePostprocessingOperation(process="remove_background"))
+ return ops or None
+
+
+def _postprocessing_inputs():
+ return [
+ IO.DynamicCombo.Input(
+ "upscale",
+ options=[
+ IO.DynamicCombo.Option("disabled", []),
+ IO.DynamicCombo.Option(
+ "enabled",
+ [
+ IO.Int.Input(
+ "upscale_factor",
+ default=2,
+ min=2,
+ max=4,
+ step=1,
+ tooltip="Upscale factor (2x, 3x, or 4x).",
+ ),
+ ],
+ ),
+ ],
+ tooltip="Upscale the generated image. May add additional cost.",
+ ),
+ IO.Boolean.Input(
+ "remove_background",
+ default=False,
+ tooltip="Remove the background from the generated image. May add additional cost.",
+ ),
+ ]
+
+
+def _reve_price_extractor(headers: dict) -> float | None:
+ credits_used = headers.get("x-reve-credits-used")
+ if credits_used is not None:
+ return float(credits_used) / 524.48
+ return None
+
+
+def _reve_response_header_validator(headers: dict) -> None:
+ error_code = headers.get("x-reve-error-code")
+ if error_code:
+ raise ValueError(f"Reve API error: {error_code}")
+ if headers.get("x-reve-content-violation", "").lower() == "true":
+ raise ValueError("The generated image was flagged for content policy violation.")
+
+
+def _model_inputs(versions: list[str], aspect_ratios: list[str]):
+ return [
+ IO.DynamicCombo.Option(
+ version,
+ [
+ IO.Combo.Input(
+ "aspect_ratio",
+ options=aspect_ratios,
+ tooltip="Aspect ratio of the output image.",
+ ),
+ IO.Int.Input(
+ "test_time_scaling",
+ default=1,
+ min=1,
+ max=5,
+ step=1,
+ tooltip="Higher values produce better images but cost more credits.",
+ advanced=True,
+ ),
+ ],
+ )
+ for version in versions
+ ]
+
+
+class ReveImageCreateNode(IO.ComfyNode):
+
+ @classmethod
+ def define_schema(cls):
+ return IO.Schema(
+ node_id="ReveImageCreateNode",
+ display_name="Reve Image Create",
+ category="api node/image/Reve",
+ description="Generate images from text descriptions using Reve.",
+ inputs=[
+ IO.String.Input(
+ "prompt",
+ multiline=True,
+ default="",
+ tooltip="Text description of the desired image. Maximum 2560 characters.",
+ ),
+ IO.DynamicCombo.Input(
+ "model",
+ options=_model_inputs(
+ ["reve-create@20250915"],
+ aspect_ratios=["3:2", "16:9", "9:16", "2:3", "4:3", "3:4", "1:1"],
+ ),
+ tooltip="Model version to use for generation.",
+ ),
+ *_postprocessing_inputs(),
+ IO.Int.Input(
+ "seed",
+ default=0,
+ min=0,
+ max=2147483647,
+ control_after_generate=True,
+ tooltip="Seed controls whether the node should re-run; "
+ "results are non-deterministic regardless of seed.",
+ ),
+ ],
+ outputs=[IO.Image.Output()],
+ hidden=[
+ IO.Hidden.auth_token_comfy_org,
+ IO.Hidden.api_key_comfy_org,
+ IO.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ price_badge=IO.PriceBadge(
+ expr="""{"type":"usd","usd":0.03432,"format":{"approximate":true,"note":"(base)"}}""",
+ ),
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ prompt: str,
+ model: dict,
+ upscale: dict,
+ remove_background: bool,
+ seed: int,
+ ) -> IO.NodeOutput:
+ validate_string(prompt, min_length=1, max_length=2560)
+ response = await sync_op_raw(
+ cls,
+ ApiEndpoint(
+ path="/proxy/reve/v1/image/create",
+ method="POST",
+ headers={"Accept": "image/webp"},
+ ),
+ as_binary=True,
+ price_extractor=_reve_price_extractor,
+ response_header_validator=_reve_response_header_validator,
+ data=ReveImageCreateRequest(
+ prompt=prompt,
+ aspect_ratio=model["aspect_ratio"],
+ version=model["model"],
+ test_time_scaling=model["test_time_scaling"],
+ postprocessing=_build_postprocessing(upscale, remove_background),
+ ),
+ )
+ return IO.NodeOutput(bytesio_to_image_tensor(BytesIO(response)))
+
+
+class ReveImageEditNode(IO.ComfyNode):
+
+ @classmethod
+ def define_schema(cls):
+ return IO.Schema(
+ node_id="ReveImageEditNode",
+ display_name="Reve Image Edit",
+ category="api node/image/Reve",
+ description="Edit images using natural language instructions with Reve.",
+ inputs=[
+ IO.Image.Input("image", tooltip="The image to edit."),
+ IO.String.Input(
+ "edit_instruction",
+ multiline=True,
+ default="",
+ tooltip="Text description of how to edit the image. Maximum 2560 characters.",
+ ),
+ IO.DynamicCombo.Input(
+ "model",
+ options=_model_inputs(
+ ["reve-edit@20250915", "reve-edit-fast@20251030"],
+ aspect_ratios=["auto", "16:9", "9:16", "3:2", "2:3", "4:3", "3:4", "1:1"],
+ ),
+ tooltip="Model version to use for editing.",
+ ),
+ *_postprocessing_inputs(),
+ IO.Int.Input(
+ "seed",
+ default=0,
+ min=0,
+ max=2147483647,
+ control_after_generate=True,
+ tooltip="Seed controls whether the node should re-run; "
+ "results are non-deterministic regardless of seed.",
+ ),
+ ],
+ outputs=[IO.Image.Output()],
+ hidden=[
+ IO.Hidden.auth_token_comfy_org,
+ IO.Hidden.api_key_comfy_org,
+ IO.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ price_badge=IO.PriceBadge(
+ depends_on=IO.PriceBadgeDepends(
+ widgets=["model"],
+ ),
+ expr="""
+ (
+ $isFast := $contains(widgets.model, "fast");
+ $base := $isFast ? 0.01001 : 0.0572;
+ {"type": "usd", "usd": $base, "format": {"approximate": true, "note": "(base)"}}
+ )
+ """,
+ ),
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ image: Input.Image,
+ edit_instruction: str,
+ model: dict,
+ upscale: dict,
+ remove_background: bool,
+ seed: int,
+ ) -> IO.NodeOutput:
+ validate_string(edit_instruction, min_length=1, max_length=2560)
+ tts = model["test_time_scaling"]
+ ar = model["aspect_ratio"]
+ response = await sync_op_raw(
+ cls,
+ ApiEndpoint(
+ path="/proxy/reve/v1/image/edit",
+ method="POST",
+ headers={"Accept": "image/webp"},
+ ),
+ as_binary=True,
+ price_extractor=_reve_price_extractor,
+ response_header_validator=_reve_response_header_validator,
+ data=ReveImageEditRequest(
+ edit_instruction=edit_instruction,
+ reference_image=tensor_to_base64_string(image),
+ aspect_ratio=ar if ar != "auto" else None,
+ version=model["model"],
+ test_time_scaling=tts if tts and tts > 1 else None,
+ postprocessing=_build_postprocessing(upscale, remove_background),
+ ),
+ )
+ return IO.NodeOutput(bytesio_to_image_tensor(BytesIO(response)))
+
+
+class ReveImageRemixNode(IO.ComfyNode):
+
+ @classmethod
+ def define_schema(cls):
+ return IO.Schema(
+ node_id="ReveImageRemixNode",
+ display_name="Reve Image Remix",
+ category="api node/image/Reve",
+ description="Combine reference images with text prompts to create new images using Reve.",
+ inputs=[
+ IO.Autogrow.Input(
+ "reference_images",
+ template=IO.Autogrow.TemplatePrefix(
+ IO.Image.Input("image"),
+ prefix="image_",
+ min=1,
+ max=6,
+ ),
+ ),
+ IO.String.Input(
+ "prompt",
+ multiline=True,
+ default="",
+                    tooltip="Text description of the desired image. "
+                    "May include XML img tags to reference specific images by index, "
+                    "e.g. <img>0</img>, <img>1</img>, etc.",
+ ),
+ IO.DynamicCombo.Input(
+ "model",
+ options=_model_inputs(
+ ["reve-remix@20250915", "reve-remix-fast@20251030"],
+ aspect_ratios=["auto", "16:9", "9:16", "3:2", "2:3", "4:3", "3:4", "1:1"],
+ ),
+ tooltip="Model version to use for remixing.",
+ ),
+ *_postprocessing_inputs(),
+ IO.Int.Input(
+ "seed",
+ default=0,
+ min=0,
+ max=2147483647,
+ control_after_generate=True,
+ tooltip="Seed controls whether the node should re-run; "
+ "results are non-deterministic regardless of seed.",
+ ),
+ ],
+ outputs=[IO.Image.Output()],
+ hidden=[
+ IO.Hidden.auth_token_comfy_org,
+ IO.Hidden.api_key_comfy_org,
+ IO.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ price_badge=IO.PriceBadge(
+ depends_on=IO.PriceBadgeDepends(
+ widgets=["model"],
+ ),
+ expr="""
+ (
+ $isFast := $contains(widgets.model, "fast");
+ $base := $isFast ? 0.01001 : 0.0572;
+ {"type": "usd", "usd": $base, "format": {"approximate": true, "note": "(base)"}}
+ )
+ """,
+ ),
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ reference_images: IO.Autogrow.Type,
+ prompt: str,
+ model: dict,
+ upscale: dict,
+ remove_background: bool,
+ seed: int,
+ ) -> IO.NodeOutput:
+ validate_string(prompt, min_length=1, max_length=2560)
+ if not reference_images:
+ raise ValueError("At least one reference image is required.")
+ ref_base64_list = []
+ for key in reference_images:
+ ref_base64_list.append(tensor_to_base64_string(reference_images[key]))
+ if len(ref_base64_list) > 6:
+ raise ValueError("Maximum 6 reference images are allowed.")
+ tts = model["test_time_scaling"]
+ ar = model["aspect_ratio"]
+ response = await sync_op_raw(
+ cls,
+ ApiEndpoint(
+ path="/proxy/reve/v1/image/remix",
+ method="POST",
+ headers={"Accept": "image/webp"},
+ ),
+ as_binary=True,
+ price_extractor=_reve_price_extractor,
+ response_header_validator=_reve_response_header_validator,
+ data=ReveImageRemixRequest(
+ prompt=prompt,
+ reference_images=ref_base64_list,
+ aspect_ratio=ar if ar != "auto" else None,
+ version=model["model"],
+ test_time_scaling=tts if tts and tts > 1 else None,
+ postprocessing=_build_postprocessing(upscale, remove_background),
+ ),
+ )
+ return IO.NodeOutput(bytesio_to_image_tensor(BytesIO(response)))
+
+
+class ReveExtension(ComfyExtension):
+ @override
+ async def get_node_list(self) -> list[type[IO.ComfyNode]]:
+ return [
+ ReveImageCreateNode,
+ ReveImageEditNode,
+ ReveImageRemixNode,
+ ]
+
+
+async def comfy_entrypoint() -> ReveExtension:
+ return ReveExtension()
diff --git a/comfy_api_nodes/util/client.py b/comfy_api_nodes/util/client.py
index 79ffb77c14a6..9d730b81a4da 100644
--- a/comfy_api_nodes/util/client.py
+++ b/comfy_api_nodes/util/client.py
@@ -67,6 +67,7 @@ class _RequestConfig:
progress_origin_ts: float | None = None
price_extractor: Callable[[dict[str, Any]], float | None] | None = None
is_rate_limited: Callable[[int, Any], bool] | None = None
+ response_header_validator: Callable[[dict[str, str]], None] | None = None
@dataclass
@@ -202,11 +203,13 @@ async def sync_op_raw(
monitor_progress: bool = True,
max_retries_on_rate_limit: int = 16,
is_rate_limited: Callable[[int, Any], bool] | None = None,
+ response_header_validator: Callable[[dict[str, str]], None] | None = None,
) -> dict[str, Any] | bytes:
"""
Make a single network request.
- If as_binary=False (default): returns JSON dict (or {'_raw': ''} if non-JSON).
- If as_binary=True: returns bytes.
+ - response_header_validator: optional callback receiving response headers dict
"""
if isinstance(data, BaseModel):
data = data.model_dump(exclude_none=True)
@@ -232,6 +235,7 @@ async def sync_op_raw(
price_extractor=price_extractor,
max_retries_on_rate_limit=max_retries_on_rate_limit,
is_rate_limited=is_rate_limited,
+ response_header_validator=response_header_validator,
)
return await _request_base(cfg, expect_binary=as_binary)
@@ -769,6 +773,12 @@ async def _monitor(stop_evt: asyncio.Event, start_ts: float):
cfg.node_cls, cfg.wait_label, int(now - start_time), cfg.estimated_total
)
bytes_payload = bytes(buff)
+ resp_headers = {k.lower(): v for k, v in resp.headers.items()}
+ if cfg.price_extractor:
+ with contextlib.suppress(Exception):
+ extracted_price = cfg.price_extractor(resp_headers)
+ if cfg.response_header_validator:
+ cfg.response_header_validator(resp_headers)
operation_succeeded = True
final_elapsed_seconds = int(time.monotonic() - start_time)
request_logger.log_request_response(
@@ -776,7 +786,7 @@ async def _monitor(stop_evt: asyncio.Event, start_ts: float):
request_method=method,
request_url=url,
response_status_code=resp.status,
- response_headers=dict(resp.headers),
+ response_headers=resp_headers,
response_content=bytes_payload,
)
return bytes_payload
From 4f4f8659c205069f74da8ac47378a5b1c0e142ca Mon Sep 17 00:00:00 2001
From: Adi Borochov <58855640+adiborochov@users.noreply.github.com>
Date: Wed, 11 Mar 2026 19:04:13 +0200
Subject: [PATCH 2/4] fix: guard torch.AcceleratorError for compatibility with
torch < 2.8.0 (#12874)
* fix: guard torch.AcceleratorError for compatibility with torch < 2.8.0
torch.AcceleratorError was introduced in PyTorch 2.8.0. Accessing it
directly raises AttributeError on older versions. Use a try/except
fallback at module load time, consistent with the existing pattern used
for OOM_EXCEPTION.
* fix: address review feedback for AcceleratorError compat
- Fall back to RuntimeError instead of type(None) for ACCELERATOR_ERROR,
consistent with OOM_EXCEPTION fallback pattern and valid for except clauses
- Add "out of memory" message introspection for RuntimeError fallback case
- Use RuntimeError directly in discard_cuda_async_error except clause
---------
---
comfy/model_management.py | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/comfy/model_management.py b/comfy/model_management.py
index 81550c790409..81c89b180a0f 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -270,10 +270,15 @@ def mac_version():
except:
OOM_EXCEPTION = Exception
+try:
+ ACCELERATOR_ERROR = torch.AcceleratorError
+except AttributeError:
+ ACCELERATOR_ERROR = RuntimeError
+
def is_oom(e):
if isinstance(e, OOM_EXCEPTION):
return True
- if isinstance(e, torch.AcceleratorError) and getattr(e, 'error_code', None) == 2:
+ if isinstance(e, ACCELERATOR_ERROR) and (getattr(e, 'error_code', None) == 2 or "out of memory" in str(e).lower()):
discard_cuda_async_error()
return True
return False
@@ -1275,7 +1280,7 @@ def discard_cuda_async_error():
b = torch.tensor([1], dtype=torch.uint8, device=get_torch_device())
_ = a + b
synchronize()
- except torch.AcceleratorError:
+ except RuntimeError:
#Dump it! We already know about it from the synchronous return
pass
From f6274c06b4e7bce8adbc1c60ae5a4c168825a614 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Wed, 11 Mar 2026 13:37:31 -0700
Subject: [PATCH 3/4] Fix issue with batch_size > 1 on some models. (#12892)
---
comfy/ldm/flux/layers.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/comfy/ldm/flux/layers.py b/comfy/ldm/flux/layers.py
index e20d498f8528..e28d704b4352 100644
--- a/comfy/ldm/flux/layers.py
+++ b/comfy/ldm/flux/layers.py
@@ -144,9 +144,9 @@ def apply_mod(tensor, m_mult, m_add=None, modulation_dims=None):
return tensor * m_mult
else:
for d in modulation_dims:
- tensor[:, d[0]:d[1]] *= m_mult[:, d[2]]
+ tensor[:, d[0]:d[1]] *= m_mult[:, d[2]:d[2] + 1]
if m_add is not None:
- tensor[:, d[0]:d[1]] += m_add[:, d[2]]
+ tensor[:, d[0]:d[1]] += m_add[:, d[2]:d[2] + 1]
return tensor
From abc87d36693b007bdbdab5ee753ccea6326acb34 Mon Sep 17 00:00:00 2001
From: Comfy Org PR Bot
Date: Thu, 12 Mar 2026 06:04:51 +0900
Subject: [PATCH 4/4] Bump comfyui-frontend-package to 1.41.15 (#12891)
---------
Co-authored-by: Alexander Brown
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 89cd994e95f0..ffa5fa37696a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.39.19
+comfyui-frontend-package==1.41.15
comfyui-workflow-templates==0.9.18
comfyui-embedded-docs==0.4.3
torch