Changes from all commits
1,240 changes: 620 additions & 620 deletions dream_layer_backend/dream_layer.py

Large diffs are not rendered by default.

19 changes: 19 additions & 0 deletions dream_layer_backend/img2img_server.py
@@ -12,6 +12,9 @@
from img2img_workflow import transform_to_img2img_workflow
from shared_utils import COMFY_API_URL
from dream_layer_backend_utils.fetch_advanced_models import get_controlnet_models
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from run_registry import get_registry
from run_registry import create_run_config_from_generation_data
from dataclasses import asdict

@@ -181,6 +184,22 @@ def handle_img2img():
logger.info(f" Subfolder: {img.get('subfolder', 'None')}")
logger.info(f" URL: {img.get('url')}")

        # Save run to registry
        try:
            registry = get_registry()
            run_config = {
                **data,
                'generation_type': 'img2img',
                'workflow': workflow,
                'workflow_version': '1.0.0',
                'output_images': comfy_response.get("all_images", [])
            }
            run_id = registry.save_run(run_config)
            logger.info(f"✅ Run saved with ID: {run_id}")
        except Exception as save_error:
            logger.warning(f"⚠️ Failed to save run: {str(save_error)}")
            # Don't fail the request if saving fails
=======
Comment on lines +187 to +202

⚠️ Potential issue

Fix syntax error in try-except block

There's a syntax error at line 202 where the try block is not properly closed. The ======= appears to be a merge conflict marker that wasn't resolved.

         # Save run to registry
         try:
             registry = get_registry()
             run_config = {
                 **data,
                 'generation_type': 'img2img',
                 'workflow': workflow,
                 'workflow_version': '1.0.0',
                 'output_images': comfy_response.get("all_images", [])
             }
             run_id = registry.save_run(run_config)
             logger.info(f"✅ Run saved with ID: {run_id}")
         except Exception as save_error:
             logger.warning(f"⚠️ Failed to save run: {str(save_error)}")
             # Don't fail the request if saving fails
-=======
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
        # Save run to registry
        try:
            registry = get_registry()
            run_config = {
                **data,
                'generation_type': 'img2img',
                'workflow': workflow,
                'workflow_version': '1.0.0',
                'output_images': comfy_response.get("all_images", [])
            }
            run_id = registry.save_run(run_config)
            logger.info(f"✅ Run saved with ID: {run_id}")
        except Exception as save_error:
            logger.warning(f"⚠️ Failed to save run: {str(save_error)}")
            # Don't fail the request if saving fails
=======
        # Save run to registry
        try:
            registry = get_registry()
            run_config = {
                **data,
                'generation_type': 'img2img',
                'workflow': workflow,
                'workflow_version': '1.0.0',
                'output_images': comfy_response.get("all_images", [])
            }
            run_id = registry.save_run(run_config)
            logger.info(f"✅ Run saved with ID: {run_id}")
        except Exception as save_error:
            logger.warning(f"⚠️ Failed to save run: {str(save_error)}")
            # Don't fail the request if saving fails
🧰 Tools
🪛 Ruff (0.12.2)

202-202: SyntaxError: Expected except or finally after try block


202-202: SyntaxError: Expected a statement


202-202: SyntaxError: Expected a statement


202-202: SyntaxError: Expected a statement

🤖 Prompt for AI Agents
In dream_layer_backend/img2img_server.py around lines 187 to 202 there is an
unresolved merge conflict marker ("=======") inside the try/except that breaks
syntax; remove the conflict marker and any surrounding conflict markers (e.g.
<<<<<<<, >>>>>>>) so the try block and except block are properly closed and
indented, ensuring the except body (logging the save_error) remains and
execution continues as intended; then run a quick lint or run the module to
confirm no syntax errors remain.
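A quick way to double-check that no conflict markers are left behind before re-running the server (a hedged sketch, not part of this PR; git diff --check will also flag leftover markers):

from pathlib import Path

# Hypothetical check: fail fast if unresolved merge conflict markers remain in the file under review
markers = ("<<<<<<<", "=======", ">>>>>>>")
lines = Path("dream_layer_backend/img2img_server.py").read_text().splitlines()
leftover = [(n, line) for n, line in enumerate(lines, 1) if line.startswith(markers)]
assert not leftover, f"Unresolved conflict markers found: {leftover}"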

        # Extract generated image filenames
        generated_images = []
        if comfy_response.get("generated_images"):
Empty file.
168 changes: 168 additions & 0 deletions dream_layer_backend/test_run_registry_demo.py
@@ -0,0 +1,168 @@
#!/usr/bin/env python3
"""
Demo script to test the Run Registry functionality
This simulates what happens when a generation completes
"""

import json
import time
from run_registry import get_registry

def simulate_txt2img_generation():
    """Simulate a text-to-image generation and save to registry"""

    # Simulate the config that would come from a real generation
    generation_config = {
        'prompt': 'A majestic mountain landscape at sunset, highly detailed, 8k',
        'negative_prompt': 'blurry, low quality, distorted',
        'model': 'stable-diffusion-xl-base-1.0',
        'vae': 'sdxl-vae-fp16-fix',
        'loras': [
            {'name': 'detail-enhancer', 'strength': 0.7},
            {'name': 'landscape-style', 'strength': 0.5}
        ],
        'controlnet': {
            'enabled': False,
            'model': None
        },
        'seed': 2024,
        'sampler': 'DPM++ 2M Karras',
        'scheduler': 'karras',
        'steps': 30,
        'cfg_scale': 7.5,
        'width': 1024,
        'height': 1024,
        'batch_size': 1,
        'generation_type': 'txt2img',
        'workflow': {
            'name': 'txt2img_workflow',
            'nodes': ['KSampler', 'VAEDecode', 'SaveImage']
        },
        'workflow_version': '1.2.0'
    }

    # Save to registry (this is what txt2img_server.py does)
    registry = get_registry()
    run_id = registry.save_run(generation_config)

    print(f"✅ Saved txt2img generation run: {run_id}")
    return run_id

def simulate_img2img_generation():
    """Simulate an image-to-image generation and save to registry"""

    generation_config = {
        'prompt': 'Transform to cyberpunk style, neon lights',
        'negative_prompt': 'realistic, photographic',
        'model': 'dreamshaper-8',
        'vae': 'vae-ft-mse-840000',
        'loras': [
            {'name': 'cyberpunk-style', 'strength': 0.9}
        ],
        'controlnet': {
            'enabled': True,
            'model': 'control_v11p_sd15_canny',
            'strength': 0.75
        },
        'seed': -1, # Random seed
        'sampler': 'Euler a',
        'steps': 25,
        'cfg_scale': 8.0,
        'width': 512,
        'height': 768,
        'denoising_strength': 0.65,
        'generation_type': 'img2img',
        'workflow': {
            'name': 'img2img_workflow',
            'nodes': ['LoadImage', 'KSampler', 'VAEDecode', 'SaveImage']
        },
        'workflow_version': '1.1.0'
    }

    registry = get_registry()
    run_id = registry.save_run(generation_config)

    print(f"✅ Saved img2img generation run: {run_id}")
    return run_id

def test_registry_operations():
    """Test various registry operations"""

    registry = get_registry()

    # Clear any existing runs for a clean test
    registry.clear_all_runs()
    print("🧹 Cleared all existing runs\n")

    # Simulate multiple generations
    print("📝 Simulating generation runs...")
    txt2img_id = simulate_txt2img_generation()
    time.sleep(0.1) # Small delay to ensure different timestamps
    img2img_id = simulate_img2img_generation()

    # Add a few more for testing pagination
    for i in range(3):
        config = {
            'prompt': f'Test prompt {i+1}',
            'model': f'test-model-{i+1}',
            'generation_type': 'txt2img' if i % 2 == 0 else 'img2img',
            'seed': 1000 + i,
            'steps': 20 + i
        }
        registry.save_run(config)
        print(f"✅ Saved test run {i+1}")
Comment on lines +104 to +113

issue (code-quality): Avoid loops in tests. (no-loop-in-tests)

Explanation: Avoid complex code, like loops, in test functions.

Google's software engineering guidelines say:
"Clear tests are trivially correct upon inspection."
To reach that, avoid complex code in tests:

  • loops
  • conditionals

Some ways to fix this:

  • Use parametrized tests to get rid of the loop.
  • Move the complex logic into helpers.
  • Move the complex part into pytest fixtures.

Complexity is most often introduced in the form of logic. Logic is defined via the imperative parts of programming languages such as operators, loops, and conditionals. When a piece of code contains logic, you need to do a bit of mental computation to determine its result instead of just reading it off of the screen. It doesn't take much logic to make a test more difficult to reason about.

Software Engineering at Google / Don't Put Logic in Tests
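As a concrete illustration of the parametrization suggestion, the padding-run loop above could be expressed roughly as follows. This is a sketch only, assuming pytest is in use and that a registry fixture wraps get_registry():

import pytest
from run_registry import get_registry

@pytest.fixture
def registry():
    # Fresh registry state for each case (assumes clear_all_runs is safe to call here)
    reg = get_registry()
    reg.clear_all_runs()
    return reg

@pytest.mark.parametrize("i, generation_type", [
    (0, 'txt2img'),
    (1, 'img2img'),
    (2, 'txt2img'),
])
def test_save_run_returns_an_id(registry, i, generation_type):
    config = {
        'prompt': f'Test prompt {i+1}',
        'model': f'test-model-{i+1}',
        'generation_type': generation_type,
        'seed': 1000 + i,
        'steps': 20 + i
    }
    # Each parametrized case saves exactly one run and should get an id back
    assert registry.save_run(config)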


print("\n📊 Testing retrieval operations...")

# Test getting all runs
all_runs = registry.get_runs(limit=10)
print(f"Total runs in registry: {len(all_runs)}")

# Display run summaries
print("\n📋 Run summaries:")
for run in all_runs[:3]: # Show first 3
print(f" - ID: {run['id'][:8]}...")
print(f" Prompt: {run['prompt'][:50]}...")
print(f" Model: {run['model']}")
print(f" Type: {run['generation_type']}")
print()
Comment on lines +123 to +128

issue (code-quality): Avoid loops in tests. (no-loop-in-tests)

    # Test getting specific run details
    print("🔍 Testing detailed run retrieval...")
    detailed_run = registry.get_run(txt2img_id)
    if detailed_run:
        print(f"Retrieved run {txt2img_id[:8]}...")
        print(f" - Prompt: {detailed_run['prompt']}")
        print(f" - Sampler: {detailed_run['sampler']}")
        print(f" - Steps: {detailed_run['steps']}")
        print(f" - CFG Scale: {detailed_run['cfg_scale']}")
        print(f" - LoRAs: {len(detailed_run.get('loras', []))} loaded")
Comment on lines +133 to +139

issue (code-quality): Avoid conditionals in tests. (no-conditionals-in-tests)

Explanation: Avoid complex code, like conditionals, in test functions.

Google's software engineering guidelines say:
"Clear tests are trivially correct upon inspection."
To reach that, avoid complex code in tests:

  • loops
  • conditionals

Some ways to fix this:

  • Use parametrized tests to get rid of the loop.
  • Move the complex logic into helpers.
  • Move the complex part into pytest fixtures.

Complexity is most often introduced in the form of logic. Logic is defined via the imperative parts of programming languages such as operators, loops, and conditionals. When a piece of code contains logic, you need to do a bit of mental computation to determine its result instead of just reading it off of the screen. It doesn't take much logic to make a test more difficult to reason about.

Software Engineering at Google / Don't Put Logic in Tests
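Applied to the retrieval check above, the if detailed_run: guard could become plain assertions so a missing run fails the test instead of silently skipping the prints. A sketch using values from the simulated txt2img config, assuming save_run stores those fields unchanged:

# Inside test_registry_operations(), after txt2img_id has been created
detailed_run = registry.get_run(txt2img_id)
assert detailed_run is not None, "saved txt2img run should be retrievable by id"
assert detailed_run['sampler'] == 'DPM++ 2M Karras'
assert detailed_run['steps'] == 30
assert detailed_run['cfg_scale'] == 7.5
assert len(detailed_run.get('loras', [])) == 2  # both LoRAs from the simulated config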


    # Test deletion
    print("\n🗑️ Testing deletion...")
    success = registry.delete_run(img2img_id)
    if success:
        print(f"Successfully deleted run {img2img_id[:8]}...")
Comment on lines +144 to +145

issue (code-quality): Avoid conditionals in tests. (no-conditionals-in-tests)

    # Verify deletion
    deleted_run = registry.get_run(img2img_id)
    if deleted_run is None:
        print("✅ Deletion verified - run no longer exists")
Comment on lines +149 to +150

issue (code-quality): Avoid conditionals in tests. (no-conditionals-in-tests)

    # Final count
    final_runs = registry.get_runs(limit=10)
    print(f"\n📈 Final run count: {len(final_runs)}")

    print("\n✨ All tests completed successfully!")
    print("\nThe Run Registry is working correctly and ready for use.")
    print("You can now:")
    print(" 1. Start the backend server to expose the API endpoints")
    print(" 2. Start the frontend to see the UI at /runs")
    print(" 3. Generate images to automatically save runs")

if __name__ == "__main__":
    print("=" * 60)
    print("RUN REGISTRY FUNCTIONALITY TEST")
    print("=" * 60)
    print()
    test_registry_operations()
21 changes: 21 additions & 0 deletions dream_layer_backend/txt2img_server.py
@@ -1,12 +1,18 @@
from flask import Flask, request, jsonify
from flask_cors import CORS
import json
from txt2img_workflow import transform_to_txt2img_workflow
from shared_utils import send_to_comfyui, interrupt_workflow
import os
import requests
from dream_layer import get_directories
from dream_layer_backend_utils import interrupt_workflow
from shared_utils import send_to_comfyui
from dream_layer_backend_utils.fetch_advanced_models import get_controlnet_models
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from run_registry import get_registry
Comment on lines +12 to +15

🛠️ Refactor suggestion

⚠️ Potential issue

Avoid modifying sys.path - use proper imports instead.

Same issue as in img2img_server.py. For consistency across the codebase:

-import sys
-import os
-sys.path.append(os.path.dirname(os.path.abspath(__file__)))
-from run_registry import get_registry
+import os
+from .run_registry import get_registry
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from run_registry import get_registry
import os
from .run_registry import get_registry
🤖 Prompt for AI Agents
In dream_layer_backend/txt2img_server.py around lines 7 to 10, avoid modifying
sys.path directly to import run_registry. Instead, refactor the import statement
to use a proper relative or absolute import based on the package structure,
ensuring consistency with img2img_server.py and the rest of the codebase.
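One caveat the suggestion leaves implicit: a relative import such as from .run_registry import get_registry only resolves when dream_layer_backend is imported as a package, i.e. it has an __init__.py and the server is launched with python -m dream_layer_backend.txt2img_server rather than as a bare script. A hedged sketch of an import header that covers both launch styles without touching sys.path:

# Hypothetical import header; assumes dream_layer_backend/ contains an __init__.py
try:
    # Package launch: python -m dream_layer_backend.txt2img_server
    from .run_registry import get_registry, create_run_config_from_generation_data
except ImportError:
    # Direct script launch from inside dream_layer_backend/
    from run_registry import get_registry, create_run_config_from_generation_data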

from PIL import Image, ImageDraw
from txt2img_workflow import transform_to_txt2img_workflow
from run_registry import create_run_config_from_generation_data
@@ -79,6 +85,21 @@ def handle_txt2img():
"message": comfy_response["error"]
}), 500

# Save run to registry
try:
registry = get_registry()
run_config = {
**data,
'generation_type': 'txt2img',
'workflow': workflow,
'workflow_version': '1.0.0',
'output_images': comfy_response.get("all_images", [])
}
run_id = registry.save_run(run_config)
print(f"✅ Run saved with ID: {run_id}")
except Exception as save_error:
print(f"⚠️ Failed to save run: {str(save_error)}")
# Don't fail the request if saving fails
# Extract generated image filenames
generated_images = []
if comfy_response.get("all_images"):
139 changes: 139 additions & 0 deletions dream_layer_backend/verify_pr_fixes.py
@@ -0,0 +1,139 @@
#!/usr/bin/env python3
"""
Verification script for PR feedback fixes
Tests the three main issues that were addressed:
1. time.sleep() removal
2. list_runs/get_runs method compatibility
3. controlnet/controlnets naming alignment
"""

import json
import sys
from run_registry import RunRegistry

def test_controlnets_naming():
    """Test that controlnet is properly mapped to controlnets"""
    print("\n🔍 Testing controlnet -> controlnets mapping...")

    registry = RunRegistry()

    # Test with 'controlnet' in input (backward compatibility)
    config_with_controlnet = {
        'prompt': 'Test with controlnet',
        'controlnet': {
            'enabled': True,
            'model': 'canny',
            'units': [{'strength': 1.0}]
        }
    }

    run_id = registry.save_run(config_with_controlnet)
    saved_run = registry.get_run(run_id)

    # Check that it's saved as 'controlnets'
    assert 'controlnets' in saved_run, "❌ 'controlnets' key not found in saved run"
    assert saved_run['controlnets']['enabled'] == True, "❌ controlnets data not properly saved"
    print(" ✅ 'controlnet' input properly mapped to 'controlnets' in storage")

    # Clean up
    registry.delete_run(run_id)

    return True

def test_list_runs_alias():
    """Test that list_runs works as an alias for get_runs"""
    print("\n🔍 Testing list_runs/get_runs compatibility...")

    registry = RunRegistry()

    # Save a few test runs
    run_ids = []
    for i in range(3):
        config = {
            'prompt': f'Test run {i}',
            'model': f'model-{i}'
        }
        run_ids.append(registry.save_run(config))

    # Test both methods return the same results
    runs_via_get = registry.get_runs(limit=10)
    runs_via_list = registry.list_runs(limit=10)

    assert len(runs_via_get) == len(runs_via_list), "❌ get_runs and list_runs return different counts"
    assert runs_via_get[0]['id'] == runs_via_list[0]['id'], "❌ get_runs and list_runs return different data"

    print(" ✅ list_runs() works as an alias for get_runs()")
    print(f" ✅ Both methods return {len(runs_via_get)} runs")

    # Clean up
    for run_id in run_ids:
        registry.delete_run(run_id)

    return True

def check_time_sleep_removed():
    """Check that time.sleep was removed from dream_layer.py"""
    print("\n🔍 Checking time.sleep() removal...")

    with open('dream_layer.py', 'r') as f:
        content = f.read()

    # Check line 288 area for the fix
    lines = content.split('\n')
    for i, line in enumerate(lines[285:295], start=286):
        if 'time.sleep(1)' in line:
            print(f" ❌ time.sleep(1) still found on line {i}")
            return False
        if 'pass # Continue checking without delay' in line:
            print(f" ✅ time.sleep(1) properly removed and replaced with pass statement")
            return True

    print(" ✅ No problematic time.sleep(1) found in connection retry loop")
    return True

def main():
    """Run all verification tests"""
    print("=" * 60)
    print("PR FEEDBACK FIXES VERIFICATION")
    print("=" * 60)

    all_passed = True

    # Test 1: time.sleep removal
    try:
        if not check_time_sleep_removed():
            all_passed = False
    except Exception as e:
        print(f" ❌ Error checking time.sleep: {e}")
        all_passed = False

    # Test 2: list_runs/get_runs compatibility
    try:
        if not test_list_runs_alias():
            all_passed = False
    except Exception as e:
        print(f" ❌ Error testing list_runs alias: {e}")
        all_passed = False

    # Test 3: controlnet/controlnets naming
    try:
        if not test_controlnets_naming():
            all_passed = False
    except Exception as e:
        print(f" ❌ Error testing controlnets naming: {e}")
        all_passed = False

    print("\n" + "=" * 60)
    if all_passed:
        print("✨ ALL PR FEEDBACK ISSUES HAVE BEEN FIXED! ✨")
        print("\nSummary of fixes:")
        print("1. ✅ Removed unnecessary time.sleep(1) from connection retry")
        print("2. ✅ Added list_runs() alias for get_runs() compatibility")
        print("3. ✅ Fixed controlnet -> controlnets naming for frontend")
        return 0
    else:
        print("❌ Some issues remain. Please review the output above.")
        return 1

if __name__ == "__main__":
    sys.exit(main())
3 changes: 3 additions & 0 deletions dream_layer_frontend/src/App.tsx
@@ -6,6 +6,7 @@ import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
import { BrowserRouter, Routes, Route } from "react-router-dom";
import Index from "./pages/Index";
import NotFound from "./pages/NotFound";
import { RunRegistry } from "./pages/RunRegistry";

const queryClient = new QueryClient();

@@ -17,6 +18,8 @@ const App = () => (
      <BrowserRouter>
        <Routes>
          <Route path="/" element={<Index />} />
          <Route path="/runs" element={<RunRegistry />} />
          <Route path="/runs/:id" element={<RunRegistry />} />
          {/* ADD ALL CUSTOM ROUTES ABOVE THE CATCH-ALL "*" ROUTE */}
          <Route path="*" element={<NotFound />} />
        </Routes>