Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
40 commits
Select commit Hold shift + click to select a range
3b24b90
Merge pull request #24 from lightningpixel/main
lightningpixel Mar 19, 2026
bb1fb1e
feat (setup): add data folder picker to first-run setup
Mar 19, 2026
d10bd00
Merge pull request #25 from lightningpixel/feat/user-data-dir-setup
lightningpixel Mar 19, 2026
4c7bce6
feat(setup): add dependencies custom folder
Mar 20, 2026
c50d475
Merge pull request #28 from lightningpixel/feat/add-dependencies-cust…
lightningpixel Mar 20, 2026
4c9a4d3
error modal + fix dev mode to use embedded Python and data dir
Mar 20, 2026
b1c5bc0
Merge pull request #29 from lightningpixel/fix/dev-setup
lightningpixel Mar 20, 2026
ffd51f0
feat(setting): add session-based log rotation and logs viewer in sett…
Mar 20, 2026
80e9799
Merge pull request #30 from lightningpixel/feat/setting-session-logs
lightningpixel Mar 20, 2026
02b699b
fix(generate): disable generate without model, add image clear button
Mar 20, 2026
4533959
Merge pull request #31 from lightningpixel/fix/ux-improvements
lightningpixel Mar 20, 2026
944459a
fix(generate): rename screenshot default filename
Mar 20, 2026
94bc467
Merge pull request #32 from lightningpixel/fix/screenshot-filename
lightningpixel Mar 20, 2026
d0697d7
feat(models): show file count and current filename during model download
Mar 20, 2026
d7ee075
Merge pull request #33 from lightningpixel/feat/model-download-improv…
lightningpixel Mar 20, 2026
1107c60
fix(setup): isolate Python env using bundled python-build-standalone …
Mar 20, 2026
15cd0b7
Merge pull request #37 from lightningpixel/fix/python-env-isolation
lightningpixel Mar 20, 2026
cbf2b89
feat(structure): add auto-updater with patch version
Mar 21, 2026
02271a1
Merge pull request #38 from lightningpixel/feat/auto-updater
lightningpixel Mar 21, 2026
38caac7
fix(settings): serve workspace files dynamically and unload models on…
Mar 21, 2026
1e58b63
Merge pull request #39 from lightningpixel/fix/dynamic-workspace-and-…
lightningpixel Mar 21, 2026
0a018a6
fix package json
Mar 21, 2026
bfc1d11
Merge pull request #40 from lightningpixel/fix/fix-package-json
lightningpixel Mar 21, 2026
b4fc7dd
fix(generate): free memory button and model unload-all endpoint
Mar 21, 2026
39ed876
Merge pull request #41 from lightningpixel/fix/free-memory-button
lightningpixel Mar 21, 2026
75dad36
feat(generate): add stop generation button with cooperative cancellation
Mar 21, 2026
2e98604
Merge pull request #42 from lightningpixel/feat/cancel-generation
lightningpixel Mar 21, 2026
7605047
feat(models): pass hf_skip_prefixes through download pipeline and fix…
Mar 22, 2026
cb12565
Merge pull request #43 from lightningpixel/feat/model-download-improv…
lightningpixel Mar 22, 2026
04f3014
fix(generate): black screen when viewing a deleted workspace model
Mar 22, 2026
7b5a34b
Merge pull request #44 from lightningpixel/fix/viewer-crash-on-delete…
lightningpixel Mar 22, 2026
0a48b71
feat(models): add show in explorer button to model cards
Mar 22, 2026
502474a
Merge pull request #45 from lightningpixel/feat/model-card-open-folder
lightningpixel Mar 22, 2026
d2e1236
fix(generate): improve memory release on unload and disable free-memo…
Mar 22, 2026
b05e402
Merge pull request #46 from lightningpixel/fix/memory-release-and-ui-…
lightningpixel Mar 22, 2026
57f936a
fix(workspace): prevent workspace model selection while a generation …
Mar 22, 2026
6fff7de
Merge pull request #47 from lightningpixel/fix/block-workspace-click-…
lightningpixel Mar 22, 2026
0f89276
fix release pipeline
Mar 22, 2026
91ef8b7
Merge pull request #48 from lightningpixel/fix/fix-release-pipeline
lightningpixel Mar 22, 2026
081b2c9
bump version 0.2.0
Mar 22, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
77 changes: 38 additions & 39 deletions .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,26 @@ permissions:
contents: write

jobs:
# ─── Create a single release ────────────────────────────────────────────────
create-release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4

- name: Create release
run: |
gh release create ${{ github.ref_name }} \
--title "Modly Beta ${{ github.ref_name }}" \
--generate-notes \
--latest \
--repo ${{ github.repository }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

# ─── Windows build ────────────────────────────────────────────────────────────
build-windows:
needs: create-release
runs-on: windows-latest
steps:
- name: Checkout
Expand All @@ -29,18 +47,26 @@ jobs:
run: node scripts/download-python-embed.js

- name: Build & Package (Windows)
shell: bash
run: npx electron-vite build && npx electron-builder --win --publish never
env:
CSC_IDENTITY_AUTO_DISCOVERY: false

- name: Upload Windows artifact
uses: actions/upload-artifact@v4
with:
name: windows-installer
path: dist/*.exe
- name: Upload assets to release
shell: bash
run: |
gh release upload ${{ github.ref_name }} \
dist/*.exe \
dist/*.exe.blockmap \
dist/latest.yml \
--repo ${{ github.repository }} \
--clobber
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

# ─── Linux build ─────────────────────────────────────────────────────────────
build-linux:
needs: create-release
runs-on: ubuntu-latest
steps:
- name: Checkout
Expand All @@ -55,42 +81,15 @@ jobs:
- name: Install dependencies
run: npm install

- name: Install Linux build dependencies
run: sudo apt-get install -y rpm

- name: Build & Package (Linux)
run: npx electron-vite build && npx electron-builder --linux --publish never

- name: Upload Linux artifact
uses: actions/upload-artifact@v4
with:
name: linux-installer
path: dist/*.AppImage

# ─── Create GitHub Release ───────────────────────────────────────────────────
release:
needs: [build-windows, build-linux]
runs-on: ubuntu-latest
steps:
- name: Download Windows artifact
uses: actions/download-artifact@v4
with:
name: windows-installer
path: artifacts/windows

- name: Download Linux artifact
uses: actions/download-artifact@v4
with:
name: linux-installer
path: artifacts/linux

- name: Create GitHub Release
- name: Upload assets to release
run: |
gh release create ${{ github.ref_name }} \
artifacts/windows/*.exe \
artifacts/linux/*.AppImage \
--title "Modly Beta ${{ github.ref_name }}" \
--generate-notes \
--repo ${{ github.repository }}
gh release upload ${{ github.ref_name }} \
dist/*.AppImage \
dist/latest-linux.yml \
--repo ${{ github.repository }} \
--clobber
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
16 changes: 11 additions & 5 deletions api/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,8 @@
from contextlib import asynccontextmanager
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from fastapi import HTTPException

from routers import generation, model, optimize, status, settings, extensions, export

Expand All @@ -22,7 +23,7 @@ async def lifespan(app: FastAPI):

app = FastAPI(
title="Modly API",
version="0.1.3",
version="0.2.0",
lifespan=lifespan,
)

Expand All @@ -41,6 +42,11 @@ async def lifespan(app: FastAPI):
app.include_router(extensions.router, prefix="/extensions")
app.include_router(export.router, prefix="/export")

# Serve generated GLB files from workspace
from services.generator_registry import WORKSPACE_DIR
app.mount("/workspace", StaticFiles(directory=str(WORKSPACE_DIR)), name="workspace")
# Serve generated files from workspace — dynamic so path changes take effect immediately
@app.get("/workspace/{full_path:path}")
async def serve_workspace_file(full_path: str):
import services.generator_registry as reg
file_path = reg.WORKSPACE_DIR / full_path
if not file_path.exists() or not file_path.is_file():
raise HTTPException(status_code=404, detail="File not found")
return FileResponse(str(file_path))
36 changes: 34 additions & 2 deletions api/routers/generation.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
import uuid
from typing import Dict
from fastapi import APIRouter, File, Form, UploadFile, HTTPException, BackgroundTasks
from services.generators.base import smooth_progress
from services.generators.base import smooth_progress, GenerationCancelled

import re as _re
from services.generator_registry import generator_registry, WORKSPACE_DIR
Expand All @@ -13,6 +13,8 @@
router = APIRouter(tags=["generation"])

_jobs: Dict[str, JobStatus] = {}
_cancelled: set = set()
_cancel_events: Dict[str, threading.Event] = {}


@router.post("/from-image")
Expand Down Expand Up @@ -71,6 +73,7 @@ async def generate_from_image(

job = JobStatus(job_id=job_id, status="pending", progress=0)
_jobs[job_id] = job
_cancel_events[job_id] = threading.Event()

background_tasks.add_task(_run_generation, job_id, image_bytes, params, collection)

Expand All @@ -86,6 +89,19 @@ async def job_status(job_id: str):
return job


@router.post("/cancel/{job_id}")
async def cancel_job(job_id: str):
job = _jobs.get(job_id)
if not job:
raise HTTPException(404, f"Job {job_id} not found")
_cancelled.add(job_id)
if job_id in _cancel_events:
_cancel_events[job_id].set()
if job.status in ("pending", "running"):
job.status = "cancelled"
return {"cancelled": True}


async def _run_generation(job_id: str, image_bytes: bytes, params: dict, collection: str = "Default") -> None:
job = _jobs[job_id]
job.status = "running"
Expand Down Expand Up @@ -118,20 +134,36 @@ def progress_cb(pct: int, step: str = "") -> None:
else:
gen = await loop.run_in_executor(None, generator_registry.get_active)

if job_id in _cancelled:
return

# Direct output to the collection subfolder
coll_dir = WORKSPACE_DIR / collection
coll_dir.mkdir(parents=True, exist_ok=True)
gen.outputs_dir = coll_dir

cancel_event = _cancel_events.get(job_id)
import inspect
supports_cancel = "cancel_event" in inspect.signature(gen.generate).parameters
output_path = await loop.run_in_executor(
None,
lambda: gen.generate(image_bytes, params, progress_cb),
lambda: gen.generate(image_bytes, params, progress_cb, cancel_event)
if supports_cancel
else gen.generate(image_bytes, params, progress_cb),
)

if job_id in _cancelled:
return

job.status = "done"
job.progress = 100
job.output_url = f"/workspace/{collection}/{output_path.name}"

except GenerationCancelled:
job.status = "cancelled"
except Exception as exc:
if job_id in _cancelled:
return
tb = traceback.format_exc()
print(f"[Generation ERROR] {exc}\n{tb}")
job.status = "error"
Expand Down
40 changes: 34 additions & 6 deletions api/routers/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,23 @@ async def switch_model(model_id: str):
raise HTTPException(400, str(e))


@router.post("/unload-all")
async def unload_all_models():
"""Unloads all models from memory to free VRAM/RAM."""
generator_registry.unload_all()
# Force Python to release memory back to the OS
import gc
gc.collect()
try:
import ctypes, sys
if sys.platform == "win32":
k32 = ctypes.windll.kernel32
k32.SetProcessWorkingSetSizeEx(k32.GetCurrentProcess(), -1, -1, 0)
except Exception:
pass
return {"unloaded": True}


@router.post("/unload/{model_id}")
async def unload_model(model_id: str):
"""Unloads a model from memory so its files can be safely deleted."""
Expand All @@ -51,19 +68,30 @@ async def unload_model(model_id: str):


@router.get("/hf-download")
async def hf_download(repo_id: str, model_id: str):
async def hf_download(repo_id: str, model_id: str, skip_prefixes: Optional[str] = None):
"""
Streams a HuggingFace Hub model download via SSE.
Downloads into MODELS_DIR / model_id applying the filtering
declared in the extension manifest (hf_skip_prefixes).

skip_prefixes: JSON-encoded list of path prefixes to exclude (passed from Electron).
Falls back to registry manifest if not provided.

SSE format: data: {"percent": 0-100, "file": "...", "status": "..."}
"""
import json as _json
dest_dir = str(MODELS_DIR / model_id)
try:
skip_list = generator_registry.get_manifest(model_id).get("hf_skip_prefixes", [])
except KeyError:
skip_list = []
# Prefer skip_prefixes passed directly from the client (authoritative, no registry dep)
if skip_prefixes:
try:
skip_list = _json.loads(skip_prefixes)
except Exception:
skip_list = []
else:
try:
skip_list = generator_registry.get_manifest(model_id).get("hf_skip_prefixes", [])
except KeyError:
skip_list = []

async def stream():
loop = asyncio.get_running_loop()
Expand Down Expand Up @@ -105,7 +133,7 @@ def _dl(f=filename):

# Reserve 1-95 for file downloads, leave 95-100 for finalisation
pct = 1 + round((i + 1) / total * 94)
yield _fmt({"percent": pct, "file": filename})
yield _fmt({"percent": pct, "file": filename, "fileIndex": i + 1, "totalFiles": total})

yield _fmt({"percent": 100, "status": "done"})

Expand Down
2 changes: 1 addition & 1 deletion api/schemas/generation.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

class JobStatus(BaseModel):
job_id: str
status: Literal["pending", "running", "done", "error"]
status: Literal["pending", "running", "done", "error", "cancelled"]
progress: int = 0 # 0–100
step: Optional[str] = None # Human-readable current step
output_url: Optional[str] = None
Expand Down
1 change: 1 addition & 0 deletions api/services/generator_registry.py
Original file line number Diff line number Diff line change
Expand Up @@ -327,6 +327,7 @@ def update_paths(self, models_dir: Optional[Path], workspace_dir: Optional[Path]
import services.generator_registry as _self_module

if models_dir is not None:
self.unload_all()
models_dir.mkdir(parents=True, exist_ok=True)
_self_module.MODELS_DIR = models_dir
for model_id, gen in self._generators.items():
Expand Down
30 changes: 30 additions & 0 deletions api/services/generators/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,10 @@
from typing import Callable, Optional


class GenerationCancelled(Exception):
    """Signals that a running generation was interrupted via its cancel event."""


def smooth_progress(
progress_cb: Callable[[int, str], None],
start: int,
Expand Down Expand Up @@ -73,6 +77,25 @@ def load(self) -> None:
def unload(self) -> None:
    """Drop the model reference and aggressively return memory to the OS.

    Subclasses may override this when they need extra cleanup. Everything
    beyond clearing the reference is best-effort.
    """
    self._model = None
    # Collect garbage so large objects become unreachable right away.
    import gc
    gc.collect()
    # Release cached CUDA allocations when torch is installed.
    try:
        import torch
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
    except ImportError:
        pass
    # On Windows, trim the working set so the OS reclaims freed pages
    # immediately instead of waiting for memory pressure.
    try:
        import ctypes
        import sys
        if sys.platform == "win32":
            k32 = ctypes.windll.kernel32
            proc = k32.GetCurrentProcess()
            k32.SetProcessWorkingSetSizeEx(proc, -1, -1, 0)
    except Exception:
        pass

def is_loaded(self) -> bool:
    """Report whether a model object is currently held in memory."""
    return self._model is not None
Expand All @@ -87,14 +110,21 @@ def generate(
image_bytes: bytes,
params: dict,
progress_cb: Optional[Callable[[int, str], None]] = None,
cancel_event: Optional[threading.Event] = None,
) -> Path:
"""
Starts 3D generation from an image.
Returns the path to the generated .glb file.
progress_cb(percent: int, step_label: str)
cancel_event: set this to interrupt generation between steps.
"""
...

def _check_cancelled(self, cancel_event: Optional[threading.Event]) -> None:
"""Raises GenerationCancelled if cancel_event is set."""
if cancel_event and cancel_event.is_set():
raise GenerationCancelled()

# ------------------------------------------------------------------ #
# Parameter schema (for the UI)
# ------------------------------------------------------------------ #
Expand Down
9 changes: 0 additions & 9 deletions api/services/generators/hunyuan3d.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,15 +69,6 @@ def load(self) -> None:
self._model = pipeline
print(f"[Hunyuan3DGenerator] Loaded on {device}.")

def unload(self) -> None:
super().unload()
try:
import torch
if torch.cuda.is_available():
torch.cuda.empty_cache()
except ImportError:
pass

# ------------------------------------------------------------------ #
# Inference
# ------------------------------------------------------------------ #
Expand Down
Loading
Loading