Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
52 commits
Select commit Hold shift + click to select a range
e7aea56
Add performer-poster-backdrop plugin (#648)
worryaboutstuff Dec 23, 2025
1fb1317
Update AI Overhaul plugin for 0.9.1 (#647)
skier233 Dec 23, 2025
497b028
Add community link
DogmaDragon Dec 23, 2025
c10e0d2
Update ModernDark theme to latest version (#649)
DirtyRacer1337 Dec 24, 2025
b470716
[RandomButton] Correct random sort logic (#650)
Servbot91 Dec 29, 2025
1a9a777
[PythonToolsInstaller] Update python version to 3.13 (#651)
adtdev20 Dec 29, 2025
359ebe7
Add FunscriptHaven plugin (#652)
Jan 5, 2026
c1a0224
Add community forum link
DogmaDragon Jan 5, 2026
c981bc4
[imageGalleryNav]: Fix issues with some image filter types. (#654)
WeedLordVegeta420 Jan 16, 2026
69e44b2
Update AI Overhaul plugin for 0.9.2 (#655)
skier233 Jan 16, 2026
2a07190
initial commit of AHavenVLMConnector (#657)
Jan 27, 2026
aa9ec71
Add community forum link
DogmaDragon Jan 27, 2026
9781f6b
Add community link
DogmaDragon Jan 27, 2026
6416844
[stashdb-performer-gallery] Add Fragment for find_image (#662)
ouenascender Feb 4, 2026
f926955
Add another rename plugin (#668)
DogmaDragon Feb 16, 2026
d183fc3
[SFW Switch] Added State Memorization (#669)
Servbot91 Feb 18, 2026
0587f0d
[tagCopyPaste] Refactor to use PluginApi.patch. (#672)
WeedLordVegeta420 Feb 19, 2026
2e41ce5
Update vlm-engine dependency version to 0.9.3 (#674)
Feb 25, 2026
884f27b
[The Porn DB Markers] Fix pagination bug causing scenes to be skipped…
rustyinfinity Feb 26, 2026
16aeef5
AdulttimeInteractiveDL.py: Fix url in jerkbuddies.com to jerk-buddies…
gimmeliina Mar 4, 2026
ce30de4
[SFW Switch] Additional Fields Patched, Additional Plugin Integration…
Servbot91 Mar 5, 2026
a3eb5e0
[CJ's Card Tweaks] Enhancement: Extend to Include Stash Count (#671)
Stash-KennyG Mar 5, 2026
13ecf3f
[Haven VLM Connector] pydantic version to latest (#683)
Mar 15, 2026
f64c2b7
Add mobileWallLayout plugin (#680)
speckofthecosmos Mar 15, 2026
8bb6b1d
Add Group to TriggerType pattern
DogmaDragon Mar 15, 2026
4f5c3e3
Add forum link
DogmaDragon Mar 15, 2026
41a8ac2
[Haven VLM Connector] fix silent failure bug (#684)
Mar 15, 2026
2ff8501
Add GroupAutoScraper plugin (#682)
Stash-KennyG Mar 17, 2026
fc6d239
Update schema
DogmaDragon Mar 17, 2026
2112d4e
Add forum link
DogmaDragon Mar 17, 2026
74b0986
Update AI Overhaul plugin for 0.9.3 (#685)
skier233 Mar 20, 2026
1662751
[SFW Switch] Audio handling + Additional plugin support (#679)
Servbot91 Mar 20, 2026
bf07797
[Haven VLM Connector] release v1.1.0 (#686)
HavenCTO Mar 22, 2026
6ea6117
[SFW Switch] Additional Plugin Support: Deck Viewer, minor fix (#689)
Servbot91 Mar 26, 2026
3113dda
Closes #691
DogmaDragon Mar 30, 2026
bfd6a59
Merge branch 'main' of https://github.com/stashapp/CommunityScripts
DogmaDragon Mar 30, 2026
311495f
Fix typo
DogmaDragon Mar 30, 2026
eb219df
[mobileWallLayout] Fix layout applying to narrow desktop windows (#692)
speckofthecosmos Apr 3, 2026
898a4bd
Remove duplicate plugin.schema.json (#690)
puc9 Apr 3, 2026
e4182de
Add Image Blackout plugin (#693)
tuttledidit Apr 5, 2026
aa06f6f
Add scene-to-imageclip plugin (#694)
speckofthecosmos Apr 5, 2026
c512db2
Add community forum links
DogmaDragon Apr 5, 2026
eff9999
[Haven VLM Connector] Fix breaking change due to torchvision release …
HavenCTO Apr 6, 2026
68dbccb
Add SmartResolve plugin for duplicate scene workflows (#697)
Stash-KennyG Apr 9, 2026
a47c7a8
Add community forum link
DogmaDragon Apr 9, 2026
73e1efe
[Extra Performer Info] Bugfixes, setting user agent (#698)
Tweeticoats Apr 10, 2026
d6f4db8
Add scene URLs support for StashDB Submission Helper (#701)
Emilo2 Apr 15, 2026
9cf72b5
Add Group Details plugin for Group Cards (#700)
Stash-KennyG Apr 15, 2026
e1e7522
Add forum link
DogmaDragon Apr 15, 2026
8f7c991
performer-poster-backdrop: Stash v0.31.1 compatibility (hide duplicat…
Apr 16, 2026
ef437f2
performer-poster-backdrop: ignore local example/ debug folder
Apr 16, 2026
e01fed0
Delete plugins/performer-poster-backdrop/.gitignore
Stash-KennyG Apr 16, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 12 additions & 12 deletions .vscode/settings.json
Original file line number Diff line number Diff line change
@@ -1,15 +1,4 @@
{
"yaml.validate": true,
"yaml.disableAdditionalProperties": true,
"yaml.completion": true,
"yaml.extension.recommendations": true,
"yaml.hover": true,
"yaml.format.singleQuote": false,
"yaml.format.printWidth": 120,
"yaml.format.proseWrap": "always",
"yaml.schemas": {
"schema/plugin.schema.json": ["/plugins/**", "/themes/**"]
},
"[json]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
Expand All @@ -18,5 +7,16 @@
},
"[yaml]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
}
},
"yaml.completion": true,
"yaml.disableAdditionalProperties": true,
"yaml.extension.recommendations": true,
"yaml.format.printWidth": 120,
"yaml.format.proseWrap": "always",
"yaml.format.singleQuote": false,
"yaml.hover": true,
"yaml.schemas": {
"./validator/plugin.schema.json": ["/plugins/**", "/themes/**"]
},
"yaml.validate": true
}
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
3 changes: 2 additions & 1 deletion archive/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,5 @@

1. [renamerOnUpdate](./renamerOnUpdate) - issue [#483](https://github.com/stashapp/CommunityScripts/issues/483)
2. [visage](./visage/) - issue [#532](https://github.com/stashapp/CommunityScripts/issues/532)
3. [stashRealBooru](./stashRealbooru/) - issue [#540](https://github.com/stashapp/CommunityScripts/issues/540)
3. [stashRealBooru](./stashRealbooru/) - issue [#540](https://github.com/stashapp/CommunityScripts/issues/540)
4. [AITagger](./AITagger/) - issue [#691](https://github.com/stashapp/CommunityScripts/issues/691)
12 changes: 12 additions & 0 deletions plugins/AHavenVLMConnector/CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Changelog

All notable changes to the A Haven VLM Connector project will be documented in this file.

## [1.1.1] - 2026-04-05
### Fixes
- Mitigate TorchVision v0.26.0 release https://github.com/Haven-hvn/haven-vlm-engine-package/commit/7a3a6f9dd931237c93c5205f3d31df3e285ae21d

## [1.0.0] - 2025-06-29

### Added
- **Initial release**
145 changes: 145 additions & 0 deletions plugins/AHavenVLMConnector/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,145 @@
# A Haven VLM Connector

https://discourse.stashapp.cc/t/haven-vlm-connector/5464

A StashApp plugin for Vision-Language Model (VLM) based content tagging and analysis. This plugin is designed with a **local-first philosophy**, empowering users to run analysis on their own hardware (using CPU or GPU) and their local network. It also supports cloud-based VLM endpoints for additional flexibility. The Haven VLM Engine provides advanced automatic content detection and tagging, delivering superior accuracy compared to traditional image classification methods.

## Features

- **Local Network Empowerment**: Distribute processing across home/office computers without cloud dependencies
- **Context-Aware Detection**: Leverages Vision-Language Models' understanding of visual relationships
- **Advanced Dependency Management**: Uses PythonDepManager for automatic dependency installation
- **Enjoying Funscript Haven?** Check out more tools and projects at https://github.com/Haven-hvn

## Requirements

- Python 3.8+
- StashApp
- PythonDepManager plugin (automatically handles dependencies)
- OpenAI-compatible VLM endpoints (local or cloud-based)

## Installation

1. Clone or download this plugin to your StashApp plugins directory
2. Ensure PythonDepManager is installed in your StashApp plugins
3. Configure your VLM endpoints in `haven_vlm_config.py` (local network endpoints recommended)
4. Restart StashApp

The plugin automatically manages all dependencies.

## Why Local-First?

- **Complete Control**: Process sensitive content on your own hardware
- **Cost Effective**: Avoid cloud processing fees by using existing resources
- **Flexible Scaling**: Add more computers to your local network for increased capacity
- **Privacy Focused**: Keep your media completely private
- **Hybrid Options**: Combine local and cloud endpoints for optimal flexibility

```mermaid
graph LR
A[User's Computer] --> B[Local GPU Machine]
A --> C[Local CPU Machine 1]
A --> D[Local CPU Machine 2]
A --> E[Cloud Endpoint]
```

## Configuration

### Easy Setup with LM Studio

[LM Studio](https://lmstudio.ai/) provides the easiest way to configure local endpoints:

1. Download and install [LM Studio](https://lmstudio.ai/)
2. [Search for or download](https://huggingface.co/models) a vision-capable model; tested with : (in order of high to low accuracy) zai-org/glm-4.6v-flash, huihui-mistral-small-3.2-24b-instruct-2506-abliterated-v2, qwen/qwen3-vl-8b, lfm2.5-vl
3. Load your desired Model
4. On the developer tab start the local server using the start toggle
5. Optionally click the Settings gear then toggle *Serve on local network*
6. Optionally configure `haven_vlm_config.py`:

By default localhost is included in the config; **remove the cloud endpoint if you don't want automatic failover**
```python
{
"base_url": "http://localhost:1234/v1", # LM Studio default
"api_key": "", # API key not required
"name": "lm-studio-local",
"weight": 5,
"is_fallback": False
}
```

### Tag Configuration

```python
"tag_list": [
"Basketball point", "Foul", "Break-away", "Turnover"
]
```

### Processing Settings

```python
VIDEO_FRAME_INTERVAL = 2.0 # Process every 2 seconds
CONCURRENT_TASK_LIMIT = 8 # Adjust based on local hardware
```

## Usage

### Tag Videos
1. Tag scenes with `VLM_TagMe`
2. Run "Tag Videos" task
3. Plugin processes content using local/network resources

### Performance Tips
- Start with 2-3 local machines for load balancing
- Assign higher weights to GPU-enabled machines
- Adjust `CONCURRENT_TASK_LIMIT` based on total system resources
- Use SSD storage for better I/O performance

## File Structure

```
AHavenVLMConnector/
├── ahavenvlmconnector.yml
├── haven_vlm_connector.py
├── haven_vlm_config.py
├── haven_vlm_engine.py
├── haven_media_handler.py
├── haven_vlm_utility.py
├── requirements.txt
└── README.md
```

## Troubleshooting

### Local Network Setup
- Ensure firewalls allow communication between machines
- Verify all local endpoints are running VLM services
- Use static IPs for local machines
- Check `http://local-machine-ip:port/v1` responds correctly

### Performance Optimization
- **Distribute Load**: Use multiple mid-range machines instead of one high-end
- **GPU Prioritization**: Assign highest weight to GPU machines
- **Network Speed**: Use wired Ethernet connections for faster transfer
- **Resource Monitoring**: Watch system resources during processing

## Development

### Adding Local Endpoints
1. Install VLM service on network machines
2. Add endpoint configuration with local IPs
3. Set appropriate weights based on hardware capability

### Custom Models
Use any OpenAI-compatible models that support:
- POST requests to `/v1/chat/completions`
- Vision capabilities with image input
- Local deployment options

### Log Messages

Check StashApp logs for detailed processing information and error messages.

## License

This project is part of the StashApp Community Scripts collection.
22 changes: 22 additions & 0 deletions plugins/AHavenVLMConnector/ahavenvlmconnector.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
name: Haven VLM Connector
# requires: PythonDepManager
description: Tag videos with Vision-Language Models using any OpenAI-compatible VLM endpoint
version: 1.1.1
url: https://discourse.stashapp.cc/t/haven-vlm-connector/5464
exec:
- python
- "{pluginDir}/haven_vlm_connector.py"
interface: raw
tasks:
- name: Tag Videos
description: Run VLM analysis on videos with VLM_TagMe tag
defaultArgs:
mode: tag_videos
- name: Collect Incorrect Markers and Images
description: Collects data from markers and images that were VLM tagged but were manually marked with VLM_Incorrect due to the VLM making a mistake. This will collect the data and output as a file which can be used to improve the VLM models.
defaultArgs:
mode: collect_incorrect_markers
- name: Find Marker Settings
description: Find Optimal Marker Settings based on a video that has manually tuned markers and has been processed by the VLM previously. Only 1 video should have VLM_TagMe before running.
defaultArgs:
mode: find_marker_settings
98 changes: 98 additions & 0 deletions plugins/AHavenVLMConnector/exit_tracker.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
"""
Comprehensive sys.exit tracking module
Instruments all sys.exit() calls with full call stack and context
"""

import sys
import traceback
from typing import Optional

# Reference to the unpatched sys.exit; tracked_exit delegates to it and
# uninstall_exit_tracker() restores it.
original_exit = sys.exit

# Guards against double-patching (installing twice would wrap our own wrapper).
_exit_tracker_patched = False

def install_exit_tracker(logger=None) -> None:
    """
    Install the exit tracker by monkey-patching sys.exit.

    After installation, every sys.exit() call logs the exit code, the
    current call stack (last 15 non-internal frames), and any active
    exception before delegating to the real sys.exit.

    Args:
        logger: Optional logger instance (will use fallback print if None)
    """
    global _exit_tracker_patched, original_exit

    # Idempotent: installing twice would wrap our own wrapper.
    if _exit_tracker_patched:
        return

    # Capture whatever sys.exit currently is, in case some other tool
    # patched it after this module was imported.
    if hasattr(sys, 'exit') and sys.exit is not original_exit:
        original_exit = sys.exit

    def tracked_exit(code: object = None) -> None:
        """Log sys.exit() calls with the full call stack, then exit.

        sys.exit() accepts any object (int status, message string, or
        None) and defaults to None; mirroring that signature here keeps
        a bare sys.exit() raising SystemExit(None) exactly like the
        original, instead of SystemExit(0).
        """
        # Current call stack (not an exception traceback).
        stack = traceback.extract_stack()

        # Format the stack trace, excluding interpreter internals and
        # this tracker module's own frames.
        stack_lines = []
        for frame in stack:
            # NOTE: filtering is by *filename*; the old check for the
            # string 'tracked_exit' in the filename was dead code and
            # has been dropped — 'exit_tracker.py' covers this module.
            if ('/usr/lib' not in frame.filename and
                    '/System/Library' not in frame.filename and
                    'exit_tracker.py' not in frame.filename):
                stack_lines.append(
                    f" File \"{frame.filename}\", line {frame.lineno}, in {frame.name}\n {frame.line}"
                )

        # Take last 15 frames to see the full call chain without
        # flooding the log.
        stack_str = '\n'.join(stack_lines[-15:])

        # Include any exception currently being handled, if one exists.
        exc_info = sys.exc_info()
        exc_str = ""
        if exc_info[0] is not None:
            exc_str = f"\n Active Exception: {exc_info[0].__name__}: {exc_info[1]}"

        # Build the error message
        error_msg = f"""[DEBUG_EXIT_CODE] ==========================================
[DEBUG_EXIT_CODE] sys.exit() called with code: {code}
[DEBUG_EXIT_CODE] Call stack (last 15 frames):
{stack_str}
{exc_str}
[DEBUG_EXIT_CODE] =========================================="""

        # Log using the provided logger; never let a logging failure
        # block the exit — fall back to print and continue.
        if logger:
            try:
                logger.error(error_msg)
            except Exception as log_error:
                print(f"[EXIT_TRACKER_LOGGER_ERROR] Failed to log: {log_error}")
                print(error_msg)
        else:
            print(error_msg)

        # Delegate to the real sys.exit (raises SystemExit).
        original_exit(code)

    # Install the tracker
    sys.exit = tracked_exit
    _exit_tracker_patched = True

    if logger:
        logger.debug("[DEBUG_EXIT_CODE] Exit tracker installed successfully")
    else:
        print("[DEBUG_EXIT_CODE] Exit tracker installed successfully")

def uninstall_exit_tracker() -> None:
    """Undo install_exit_tracker(): put the original sys.exit back."""
    global _exit_tracker_patched, original_exit

    # Nothing to do if the tracker was never installed.
    if not _exit_tracker_patched:
        return

    sys.exit = original_exit
    _exit_tracker_patched = False

# Auto-install on import so that merely importing this module enables
# tracking; call uninstall_exit_tracker() afterwards to disable it.
if not _exit_tracker_patched:
    install_exit_tracker()
Loading