diff --git a/src/agents/rbi_agent_pp.py b/src/agents/rbi_agent_pp.py
new file mode 100644
index 000000000..30935a546
--- /dev/null
+++ b/src/agents/rbi_agent_pp.py
@@ -0,0 +1,1313 @@
+"""
+π Moon Dev's RBI AI v3.0 PARALLEL PROCESSOR π
+Built with love by Moon Dev π
+
+PARALLEL PROCESSING: Run up to 5 backtests simultaneously!
+- Each thread processes a different trading idea
+- Thread-safe colored output
+- Rate limiting to avoid API throttling
+- Massively faster than sequential processing
+
+HOW IT WORKS:
+1. Reads trading ideas from ideas.txt
+2. Spawns up to MAX_PARALLEL_THREADS workers
+3. Each thread independently: Research β Backtest β Debug β Optimize
+4. All threads run simultaneously until target returns are hit
+5. Thread-safe file naming with unique 2-digit thread IDs
+
+NEW FEATURES:
+- π¨ Color-coded output per thread (Thread 1 = cyan, Thread 2 = magenta, etc.)
+- β±οΈ Rate limiting to avoid API throttling
+- π Thread-safe file operations
+- π Real-time progress tracking across all threads
+- πΎ Clean file organization with thread IDs in names
+
+Required Setup:
+1. Conda environment 'tflow' with backtesting packages
+2. Set MAX_PARALLEL_THREADS (default: 5)
+3. Run and watch all ideas process in parallel! ππ°
+
+IMPORTANT: Each thread is fully independent and won't interfere with others!
+"""
+
+# Import execution functionality
+import subprocess
+import json
+from pathlib import Path
+
+# Core imports
+import os
+import time
+import re
+import hashlib
+import csv
+from datetime import datetime
+from termcolor import cprint
+import sys
+from dotenv import load_dotenv
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from threading import Lock, Semaphore
+
+# Load environment variables FIRST
+load_dotenv()
+print("β
Environment variables loaded")
+
+# Add config values directly to avoid import issues
+AI_TEMPERATURE = 0.7
+AI_MAX_TOKENS = 4000
+
+# Import model factory with proper path handling
+import sys
+sys.path.append('/Users/md/Dropbox/dev/github/moon-dev-ai-agents-for-trading')
+
+try:
+ from src.models import model_factory
+ print("β
Successfully imported model_factory")
+except ImportError as e:
+ print(f"β οΈ Could not import model_factory: {e}")
+ sys.exit(1)
+
+# ============================================
+# π― PARALLEL PROCESSING CONFIGURATION
+# ============================================
+MAX_PARALLEL_THREADS = 5 # How many ideas to process simultaneously
+RATE_LIMIT_DELAY = 2 # Seconds to wait between API calls (per thread)
+RATE_LIMIT_GLOBAL_DELAY = 0.5 # Global delay between any API calls
+
+# Thread color mapping
+THREAD_COLORS = {
+ 0: "cyan",
+ 1: "magenta",
+ 2: "yellow",
+ 3: "green",
+ 4: "blue"
+}
+
+# Global locks
+console_lock = Lock()
+api_lock = Lock()
+file_lock = Lock()
+
+# Rate limiter
+rate_limiter = Semaphore(MAX_PARALLEL_THREADS)
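+# Note (descriptive): this semaphore is sized to the worker count but is not
+# acquired by rate_limited_api_call() below - throttling currently relies on
+# api_lock plus the RATE_LIMIT_* sleeps. It is left here, presumably as a hook
+# for stricter per-call limiting.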
+
+# Model Configurations (same as v3)
+RESEARCH_CONFIG = {
+ "type": "xai",
+ "name": "grok-4-fast-reasoning"
+}
+
+BACKTEST_CONFIG = {
+ "type": "xai",
+ "name": "grok-4-fast-reasoning"
+}
+
+DEBUG_CONFIG = {
+ "type": "xai",
+ "name": "grok-4-fast-reasoning"
+}
+
+PACKAGE_CONFIG = {
+ "type": "xai",
+ "name": "grok-4-fast-reasoning"
+}
+
+OPTIMIZE_CONFIG = {
+ "type": "xai",
+ "name": "grok-4-fast-reasoning"
+}
+
+# π― PROFIT TARGET CONFIGURATION
+TARGET_RETURN = 50 # Target return in %
+SAVE_IF_OVER_RETURN = 1.0 # Save backtest to CSV and folders if return > this % (Moon Dev's threshold!)
+CONDA_ENV = "tflow"
+MAX_DEBUG_ITERATIONS = 10
+MAX_OPTIMIZATION_ITERATIONS = 10
+EXECUTION_TIMEOUT = 300 # 5 minutes
+
+# DeepSeek Configuration
+DEEPSEEK_BASE_URL = "https://api.deepseek.com"
+
+# Get today's date for organizing outputs
+TODAY_DATE = datetime.now().strftime("%m_%d_%Y")
+
+# Update data directory paths - Parallel version uses its own folder
+PROJECT_ROOT = Path(__file__).parent.parent
+DATA_DIR = PROJECT_ROOT / "data/rbi_pp"
+TODAY_DIR = DATA_DIR / TODAY_DATE
+RESEARCH_DIR = TODAY_DIR / "research"
+BACKTEST_DIR = TODAY_DIR / "backtests"
+PACKAGE_DIR = TODAY_DIR / "backtests_package"
+WORKING_BACKTEST_DIR = TODAY_DIR / "backtests_working" # Moon Dev's working iterations!
+FINAL_BACKTEST_DIR = TODAY_DIR / "backtests_final"
+OPTIMIZATION_DIR = TODAY_DIR / "backtests_optimized"
+CHARTS_DIR = TODAY_DIR / "charts"
+EXECUTION_DIR = TODAY_DIR / "execution_results"
+PROCESSED_IDEAS_LOG = DATA_DIR / "processed_ideas.log"
+STATS_CSV = DATA_DIR / "backtest_stats.csv" # Moon Dev's stats tracker!
+
+IDEAS_FILE = DATA_DIR / "ideas.txt"
+
+# Create main directories if they don't exist
+for dir_path in [DATA_DIR, TODAY_DIR, RESEARCH_DIR, BACKTEST_DIR, PACKAGE_DIR,
+                 WORKING_BACKTEST_DIR, FINAL_BACKTEST_DIR, OPTIMIZATION_DIR, CHARTS_DIR, EXECUTION_DIR]:
+    dir_path.mkdir(parents=True, exist_ok=True)
+
+# ============================================
+# π¨ THREAD-SAFE PRINTING
+# ============================================
+
+def thread_print(message, thread_id, color=None, attrs=None):
+ """Thread-safe colored print with thread ID prefix"""
+ if color is None:
+ color = THREAD_COLORS.get(thread_id % 5, "white")
+
+ with console_lock:
+ prefix = f"[T{thread_id:02d}]"
+ cprint(f"{prefix} {message}", color, attrs=attrs)
+
+def thread_print_status(thread_id, phase, message):
+ """Print status update for a specific phase"""
+ color = THREAD_COLORS.get(thread_id % 5, "white")
+ with console_lock:
+ cprint(f"[T{thread_id:02d}] {phase}: {message}", color)
+
+# ============================================
+# π RATE LIMITING
+# ============================================
+
+def rate_limited_api_call(func, thread_id, *args, **kwargs):
+ """
+ Wrapper for API calls with rate limiting
+ - Per-thread rate limiting (RATE_LIMIT_DELAY)
+ - Global rate limiting (RATE_LIMIT_GLOBAL_DELAY)
+ """
+ # Global rate limit (quick check)
+ with api_lock:
+ time.sleep(RATE_LIMIT_GLOBAL_DELAY)
+
+ # Execute the API call
+ result = func(*args, **kwargs)
+
+ # Per-thread rate limit
+ time.sleep(RATE_LIMIT_DELAY)
+
+ return result
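+
+# Illustrative usage (not executed): wrap any model call so every request pays
+# the global delay (serialized through api_lock) plus the per-thread delay.
+#   response = rate_limited_api_call(model.generate_response, thread_id,
+#                                    system_prompt=prompt, user_content=idea)
+# Note: the thread_id argument is not used inside the wrapper itself; delays
+# are uniform across threads.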
+
+# ============================================
+# π PROMPTS (Same as v3)
+# ============================================
+
+RESEARCH_PROMPT = """
+You are Moon Dev's Research AI π
+
+IMPORTANT NAMING RULES:
+1. Create a UNIQUE TWO-WORD NAME for this specific strategy
+2. The name must be DIFFERENT from any generic names like "TrendFollower" or "MomentumStrategy"
+3. First word should describe the main approach (e.g., Adaptive, Neural, Quantum, Fractal, Dynamic)
+4. Second word should describe the specific technique (e.g., Reversal, Breakout, Oscillator, Divergence)
+5. Make the name SPECIFIC to this strategy's unique aspects
+
+Examples of good names:
+- "AdaptiveBreakout" for a strategy that adjusts breakout levels
+- "FractalMomentum" for a strategy using fractal analysis with momentum
+- "QuantumReversal" for a complex mean reversion strategy
+- "NeuralDivergence" for a strategy focusing on divergence patterns
+
+BAD names to avoid:
+- "TrendFollower" (too generic)
+- "SimpleMoving" (too basic)
+- "PriceAction" (too vague)
+
+Output format must start with:
+STRATEGY_NAME: [Your unique two-word name]
+
+Then analyze the trading strategy content and create detailed instructions.
+Focus on:
+1. Key strategy components
+2. Entry/exit rules
+3. Risk management
+4. Required indicators
+
+Your complete output must follow this format:
+STRATEGY_NAME: [Your unique two-word name]
+
+STRATEGY_DETAILS:
+[Your detailed analysis]
+
+Remember: The name must be UNIQUE and SPECIFIC to this strategy's approach!
+"""
+
+BACKTEST_PROMPT = """
+You are Moon Dev's Backtest AI π ONLY SEND BACK CODE, NO OTHER TEXT.
+Create a backtesting.py implementation for the strategy.
+USE BACKTESTING.PY
+Include:
+1. All necessary imports
+2. Strategy class with indicators
+3. Entry/exit logic
+4. Risk management
+5. Your size (starting capital) should be 1,000,000
+6. If you need indicators use TA lib or pandas TA.
+
+IMPORTANT DATA HANDLING:
+1. Clean column names by removing spaces: data.columns = data.columns.str.strip().str.lower()
+2. Drop any unnamed columns: data = data.drop(columns=[col for col in data.columns if 'unnamed' in col.lower()])
+3. Ensure proper column mapping to match backtesting requirements:
+ - Required columns: 'Open', 'High', 'Low', 'Close', 'Volume'
+ - Use proper case (capital first letter)
+
+FOR THE PYTHON BACKTESTING LIBRARY USE BACKTESTING.PY AND SEND BACK ONLY THE CODE, NO OTHER TEXT.
+
+INDICATOR CALCULATION RULES:
+1. ALWAYS use self.I() wrapper for ANY indicator calculations
+2. Use talib functions instead of pandas operations:
+ - Instead of: self.data.Close.rolling(20).mean()
+ - Use: self.I(talib.SMA, self.data.Close, timeperiod=20)
+3. For swing high/lows use talib.MAX/MIN:
+ - Instead of: self.data.High.rolling(window=20).max()
+ - Use: self.I(talib.MAX, self.data.High, timeperiod=20)
+
+BACKTEST EXECUTION ORDER:
+1. Run initial backtest with default parameters first
+2. Print full stats using print(stats) and print(stats._strategy)
+3. No optimization code is needed - just print the final stats, and make sure the FULL stats are printed, not a partial subset. The last lines of code should look like: stats = bt.run() then print(stats). No plotting is ever needed.
+
+Do not create charts or plots - just print the stats. No charts needed.
+
+CRITICAL POSITION SIZING RULE:
+When calculating position sizes in backtesting.py, the size parameter must be either:
+1. A fraction between 0 and 1 (for percentage of equity)
+2. A whole number (integer) of units
+
+The common error occurs when calculating position_size = risk_amount / risk, which results in floating-point numbers. Always use:
+position_size = int(round(position_size))
+
+Example fix:
+❌ self.buy(size=3546.0993)  # Will fail
+✅ self.buy(size=int(round(3546.0993)))  # Will work
+
+RISK MANAGEMENT:
+1. Always calculate position sizes based on risk percentage
+2. Use proper stop loss and take profit calculations
+3. Print entry/exit signals with Moon Dev themed messages
+
+If you need indicators use TA lib or pandas TA.
+
+Use this data path: /Users/md/Dropbox/dev/github/moon-dev-ai-agents-for-trading/src/data/rbi/BTC-USD-15m.csv
+the above data head looks like below
+datetime, open, high, low, close, volume,
+2023-01-01 00:00:00, 16531.83, 16532.69, 16509.11, 16510.82, 231.05338022,
+2023-01-01 00:15:00, 16509.78, 16534.66, 16509.11, 16533.43, 308.12276951,
+
+Always add plenty of Moon Dev themed debug prints with emojis to make debugging easier! π β¨ π
+
+FOR THE PYTHON BACKTESTING LIBRARY USE BACKTESTING.PY AND SEND BACK ONLY THE CODE, NO OTHER TEXT.
+ONLY SEND BACK CODE, NO OTHER TEXT.
+"""
+
+DEBUG_PROMPT = """
+You are Moon Dev's Debug AI π
+Fix technical issues in the backtest code WITHOUT changing the strategy logic.
+
+CRITICAL ERROR TO FIX:
+{error_message}
+
+CRITICAL DATA LOADING REQUIREMENTS:
+The CSV file has these exact columns after processing:
+- datetime, open, high, low, close, volume (all lowercase after .str.lower())
+- After capitalization: Datetime, Open, High, Low, Close, Volume
+
+CRITICAL BACKTESTING REQUIREMENTS:
+1. Data Loading Rules:
+ - Use data.columns.str.strip().str.lower() to clean columns
+ - Drop unnamed columns: data.drop(columns=[col for col in data.columns if 'unnamed' in col.lower()])
+ - Rename columns properly: data.rename(columns={{'open': 'Open', 'high': 'High', 'low': 'Low', 'close': 'Close', 'volume': 'Volume'}})
+ - Set datetime as index: data = data.set_index(pd.to_datetime(data['datetime']))
+
+2. Position Sizing Rules:
+ - Must be either a fraction (0 < size < 1) for percentage of equity
+ - OR a positive whole number (round integer) for units
+ - NEVER use floating point numbers for unit-based sizing
+
+3. Indicator Issues:
+ - Cannot use .shift() on backtesting indicators
+ - Use array indexing like indicator[-2] for previous values
+ - All indicators must be wrapped in self.I()
+
+4. Position Object Issues:
+ - Position object does NOT have .entry_price attribute
+ - Use self.trades[-1].entry_price if you need entry price from last trade
+ - Available position attributes: .size, .pl, .pl_pct
+ - For partial closes: use self.position.close() without parameters (closes entire position)
+ - For stop losses: use sl= parameter in buy/sell calls, not in position.close()
+
+5. No Trades Issue (Signals but no execution):
+ - If strategy prints "ENTRY SIGNAL" but shows 0 trades, the self.buy() call is not executing
+ - Common causes: invalid size parameter, insufficient cash, missing self.buy() call
+ - Ensure self.buy() is actually called in the entry condition block
+ - Check size parameter: must be fraction (0-1) or positive integer
+ - Verify cash/equity is sufficient for the trade size
+
+Focus on:
+1. KeyError issues with column names
+2. Syntax errors and import statements
+3. Indicator calculation methods
+4. Data loading and preprocessing
+5. Position object attribute errors (.entry_price, .close() parameters)
+
+DO NOT change strategy logic, entry/exit conditions, or risk management rules.
+
+Return the complete fixed code with Moon Dev themed debug prints! π β¨
+ONLY SEND BACK CODE, NO OTHER TEXT.
+"""
+
+PACKAGE_PROMPT = """
+You are Moon Dev's Package AI π
+Your job is to ensure the backtest code NEVER uses ANY backtesting.lib imports or functions.
+
+❌ STRICTLY FORBIDDEN:
+1. from backtesting.lib import *
+2. import backtesting.lib
+3. from backtesting.lib import crossover
+4. ANY use of backtesting.lib
+
+✅ REQUIRED REPLACEMENTS:
+1. For crossover detection:
+ Instead of: backtesting.lib.crossover(a, b)
+ Use: (a[-2] < b[-2] and a[-1] > b[-1]) # for bullish crossover
+ (a[-2] > b[-2] and a[-1] < b[-1]) # for bearish crossover
+
+2. For indicators:
+ - Use talib for all standard indicators (SMA, RSI, MACD, etc.)
+ - Use pandas-ta for specialized indicators
+ - ALWAYS wrap in self.I()
+
+3. For signal generation:
+ - Use numpy/pandas boolean conditions
+ - Use rolling window comparisons with array indexing
+ - Use mathematical comparisons (>, <, ==)
+
+Example conversions:
+❌ from backtesting.lib import crossover
+❌ if crossover(fast_ma, slow_ma):
+✅ if fast_ma[-2] < slow_ma[-2] and fast_ma[-1] > slow_ma[-1]:
+
+❌ self.sma = self.I(backtesting.lib.SMA, self.data.Close, 20)
+✅ self.sma = self.I(talib.SMA, self.data.Close, timeperiod=20)
+
+IMPORTANT: Scan the ENTIRE code for any backtesting.lib usage and replace ALL instances!
+Return the complete fixed code with proper Moon Dev themed debug prints! π β¨
+ONLY SEND BACK CODE, NO OTHER TEXT.
+"""
+
+OPTIMIZE_PROMPT = """
+You are Moon Dev's Optimization AI π
+Your job is to IMPROVE the strategy to achieve higher returns while maintaining good risk management.
+
+CURRENT PERFORMANCE:
+Return [%]: {current_return}%
+TARGET RETURN: {target_return}%
+
+YOUR MISSION: Optimize this strategy to hit the target return!
+
+OPTIMIZATION TECHNIQUES TO CONSIDER:
+1. **Entry Optimization:**
+ - Tighten entry conditions to catch better setups
+ - Add filters to avoid low-quality signals
+ - Use multiple timeframe confirmation
+ - Add volume/momentum filters
+
+2. **Exit Optimization:**
+ - Improve take profit levels
+ - Add trailing stops
+ - Use dynamic position sizing based on volatility
+ - Scale out of positions
+
+3. **Risk Management:**
+ - Adjust position sizing
+ - Use volatility-based position sizing (ATR)
+ - Add maximum drawdown limits
+ - Improve stop loss placement
+
+4. **Indicator Optimization:**
+ - Fine-tune indicator parameters
+ - Add complementary indicators
+ - Use indicator divergence
+ - Combine multiple timeframes
+
+5. **Market Regime Filters:**
+ - Add trend filters
+ - Avoid choppy/ranging markets
+ - Only trade in favorable conditions
+
+IMPORTANT RULES:
+- DO NOT break the code structure
+- Keep all Moon Dev debug prints
+- Maintain proper backtesting.py format
+- Use self.I() for all indicators
+- Position sizes must be int or fraction (0-1)
+- Focus on REALISTIC improvements (no curve fitting!)
+- Explain your optimization changes in comments
+
+Return the COMPLETE optimized code with Moon Dev themed comments explaining what you improved! π β¨
+ONLY SEND BACK CODE, NO OTHER TEXT.
+"""
+
+# ============================================
+# π οΈ HELPER FUNCTIONS (with thread safety)
+# ============================================
+
+def parse_return_from_output(stdout: str, thread_id: int) -> float:
+ """Extract the Return [%] from backtest output"""
+ try:
+ match = re.search(r'Return \[%\]\s+([-\d.]+)', stdout)
+ if match:
+ return_pct = float(match.group(1))
+ thread_print(f"π Extracted return: {return_pct}%", thread_id)
+ return return_pct
+ else:
+ thread_print("β οΈ Could not find Return [%] in output", thread_id, "yellow")
+ return None
+ except Exception as e:
+ thread_print(f"β Error parsing return: {str(e)}", thread_id, "red")
+ return None
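+
+# Example of the stats block this parser reads (illustrative backtesting.py
+# output - the numbers are made up):
+#   Return [%]                          42.35
+#   Buy & Hold Return [%]               28.10
+# For the snippet above, parse_return_from_output() returns 42.35.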
+
+def parse_all_stats_from_output(stdout: str, thread_id: int) -> dict:
+ """
+ π Moon Dev's Stats Parser - Extract all key stats from backtest output!
+ Returns dict with: return_pct, buy_hold_pct, max_drawdown_pct, sharpe, sortino, expectancy
+ """
+ stats = {
+ 'return_pct': None,
+ 'buy_hold_pct': None,
+ 'max_drawdown_pct': None,
+ 'sharpe': None,
+ 'sortino': None,
+ 'expectancy': None
+ }
+
+ try:
+ # Return [%]
+ match = re.search(r'Return \[%\]\s+([-\d.]+)', stdout)
+ if match:
+ stats['return_pct'] = float(match.group(1))
+
+ # Buy & Hold Return [%]
+ match = re.search(r'Buy & Hold Return \[%\]\s+([-\d.]+)', stdout)
+ if match:
+ stats['buy_hold_pct'] = float(match.group(1))
+
+ # Max. Drawdown [%]
+ match = re.search(r'Max\. Drawdown \[%\]\s+([-\d.]+)', stdout)
+ if match:
+ stats['max_drawdown_pct'] = float(match.group(1))
+
+ # Sharpe Ratio
+ match = re.search(r'Sharpe Ratio\s+([-\d.]+)', stdout)
+ if match:
+ stats['sharpe'] = float(match.group(1))
+
+ # Sortino Ratio
+ match = re.search(r'Sortino Ratio\s+([-\d.]+)', stdout)
+ if match:
+ stats['sortino'] = float(match.group(1))
+
+ # Expectancy [%] (or Avg. Trade [%])
+ match = re.search(r'Expectancy \[%\]\s+([-\d.]+)', stdout)
+ if not match:
+ match = re.search(r'Avg\. Trade \[%\]\s+([-\d.]+)', stdout)
+ if match:
+ stats['expectancy'] = float(match.group(1))
+
+ thread_print(f"π Extracted {sum(1 for v in stats.values() if v is not None)}/6 stats", thread_id)
+ return stats
+
+ except Exception as e:
+ thread_print(f"β Error parsing stats: {str(e)}", thread_id, "red")
+ return stats
+
+def log_stats_to_csv(strategy_name: str, iteration: int, thread_id: int, stats: dict, file_path: str) -> None:
+ """
+ π Moon Dev's CSV Logger - Thread-safe stats logging!
+ Appends backtest stats to CSV for easy analysis and comparison
+ """
+ try:
+ with file_lock:
+ # Create CSV with headers if it doesn't exist
+ file_exists = STATS_CSV.exists()
+
+ with open(STATS_CSV, 'a', newline='') as f:
+ writer = csv.writer(f)
+
+ # Write header if new file
+ if not file_exists:
+ writer.writerow([
+ 'Strategy Name',
+ 'Iteration',
+ 'Thread ID',
+ 'Return %',
+ 'Buy & Hold %',
+ 'Max Drawdown %',
+ 'Sharpe Ratio',
+ 'Sortino Ratio',
+ 'Expectancy %',
+ 'File Path',
+ 'Timestamp'
+ ])
+ thread_print("π Created new stats CSV with headers", thread_id, "green")
+
+ # Write stats row
+ timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ writer.writerow([
+ strategy_name,
+ iteration,
+ f"T{thread_id:02d}",
+ stats.get('return_pct', 'N/A'),
+ stats.get('buy_hold_pct', 'N/A'),
+ stats.get('max_drawdown_pct', 'N/A'),
+ stats.get('sharpe', 'N/A'),
+ stats.get('sortino', 'N/A'),
+ stats.get('expectancy', 'N/A'),
+ str(file_path),
+ timestamp
+ ])
+
+            thread_print(f"✅ Logged stats to CSV (Return: {stats.get('return_pct', 'N/A')}%)", thread_id, "green")
+
+ except Exception as e:
+ thread_print(f"β Error logging to CSV: {str(e)}", thread_id, "red")
+
+def save_backtest_if_threshold_met(code: str, stats: dict, strategy_name: str, iteration: int, thread_id: int, phase: str = "debug") -> bool:
+ """
+ π Moon Dev's Threshold Checker - Save backtests that pass the return threshold!
+
+ Args:
+ code: The backtest code to save
+ stats: Dict of parsed stats
+ strategy_name: Name of the strategy
+ iteration: Current iteration number
+ thread_id: Thread ID
+ phase: "debug", "opt", or "final" to determine filename
+
+ Returns:
+ True if saved (threshold met), False otherwise
+ """
+ return_pct = stats.get('return_pct')
+
+ # Check if return meets threshold
+ if return_pct is None or return_pct <= SAVE_IF_OVER_RETURN:
+ thread_print(f"β οΈ Return {return_pct}% β€ {SAVE_IF_OVER_RETURN}% threshold - not saving", thread_id, "yellow")
+ return False
+
+ try:
+ # Determine filename based on phase
+ if phase == "debug":
+ filename = f"T{thread_id:02d}_{strategy_name}_DEBUG_v{iteration}_{return_pct:.1f}pct.py"
+ elif phase == "opt":
+ filename = f"T{thread_id:02d}_{strategy_name}_OPT_v{iteration}_{return_pct:.1f}pct.py"
+ else: # final
+ filename = f"T{thread_id:02d}_{strategy_name}_FINAL_{return_pct:.1f}pct.py"
+
+ # Save to WORKING folder
+ working_file = WORKING_BACKTEST_DIR / filename
+ with file_lock:
+ with open(working_file, 'w') as f:
+ f.write(code)
+
+ # Save to FINAL folder (same logic per Moon Dev's request)
+ final_file = FINAL_BACKTEST_DIR / filename
+ with file_lock:
+ with open(final_file, 'w') as f:
+ f.write(code)
+
+ thread_print(f"πΎ Saved to working & final! Return: {return_pct:.2f}%", thread_id, "green", attrs=['bold'])
+
+ # Log to CSV
+ log_stats_to_csv(strategy_name, iteration, thread_id, stats, str(working_file))
+
+ return True
+
+ except Exception as e:
+ thread_print(f"β Error saving backtest: {str(e)}", thread_id, "red")
+ return False
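+
+# Example filename produced by a debug-phase save (illustrative values):
+#   T03_FractalMomentum_DEBUG_v2_12.4pct.py  -> written to both WORKING and FINAL dirs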
+
+def execute_backtest(file_path: str, strategy_name: str, thread_id: int) -> dict:
+ """Execute a backtest file in conda environment and capture output"""
+ thread_print(f"π Executing: {strategy_name}", thread_id)
+
+ if not os.path.exists(file_path):
+ raise FileNotFoundError(f"File not found: {file_path}")
+
+ cmd = [
+ "conda", "run", "-n", CONDA_ENV,
+ "python", str(file_path)
+ ]
+
+ start_time = datetime.now()
+
+ result = subprocess.run(
+ cmd,
+ capture_output=True,
+ text=True,
+ timeout=EXECUTION_TIMEOUT
+ )
+
+ execution_time = (datetime.now() - start_time).total_seconds()
+
+ output = {
+ "success": result.returncode == 0,
+ "return_code": result.returncode,
+ "stdout": result.stdout,
+ "stderr": result.stderr,
+ "execution_time": execution_time,
+ "timestamp": datetime.now().isoformat()
+ }
+
+ # Save execution results with thread ID
+ result_file = EXECUTION_DIR / f"T{thread_id:02d}_{strategy_name}_{datetime.now().strftime('%H%M%S')}.json"
+ with file_lock:
+ with open(result_file, 'w') as f:
+ json.dump(output, f, indent=2)
+
+ if output['success']:
+        thread_print(f"✅ Backtest executed in {execution_time:.2f}s!", thread_id, "green")
+ else:
+ thread_print(f"β Backtest failed: {output['return_code']}", thread_id, "red")
+
+ return output
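+
+# The subprocess call above is equivalent to running by hand (illustrative path):
+#   conda run -n tflow python <path_to_backtest>.py
+# stdout/stderr and timing are captured and saved as JSON under EXECUTION_DIR.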
+
+def parse_execution_error(execution_result: dict) -> str:
+ """Extract meaningful error message for debug agent"""
+ if execution_result.get('stderr'):
+ return execution_result['stderr'].strip()
+ return execution_result.get('error', 'Unknown error')
+
+def get_idea_hash(idea: str) -> str:
+ """Generate a unique hash for an idea to track processing status"""
+ return hashlib.md5(idea.encode('utf-8')).hexdigest()
+
+def is_idea_processed(idea: str) -> bool:
+ """Check if an idea has already been processed (thread-safe)"""
+ if not PROCESSED_IDEAS_LOG.exists():
+ return False
+
+ idea_hash = get_idea_hash(idea)
+
+ with file_lock:
+ with open(PROCESSED_IDEAS_LOG, 'r') as f:
+ processed_hashes = [line.strip().split(',')[0] for line in f if line.strip()]
+
+ return idea_hash in processed_hashes
+
+def log_processed_idea(idea: str, strategy_name: str, thread_id: int) -> None:
+ """Log an idea as processed with timestamp and strategy name (thread-safe)"""
+ idea_hash = get_idea_hash(idea)
+ timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+ with file_lock:
+ if not PROCESSED_IDEAS_LOG.exists():
+ PROCESSED_IDEAS_LOG.parent.mkdir(parents=True, exist_ok=True)
+ with open(PROCESSED_IDEAS_LOG, 'w') as f:
+ f.write("# Moon Dev's RBI AI - Processed Ideas Log π\n")
+ f.write("# Format: hash,timestamp,thread_id,strategy_name,idea_snippet\n")
+
+ idea_snippet = idea[:50].replace(',', ';') + ('...' if len(idea) > 50 else '')
+ with open(PROCESSED_IDEAS_LOG, 'a') as f:
+ f.write(f"{idea_hash},{timestamp},T{thread_id:02d},{strategy_name},{idea_snippet}\n")
+
+ thread_print(f"π Logged processed idea: {strategy_name}", thread_id, "green")
+
+def has_nan_results(execution_result: dict) -> bool:
+ """Check if backtest results contain NaN values indicating no trades"""
+ if not execution_result.get('success'):
+ return False
+
+ stdout = execution_result.get('stdout', '')
+
+ nan_indicators = [
+ '# Trades 0',
+ 'Win Rate [%] NaN',
+ 'Exposure Time [%] 0.0',
+ 'Return [%] 0.0'
+ ]
+
+ nan_count = sum(1 for indicator in nan_indicators if indicator in stdout)
+ return nan_count >= 2
+
+def analyze_no_trades_issue(execution_result: dict) -> str:
+ """Analyze why strategy shows signals but no trades"""
+ stdout = execution_result.get('stdout', '')
+
+ if 'ENTRY SIGNAL' in stdout and '# Trades 0' in stdout:
+ return "Strategy is generating entry signals but self.buy() calls are not executing. This usually means: 1) Position sizing issues (size parameter invalid), 2) Insufficient cash/equity, 3) Logic preventing buy execution, or 4) Missing actual self.buy() call in the code. The strategy prints signals but never calls self.buy()."
+
+ elif '# Trades 0' in stdout:
+ return "Strategy executed but took 0 trades, resulting in NaN values. The entry conditions are likely too restrictive or there are logic errors preventing trade execution."
+
+ return "Strategy executed but took 0 trades, resulting in NaN values. Please adjust the strategy logic to actually generate trading signals and take trades."
+
+def chat_with_model(system_prompt, user_content, model_config, thread_id):
+ """Chat with AI model using model factory with rate limiting"""
+ def _api_call():
+ model = model_factory.get_model(model_config["type"], model_config["name"])
+ if not model:
+ raise ValueError(f"π¨ Could not initialize {model_config['type']} {model_config['name']} model!")
+
+ if model_config["type"] == "ollama":
+ response = model.generate_response(
+ system_prompt=system_prompt,
+ user_content=user_content,
+ temperature=AI_TEMPERATURE
+ )
+ if isinstance(response, str):
+ return response
+ if hasattr(response, 'content'):
+ return response.content
+ return str(response)
+ else:
+ response = model.generate_response(
+ system_prompt=system_prompt,
+ user_content=user_content,
+ temperature=AI_TEMPERATURE,
+ max_tokens=AI_MAX_TOKENS
+ )
+ if not response:
+ raise ValueError("Model returned None response")
+ return response.content
+
+ # Apply rate limiting
+ return rate_limited_api_call(_api_call, thread_id)
+
+def clean_model_output(output, content_type="text"):
+ """Clean model output by removing thinking tags and extracting code from markdown"""
+ cleaned_output = output
+
+ if "" in output and "" in output:
+ clean_content = output.split("")[-1].strip()
+ if not clean_content:
+ import re
+ clean_content = re.sub(r'.*?', '', output, flags=re.DOTALL).strip()
+ if clean_content:
+ cleaned_output = clean_content
+
+ if content_type == "code" and "```" in cleaned_output:
+ try:
+ import re
+ code_blocks = re.findall(r'```python\n(.*?)\n```', cleaned_output, re.DOTALL)
+ if not code_blocks:
+ code_blocks = re.findall(r'```(?:python)?\n(.*?)\n```', cleaned_output, re.DOTALL)
+ if code_blocks:
+ cleaned_output = "\n\n".join(code_blocks)
+ except Exception as e:
+ thread_print(f"β Error extracting code: {str(e)}", 0, "red")
+
+ return cleaned_output
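+
+# Illustrative example (not executed): a fenced model reply is reduced to bare
+# code before being written to a .py file.
+#   raw = "Here you go!\n```python\nprint('hi')\n```"
+#   clean_model_output(raw, content_type="code")   # -> "print('hi')"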
+
+# ============================================
+# π€ AI AGENT FUNCTIONS (Thread-safe versions)
+# ============================================
+
+def research_strategy(content, thread_id):
+ """Research AI: Analyzes and creates trading strategy"""
+ thread_print_status(thread_id, "π RESEARCH", "Starting analysis...")
+
+ output = chat_with_model(
+ RESEARCH_PROMPT,
+ content,
+ RESEARCH_CONFIG,
+ thread_id
+ )
+
+ if output:
+ output = clean_model_output(output, "text")
+
+ strategy_name = "UnknownStrategy"
+ if "STRATEGY_NAME:" in output:
+ try:
+ name_section = output.split("STRATEGY_NAME:")[1].strip()
+ if "\n\n" in name_section:
+ strategy_name = name_section.split("\n\n")[0].strip()
+ else:
+ strategy_name = name_section.split("\n")[0].strip()
+
+ strategy_name = re.sub(r'[^\w\s-]', '', strategy_name)
+ strategy_name = re.sub(r'[\s]+', '', strategy_name)
+
+                thread_print(f"✅ Strategy: {strategy_name}", thread_id, "green")
+ except Exception as e:
+ thread_print(f"β οΈ Error extracting strategy name: {str(e)}", thread_id, "yellow")
+
+ # Add thread ID to filename
+ filepath = RESEARCH_DIR / f"T{thread_id:02d}_{strategy_name}_strategy.txt"
+ with file_lock:
+ with open(filepath, 'w') as f:
+ f.write(output)
+
+ return output, strategy_name
+ return None, None
+
+def create_backtest(strategy, strategy_name, thread_id):
+ """Backtest AI: Creates backtest implementation"""
+ thread_print_status(thread_id, "π BACKTEST", "Creating backtest code...")
+
+ output = chat_with_model(
+ BACKTEST_PROMPT,
+ f"Create a backtest for this strategy:\n\n{strategy}",
+ BACKTEST_CONFIG,
+ thread_id
+ )
+
+ if output:
+ output = clean_model_output(output, "code")
+
+ filepath = BACKTEST_DIR / f"T{thread_id:02d}_{strategy_name}_BT.py"
+ with file_lock:
+ with open(filepath, 'w') as f:
+ f.write(output)
+
+ thread_print(f"π₯ Backtest code saved", thread_id, "green")
+ return output
+ return None
+
+def package_check(backtest_code, strategy_name, thread_id):
+ """Package AI: Ensures correct indicator packages are used"""
+ thread_print_status(thread_id, "π¦ PACKAGE", "Checking imports...")
+
+ output = chat_with_model(
+ PACKAGE_PROMPT,
+ f"Check and fix indicator packages in this code:\n\n{backtest_code}",
+ PACKAGE_CONFIG,
+ thread_id
+ )
+
+ if output:
+ output = clean_model_output(output, "code")
+
+ filepath = PACKAGE_DIR / f"T{thread_id:02d}_{strategy_name}_PKG.py"
+ with file_lock:
+ with open(filepath, 'w') as f:
+ f.write(output)
+
+ thread_print(f"π¦ Package check complete", thread_id, "green")
+ return output
+ return None
+
+def debug_backtest(backtest_code, error_message, strategy_name, thread_id, iteration=1):
+ """Debug AI: Fixes technical issues in backtest code"""
+ thread_print_status(thread_id, f"π§ DEBUG #{iteration}", "Fixing errors...")
+
+ debug_prompt_with_error = DEBUG_PROMPT.format(error_message=error_message)
+
+ output = chat_with_model(
+ debug_prompt_with_error,
+ f"Fix this backtest code:\n\n{backtest_code}",
+ DEBUG_CONFIG,
+ thread_id
+ )
+
+ if output:
+ output = clean_model_output(output, "code")
+
+ # π Moon Dev: Save debug iterations to BACKTEST_DIR, not FINAL
+ # Only threshold-passing backtests go to FINAL/WORKING folders!
+ filepath = BACKTEST_DIR / f"T{thread_id:02d}_{strategy_name}_DEBUG_v{iteration}.py"
+ with file_lock:
+ with open(filepath, 'w') as f:
+ f.write(output)
+
+ thread_print(f"π§ Debug iteration {iteration} complete", thread_id, "green")
+ return output
+ return None
+
+def optimize_strategy(backtest_code, current_return, target_return, strategy_name, thread_id, iteration=1):
+ """Optimization AI: Improves strategy to hit target return"""
+ thread_print_status(thread_id, f"π― OPTIMIZE #{iteration}", f"{current_return}% β {target_return}%")
+
+ optimize_prompt_with_stats = OPTIMIZE_PROMPT.format(
+ current_return=current_return,
+ target_return=target_return
+ )
+
+ output = chat_with_model(
+ optimize_prompt_with_stats,
+ f"Optimize this backtest code to hit the target:\n\n{backtest_code}",
+ OPTIMIZE_CONFIG,
+ thread_id
+ )
+
+ if output:
+ output = clean_model_output(output, "code")
+
+ filepath = OPTIMIZATION_DIR / f"T{thread_id:02d}_{strategy_name}_OPT_v{iteration}.py"
+ with file_lock:
+ with open(filepath, 'w') as f:
+ f.write(output)
+
+ thread_print(f"π― Optimization {iteration} complete", thread_id, "green")
+ return output
+ return None
+
+# ============================================
+# π PARALLEL PROCESSING CORE
+# ============================================
+
+def process_trading_idea_parallel(idea: str, thread_id: int) -> dict:
+ """
+ Process a single trading idea with full Research β Backtest β Debug β Optimize pipeline
+ This is the worker function for each parallel thread
+ """
+ try:
+ thread_print(f"π Starting processing", thread_id, attrs=['bold'])
+
+ # Phase 1: Research
+ strategy, strategy_name = research_strategy(idea, thread_id)
+
+ if not strategy:
+ thread_print("β Research failed", thread_id, "red")
+ return {"success": False, "error": "Research failed", "thread_id": thread_id}
+
+ log_processed_idea(idea, strategy_name, thread_id)
+
+ # Phase 2: Backtest
+ backtest = create_backtest(strategy, strategy_name, thread_id)
+
+ if not backtest:
+ thread_print("β Backtest failed", thread_id, "red")
+ return {"success": False, "error": "Backtest failed", "thread_id": thread_id}
+
+ # Phase 3: Package Check
+ package_checked = package_check(backtest, strategy_name, thread_id)
+
+ if not package_checked:
+ thread_print("β Package check failed", thread_id, "red")
+ return {"success": False, "error": "Package check failed", "thread_id": thread_id}
+
+ package_file = PACKAGE_DIR / f"T{thread_id:02d}_{strategy_name}_PKG.py"
+
+ # Phase 4: Execution Loop
+ debug_iteration = 0
+ current_code = package_checked
+ current_file = package_file
+ error_history = []
+
+ while debug_iteration < MAX_DEBUG_ITERATIONS:
+ thread_print_status(thread_id, "π EXECUTE", f"Attempt {debug_iteration + 1}/{MAX_DEBUG_ITERATIONS}")
+
+ execution_result = execute_backtest(current_file, strategy_name, thread_id)
+
+ if execution_result['success']:
+ if has_nan_results(execution_result):
+ thread_print("β οΈ No trades taken", thread_id, "yellow")
+
+ error_message = analyze_no_trades_issue(execution_result)
+ debug_iteration += 1
+
+ if debug_iteration < MAX_DEBUG_ITERATIONS:
+ debugged_code = debug_backtest(
+ current_code,
+ error_message,
+ strategy_name,
+ thread_id,
+ debug_iteration
+ )
+
+ if not debugged_code:
+ thread_print("β Debug AI failed", thread_id, "red")
+ return {"success": False, "error": "Debug failed", "thread_id": thread_id}
+
+ current_code = debugged_code
+ # π Moon Dev: Update to match new debug file location
+ current_file = BACKTEST_DIR / f"T{thread_id:02d}_{strategy_name}_DEBUG_v{debug_iteration}.py"
+ continue
+ else:
+ thread_print(f"β Max debug iterations reached", thread_id, "red")
+ return {"success": False, "error": "Max debug iterations", "thread_id": thread_id}
+ else:
+ # SUCCESS! Code executes with trades!
+ thread_print("π BACKTEST SUCCESSFUL!", thread_id, "green", attrs=['bold'])
+
+ # π Moon Dev: Parse ALL stats, not just return!
+ all_stats = parse_all_stats_from_output(execution_result['stdout'], thread_id)
+ current_return = all_stats.get('return_pct')
+
+ if current_return is None:
+ thread_print("β οΈ Could not parse return", thread_id, "yellow")
+ final_file = FINAL_BACKTEST_DIR / f"T{thread_id:02d}_{strategy_name}_BTFinal_WORKING.py"
+ with file_lock:
+ with open(final_file, 'w') as f:
+ f.write(current_code)
+ break
+
+ # π Moon Dev: Check threshold and save if met!
+ save_backtest_if_threshold_met(
+ current_code,
+ all_stats,
+ strategy_name,
+ debug_iteration,
+ thread_id,
+ phase="debug"
+ )
+
+ thread_print(f"π Return: {current_return}% | Target: {TARGET_RETURN}%", thread_id)
+
+ if current_return >= TARGET_RETURN:
+ # TARGET HIT!
+ thread_print("πππ TARGET HIT! πππ", thread_id, "green", attrs=['bold'])
+
+ # π Moon Dev: Save to OPTIMIZATION_DIR for target hits
+ final_file = OPTIMIZATION_DIR / f"T{thread_id:02d}_{strategy_name}_TARGET_HIT_{current_return}pct.py"
+ with file_lock:
+ with open(final_file, 'w') as f:
+ f.write(current_code)
+
+ return {
+ "success": True,
+ "thread_id": thread_id,
+ "strategy_name": strategy_name,
+ "return": current_return,
+ "target_hit": True
+ }
+ else:
+ # Need to optimize
+ gap = TARGET_RETURN - current_return
+ thread_print(f"π Need {gap}% more - Starting optimization", thread_id)
+
+ optimization_iteration = 0
+ optimization_code = current_code
+ best_return = current_return
+ best_code = current_code
+
+ while optimization_iteration < MAX_OPTIMIZATION_ITERATIONS:
+ optimization_iteration += 1
+
+ optimized_code = optimize_strategy(
+ optimization_code,
+ best_return,
+ TARGET_RETURN,
+ strategy_name,
+ thread_id,
+ optimization_iteration
+ )
+
+ if not optimized_code:
+ thread_print("β Optimization AI failed", thread_id, "red")
+ break
+
+ opt_file = OPTIMIZATION_DIR / f"T{thread_id:02d}_{strategy_name}_OPT_v{optimization_iteration}.py"
+ opt_result = execute_backtest(opt_file, strategy_name, thread_id)
+
+ if not opt_result['success'] or has_nan_results(opt_result):
+ thread_print(f"β οΈ Optimization {optimization_iteration} failed", thread_id, "yellow")
+ continue
+
+ # π Moon Dev: Parse ALL stats from optimization!
+ opt_stats = parse_all_stats_from_output(opt_result['stdout'], thread_id)
+ new_return = opt_stats.get('return_pct')
+
+ if new_return is None:
+ continue
+
+ change = new_return - best_return
+ thread_print(f"π Opt {optimization_iteration}: {new_return}% ({change:+.2f}%)", thread_id)
+
+ if new_return > best_return:
+                            thread_print(f"✅ Improved by {change:.2f}%!", thread_id, "green")
+ best_return = new_return
+ best_code = optimized_code
+ optimization_code = optimized_code
+
+ # π Moon Dev: Check threshold and save if met!
+ save_backtest_if_threshold_met(
+ optimized_code,
+ opt_stats,
+ strategy_name,
+ optimization_iteration,
+ thread_id,
+ phase="opt"
+ )
+
+ if new_return >= TARGET_RETURN:
+ thread_print("πππ TARGET HIT VIA OPTIMIZATION! πππ", thread_id, "green", attrs=['bold'])
+
+ final_file = OPTIMIZATION_DIR / f"T{thread_id:02d}_{strategy_name}_TARGET_HIT_{new_return}pct.py"
+ with file_lock:
+ with open(final_file, 'w') as f:
+ f.write(best_code)
+
+ return {
+ "success": True,
+ "thread_id": thread_id,
+ "strategy_name": strategy_name,
+ "return": new_return,
+ "target_hit": True,
+ "optimizations": optimization_iteration
+ }
+
+ # Max optimization iterations reached
+ thread_print(f"β οΈ Max optimizations reached. Best: {best_return}%", thread_id, "yellow")
+
+ best_file = OPTIMIZATION_DIR / f"T{thread_id:02d}_{strategy_name}_BEST_{best_return}pct.py"
+ with file_lock:
+ with open(best_file, 'w') as f:
+ f.write(best_code)
+
+ return {
+ "success": True,
+ "thread_id": thread_id,
+ "strategy_name": strategy_name,
+ "return": best_return,
+ "target_hit": False
+ }
+ else:
+ # Execution failed
+ error_message = parse_execution_error(execution_result)
+
+ error_signature = error_message.split('\n')[-1] if '\n' in error_message else error_message
+ if error_signature in error_history:
+ thread_print(f"π Repeated error detected - stopping", thread_id, "red")
+ return {"success": False, "error": "Repeated error", "thread_id": thread_id}
+
+ error_history.append(error_signature)
+ debug_iteration += 1
+
+ if debug_iteration < MAX_DEBUG_ITERATIONS:
+ debugged_code = debug_backtest(
+ current_code,
+ error_message,
+ strategy_name,
+ thread_id,
+ debug_iteration
+ )
+
+ if not debugged_code:
+ thread_print("β Debug AI failed", thread_id, "red")
+ return {"success": False, "error": "Debug failed", "thread_id": thread_id}
+
+ current_code = debugged_code
+ # π Moon Dev: Update to match new debug file location
+ current_file = BACKTEST_DIR / f"T{thread_id:02d}_{strategy_name}_DEBUG_v{debug_iteration}.py"
+ else:
+ thread_print(f"β Max debug iterations reached", thread_id, "red")
+ return {"success": False, "error": "Max debug iterations", "thread_id": thread_id}
+
+ return {"success": True, "thread_id": thread_id}
+
+ except Exception as e:
+ thread_print(f"β FATAL ERROR: {str(e)}", thread_id, "red", attrs=['bold'])
+ return {"success": False, "error": str(e), "thread_id": thread_id}
+
+def main():
+ """Main parallel processing orchestrator"""
+ cprint(f"\n{'='*60}", "cyan", attrs=['bold'])
+ cprint(f"π Moon Dev's RBI AI v3.0 PARALLEL PROCESSOR π", "cyan", attrs=['bold'])
+ cprint(f"{'='*60}", "cyan", attrs=['bold'])
+
+ cprint(f"\nπ
Date: {TODAY_DATE}", "magenta")
+ cprint(f"π― Target Return: {TARGET_RETURN}%", "green", attrs=['bold'])
+ cprint(f"π Max Parallel Threads: {MAX_PARALLEL_THREADS}", "yellow", attrs=['bold'])
+ cprint(f"π Conda env: {CONDA_ENV}", "cyan")
+ cprint(f"π Data dir: {DATA_DIR}", "magenta")
+ cprint(f"π Ideas file: {IDEAS_FILE}\n", "magenta")
+
+ if not IDEAS_FILE.exists():
+ cprint("β ideas.txt not found! Creating template...", "red")
+ IDEAS_FILE.parent.mkdir(parents=True, exist_ok=True)
+ with open(IDEAS_FILE, 'w') as f:
+ f.write("# Add your trading ideas here (one per line)\n")
+ f.write("# Can be YouTube URLs, PDF links, or text descriptions\n")
+ f.write("# Lines starting with # are ignored\n\n")
+ f.write("Create a simple RSI strategy that buys when RSI < 30 and sells when RSI > 70\n")
+ f.write("Momentum strategy using 20/50 SMA crossover with volume confirmation\n")
+ cprint(f"π Created template ideas.txt at: {IDEAS_FILE}", "yellow")
+ cprint("π‘ Add your trading ideas and run again!", "yellow")
+ return
+
+ with open(IDEAS_FILE, 'r') as f:
+ ideas = [line.strip() for line in f if line.strip() and not line.startswith('#')]
+
+ total_ideas = len(ideas)
+ already_processed = sum(1 for idea in ideas if is_idea_processed(idea))
+ new_ideas = total_ideas - already_processed
+
+ cprint(f"π― Total ideas: {total_ideas}", "cyan")
+ cprint(f"β
Already processed: {already_processed}", "green")
+ cprint(f"π New to process: {new_ideas}\n", "yellow", attrs=['bold'])
+
+ if new_ideas == 0:
+ cprint("π All ideas have been processed!", "green", attrs=['bold'])
+ return
+
+ # Filter out already processed ideas
+ ideas_to_process = [(i, idea) for i, idea in enumerate(ideas) if not is_idea_processed(idea)]
+
+ cprint(f"π Starting parallel processing with {MAX_PARALLEL_THREADS} threads...\n", "cyan", attrs=['bold'])
+
+ start_time = datetime.now()
+
+ # Process ideas in parallel
+ with ThreadPoolExecutor(max_workers=MAX_PARALLEL_THREADS) as executor:
+ # Submit all ideas as futures with thread IDs
+ futures = {
+ executor.submit(process_trading_idea_parallel, idea, thread_id): (thread_id, idea)
+ for thread_id, idea in ideas_to_process
+ }
+
+ # Track results
+ results = []
+ completed = 0
+
+ # Process completed futures as they finish
+ for future in as_completed(futures):
+ thread_id, idea = futures[future]
+ completed += 1
+
+ try:
+ result = future.result()
+ results.append(result)
+
+ with console_lock:
+ cprint(f"\n{'='*60}", "green")
+                    cprint(f"✅ Thread {thread_id:02d} COMPLETED ({completed}/{len(futures)})", "green", attrs=['bold'])
+ if result.get('success'):
+ if result.get('target_hit'):
+ cprint(f"π― TARGET HIT: {result.get('strategy_name')} @ {result.get('return')}%", "green", attrs=['bold'])
+ else:
+ cprint(f"π Best return: {result.get('return', 'N/A')}%", "yellow")
+ else:
+ cprint(f"β Failed: {result.get('error', 'Unknown error')}", "red")
+ cprint(f"{'='*60}\n", "green")
+
+ except Exception as e:
+ with console_lock:
+ cprint(f"\nβ Thread {thread_id:02d} raised exception: {str(e)}", "red", attrs=['bold'])
+ results.append({"success": False, "thread_id": thread_id, "error": str(e)})
+
+ total_time = (datetime.now() - start_time).total_seconds()
+
+ # Final summary
+ cprint(f"\n{'='*60}", "cyan", attrs=['bold'])
+ cprint(f"π PARALLEL PROCESSING COMPLETE!", "cyan", attrs=['bold'])
+ cprint(f"{'='*60}", "cyan", attrs=['bold'])
+
+ cprint(f"\nβ±οΈ Total time: {total_time:.2f}s", "magenta")
+ cprint(f"π Ideas processed: {len(results)}", "cyan")
+
+ successful = [r for r in results if r.get('success')]
+ failed = [r for r in results if not r.get('success')]
+ targets_hit = [r for r in successful if r.get('target_hit')]
+
+ cprint(f"β
Successful: {len(successful)}", "green")
+ cprint(f"π― Targets hit: {len(targets_hit)}", "green", attrs=['bold'])
+ cprint(f"β Failed: {len(failed)}", "red")
+
+ if targets_hit:
+ cprint(f"\nπ STRATEGIES THAT HIT TARGET {TARGET_RETURN}%:", "green", attrs=['bold'])
+ for r in targets_hit:
+ cprint(f" β’ {r.get('strategy_name')}: {r.get('return')}%", "green")
+
+ cprint(f"\n⨠All results saved to: {TODAY_DIR}", "cyan")
+ cprint(f"{'='*60}\n", "cyan", attrs=['bold'])
+
+if __name__ == "__main__":
+ main()
diff --git a/src/agents/rbi_agent_pp_multi.py b/src/agents/rbi_agent_pp_multi.py
new file mode 100644
index 000000000..4816484a8
--- /dev/null
+++ b/src/agents/rbi_agent_pp_multi.py
@@ -0,0 +1,1464 @@
+"""
+π Moon Dev's RBI AI v3.0 PARALLEL PROCESSOR + MULTI-DATA TESTING π
+Built with love by Moon Dev π
+
+PARALLEL PROCESSING + MULTI-DATA VALIDATION: Run up to 5 backtests simultaneously,
+each tested on 25+ different data sources!
+
+- Each thread processes a different trading idea
+- Thread-safe colored output
+- Rate limiting to avoid API throttling
+- Massively faster than sequential processing
+- π AUTOMATIC MULTI-DATA TESTING on 25+ data sources (BTC, ETH, SOL, AAPL, TSLA, ES, NQ, etc.)
+
+HOW IT WORKS:
+1. Reads trading ideas from ideas.txt
+2. Spawns up to MAX_PARALLEL_THREADS workers
+3. Each thread independently: Research β Backtest β Debug β Optimize
+4. π Each successful backtest automatically tests on 25+ data sources!
+5. All threads run simultaneously until target returns are hit
+6. Thread-safe file naming with unique 2-digit thread IDs
+7. π Multi-data results saved to ./results/ folders for each strategy
+
+NEW FEATURES:
+- π¨ Color-coded output per thread (Thread 1 = cyan, Thread 2 = magenta, etc.)
+- β±οΈ Rate limiting to avoid API throttling
+- π Thread-safe file operations
+- π Real-time progress tracking across all threads
+- πΎ Clean file organization with thread IDs in names
+- π π MULTI-DATA TESTING: Validates strategies on 25+ assets/timeframes automatically!
+- π π CSV results showing performance across all data sources
+
+Required Setup:
+1. Conda environment 'tflow' with backtesting packages
+2. Set MAX_PARALLEL_THREADS (default: 5)
+3. Multi-data tester at: /Users/md/Dropbox/dev/github/moon-dev-trading-bots/backtests/multi_data_tester.py
+4. Run and watch all ideas process in parallel with multi-data validation! ππ°
+
+IMPORTANT: Each thread is fully independent and won't interfere with others!
+"""
+
+# Import execution functionality
+import subprocess
+import json
+from pathlib import Path
+
+# Core imports
+import os
+import time
+import re
+import hashlib
+import csv
+import pandas as pd
+from datetime import datetime
+from termcolor import cprint
+import sys
+import argparse # π Moon Dev: For command-line args
+from dotenv import load_dotenv
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from threading import Lock, Semaphore
+
+# Load environment variables FIRST
+load_dotenv()
+print("β
Environment variables loaded")
+
+# Add config values directly to avoid import issues
+AI_TEMPERATURE = 0.7
+AI_MAX_TOKENS = 4000
+
+# Import model factory with proper path handling
+import sys
+sys.path.append('/Users/md/Dropbox/dev/github/moon-dev-ai-agents-for-trading')
+
+try:
+ from src.models import model_factory
+ print("β
Successfully imported model_factory")
+except ImportError as e:
+ print(f"β οΈ Could not import model_factory: {e}")
+ sys.exit(1)
+
+# ============================================
+# π― PARALLEL PROCESSING CONFIGURATION
+# ============================================
+MAX_PARALLEL_THREADS = 18 # How many ideas to process simultaneously
+RATE_LIMIT_DELAY = .5 # Seconds to wait between API calls (per thread)
+RATE_LIMIT_GLOBAL_DELAY = 0.5 # Global delay between any API calls
+
+# Thread color mapping
+THREAD_COLORS = {
+ 0: "cyan",
+ 1: "magenta",
+ 2: "yellow",
+ 3: "green",
+ 4: "blue"
+}
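+# Note: with MAX_PARALLEL_THREADS = 18, colors are reused - thread_print() picks
+# the color via thread_id % 5, so threads 0, 5, 10 and 15 all print in cyan.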
+
+# Global locks
+console_lock = Lock()
+api_lock = Lock()
+file_lock = Lock()
+
+# Rate limiter
+rate_limiter = Semaphore(MAX_PARALLEL_THREADS)
+
+# Model Configurations (same as v3)
+RESEARCH_CONFIG = {
+ "type": "xai",
+ "name": "grok-4-fast-reasoning"
+}
+
+BACKTEST_CONFIG = {
+ "type": "xai",
+ "name": "grok-4-fast-reasoning"
+}
+
+DEBUG_CONFIG = {
+ "type": "xai",
+ "name": "grok-4-fast-reasoning"
+}
+
+PACKAGE_CONFIG = {
+ "type": "xai",
+ "name": "grok-4-fast-reasoning"
+}
+
+OPTIMIZE_CONFIG = {
+ "type": "xai",
+ "name": "grok-4-fast-reasoning"
+}
+
+# π― PROFIT TARGET CONFIGURATION
+TARGET_RETURN = 50 # Target return in %
+SAVE_IF_OVER_RETURN = 1.0 # Save backtest to CSV and folders if return > this % (Moon Dev's threshold!)
+CONDA_ENV = "tflow"
+MAX_DEBUG_ITERATIONS = 10
+MAX_OPTIMIZATION_ITERATIONS = 10
+EXECUTION_TIMEOUT = 300 # 5 minutes
+
+# DeepSeek Configuration
+DEEPSEEK_BASE_URL = "https://api.deepseek.com"
+
+# Get today's date for organizing outputs
+TODAY_DATE = datetime.now().strftime("%m_%d_%Y")
+
+# Update data directory paths - Parallel Multi-Data version uses its own folder
+PROJECT_ROOT = Path(__file__).parent.parent
+DATA_DIR = PROJECT_ROOT / "data/rbi_pp_multi"
+TODAY_DIR = DATA_DIR / TODAY_DATE
+RESEARCH_DIR = TODAY_DIR / "research"
+BACKTEST_DIR = TODAY_DIR / "backtests"
+PACKAGE_DIR = TODAY_DIR / "backtests_package"
+WORKING_BACKTEST_DIR = TODAY_DIR / "backtests_working" # Moon Dev's working iterations!
+FINAL_BACKTEST_DIR = TODAY_DIR / "backtests_final"
+OPTIMIZATION_DIR = TODAY_DIR / "backtests_optimized"
+CHARTS_DIR = TODAY_DIR / "charts"
+EXECUTION_DIR = TODAY_DIR / "execution_results"
+PROCESSED_IDEAS_LOG = DATA_DIR / "processed_ideas.log"
+STATS_CSV = DATA_DIR / "backtest_stats.csv" # Moon Dev's stats tracker!
+
+IDEAS_FILE = DATA_DIR / "ideas.txt"
+
+# Create main directories if they don't exist
+for dir_path in [DATA_DIR, TODAY_DIR, RESEARCH_DIR, BACKTEST_DIR, PACKAGE_DIR,
+                 WORKING_BACKTEST_DIR, FINAL_BACKTEST_DIR, OPTIMIZATION_DIR, CHARTS_DIR, EXECUTION_DIR]:
+    dir_path.mkdir(parents=True, exist_ok=True)
+
+# ============================================
+# π¨ THREAD-SAFE PRINTING
+# ============================================
+
+def thread_print(message, thread_id, color=None, attrs=None):
+ """Thread-safe colored print with thread ID prefix"""
+ if color is None:
+ color = THREAD_COLORS.get(thread_id % 5, "white")
+
+ with console_lock:
+ prefix = f"[T{thread_id:02d}]"
+ cprint(f"{prefix} {message}", color, attrs=attrs)
+
+def thread_print_status(thread_id, phase, message):
+ """Print status update for a specific phase"""
+ color = THREAD_COLORS.get(thread_id % 5, "white")
+ with console_lock:
+ cprint(f"[T{thread_id:02d}] {phase}: {message}", color)
+
+# ============================================
+# π RATE LIMITING
+# ============================================
+
+def rate_limited_api_call(func, thread_id, *args, **kwargs):
+ """
+ Wrapper for API calls with rate limiting
+ - Per-thread rate limiting (RATE_LIMIT_DELAY)
+ - Global rate limiting (RATE_LIMIT_GLOBAL_DELAY)
+ """
+ # Global rate limit (quick check)
+ with api_lock:
+ time.sleep(RATE_LIMIT_GLOBAL_DELAY)
+
+ # Execute the API call
+ result = func(*args, **kwargs)
+
+ # Per-thread rate limit
+ time.sleep(RATE_LIMIT_DELAY)
+
+ return result
+
+# ============================================
+# π PROMPTS (Same as v3)
+# ============================================
+
+RESEARCH_PROMPT = """
+You are Moon Dev's Research AI π
+
+IMPORTANT NAMING RULES:
+1. Create a UNIQUE TWO-WORD NAME for this specific strategy
+2. The name must be DIFFERENT from any generic names like "TrendFollower" or "MomentumStrategy"
+3. First word should describe the main approach (e.g., Adaptive, Neural, Quantum, Fractal, Dynamic)
+4. Second word should describe the specific technique (e.g., Reversal, Breakout, Oscillator, Divergence)
+5. Make the name SPECIFIC to this strategy's unique aspects
+
+Examples of good names:
+- "AdaptiveBreakout" for a strategy that adjusts breakout levels
+- "FractalMomentum" for a strategy using fractal analysis with momentum
+- "QuantumReversal" for a complex mean reversion strategy
+- "NeuralDivergence" for a strategy focusing on divergence patterns
+
+BAD names to avoid:
+- "TrendFollower" (too generic)
+- "SimpleMoving" (too basic)
+- "PriceAction" (too vague)
+
+Output format must start with:
+STRATEGY_NAME: [Your unique two-word name]
+
+Then analyze the trading strategy content and create detailed instructions.
+Focus on:
+1. Key strategy components
+2. Entry/exit rules
+3. Risk management
+4. Required indicators
+
+Your complete output must follow this format:
+STRATEGY_NAME: [Your unique two-word name]
+
+STRATEGY_DETAILS:
+[Your detailed analysis]
+
+Remember: The name must be UNIQUE and SPECIFIC to this strategy's approach!
+"""
+
+BACKTEST_PROMPT = """
+You are Moon Dev's Backtest AI π ONLY SEND BACK CODE, NO OTHER TEXT.
+Create a backtesting.py implementation for the strategy.
+USE BACKTESTING.PY
+Include:
+1. All necessary imports
+2. Strategy class with indicators
+3. Entry/exit logic
+4. Risk management
+5. Your size (starting capital) should be 1,000,000
+6. If you need indicators use TA lib or pandas TA.
+
+IMPORTANT DATA HANDLING:
+1. Clean column names by removing spaces: data.columns = data.columns.str.strip().str.lower()
+2. Drop any unnamed columns: data = data.drop(columns=[col for col in data.columns if 'unnamed' in col.lower()])
+3. Ensure proper column mapping to match backtesting requirements:
+ - Required columns: 'Open', 'High', 'Low', 'Close', 'Volume'
+ - Use proper case (capital first letter)
+
+FOR THE PYTHON BACKTESTING LIBRARY USE BACKTESTING.PY AND SEND BACK ONLY THE CODE, NO OTHER TEXT.
+
+INDICATOR CALCULATION RULES:
+1. ALWAYS use self.I() wrapper for ANY indicator calculations
+2. Use talib functions instead of pandas operations:
+ - Instead of: self.data.Close.rolling(20).mean()
+ - Use: self.I(talib.SMA, self.data.Close, timeperiod=20)
+3. For swing high/lows use talib.MAX/MIN:
+ - Instead of: self.data.High.rolling(window=20).max()
+ - Use: self.I(talib.MAX, self.data.High, timeperiod=20)
+
+BACKTEST EXECUTION ORDER:
+1. Run initial backtest with default parameters first
+2. Print full stats using print(stats) and print(stats._strategy)
+3. No optimization code is needed - just print the final stats, and make sure the FULL stats are printed, not a partial subset. The last lines of code should look like: stats = bt.run() then print(stats). No plotting is ever needed.
+
+❌ NEVER USE bt.plot() - IT CAUSES TIMEOUTS IN PARALLEL PROCESSING!
+❌ NO PLOTTING, NO CHARTS, NO VISUALIZATIONS!
+✅ ONLY PRINT STATS TO CONSOLE!
+
+CRITICAL POSITION SIZING RULE:
+When calculating position sizes in backtesting.py, the size parameter must be either:
+1. A fraction between 0 and 1 (for percentage of equity)
+2. A whole number (integer) of units
+
+The common error occurs when calculating position_size = risk_amount / risk, which results in floating-point numbers. Always use:
+position_size = int(round(position_size))
+
+Example fix:
+❌ self.buy(size=3546.0993)  # Will fail
+✅ self.buy(size=int(round(3546.0993)))  # Will work
+
+RISK MANAGEMENT:
+1. Always calculate position sizes based on risk percentage
+2. Use proper stop loss and take profit calculations
+3. Print entry/exit signals with Moon Dev themed messages
+
+If you need indicators use TA lib or pandas TA.
+
+Use this data path: /Users/md/Dropbox/dev/github/moon-dev-ai-agents-for-trading/src/data/rbi/BTC-USD-15m.csv
+the above data head looks like below
+datetime, open, high, low, close, volume,
+2023-01-01 00:00:00, 16531.83, 16532.69, 16509.11, 16510.82, 231.05338022,
+2023-01-01 00:15:00, 16509.78, 16534.66, 16509.11, 16533.43, 308.12276951,
+
+Always add plenty of Moon Dev themed debug prints with emojis to make debugging easier! π β¨ π
+
+MULTI-DATA TESTING REQUIREMENT:
+At the VERY END of your code (after all strategy definitions), you MUST add this EXACT block:
+
+```python
+# π MOON DEV'S MULTI-DATA TESTING FRAMEWORK π
+# Tests this strategy on 25+ data sources automatically!
+if __name__ == "__main__":
+ import sys
+ import os
+
+ # Import the multi-data tester from Moon Dev's trading bots repo
+ sys.path.append('/Users/md/Dropbox/dev/github/moon-dev-trading-bots/backtests')
+ from multi_data_tester import test_on_all_data
+
+ print("\\n" + "="*80)
+ print("π MOON DEV'S MULTI-DATA BACKTEST - Testing on 25+ Data Sources!")
+ print("="*80)
+
+ # Test this strategy on all configured data sources
+ # This will test on: BTC, ETH, SOL (multiple timeframes), AAPL, TSLA, ES, NQ, GOOG, NVDA
+ # IMPORTANT: verbose=False to prevent plotting (causes timeouts in parallel processing!)
+ results = test_on_all_data(YourStrategyClassName, 'YourStrategyName', verbose=False)
+
+ if results is not None:
+ print("\\nβ
Multi-data testing complete! Results saved in ./results/ folder")
+ print(f"π Tested on {len(results)} different data sources")
+ else:
+ print("\\nβ οΈ No results generated - check for errors above")
+```
+
+IMPORTANT: Replace 'YourStrategyClassName' with your actual strategy class name!
+IMPORTANT: Replace 'YourStrategyName' with a descriptive name for the CSV output!
+
+FOR THE PYTHON BACKTESTING LIBRARY USE BACKTESTING.PY AND SEND BACK ONLY THE CODE, NO OTHER TEXT.
+ONLY SEND BACK CODE, NO OTHER TEXT.
+"""
+
+DEBUG_PROMPT = """
+You are Moon Dev's Debug AI π
+Fix technical issues in the backtest code WITHOUT changing the strategy logic.
+
+CRITICAL ERROR TO FIX:
+{error_message}
+
+CRITICAL DATA LOADING REQUIREMENTS:
+The CSV file has these exact columns after processing:
+- datetime, open, high, low, close, volume (all lowercase after .str.lower())
+- After capitalization: Datetime, Open, High, Low, Close, Volume
+
+CRITICAL BACKTESTING REQUIREMENTS:
+1. Data Loading Rules:
+ - Use data.columns.str.strip().str.lower() to clean columns
+ - Drop unnamed columns: data.drop(columns=[col for col in data.columns if 'unnamed' in col.lower()])
+ - Rename columns properly: data.rename(columns={{'open': 'Open', 'high': 'High', 'low': 'Low', 'close': 'Close', 'volume': 'Volume'}})
+ - Set datetime as index: data = data.set_index(pd.to_datetime(data['datetime']))
+
+2. Position Sizing Rules:
+ - Must be either a fraction (0 < size < 1) for percentage of equity
+ - OR a positive whole number (round integer) for units
+ - NEVER use floating point numbers for unit-based sizing
+
+3. Indicator Issues:
+ - Cannot use .shift() on backtesting indicators
+ - Use array indexing like indicator[-2] for previous values
+ - All indicators must be wrapped in self.I()
+
+4. Position Object Issues:
+ - Position object does NOT have .entry_price attribute
+ - Use self.trades[-1].entry_price if you need entry price from last trade
+ - Available position attributes: .size, .pl, .pl_pct
+ - For partial closes: use self.position.close() without parameters (closes entire position)
+ - For stop losses: use sl= parameter in buy/sell calls, not in position.close()
+
+5. No Trades Issue (Signals but no execution):
+ - If strategy prints "ENTRY SIGNAL" but shows 0 trades, the self.buy() call is not executing
+ - Common causes: invalid size parameter, insufficient cash, missing self.buy() call
+ - Ensure self.buy() is actually called in the entry condition block
+ - Check size parameter: must be fraction (0-1) or positive integer
+ - Verify cash/equity is sufficient for the trade size
+
+Focus on:
+1. KeyError issues with column names
+2. Syntax errors and import statements
+3. Indicator calculation methods
+4. Data loading and preprocessing
+5. Position object attribute errors (.entry_price, .close() parameters)
+
+DO NOT change strategy logic, entry/exit conditions, or risk management rules.
+
+Return the complete fixed code with Moon Dev themed debug prints! π β¨
+ONLY SEND BACK CODE, NO OTHER TEXT.
+"""
+
+PACKAGE_PROMPT = """
+You are Moon Dev's Package AI π
+Your job is to ensure the backtest code NEVER uses ANY backtesting.lib imports or functions.
+
+β STRICTLY FORBIDDEN:
+1. from backtesting.lib import *
+2. import backtesting.lib
+3. from backtesting.lib import crossover
+4. ANY use of backtesting.lib
+
+β REQUIRED REPLACEMENTS:
+1. For crossover detection:
+ Instead of: backtesting.lib.crossover(a, b)
+ Use: (a[-2] < b[-2] and a[-1] > b[-1]) # for bullish crossover
+ (a[-2] > b[-2] and a[-1] < b[-1]) # for bearish crossover
+
+2. For indicators:
+ - Use talib for all standard indicators (SMA, RSI, MACD, etc.)
+ - Use pandas-ta for specialized indicators
+ - ALWAYS wrap in self.I()
+
+3. For signal generation:
+ - Use numpy/pandas boolean conditions
+ - Use rolling window comparisons with array indexing
+ - Use mathematical comparisons (>, <, ==)
+
+Example conversions:
+β from backtesting.lib import crossover
+β if crossover(fast_ma, slow_ma):
+β if fast_ma[-2] < slow_ma[-2] and fast_ma[-1] > slow_ma[-1]:
+
+β self.sma = self.I(backtesting.lib.SMA, self.data.Close, 20)
+β self.sma = self.I(talib.SMA, self.data.Close, timeperiod=20)
+
+IMPORTANT: Scan the ENTIRE code for any backtesting.lib usage and replace ALL instances!
+Return the complete fixed code with proper Moon Dev themed debug prints! π β¨
+ONLY SEND BACK CODE, NO OTHER TEXT.
+"""
+
+OPTIMIZE_PROMPT = """
+You are Moon Dev's Optimization AI π
+Your job is to IMPROVE the strategy to achieve higher returns while maintaining good risk management.
+
+CURRENT PERFORMANCE:
+Return [%]: {current_return}%
+TARGET RETURN: {target_return}%
+
+YOUR MISSION: Optimize this strategy to hit the target return!
+
+OPTIMIZATION TECHNIQUES TO CONSIDER:
+1. **Entry Optimization:**
+ - Tighten entry conditions to catch better setups
+ - Add filters to avoid low-quality signals
+ - Use multiple timeframe confirmation
+ - Add volume/momentum filters
+
+2. **Exit Optimization:**
+ - Improve take profit levels
+ - Add trailing stops
+ - Use dynamic position sizing based on volatility
+ - Scale out of positions
+
+3. **Risk Management:**
+ - Adjust position sizing
+ - Use volatility-based position sizing (ATR)
+ - Add maximum drawdown limits
+ - Improve stop loss placement
+
+4. **Indicator Optimization:**
+ - Fine-tune indicator parameters
+ - Add complementary indicators
+ - Use indicator divergence
+ - Combine multiple timeframes
+
+5. **Market Regime Filters:**
+ - Add trend filters
+ - Avoid choppy/ranging markets
+ - Only trade in favorable conditions
+
+IMPORTANT RULES:
+- DO NOT break the code structure
+- Keep all Moon Dev debug prints
+- Maintain proper backtesting.py format
+- Use self.I() for all indicators
+- Position sizes must be int or fraction (0-1)
+- Focus on REALISTIC improvements (no curve fitting!)
+- Explain your optimization changes in comments
+
+Return the COMPLETE optimized code with Moon Dev themed comments explaining what you improved! π β¨
+ONLY SEND BACK CODE, NO OTHER TEXT.
+"""
+
+# ============================================
+# π οΈ HELPER FUNCTIONS (with thread safety)
+# ============================================
+
+def parse_return_from_output(stdout: str, thread_id: int) -> float:
+ """Extract the Return [%] from backtest output"""
+ try:
+ match = re.search(r'Return \[%\]\s+([-\d.]+)', stdout)
+ if match:
+ return_pct = float(match.group(1))
+ thread_print(f"π Extracted return: {return_pct}%", thread_id)
+ return return_pct
+ else:
+ thread_print("β οΈ Could not find Return [%] in output", thread_id, "yellow")
+ return None
+ except Exception as e:
+ thread_print(f"β Error parsing return: {str(e)}", thread_id, "red")
+ return None
+
+def parse_all_stats_from_output(stdout: str, thread_id: int) -> dict:
+ """
+ π Moon Dev's Stats Parser - Extract all key stats from backtest output!
+ Returns dict with: return_pct, buy_hold_pct, max_drawdown_pct, sharpe, sortino, expectancy, trades
+ """
+ stats = {
+ 'return_pct': None,
+ 'buy_hold_pct': None,
+ 'max_drawdown_pct': None,
+ 'sharpe': None,
+ 'sortino': None,
+ 'expectancy': None,
+ 'trades': None
+ }
+
+ try:
+ # Return [%]
+ match = re.search(r'Return \[%\]\s+([-\d.]+)', stdout)
+ if match:
+ stats['return_pct'] = float(match.group(1))
+
+ # Buy & Hold Return [%]
+ match = re.search(r'Buy & Hold Return \[%\]\s+([-\d.]+)', stdout)
+ if match:
+ stats['buy_hold_pct'] = float(match.group(1))
+
+ # Max. Drawdown [%]
+ match = re.search(r'Max\. Drawdown \[%\]\s+([-\d.]+)', stdout)
+ if match:
+ stats['max_drawdown_pct'] = float(match.group(1))
+
+ # Sharpe Ratio
+ match = re.search(r'Sharpe Ratio\s+([-\d.]+)', stdout)
+ if match:
+ stats['sharpe'] = float(match.group(1))
+
+ # Sortino Ratio
+ match = re.search(r'Sortino Ratio\s+([-\d.]+)', stdout)
+ if match:
+ stats['sortino'] = float(match.group(1))
+
+ # Expectancy [%] (or Avg. Trade [%])
+ match = re.search(r'Expectancy \[%\]\s+([-\d.]+)', stdout)
+ if not match:
+ match = re.search(r'Avg\. Trade \[%\]\s+([-\d.]+)', stdout)
+ if match:
+ stats['expectancy'] = float(match.group(1))
+
+ # # Trades
+ match = re.search(r'# Trades\s+(\d+)', stdout)
+ if match:
+ stats['trades'] = int(match.group(1))
+
+ thread_print(f"π Extracted {sum(1 for v in stats.values() if v is not None)}/7 stats", thread_id)
+ return stats
+
+ except Exception as e:
+ thread_print(f"β Error parsing stats: {str(e)}", thread_id, "red")
+ return stats
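+
+# NOTE (illustrative, values made up): the regexes above expect the plain-text stats block
+# that backtesting.py prints, which looks roughly like:
+#   Return [%]                         42.15
+#   Buy & Hold Return [%]              18.33
+#   Max. Drawdown [%]                 -12.47
+#   Sharpe Ratio                        1.21
+#   Sortino Ratio                       2.05
+#   Expectancy [%]                      0.38
+#   # Trades                              57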
+
+def log_stats_to_csv(strategy_name: str, thread_id: int, stats: dict, file_path: str, data_source: str = "BTC-USD-15m.csv") -> None:
+ """
+ π Moon Dev's CSV Logger - Thread-safe stats logging!
+ Appends backtest stats to CSV for easy analysis and comparison
+ Now includes data source tracking for multi-data testing!
+ """
+ try:
+ with file_lock:
+ # Create CSV with headers if it doesn't exist
+ file_exists = STATS_CSV.exists()
+
+ with open(STATS_CSV, 'a', newline='') as f:
+ writer = csv.writer(f)
+
+ # Write header if new file
+ if not file_exists:
+ writer.writerow([
+ 'Strategy Name',
+ 'Thread ID',
+ 'Return %',
+ 'Buy & Hold %',
+ 'Max Drawdown %',
+ 'Sharpe Ratio',
+ 'Sortino Ratio',
+ 'EV %', # π Moon Dev: Changed from Expectancy %
+ 'Trades', # π Moon Dev: Added # Trades
+ 'File Path',
+ 'Data', # π Moon Dev: Changed from Data Source
+ 'Time' # π Moon Dev: Changed from Timestamp
+ ])
+ thread_print("π Created new stats CSV with headers", thread_id, "green")
+
+ # Write stats row
+ # π Moon Dev: Format time as "10/25 06:30"
+ timestamp = datetime.now().strftime("%m/%d %H:%M")
+ writer.writerow([
+ strategy_name,
+ f"T{thread_id:02d}",
+ stats.get('return_pct', 'N/A'),
+ stats.get('buy_hold_pct', 'N/A'),
+ stats.get('max_drawdown_pct', 'N/A'),
+ stats.get('sharpe', 'N/A'),
+ stats.get('sortino', 'N/A'),
+ stats.get('expectancy', 'N/A'),
+ stats.get('trades', 'N/A'), # π Moon Dev: Added # Trades
+ str(file_path),
+ data_source,
+ timestamp
+ ])
+
+ thread_print(f"β
Logged stats to CSV (Return: {stats.get('return_pct', 'N/A')}% on {data_source})", thread_id, "green")
+
+ except Exception as e:
+ thread_print(f"β Error logging to CSV: {str(e)}", thread_id, "red")
+
+def parse_and_log_multi_data_results(strategy_name: str, thread_id: int, backtest_file_path: Path) -> None:
+ """
+ π Moon Dev's Multi-Data Results Parser!
+ Parses the multi-data testing results CSV and logs all results that pass the threshold
+
+ Args:
+ strategy_name: Name of the strategy
+ thread_id: Thread ID
+ backtest_file_path: Path to the backtest file that was executed
+ """
+ try:
+ # Multi-data results are saved in ./results/ relative to the backtest file
+ backtest_dir = backtest_file_path.parent
+ results_dir = backtest_dir / "results"
+ results_csv = results_dir / f"{strategy_name}.csv"
+
+ # Check if results exist
+ if not results_csv.exists():
+ thread_print(f"β οΈ No multi-data results found at {results_csv}", thread_id, "yellow")
+ return
+
+        # Read the results CSV (local pandas import in case it isn't imported at module level)
+        import pandas as pd
+        df = pd.read_csv(results_csv)
+
+ thread_print(f"π Found {len(df)} multi-data test results", thread_id, "cyan")
+
+ # Filter for results that pass the threshold
+ passing_results = df[df['Return_%'] > SAVE_IF_OVER_RETURN]
+
+ if len(passing_results) == 0:
+ thread_print(f"β οΈ No multi-data results passed {SAVE_IF_OVER_RETURN}% threshold", thread_id, "yellow")
+ return
+
+ thread_print(f"β
{len(passing_results)} data sources passed threshold!", thread_id, "green", attrs=['bold'])
+
+ # Log each passing result to the main stats CSV
+ for idx, row in passing_results.iterrows():
+ stats = {
+ 'return_pct': row['Return_%'],
+ 'buy_hold_pct': row.get('Buy_Hold_%', None),
+ 'max_drawdown_pct': row.get('Max_DD_%', None),
+ 'sharpe': row.get('Sharpe', None),
+ 'sortino': row.get('Sortino', None),
+ 'expectancy': row.get('Expectancy_%', None),
+ 'trades': row.get('Trades', None) # π Moon Dev: Added # Trades
+ }
+
+ data_source = row['Data_Source']
+
+ # Log to CSV with the specific data source
+ log_stats_to_csv(
+ strategy_name,
+ thread_id,
+ stats,
+ str(backtest_file_path),
+ data_source=data_source
+ )
+
+ thread_print(f"πΎ Logged {len(passing_results)} multi-data results to CSV!", thread_id, "green", attrs=['bold'])
+
+ except Exception as e:
+ thread_print(f"β Error parsing multi-data results: {str(e)}", thread_id, "red")
+
+def save_backtest_if_threshold_met(code: str, stats: dict, strategy_name: str, iteration: int, thread_id: int, phase: str = "debug") -> bool:
+ """
+ π Moon Dev's Threshold Checker - Save backtests that pass the return threshold!
+
+ Args:
+ code: The backtest code to save
+ stats: Dict of parsed stats
+ strategy_name: Name of the strategy
+ iteration: Current iteration number
+ thread_id: Thread ID
+ phase: "debug", "opt", or "final" to determine filename
+
+ Returns:
+ True if saved (threshold met), False otherwise
+ """
+ return_pct = stats.get('return_pct')
+
+ # Check if return meets threshold
+ if return_pct is None or return_pct <= SAVE_IF_OVER_RETURN:
+ thread_print(f"β οΈ Return {return_pct}% β€ {SAVE_IF_OVER_RETURN}% threshold - not saving", thread_id, "yellow")
+ return False
+
+ try:
+ # Determine filename based on phase
+ if phase == "debug":
+ filename = f"T{thread_id:02d}_{strategy_name}_DEBUG_v{iteration}_{return_pct:.1f}pct.py"
+ elif phase == "opt":
+ filename = f"T{thread_id:02d}_{strategy_name}_OPT_v{iteration}_{return_pct:.1f}pct.py"
+ else: # final
+ filename = f"T{thread_id:02d}_{strategy_name}_FINAL_{return_pct:.1f}pct.py"
+
+ # Save to WORKING folder
+ working_file = WORKING_BACKTEST_DIR / filename
+ with file_lock:
+ with open(working_file, 'w') as f:
+ f.write(code)
+
+ # Save to FINAL folder (same logic per Moon Dev's request)
+ final_file = FINAL_BACKTEST_DIR / filename
+ with file_lock:
+ with open(final_file, 'w') as f:
+ f.write(code)
+
+ thread_print(f"πΎ Saved to working & final! Return: {return_pct:.2f}%", thread_id, "green", attrs=['bold'])
+
+ # Log to CSV
+ log_stats_to_csv(strategy_name, thread_id, stats, str(working_file))
+
+ return True
+
+ except Exception as e:
+ thread_print(f"β Error saving backtest: {str(e)}", thread_id, "red")
+ return False
+
+def execute_backtest(file_path: str, strategy_name: str, thread_id: int) -> dict:
+ """Execute a backtest file in conda environment and capture output"""
+ thread_print(f"π Executing: {strategy_name}", thread_id)
+
+ if not os.path.exists(file_path):
+ raise FileNotFoundError(f"File not found: {file_path}")
+
+ cmd = [
+ "conda", "run", "-n", CONDA_ENV,
+ "python", str(file_path)
+ ]
+
+ start_time = datetime.now()
+
+ result = subprocess.run(
+ cmd,
+ capture_output=True,
+ text=True,
+ timeout=EXECUTION_TIMEOUT
+ )
+
+ execution_time = (datetime.now() - start_time).total_seconds()
+
+ output = {
+ "success": result.returncode == 0,
+ "return_code": result.returncode,
+ "stdout": result.stdout,
+ "stderr": result.stderr,
+ "execution_time": execution_time,
+ "timestamp": datetime.now().isoformat()
+ }
+
+ # Save execution results with thread ID
+ result_file = EXECUTION_DIR / f"T{thread_id:02d}_{strategy_name}_{datetime.now().strftime('%H%M%S')}.json"
+ with file_lock:
+ with open(result_file, 'w') as f:
+ json.dump(output, f, indent=2)
+
+ if output['success']:
+ thread_print(f"β
Backtest executed in {execution_time:.2f}s!", thread_id, "green")
+ else:
+ thread_print(f"β Backtest failed: {output['return_code']}", thread_id, "red")
+
+ return output
+
+def parse_execution_error(execution_result: dict) -> str:
+ """Extract meaningful error message for debug agent"""
+ if execution_result.get('stderr'):
+ return execution_result['stderr'].strip()
+ return execution_result.get('error', 'Unknown error')
+
+def get_idea_hash(idea: str) -> str:
+ """Generate a unique hash for an idea to track processing status"""
+ return hashlib.md5(idea.encode('utf-8')).hexdigest()
+
+def is_idea_processed(idea: str) -> bool:
+ """Check if an idea has already been processed (thread-safe)"""
+ if not PROCESSED_IDEAS_LOG.exists():
+ return False
+
+ idea_hash = get_idea_hash(idea)
+
+ with file_lock:
+ with open(PROCESSED_IDEAS_LOG, 'r') as f:
+ processed_hashes = [line.strip().split(',')[0] for line in f if line.strip()]
+
+ return idea_hash in processed_hashes
+
+def log_processed_idea(idea: str, strategy_name: str, thread_id: int) -> None:
+ """Log an idea as processed with timestamp and strategy name (thread-safe)"""
+ idea_hash = get_idea_hash(idea)
+ timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+ with file_lock:
+ if not PROCESSED_IDEAS_LOG.exists():
+ PROCESSED_IDEAS_LOG.parent.mkdir(parents=True, exist_ok=True)
+ with open(PROCESSED_IDEAS_LOG, 'w') as f:
+ f.write("# Moon Dev's RBI AI - Processed Ideas Log π\n")
+ f.write("# Format: hash,timestamp,thread_id,strategy_name,idea_snippet\n")
+
+ idea_snippet = idea[:50].replace(',', ';') + ('...' if len(idea) > 50 else '')
+ with open(PROCESSED_IDEAS_LOG, 'a') as f:
+ f.write(f"{idea_hash},{timestamp},T{thread_id:02d},{strategy_name},{idea_snippet}\n")
+
+ thread_print(f"π Logged processed idea: {strategy_name}", thread_id, "green")
+
+def has_nan_results(execution_result: dict) -> bool:
+ """Check if backtest results contain NaN values indicating no trades"""
+ if not execution_result.get('success'):
+ return False
+
+ stdout = execution_result.get('stdout', '')
+
+ nan_indicators = [
+ '# Trades 0',
+ 'Win Rate [%] NaN',
+ 'Exposure Time [%] 0.0',
+ 'Return [%] 0.0'
+ ]
+
+ nan_count = sum(1 for indicator in nan_indicators if indicator in stdout)
+ return nan_count >= 2
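+
+# NOTE (illustrative): a zero-trade run prints stats such as "# Trades    0" and
+# "Win Rate [%]    NaN", so matching two or more of the indicator strings above is
+# treated as a no-trade result.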
+
+def analyze_no_trades_issue(execution_result: dict) -> str:
+ """Analyze why strategy shows signals but no trades"""
+ stdout = execution_result.get('stdout', '')
+
+ if 'ENTRY SIGNAL' in stdout and '# Trades 0' in stdout:
+ return "Strategy is generating entry signals but self.buy() calls are not executing. This usually means: 1) Position sizing issues (size parameter invalid), 2) Insufficient cash/equity, 3) Logic preventing buy execution, or 4) Missing actual self.buy() call in the code. The strategy prints signals but never calls self.buy()."
+
+ elif '# Trades 0' in stdout:
+ return "Strategy executed but took 0 trades, resulting in NaN values. The entry conditions are likely too restrictive or there are logic errors preventing trade execution."
+
+ return "Strategy executed but took 0 trades, resulting in NaN values. Please adjust the strategy logic to actually generate trading signals and take trades."
+
+def chat_with_model(system_prompt, user_content, model_config, thread_id):
+ """Chat with AI model using model factory with rate limiting"""
+ def _api_call():
+ model = model_factory.get_model(model_config["type"], model_config["name"])
+ if not model:
+ raise ValueError(f"π¨ Could not initialize {model_config['type']} {model_config['name']} model!")
+
+ if model_config["type"] == "ollama":
+ response = model.generate_response(
+ system_prompt=system_prompt,
+ user_content=user_content,
+ temperature=AI_TEMPERATURE
+ )
+ if isinstance(response, str):
+ return response
+ if hasattr(response, 'content'):
+ return response.content
+ return str(response)
+ else:
+ response = model.generate_response(
+ system_prompt=system_prompt,
+ user_content=user_content,
+ temperature=AI_TEMPERATURE,
+ max_tokens=AI_MAX_TOKENS
+ )
+ if not response:
+ raise ValueError("Model returned None response")
+ return response.content
+
+ # Apply rate limiting
+ return rate_limited_api_call(_api_call, thread_id)
+
+def clean_model_output(output, content_type="text"):
+ """Clean model output by removing thinking tags and extracting code from markdown"""
+ cleaned_output = output
+
+ if "" in output and "" in output:
+ clean_content = output.split("")[-1].strip()
+ if not clean_content:
+ import re
+ clean_content = re.sub(r'.*?', '', output, flags=re.DOTALL).strip()
+ if clean_content:
+ cleaned_output = clean_content
+
+ if content_type == "code" and "```" in cleaned_output:
+ try:
+ import re
+ code_blocks = re.findall(r'```python\n(.*?)\n```', cleaned_output, re.DOTALL)
+ if not code_blocks:
+ code_blocks = re.findall(r'```(?:python)?\n(.*?)\n```', cleaned_output, re.DOTALL)
+ if code_blocks:
+ cleaned_output = "\n\n".join(code_blocks)
+ except Exception as e:
+ thread_print(f"β Error extracting code: {str(e)}", 0, "red")
+
+ return cleaned_output
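+
+# NOTE (illustrative usage, assumes the reasoning-tag handling above): given a raw reply like
+#   "<think>planning...</think>" followed by a fenced ```python block containing print('hi'),
+# clean_model_output(raw, "code") drops the <think> section and the markdown fences and
+# returns just "print('hi')".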
+
+# ============================================
+# π€ AI AGENT FUNCTIONS (Thread-safe versions)
+# ============================================
+
+def research_strategy(content, thread_id):
+ """Research AI: Analyzes and creates trading strategy"""
+ thread_print_status(thread_id, "π RESEARCH", "Starting analysis...")
+
+ output = chat_with_model(
+ RESEARCH_PROMPT,
+ content,
+ RESEARCH_CONFIG,
+ thread_id
+ )
+
+ if output:
+ output = clean_model_output(output, "text")
+
+ strategy_name = "UnknownStrategy"
+ if "STRATEGY_NAME:" in output:
+ try:
+ name_section = output.split("STRATEGY_NAME:")[1].strip()
+ if "\n\n" in name_section:
+ strategy_name = name_section.split("\n\n")[0].strip()
+ else:
+ strategy_name = name_section.split("\n")[0].strip()
+
+ strategy_name = re.sub(r'[^\w\s-]', '', strategy_name)
+ strategy_name = re.sub(r'[\s]+', '', strategy_name)
+
+                thread_print(f"β Strategy: {strategy_name}", thread_id, "green")
+ except Exception as e:
+ thread_print(f"β οΈ Error extracting strategy name: {str(e)}", thread_id, "yellow")
+
+ # Add thread ID to filename
+ filepath = RESEARCH_DIR / f"T{thread_id:02d}_{strategy_name}_strategy.txt"
+ with file_lock:
+ with open(filepath, 'w') as f:
+ f.write(output)
+
+ return output, strategy_name
+ return None, None
+
+def create_backtest(strategy, strategy_name, thread_id):
+ """Backtest AI: Creates backtest implementation"""
+ thread_print_status(thread_id, "π BACKTEST", "Creating backtest code...")
+
+ output = chat_with_model(
+ BACKTEST_PROMPT,
+ f"Create a backtest for this strategy:\n\n{strategy}",
+ BACKTEST_CONFIG,
+ thread_id
+ )
+
+ if output:
+ output = clean_model_output(output, "code")
+
+ filepath = BACKTEST_DIR / f"T{thread_id:02d}_{strategy_name}_BT.py"
+ with file_lock:
+ with open(filepath, 'w') as f:
+ f.write(output)
+
+ thread_print(f"π₯ Backtest code saved", thread_id, "green")
+ return output
+ return None
+
+def package_check(backtest_code, strategy_name, thread_id):
+ """Package AI: Ensures correct indicator packages are used"""
+ thread_print_status(thread_id, "π¦ PACKAGE", "Checking imports...")
+
+ output = chat_with_model(
+ PACKAGE_PROMPT,
+ f"Check and fix indicator packages in this code:\n\n{backtest_code}",
+ PACKAGE_CONFIG,
+ thread_id
+ )
+
+ if output:
+ output = clean_model_output(output, "code")
+
+ filepath = PACKAGE_DIR / f"T{thread_id:02d}_{strategy_name}_PKG.py"
+ with file_lock:
+ with open(filepath, 'w') as f:
+ f.write(output)
+
+ thread_print(f"π¦ Package check complete", thread_id, "green")
+ return output
+ return None
+
+def debug_backtest(backtest_code, error_message, strategy_name, thread_id, iteration=1):
+ """Debug AI: Fixes technical issues in backtest code"""
+ thread_print_status(thread_id, f"π§ DEBUG #{iteration}", "Fixing errors...")
+
+ debug_prompt_with_error = DEBUG_PROMPT.format(error_message=error_message)
+
+ output = chat_with_model(
+ debug_prompt_with_error,
+ f"Fix this backtest code:\n\n{backtest_code}",
+ DEBUG_CONFIG,
+ thread_id
+ )
+
+ if output:
+ output = clean_model_output(output, "code")
+
+ # π Moon Dev: Save debug iterations to BACKTEST_DIR, not FINAL
+ # Only threshold-passing backtests go to FINAL/WORKING folders!
+ filepath = BACKTEST_DIR / f"T{thread_id:02d}_{strategy_name}_DEBUG_v{iteration}.py"
+ with file_lock:
+ with open(filepath, 'w') as f:
+ f.write(output)
+
+ thread_print(f"π§ Debug iteration {iteration} complete", thread_id, "green")
+ return output
+ return None
+
+def optimize_strategy(backtest_code, current_return, target_return, strategy_name, thread_id, iteration=1):
+ """Optimization AI: Improves strategy to hit target return"""
+ thread_print_status(thread_id, f"π― OPTIMIZE #{iteration}", f"{current_return}% β {target_return}%")
+
+ optimize_prompt_with_stats = OPTIMIZE_PROMPT.format(
+ current_return=current_return,
+ target_return=target_return
+ )
+
+ output = chat_with_model(
+ optimize_prompt_with_stats,
+ f"Optimize this backtest code to hit the target:\n\n{backtest_code}",
+ OPTIMIZE_CONFIG,
+ thread_id
+ )
+
+ if output:
+ output = clean_model_output(output, "code")
+
+ filepath = OPTIMIZATION_DIR / f"T{thread_id:02d}_{strategy_name}_OPT_v{iteration}.py"
+ with file_lock:
+ with open(filepath, 'w') as f:
+ f.write(output)
+
+ thread_print(f"π― Optimization {iteration} complete", thread_id, "green")
+ return output
+ return None
+
+# ============================================
+# π PARALLEL PROCESSING CORE
+# ============================================
+
+def process_trading_idea_parallel(idea: str, thread_id: int) -> dict:
+ """
+ Process a single trading idea with full Research β Backtest β Debug β Optimize pipeline
+ This is the worker function for each parallel thread
+ """
+ try:
+ thread_print(f"π Starting processing", thread_id, attrs=['bold'])
+
+ # Phase 1: Research
+ strategy, strategy_name = research_strategy(idea, thread_id)
+
+ if not strategy:
+ thread_print("β Research failed", thread_id, "red")
+ return {"success": False, "error": "Research failed", "thread_id": thread_id}
+
+ log_processed_idea(idea, strategy_name, thread_id)
+
+ # Phase 2: Backtest
+ backtest = create_backtest(strategy, strategy_name, thread_id)
+
+ if not backtest:
+ thread_print("β Backtest failed", thread_id, "red")
+ return {"success": False, "error": "Backtest failed", "thread_id": thread_id}
+
+ # Phase 3: Package Check
+ package_checked = package_check(backtest, strategy_name, thread_id)
+
+ if not package_checked:
+ thread_print("β Package check failed", thread_id, "red")
+ return {"success": False, "error": "Package check failed", "thread_id": thread_id}
+
+ package_file = PACKAGE_DIR / f"T{thread_id:02d}_{strategy_name}_PKG.py"
+
+ # Phase 4: Execution Loop
+ debug_iteration = 0
+ current_code = package_checked
+ current_file = package_file
+ error_history = []
+
+ while debug_iteration < MAX_DEBUG_ITERATIONS:
+ thread_print_status(thread_id, "π EXECUTE", f"Attempt {debug_iteration + 1}/{MAX_DEBUG_ITERATIONS}")
+
+ execution_result = execute_backtest(current_file, strategy_name, thread_id)
+
+ if execution_result['success']:
+ if has_nan_results(execution_result):
+ thread_print("β οΈ No trades taken", thread_id, "yellow")
+
+ error_message = analyze_no_trades_issue(execution_result)
+ debug_iteration += 1
+
+ if debug_iteration < MAX_DEBUG_ITERATIONS:
+ debugged_code = debug_backtest(
+ current_code,
+ error_message,
+ strategy_name,
+ thread_id,
+ debug_iteration
+ )
+
+ if not debugged_code:
+ thread_print("β Debug AI failed", thread_id, "red")
+ return {"success": False, "error": "Debug failed", "thread_id": thread_id}
+
+ current_code = debugged_code
+ # π Moon Dev: Update to match new debug file location
+ current_file = BACKTEST_DIR / f"T{thread_id:02d}_{strategy_name}_DEBUG_v{debug_iteration}.py"
+ continue
+ else:
+ thread_print(f"β Max debug iterations reached", thread_id, "red")
+ return {"success": False, "error": "Max debug iterations", "thread_id": thread_id}
+ else:
+ # SUCCESS! Code executes with trades!
+ thread_print("π BACKTEST SUCCESSFUL!", thread_id, "green", attrs=['bold'])
+
+ # π Moon Dev: Parse ALL stats, not just return!
+ all_stats = parse_all_stats_from_output(execution_result['stdout'], thread_id)
+ current_return = all_stats.get('return_pct')
+
+ if current_return is None:
+ thread_print("β οΈ Could not parse return", thread_id, "yellow")
+ final_file = FINAL_BACKTEST_DIR / f"T{thread_id:02d}_{strategy_name}_BTFinal_WORKING.py"
+ with file_lock:
+ with open(final_file, 'w') as f:
+ f.write(current_code)
+ break
+
+ # π Moon Dev: Check threshold and save if met!
+ save_backtest_if_threshold_met(
+ current_code,
+ all_stats,
+ strategy_name,
+ debug_iteration,
+ thread_id,
+ phase="debug"
+ )
+
+ # π Moon Dev: Parse and log multi-data results!
+ thread_print("π Checking for multi-data test results...", thread_id, "cyan")
+ parse_and_log_multi_data_results(
+ strategy_name,
+ thread_id,
+ current_file
+ )
+
+ thread_print(f"π Return: {current_return}% | Target: {TARGET_RETURN}%", thread_id)
+
+ if current_return >= TARGET_RETURN:
+ # TARGET HIT!
+ thread_print("πππ TARGET HIT! πππ", thread_id, "green", attrs=['bold'])
+
+ # π Moon Dev: Save to OPTIMIZATION_DIR for target hits
+ final_file = OPTIMIZATION_DIR / f"T{thread_id:02d}_{strategy_name}_TARGET_HIT_{current_return}pct.py"
+ with file_lock:
+ with open(final_file, 'w') as f:
+ f.write(current_code)
+
+ return {
+ "success": True,
+ "thread_id": thread_id,
+ "strategy_name": strategy_name,
+ "return": current_return,
+ "target_hit": True
+ }
+ else:
+ # Need to optimize
+ gap = TARGET_RETURN - current_return
+ thread_print(f"π Need {gap}% more - Starting optimization", thread_id)
+
+ optimization_iteration = 0
+ optimization_code = current_code
+ best_return = current_return
+ best_code = current_code
+
+ while optimization_iteration < MAX_OPTIMIZATION_ITERATIONS:
+ optimization_iteration += 1
+
+ optimized_code = optimize_strategy(
+ optimization_code,
+ best_return,
+ TARGET_RETURN,
+ strategy_name,
+ thread_id,
+ optimization_iteration
+ )
+
+ if not optimized_code:
+ thread_print("β Optimization AI failed", thread_id, "red")
+ break
+
+ opt_file = OPTIMIZATION_DIR / f"T{thread_id:02d}_{strategy_name}_OPT_v{optimization_iteration}.py"
+ opt_result = execute_backtest(opt_file, strategy_name, thread_id)
+
+ if not opt_result['success'] or has_nan_results(opt_result):
+ thread_print(f"β οΈ Optimization {optimization_iteration} failed", thread_id, "yellow")
+ continue
+
+ # π Moon Dev: Parse ALL stats from optimization!
+ opt_stats = parse_all_stats_from_output(opt_result['stdout'], thread_id)
+ new_return = opt_stats.get('return_pct')
+
+ if new_return is None:
+ continue
+
+ change = new_return - best_return
+ thread_print(f"π Opt {optimization_iteration}: {new_return}% ({change:+.2f}%)", thread_id)
+
+ if new_return > best_return:
+                                thread_print(f"β Improved by {change:.2f}%!", thread_id, "green")
+ best_return = new_return
+ best_code = optimized_code
+ optimization_code = optimized_code
+
+ # π Moon Dev: Check threshold and save if met!
+ save_backtest_if_threshold_met(
+ optimized_code,
+ opt_stats,
+ strategy_name,
+ optimization_iteration,
+ thread_id,
+ phase="opt"
+ )
+
+ # π Moon Dev: Parse and log multi-data results from optimization!
+ thread_print("π Checking for multi-data test results...", thread_id, "cyan")
+ parse_and_log_multi_data_results(
+ strategy_name,
+ thread_id,
+ opt_file
+ )
+
+ if new_return >= TARGET_RETURN:
+ thread_print("πππ TARGET HIT VIA OPTIMIZATION! πππ", thread_id, "green", attrs=['bold'])
+
+ final_file = OPTIMIZATION_DIR / f"T{thread_id:02d}_{strategy_name}_TARGET_HIT_{new_return}pct.py"
+ with file_lock:
+ with open(final_file, 'w') as f:
+ f.write(best_code)
+
+ return {
+ "success": True,
+ "thread_id": thread_id,
+ "strategy_name": strategy_name,
+ "return": new_return,
+ "target_hit": True,
+ "optimizations": optimization_iteration
+ }
+
+ # Max optimization iterations reached
+ thread_print(f"β οΈ Max optimizations reached. Best: {best_return}%", thread_id, "yellow")
+
+ best_file = OPTIMIZATION_DIR / f"T{thread_id:02d}_{strategy_name}_BEST_{best_return}pct.py"
+ with file_lock:
+ with open(best_file, 'w') as f:
+ f.write(best_code)
+
+ return {
+ "success": True,
+ "thread_id": thread_id,
+ "strategy_name": strategy_name,
+ "return": best_return,
+ "target_hit": False
+ }
+ else:
+ # Execution failed
+ error_message = parse_execution_error(execution_result)
+
+ error_signature = error_message.split('\n')[-1] if '\n' in error_message else error_message
+ if error_signature in error_history:
+ thread_print(f"π Repeated error detected - stopping", thread_id, "red")
+ return {"success": False, "error": "Repeated error", "thread_id": thread_id}
+
+ error_history.append(error_signature)
+ debug_iteration += 1
+
+ if debug_iteration < MAX_DEBUG_ITERATIONS:
+ debugged_code = debug_backtest(
+ current_code,
+ error_message,
+ strategy_name,
+ thread_id,
+ debug_iteration
+ )
+
+ if not debugged_code:
+ thread_print("β Debug AI failed", thread_id, "red")
+ return {"success": False, "error": "Debug failed", "thread_id": thread_id}
+
+ current_code = debugged_code
+ # π Moon Dev: Update to match new debug file location
+ current_file = BACKTEST_DIR / f"T{thread_id:02d}_{strategy_name}_DEBUG_v{debug_iteration}.py"
+ else:
+ thread_print(f"β Max debug iterations reached", thread_id, "red")
+ return {"success": False, "error": "Max debug iterations", "thread_id": thread_id}
+
+ return {"success": True, "thread_id": thread_id}
+
+ except Exception as e:
+ thread_print(f"β FATAL ERROR: {str(e)}", thread_id, "red", attrs=['bold'])
+ return {"success": False, "error": str(e), "thread_id": thread_id}
+
+def main(ideas_file_path=None, run_name=None):
+ """Main parallel processing orchestrator with multi-data testing"""
+ # π Moon Dev: Use custom ideas file if provided
+ global IDEAS_FILE
+ if ideas_file_path:
+ IDEAS_FILE = Path(ideas_file_path)
+
+ cprint(f"\n{'='*60}", "cyan", attrs=['bold'])
+ cprint(f"π Moon Dev's RBI AI v3.0 PARALLEL PROCESSOR + MULTI-DATA π", "cyan", attrs=['bold'])
+ cprint(f"{'='*60}", "cyan", attrs=['bold'])
+
+ cprint(f"\nπ
Date: {TODAY_DATE}", "magenta")
+ cprint(f"π― Target Return: {TARGET_RETURN}%", "green", attrs=['bold'])
+ cprint(f"π Max Parallel Threads: {MAX_PARALLEL_THREADS}", "yellow", attrs=['bold'])
+ cprint(f"π Conda env: {CONDA_ENV}", "cyan")
+ cprint(f"π Data dir: {DATA_DIR}", "magenta")
+ cprint(f"π Ideas file: {IDEAS_FILE}", "magenta")
+ if run_name:
+ cprint(f"π Run Name: {run_name}\n", "green", attrs=['bold'])
+ else:
+ cprint("", "white")
+
+ if not IDEAS_FILE.exists():
+ cprint(f"β Ideas file not found: {IDEAS_FILE}", "red")
+ cprint("β ideas.txt not found! Creating template...", "red")
+ IDEAS_FILE.parent.mkdir(parents=True, exist_ok=True)
+ with open(IDEAS_FILE, 'w') as f:
+ f.write("# Add your trading ideas here (one per line)\n")
+ f.write("# Can be YouTube URLs, PDF links, or text descriptions\n")
+ f.write("# Lines starting with # are ignored\n\n")
+ f.write("Create a simple RSI strategy that buys when RSI < 30 and sells when RSI > 70\n")
+ f.write("Momentum strategy using 20/50 SMA crossover with volume confirmation\n")
+ cprint(f"π Created template ideas.txt at: {IDEAS_FILE}", "yellow")
+ cprint("π‘ Add your trading ideas and run again!", "yellow")
+ return
+
+ with open(IDEAS_FILE, 'r') as f:
+ ideas = [line.strip() for line in f if line.strip() and not line.startswith('#')]
+
+ total_ideas = len(ideas)
+ already_processed = sum(1 for idea in ideas if is_idea_processed(idea))
+ new_ideas = total_ideas - already_processed
+
+ cprint(f"π― Total ideas: {total_ideas}", "cyan")
+ cprint(f"β
Already processed: {already_processed}", "green")
+ cprint(f"π New to process: {new_ideas}\n", "yellow", attrs=['bold'])
+
+ if new_ideas == 0:
+ cprint("π All ideas have been processed!", "green", attrs=['bold'])
+ return
+
+ # Filter out already processed ideas
+ ideas_to_process = [(i, idea) for i, idea in enumerate(ideas) if not is_idea_processed(idea)]
+
+ cprint(f"π Starting parallel processing with {MAX_PARALLEL_THREADS} threads...\n", "cyan", attrs=['bold'])
+
+ start_time = datetime.now()
+
+ # Process ideas in parallel
+ with ThreadPoolExecutor(max_workers=MAX_PARALLEL_THREADS) as executor:
+ # Submit all ideas as futures with thread IDs
+ futures = {
+ executor.submit(process_trading_idea_parallel, idea, thread_id): (thread_id, idea)
+ for thread_id, idea in ideas_to_process
+ }
+
+ # Track results
+ results = []
+ completed = 0
+
+ # Process completed futures as they finish
+ for future in as_completed(futures):
+ thread_id, idea = futures[future]
+ completed += 1
+
+ try:
+ result = future.result()
+ results.append(result)
+
+ with console_lock:
+ cprint(f"\n{'='*60}", "green")
+ cprint(f"β
Thread {thread_id:02d} COMPLETED ({completed}/{len(futures)})", "green", attrs=['bold'])
+ if result.get('success'):
+ if result.get('target_hit'):
+ cprint(f"π― TARGET HIT: {result.get('strategy_name')} @ {result.get('return')}%", "green", attrs=['bold'])
+ else:
+ cprint(f"π Best return: {result.get('return', 'N/A')}%", "yellow")
+ else:
+ cprint(f"β Failed: {result.get('error', 'Unknown error')}", "red")
+ cprint(f"{'='*60}\n", "green")
+
+ except Exception as e:
+ with console_lock:
+ cprint(f"\nβ Thread {thread_id:02d} raised exception: {str(e)}", "red", attrs=['bold'])
+ results.append({"success": False, "thread_id": thread_id, "error": str(e)})
+
+ total_time = (datetime.now() - start_time).total_seconds()
+
+ # Final summary
+ cprint(f"\n{'='*60}", "cyan", attrs=['bold'])
+ cprint(f"π PARALLEL PROCESSING COMPLETE!", "cyan", attrs=['bold'])
+ cprint(f"{'='*60}", "cyan", attrs=['bold'])
+
+ cprint(f"\nβ±οΈ Total time: {total_time:.2f}s", "magenta")
+ cprint(f"π Ideas processed: {len(results)}", "cyan")
+
+ successful = [r for r in results if r.get('success')]
+ failed = [r for r in results if not r.get('success')]
+ targets_hit = [r for r in successful if r.get('target_hit')]
+
+ cprint(f"β
Successful: {len(successful)}", "green")
+ cprint(f"π― Targets hit: {len(targets_hit)}", "green", attrs=['bold'])
+ cprint(f"β Failed: {len(failed)}", "red")
+
+ if targets_hit:
+ cprint(f"\nπ STRATEGIES THAT HIT TARGET {TARGET_RETURN}%:", "green", attrs=['bold'])
+ for r in targets_hit:
+ cprint(f" β’ {r.get('strategy_name')}: {r.get('return')}%", "green")
+
+ cprint(f"\n⨠All results saved to: {TODAY_DIR}", "cyan")
+ cprint(f"{'='*60}\n", "cyan", attrs=['bold'])
+
+if __name__ == "__main__":
+    # π Moon Dev: Parse command-line arguments for custom ideas file and run name
+    import argparse  # local import in case argparse isn't among the module-level imports
+ parser = argparse.ArgumentParser(description="Moon Dev's RBI Agent - Parallel Backtest Processor")
+ parser.add_argument('--ideas-file', type=str, help='Custom ideas file path (overrides default ideas.txt)')
+ parser.add_argument('--run-name', type=str, help='Run name for folder organization')
+ args = parser.parse_args()
+
+ # Call main with optional parameters
+ main(ideas_file_path=args.ideas_file, run_name=args.run_name)
diff --git a/src/agents/sentiment_agent.py b/src/agents/sentiment_agent.py
index 92aab15df..111a006fc 100644
--- a/src/agents/sentiment_agent.py
+++ b/src/agents/sentiment_agent.py
@@ -314,7 +314,7 @@ def init_twitter_client(self):
try:
if not os.path.exists("cookies.json"):
cprint("β No cookies.json found! Please run twitter_login.py first", "red")
- sys.exit(1)
+ return None # Changed from sys.exit(1) to prevent killing orchestrator
cprint("π Moon Dev's Sentiment Agent starting up...", "cyan")
client = Client()
@@ -328,7 +328,7 @@ def init_twitter_client(self):
os.remove("cookies.json")
cprint("ποΈ Removed invalid cookies file", "yellow")
cprint("π Please run twitter_login.py again", "yellow")
- sys.exit(1)
+ return None # Changed from sys.exit(1) to prevent killing orchestrator
async def get_tweets(self, query):
"""Get tweets with proper error handling"""
@@ -513,4 +513,4 @@ def run(self):
cprint("\nπ Moon Dev's Sentiment Agent shutting down gracefully...", "yellow")
except Exception as e:
cprint(f"\nβ Fatal error: {str(e)}", "red")
- sys.exit(1)
+ # sys.exit(1) - Commented out to prevent killing orchestrator when run as module
diff --git a/src/agents/trading_agent.py b/src/agents/trading_agent.py
index 569f53794..ba7c6eb66 100644
--- a/src/agents/trading_agent.py
+++ b/src/agents/trading_agent.py
@@ -77,8 +77,13 @@
# False = Single model fast execution (~10s per token)
# π TRADING MODE SETTINGS
-LONG_ONLY = True # True = Long positions only (Solana on-chain, no shorting available)
- # False = Long & Short positions (HyperLiquid perpetuals)
+EXCHANGE = 'binance' # Options: 'solana', 'hyperliquid', 'binance'
+ # 'solana' = Long positions only (Solana on-chain)
+ # 'hyperliquid' = Long & Short positions (HyperLiquid perpetuals)
+ # 'binance' = Spot trading on Binance
+
+LONG_ONLY = True # True = Long positions only (Solana/Binance spot)
+ # False = Long & Short positions (HyperLiquid perpetuals only)
#
# When LONG_ONLY = True:
# - "Buy" = Opens/maintains long position
@@ -91,40 +96,90 @@
# - Full long/short capability (for HyperLiquid)
# π€ SINGLE MODEL SETTINGS (only used when USE_SWARM_MODE = False)
-AI_MODEL_TYPE = 'xai' # Options: 'groq', 'openai', 'claude', 'deepseek', 'xai', 'ollama'
-AI_MODEL_NAME = None # None = use default, or specify: 'grok-4-fast-reasoning', 'claude-3-5-sonnet-latest', etc.
+# ALSO used for portfolio allocation when swarm finds BUY signals!
+AI_MODEL_TYPE = 'openrouter' # Options: 'groq', 'openai', 'claude', 'deepseek', 'xai', 'ollama', 'openrouter'
+AI_MODEL_NAME = 'meta-llama/llama-3.1-405b-instruct' # Using OpenRouter Llama for portfolio allocation (fast & reliable)
AI_TEMPERATURE = 0.7 # Creativity vs precision (0-1)
AI_MAX_TOKENS = 1024 # Max tokens for AI response
-# π° POSITION SIZING & RISK MANAGEMENT
-usd_size = 25 # Target position size in USD
-max_usd_order_size = 3 # Maximum order chunk size in USD
-MAX_POSITION_PERCENTAGE = 30 # Max % of portfolio per position (0-100)
-CASH_PERCENTAGE = 20 # Minimum % to keep in USDC cash buffer (0-100)
+# π° POSITION SIZING & RISK MANAGEMENT (OPTION B - AGGRESSIVE)
+usd_size = 200 # Target position size in USD (up from $25)
+max_usd_order_size = 50 # Maximum order chunk size in USD (up from $3)
+MAX_POSITION_PERCENTAGE = 40 # Max % of portfolio per position (up from 30%)
+CASH_PERCENTAGE = 15 # Minimum % to keep in USDT cash buffer (down from 20%)
# π MARKET DATA COLLECTION
DAYSBACK_4_DATA = 3 # Days of historical data to fetch
DATA_TIMEFRAME = '1H' # Bar timeframe: 1m, 3m, 5m, 15m, 30m, 1H, 2H, 4H, 6H, 8H, 12H, 1D, 3D, 1W, 1M
# Current: 3 days @ 1H = ~72 bars
# For 15-min: '15m' = ~288 bars
+
+# π― MULTI-TIMEFRAME ANALYSIS (ADVANCED)
+USE_MULTI_TIMEFRAME = True # True = Analyze multiple timeframes for each token (more comprehensive)
+ # False = Single timeframe analysis (faster)
+MULTI_TIMEFRAMES = ['15m', '1H', '4H'] # List of timeframes for multi-timeframe analysis
+ # 15m = Short-term entry/exit signals
+ # 1H = Medium-term trend confirmation
+ # 4H = Long-term trend direction
SAVE_OHLCV_DATA = False # True = save data permanently, False = temp data only
+# π‘οΈ RISK MANAGEMENT - CIRCUIT BREAKERS
+USE_RISK_CHECKS = True # True = Enable circuit breakers before each trade cycle
+MAX_TOTAL_POSITION_USD = 1600 # Maximum total USD across all positions (8 tokens Γ $200)
+MAX_POSITION_LOSS_PERCENT = 15 # Max % loss on any single position before force-close
+MAX_PORTFOLIO_LOSS_PERCENT = 10 # Max % loss on total portfolio before halt trading
+MIN_USDT_BALANCE = 1 # Minimum USDT balance to maintain (lowered for moderate trading)
+
# β‘ TRADING EXECUTION SETTINGS
slippage = 199 # Slippage tolerance (199 = ~2%)
-SLEEP_BETWEEN_RUNS_MINUTES = 15 # Minutes between trading cycles
+SLEEP_BETWEEN_RUNS_MINUTES = 5 # Minutes between trading cycles (faster = more opportunities)
+
+# π― FEE OPTIMIZATION SETTINGS (Option B - Moderate + Stop-Loss)
+MIN_CONFIDENCE_FOR_TRADE = 62 # Minimum confidence % to execute BUY signal (filters weak signals)
+AUTO_TAKE_PROFIT_PERCENT = 0  # Auto-exit positions at this % profit, e.g. 3 (0 = disabled) - TEMPORARILY DISABLED DUE TO API HANG
+AUTO_STOP_LOSS_PERCENT = 0  # Auto-exit positions at this % loss, e.g. 5 (0 = disabled) - TEMPORARILY DISABLED DUE TO API HANG
# π― TOKEN CONFIGURATION
-USDC_ADDRESS = "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v" # Never trade
-SOL_ADDRESS = "So11111111111111111111111111111111111111111" # Never trade
-EXCLUDED_TOKENS = [USDC_ADDRESS, SOL_ADDRESS]
-
-# β οΈ IMPORTANT: The swarm will analyze ALL tokens in this list (one at a time)
-# Each token takes ~45-60 seconds in swarm mode
-# Comment out tokens you don't want to trade (add # at start of line)
-MONITORED_TOKENS = [
- '9BB6NFEcjBCtnNLFko2FqVQBq8HHM13kCyYcdQbgpump', # π¬οΈ FART (DISABLED)
- #'DitHyRMQiSDhn5cnKMJV2CDDt6sVct96YrECiM49pump', # π housecoin (ACTIVE)
-]
+if EXCHANGE == 'solana':
+ USDC_ADDRESS = "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v" # Never trade
+ SOL_ADDRESS = "So11111111111111111111111111111111111111111" # Never trade
+ EXCLUDED_TOKENS = [USDC_ADDRESS, SOL_ADDRESS]
+
+ # β οΈ IMPORTANT: The swarm will analyze ALL tokens in this list (one at a time)
+ # Each token takes ~45-60 seconds in swarm mode
+ # Comment out tokens you don't want to trade (add # at start of line)
+ MONITORED_TOKENS = [
+ '9BB6NFEcjBCtnNLFko2FqVQBq8HHM13kCyYcdQbgpump', # π¬οΈ FART (DISABLED)
+ #'DitHyRMQiSDhn5cnKMJV2CDDt6sVct96YrECiM49pump', # π housecoin (ACTIVE)
+ ]
+
+elif EXCHANGE == 'binance':
+ # For Binance, we use trading pair symbols (e.g., BTCUSDT)
+ USDC_ADDRESS = "USDT" # Treat USDT as our cash position on Binance
+ EXCLUDED_TOKENS = [USDC_ADDRESS]
+
+ MONITORED_TOKENS = [
+ 'BTCUSDT', # Bitcoin
+ 'ETHUSDT', # Ethereum
+ 'BNBUSDT', # Binance Coin
+ 'SOLUSDT', # Solana
+ 'ADAUSDT', # Cardano
+ 'XRPUSDT', # Ripple
+ 'DOGEUSDT', # Dogecoin
+ 'DOTUSDT', # Polkadot
+ ]
+
+elif EXCHANGE == 'hyperliquid':
+ # HyperLiquid uses token symbols
+ USDC_ADDRESS = "USDC"
+ EXCLUDED_TOKENS = []
+ MONITORED_TOKENS = [
+ 'BTC', # Bitcoin perpetual
+ 'ETH', # Ethereum perpetual
+ ]
+
+else:
+ raise ValueError(f"β Unsupported exchange: {EXCHANGE}. Choose 'solana', 'hyperliquid', or 'binance'")
# Example: To trade multiple tokens, uncomment the ones you want:
# MONITORED_TOKENS = [
@@ -169,6 +224,8 @@
ALLOCATION_PROMPT = """
You are Moon Dev's Portfolio Allocation Assistant π
+CRITICAL: Your response MUST be ONLY a JSON object. No explanations, no code, no text before or after.
+
Given the total portfolio size and trading recommendations, allocate capital efficiently.
Consider:
1. Position sizing based on confidence levels
@@ -176,20 +233,27 @@
3. Keep cash buffer as specified
4. Maximum allocation per position
-Format your response as a Python dictionary:
-{
- "token_address": allocated_amount, # In USD
- ...
- "USDC_ADDRESS": remaining_cash # Always use USDC_ADDRESS for cash
-}
+RESPOND WITH ONLY THIS JSON FORMAT (nothing else):
+{{
+ "token_address": allocated_amount_in_usd,
+ "USDT": remaining_cash_in_usd
+}}
+
+Example valid response:
+{{
+ "BTCUSDT": 50.0,
+ "ETHUSDT": 40.0,
+ "USDT": 110.0
+}}
-Remember:
+Rules:
- Total allocations must not exceed total_size
- Higher confidence should get larger allocations
- Never allocate more than {MAX_POSITION_PERCENTAGE}% to a single position
-- Keep at least {CASH_PERCENTAGE}% in USDC as safety buffer
+- Keep at least {CASH_PERCENTAGE}% in USDT as safety buffer
- Only allocate to BUY recommendations
-- Cash must be stored as USDC using USDC_ADDRESS: {USDC_ADDRESS}
+- DO NOT include explanations or Python code
+- RESPOND WITH ONLY THE JSON OBJECT
"""
SWARM_TRADING_PROMPT = """You are an expert cryptocurrency trading AI analyzing market data.
@@ -231,6 +295,14 @@
from src.models.model_factory import model_factory
from src.agents.swarm_agent import SwarmAgent
+# Import exchange-specific modules
+try:
+ from src import binance_nice_funcs as bn
+ from src.data.binance_ohlcv_collector import binance_collect_all_tokens, binance_collect_multi_timeframe
+ BINANCE_AVAILABLE = True
+except ImportError:
+ BINANCE_AVAILABLE = False
+
# Load environment variables
load_dotenv()
@@ -273,13 +345,25 @@ def __init__(self):
# Show trading mode
cprint("π Trading Mode:", "yellow", attrs=['bold'])
- if LONG_ONLY:
- cprint(" π LONG ONLY - Solana on-chain (no shorting)", "cyan")
+ if EXCHANGE == 'solana':
+ cprint(" π SOLANA - Long positions only (on-chain)", "cyan")
cprint(" π‘ SELL signals close positions, can't open shorts", "white")
- else:
- cprint(" β‘ LONG/SHORT - HyperLiquid perpetuals", "green")
+ elif EXCHANGE == 'binance':
+ if not BINANCE_AVAILABLE:
+ cprint(" β BINANCE - python-binance not available!", "red")
+ cprint(" π‘ Run: pip install python-binance", "yellow")
+ sys.exit(1)
+ cprint(" π¦ BINANCE - Spot trading", "green")
+ cprint(" π‘ SELL signals close positions to USDT", "white")
+ elif EXCHANGE == 'hyperliquid':
+ cprint(" β‘ HYPERLIQUID - Long & Short perpetuals", "green")
cprint(" π‘ SELL signals can close longs OR open shorts", "white")
+ if LONG_ONLY and EXCHANGE == 'hyperliquid':
+ cprint(" β οΈ WARNING: LONG_ONLY=True but using HyperLiquid (supports shorts)", "yellow")
+ elif not LONG_ONLY and EXCHANGE != 'hyperliquid':
+ cprint(" β οΈ WARNING: LONG_ONLY=False only supported on HyperLiquid", "yellow")
+
cprint("\nπ€ Moon Dev's LLM Trading Agent initialized!", "green")
def chat_with_ai(self, system_prompt, user_content):
@@ -301,14 +385,106 @@ def chat_with_ai(self, system_prompt, user_content):
cprint(f"β AI model error: {e}", "red")
return None
+ def check_risk_limits(self):
+ """π‘οΈ Risk Management - Circuit Breakers"""
+ if not USE_RISK_CHECKS:
+ return True # Risk checks disabled, allow trading
+
+ try:
+ cprint("\nπ‘οΈ Running Risk Checks (Circuit Breakers)...", "cyan", attrs=['bold'])
+
+ # Get all account balances
+ total_position_usd = 0
+ usdt_balance = 0
+ position_details = []
+
+ if EXCHANGE == 'binance':
+ # Get Binance account balances
+ for symbol in MONITORED_TOKENS:
+ try:
+ position_usd = bn.binance_get_token_balance_usd(symbol)
+ if position_usd > 0:
+ total_position_usd += position_usd
+ position_details.append((symbol, position_usd))
+ cprint(f" π° {symbol}: ${position_usd:.2f}", "yellow")
+ except Exception as e:
+ cprint(f" β οΈ Could not get balance for {symbol}: {e}", "yellow")
+
+ # Get USDT balance
+ try:
+ from binance.client import Client
+ client = Client(os.getenv("BINANCE_API_KEY"), os.getenv("BINANCE_SECRET_KEY"))
+ usdt_asset = client.get_asset_balance(asset='USDT')
+ usdt_balance = float(usdt_asset['free']) + float(usdt_asset['locked'])
+ cprint(f" π΅ USDT Balance: ${usdt_balance:.2f}", "green")
+ except Exception as e:
+ cprint(f" β οΈ Could not get USDT balance: {e}", "yellow")
+
+ # Check #1: Minimum USDT Balance
+ if usdt_balance < MIN_USDT_BALANCE:
+ cprint(f"\nβ CIRCUIT BREAKER: USDT balance (${usdt_balance:.2f}) below minimum (${MIN_USDT_BALANCE})", "white", "on_red")
+ cprint("π Trading halted to preserve capital", "red")
+ return False
+
+ # Check #2: Maximum Total Position Size
+ if total_position_usd > MAX_TOTAL_POSITION_USD:
+ cprint(f"\nβ CIRCUIT BREAKER: Total position (${total_position_usd:.2f}) exceeds max (${MAX_TOTAL_POSITION_USD})", "white", "on_red")
+ cprint("π Trading halted - positions too large", "red")
+ return False
+
+ # Check #3: Portfolio Loss Percentage (if we have historical data)
+ portfolio_value = total_position_usd + usdt_balance
+ cprint(f"\nπΌ Total Portfolio Value: ${portfolio_value:.2f}", "cyan")
+
+ cprint("\nβ
All risk checks passed - trading allowed", "green", attrs=['bold'])
+ return True
+
+ except Exception as e:
+ cprint(f"β Error in risk checks: {e}", "red")
+ # On error, default to safe side - halt trading
+ return False
+
def _format_market_data_for_swarm(self, token, market_data):
"""Format market data into a clean, readable format for swarm analysis"""
try:
# Print market data visibility for confirmation
cprint(f"\nπ MARKET DATA RECEIVED FOR {token[:8]}...", "cyan", attrs=['bold'])
- # Check if market_data is a DataFrame
- if isinstance(market_data, pd.DataFrame):
+ # Check if multi-timeframe data (dict of DataFrames)
+ if isinstance(market_data, dict) and all(isinstance(v, pd.DataFrame) for v in market_data.values() if not isinstance(v, (str, list))):
+ cprint(f"π― Multi-timeframe data received: {list(market_data.keys())}", "green")
+
+ # Build multi-timeframe analysis prompt
+ formatted = f"TOKEN: {token}\n"
+ formatted += f"MULTI-TIMEFRAME ANALYSIS ({len(market_data)} timeframes)\n"
+ formatted += "="*80 + "\n\n"
+
+ # Add each timeframe
+ for timeframe, df in market_data.items():
+ if not isinstance(df, pd.DataFrame):
+ continue
+
+ cprint(f" π {timeframe}: {len(df)} bars from {df.index[0]} to {df.index[-1]}", "yellow")
+
+ formatted += f"""
+TIMEFRAME: {timeframe}
+TOTAL BARS: {len(df)}
+DATE RANGE: {df.index[0]} to {df.index[-1]}
+
+RECENT PRICE ACTION (Last 5 bars):
+{df.tail(5).to_string()}
+
+"""
+
+ formatted += "\n" + "="*80 + "\n"
+ formatted += "ANALYSIS GUIDELINES:\n"
+ formatted += "- 15m timeframe: Look for short-term entry/exit signals and momentum shifts\n"
+ formatted += "- 1H timeframe: Confirm medium-term trend and support/resistance levels\n"
+ formatted += "- 4H timeframe: Identify long-term trend direction and major reversals\n"
+ formatted += "- Consider alignment across timeframes for higher confidence signals\n"
+
+ # Check if single timeframe data (DataFrame)
+ elif isinstance(market_data, pd.DataFrame):
cprint(f"β
DataFrame received: {len(market_data)} bars", "green")
cprint(f"π
Date range: {market_data.index[0]} to {market_data.index[-1]}", "yellow")
cprint(f"π Timeframe: {DATA_TIMEFRAME}", "yellow")
@@ -335,11 +511,11 @@ def _format_market_data_for_swarm(self, token, market_data):
{market_data.to_string()}
"""
else:
- # If it's not a DataFrame, show what we got
- cprint(f"β οΈ Market data is not a DataFrame: {type(market_data)}", "yellow")
+ # If it's not a DataFrame or multi-timeframe dict, show what we got
+ cprint(f"β οΈ Market data format unexpected: {type(market_data)}", "yellow")
formatted = f"TOKEN: {token}\nMARKET DATA:\n{str(market_data)}"
- # Add strategy signals if available
+ # Add strategy signals if available (for dict-based data)
if isinstance(market_data, dict) and 'strategy_signals' in market_data:
formatted += f"\n\nSTRATEGY SIGNALS:\n{json.dumps(market_data['strategy_signals'], indent=2)}"
@@ -590,16 +766,19 @@ def execute_allocations(self, allocation_dict):
print("\nπ Moon Dev executing portfolio allocations...")
for token, amount in allocation_dict.items():
- # Skip USDC and other excluded tokens
- if token in EXCLUDED_TOKENS:
+ # Skip stablecoin allocation (treated as cash)
+ if token == USDC_ADDRESS:
print(f"π΅ Keeping ${amount:.2f} in {token}")
continue
print(f"\nπ― Processing allocation for {token}...")
try:
- # Get current position value
- current_position = n.get_token_balance_usd(token)
+ # Get current position value based on exchange
+ if EXCHANGE == 'binance':
+ current_position = bn.binance_get_token_balance_usd(token)
+ else:
+ current_position = n.get_token_balance_usd(token)
target_allocation = amount
print(f"π― Target allocation: ${target_allocation:.2f} USD")
@@ -607,7 +786,10 @@ def execute_allocations(self, allocation_dict):
if current_position < target_allocation:
print(f"β¨ Executing entry for {token}")
- n.ai_entry(token, amount)
+ if EXCHANGE == 'binance':
+ bn.binance_ai_entry(token, amount)
+ else:
+ n.ai_entry(token, amount)
print(f"β
Entry complete for {token}")
else:
print(f"βΈοΈ Position already at target size for {token}")
@@ -636,21 +818,96 @@ def handle_exits(self):
action = row['action']
# Check if we have a position
- current_position = n.get_token_balance_usd(token)
+ if EXCHANGE == 'binance':
+ current_position = bn.binance_get_token_balance_usd(token)
+ else:
+ current_position = n.get_token_balance_usd(token)
cprint(f"\n{'='*60}", "cyan")
cprint(f"π― Token: {token_short}", "cyan", attrs=['bold'])
cprint(f"π€ Swarm Signal: {action} ({row['confidence']}% confidence)", "yellow", attrs=['bold'])
cprint(f"πΌ Current Position: ${current_position:.2f}", "white")
+
+ # π― Check for auto take-profit AND stop-loss (Option C)
+ should_take_profit = False
+ should_stop_loss = False
+ if current_position > 0 and (AUTO_TAKE_PROFIT_PERCENT > 0 or AUTO_STOP_LOSS_PERCENT > 0):
+ try:
+ # Get current price and calculate profit %
+ if EXCHANGE == 'binance':
+ from binance.client import Client
+ client = Client(os.getenv("BINANCE_API_KEY"), os.getenv("BINANCE_SECRET_KEY"))
+
+ # Get current price
+ ticker = client.get_symbol_ticker(symbol=token)
+ current_price = float(ticker['price'])
+
+ # Get all trades for this symbol to find average entry price
+ trades = client.get_my_trades(symbol=token, limit=100)
+ if trades:
+ # Calculate weighted average entry price
+ total_qty = 0
+ total_cost = 0
+ for trade in trades:
+ if trade['isBuyer']: # Only count buy trades
+ qty = float(trade['qty'])
+ price = float(trade['price'])
+ total_qty += qty
+ total_cost += qty * price
+
+ if total_qty > 0:
+ avg_entry_price = total_cost / total_qty
+ profit_pct = ((current_price / avg_entry_price) - 1) * 100
+
+ cprint(f"π Entry: ${avg_entry_price:.4f} | Current: ${current_price:.4f} | P&L: {profit_pct:+.2f}%", "yellow")
+
+ # Check take-profit
+ if profit_pct >= AUTO_TAKE_PROFIT_PERCENT and AUTO_TAKE_PROFIT_PERCENT > 0:
+ should_take_profit = True
+ cprint(f"π― AUTO TAKE-PROFIT TRIGGERED! (+{profit_pct:.2f}% >= +{AUTO_TAKE_PROFIT_PERCENT}%)", "white", "on_green")
+
+ # Check stop-loss
+ elif profit_pct <= -AUTO_STOP_LOSS_PERCENT and AUTO_STOP_LOSS_PERCENT > 0:
+ should_stop_loss = True
+ cprint(f"π AUTO STOP-LOSS TRIGGERED! ({profit_pct:.2f}% <= -{AUTO_STOP_LOSS_PERCENT}%)", "white", "on_red")
+ except Exception as e:
+ cprint(f"β οΈ Could not calculate P&L: {e}", "yellow")
+
cprint(f"{'='*60}", "cyan")
if current_position > 0:
+ # π― Take-profit and stop-loss take priority over all other signals
+ if should_take_profit:
+ cprint(f"π° TAKE-PROFIT TRIGGERED - CLOSING POSITION FOR PROFIT", "white", "on_green")
+ try:
+ cprint(f"π Executing chunk_kill (${max_usd_order_size} chunks)...", "yellow")
+ if EXCHANGE == 'binance':
+ bn.binance_chunk_kill(token, max_usd_order_size, slippage)
+ else:
+ n.chunk_kill(token, max_usd_order_size, slippage)
+                            cprint(f"✅ Position closed with profit!", "white", "on_green")
+ except Exception as e:
+ cprint(f"β Error closing position: {str(e)}", "white", "on_red")
+ elif should_stop_loss:
+ cprint(f"π STOP-LOSS TRIGGERED - CLOSING POSITION TO LIMIT LOSS", "white", "on_red")
+ try:
+ cprint(f"π Executing chunk_kill (${max_usd_order_size} chunks)...", "yellow")
+ if EXCHANGE == 'binance':
+ bn.binance_chunk_kill(token, max_usd_order_size, slippage)
+ else:
+ n.chunk_kill(token, max_usd_order_size, slippage)
+                            cprint(f"✅ Position closed - loss limited to -{AUTO_STOP_LOSS_PERCENT}%", "white", "on_yellow")
+ except Exception as e:
+ cprint(f"β Error closing position: {str(e)}", "white", "on_red")
# We have a position - take action based on signal
- if action == "SELL":
+ elif action == "SELL":
cprint(f"π¨ SELL signal with position - CLOSING POSITION", "white", "on_red")
try:
cprint(f"π Executing chunk_kill (${max_usd_order_size} chunks)...", "yellow")
- n.chunk_kill(token, max_usd_order_size, slippage)
+ if EXCHANGE == 'binance':
+ bn.binance_chunk_kill(token, max_usd_order_size, slippage)
+ else:
+ n.chunk_kill(token, max_usd_order_size, slippage)
cprint(f"β
Position closed successfully!", "white", "on_green")
except Exception as e:
cprint(f"β Error closing position: {str(e)}", "white", "on_red")
@@ -685,24 +942,42 @@ def parse_allocation_response(self, response):
print("π Raw response received:")
print(response)
-
- # Find the JSON block between curly braces
- start = response.find('{')
- end = response.rfind('}') + 1
- if start == -1 or end == 0:
+
+ # Find ALL JSON blocks using regex
+ import re
+ json_pattern = r'\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}'
+ json_matches = re.findall(json_pattern, response)
+
+ if not json_matches:
raise ValueError("No JSON object found in response")
-
- json_str = response[start:end]
-
- # More aggressive JSON cleaning
- json_str = (json_str
- .replace('\n', '') # Remove newlines
- .replace(' ', '') # Remove indentation
- .replace('\t', '') # Remove tabs
- .replace('\\n', '') # Remove escaped newlines
- .replace(' ', '') # Remove all spaces
- .strip()) # Remove leading/trailing whitespace
-
+
+ # Try parsing each JSON block, starting from the LAST one (most likely to be the final allocation)
+ json_str = None
+ for candidate in reversed(json_matches):
+ try:
+ # Clean the candidate
+ cleaned = re.sub(r'#[^\n]*', '', candidate) # Remove Python comments
+ cleaned = re.sub(r'//[^\n]*', '', cleaned) # Remove JS comments
+ cleaned = (cleaned
+ .replace('\n', '')
+ .replace(' ', '')
+ .replace('\t', '')
+ .replace('\\n', '')
+ .replace(' ', '')
+ .strip())
+
+ # Try to parse it
+ test_parse = json.loads(cleaned)
+ # If we get here, it's valid JSON - use it!
+ json_str = cleaned
+                    print(f"\n✅ Found valid JSON allocation (tried {len(json_matches) - json_matches.index(candidate)} candidates)")
+ break
+ except json.JSONDecodeError:
+ continue # Try next candidate
+
+        if not json_str:
+            raise ValueError("No valid JSON object could be parsed from response")
+
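+        # Illustrative walk-through (not from a real model run): for a response like
+        #   'Reasoning: {"notes": "..."} ... FINAL ALLOCATION: {"BTCUSDT": 120.0, "USDC": 80.0}'
+        # the regex captures both objects, and the reversed() scan parses the last one first,
+        # so the final allocation dict wins over any earlier JSON the model emitted.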
print("\nπ§Ή Cleaned JSON string:")
print(json_str)
@@ -768,17 +1043,45 @@ def run_trading_cycle(self, strategy_signals=None):
try:
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
cprint(f"\nβ° AI Agent Run Starting at {current_time}", "white", "on_green")
-
+
+ # π‘οΈ Run risk checks FIRST - before any trading analysis
+ if not self.check_risk_limits():
+ cprint("\nβ TRADING HALTED - Risk limits breached", "white", "on_red")
+ cprint("π‘οΈ System will retry next cycle after conditions improve", "yellow")
+ return
+
# Collect OHLCV data for all tokens using this agent's config
cprint("π Collecting market data...", "white", "on_blue")
cprint(f"π― Tokens to collect: {MONITORED_TOKENS}", "yellow")
-        cprint(f"📅 Settings: {DAYSBACK_4_DATA} days @ {DATA_TIMEFRAME}", "yellow")
- market_data = collect_all_tokens(
- tokens=MONITORED_TOKENS,
- days_back=DAYSBACK_4_DATA,
- timeframe=DATA_TIMEFRAME
- )
+ if USE_MULTI_TIMEFRAME:
+ cprint(f"β±οΈ Multi-timeframe mode: {MULTI_TIMEFRAMES}", "yellow")
+ else:
+            cprint(f"📅 Settings: {DAYSBACK_4_DATA} days @ {DATA_TIMEFRAME}", "yellow")
+
+ # Use appropriate data collector based on exchange and mode
+ if EXCHANGE == 'binance':
+ if USE_MULTI_TIMEFRAME:
+ # Multi-timeframe collection
+ market_data = binance_collect_multi_timeframe(
+ symbols=MONITORED_TOKENS,
+ days_back=DAYSBACK_4_DATA,
+ timeframes=MULTI_TIMEFRAMES
+ )
+ else:
+ # Single timeframe collection
+ market_data = binance_collect_all_tokens(
+ symbols=MONITORED_TOKENS,
+ days_back=DAYSBACK_4_DATA,
+ timeframe=DATA_TIMEFRAME
+ )
+ else:
+ # Default to Solana collector for solana/hyperliquid (single timeframe only for now)
+ market_data = collect_all_tokens(
+ tokens=MONITORED_TOKENS,
+ days_back=DAYSBACK_4_DATA,
+ timeframe=DATA_TIMEFRAME
+ )
cprint(f"π¦ Market data received for {len(market_data)} tokens", "green")
if len(market_data) == 0:
@@ -808,7 +1111,16 @@ def run_trading_cycle(self, strategy_signals=None):
self.handle_exits()
# Portfolio allocation (only run if there are BUY recommendations)
- buy_recommendations = self.recommendations_df[self.recommendations_df['action'] == 'BUY']
+ # π― Filter for minimum confidence threshold to avoid weak signals
+ buy_recommendations = self.recommendations_df[
+ (self.recommendations_df['action'] == 'BUY') &
+ (self.recommendations_df['confidence'] >= MIN_CONFIDENCE_FOR_TRADE)
+ ]
+
+ # Show filtered vs total BUY signals
+ total_buys = len(self.recommendations_df[self.recommendations_df['action'] == 'BUY'])
+ if total_buys > len(buy_recommendations):
+ cprint(f"\nπ {total_buys} BUY signals found, {total_buys - len(buy_recommendations)} filtered out (confidence < {MIN_CONFIDENCE_FOR_TRADE}%)", "yellow")
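+        # Illustrative: with MIN_CONFIDENCE_FOR_TRADE = 70, a BUY at 65% confidence is dropped
+        # here, while a BUY at 80% flows through to portfolio allocation below.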
if len(buy_recommendations) > 0:
cprint(f"\nπ° Found {len(buy_recommendations)} BUY signal(s) - Calculating portfolio allocation...", "white", "on_green")
diff --git a/src/binance_nice_funcs.py b/src/binance_nice_funcs.py
new file mode 100644
index 000000000..0cc27a6f2
--- /dev/null
+++ b/src/binance_nice_funcs.py
@@ -0,0 +1,351 @@
+"""
+π Moon Dev's Binance Functions - Binance trading integration
+Built with love by Moon Dev π
+"""
+
+import os
+import sys
+import time
+import json
+from datetime import datetime, timedelta
+from termcolor import colored, cprint
+import pandas as pd
+from dotenv import load_dotenv
+
+# Load environment variables
+load_dotenv()
+
+# Binance imports
+try:
+ from binance.client import Client
+ from binance.exceptions import BinanceAPIException, BinanceOrderException
+except ImportError:
+ print("β python-binance not installed. Run: pip install python-binance")
+ sys.exit(1)
+
+# Get Binance API keys from environment
+BINANCE_API_KEY = os.getenv("BINANCE_API_KEY")
+BINANCE_SECRET_KEY = os.getenv("BINANCE_SECRET_KEY")
+
+if not BINANCE_API_KEY or not BINANCE_SECRET_KEY:
+ raise ValueError("π¨ BINANCE_API_KEY and BINANCE_SECRET_KEY not found in environment variables!")
+
+# Initialize Binance client with timeout (prevents API hangs)
+client = Client(BINANCE_API_KEY, BINANCE_SECRET_KEY)
+# Set request timeout to 10 seconds to prevent hanging
+client.REQUEST_TIMEOUT = 10
+
+# Configuration from existing config
+from src.config import usd_size, max_usd_order_size, slippage
+
+# Binance precision requirements (stepSize from LOT_SIZE filter)
+BINANCE_PRECISION = {
+ 'BTCUSDT': 0.00001,
+ 'ETHUSDT': 0.0001,
+ 'BNBUSDT': 0.001,
+ 'SOLUSDT': 0.001,
+ 'ADAUSDT': 0.1,
+ 'XRPUSDT': 0.1,
+ 'DOGEUSDT': 1.0,
+ 'DOTUSDT': 0.01,
+}
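+# NOTE (illustrative, not exhaustive): the step sizes above are hardcoded assumptions.
+# The authoritative value is the LOT_SIZE filter on each symbol, which could be
+# fetched dynamically with something like:
+#   info = client.get_symbol_info('BTCUSDT')
+#   step = next(f['stepSize'] for f in info['filters'] if f['filterType'] == 'LOT_SIZE')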
+
+def round_quantity_to_precision(symbol, quantity):
+ """Round quantity to Binance's precision requirements"""
+ if symbol not in BINANCE_PRECISION:
+ # Default: round to 6 decimals if symbol not in our list
+ return float(f"{quantity:.6f}")
+
+ step_size = BINANCE_PRECISION[symbol]
+
+ # Calculate number of decimal places from step size
+ if step_size >= 1:
+ decimals = 0
+ else:
+ decimals = len(str(step_size).split('.')[-1].rstrip('0'))
+
+ # Round to the appropriate precision
+ rounded = round(quantity / step_size) * step_size
+ rounded = round(rounded, decimals)
+
+ # Format to avoid scientific notation (force decimal format)
+ return float(f"{rounded:.{decimals}f}")
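+
+# Example behaviour of the helper above (values computed from the table, for illustration):
+#   round_quantity_to_precision('BTCUSDT', 0.000123456) -> 0.00012   (stepSize 0.00001)
+#   round_quantity_to_precision('SOLUSDT', 1.23456)     -> 1.235     (stepSize 0.001)
+#   round_quantity_to_precision('DOGEUSDT', 123.7)      -> 124.0     (stepSize 1.0)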
+
+def binance_market_buy(symbol, quantity, slippage_pct=0.02):
+ """Execute market buy order on Binance"""
+ try:
+ # Round quantity to Binance's precision requirements
+ quantity = round_quantity_to_precision(symbol, quantity)
+
+ cprint(f"π’ Placing MARKET BUY: {quantity} {symbol} with {slippage_pct*100}% slippage tolerance", "green")
+
+ # Create market buy order
+ order = client.order_market_buy(
+ symbol=symbol,
+ quantity=quantity
+ )
+
+        cprint(f"✅ BUY ORDER FILLED: {order['executedQty']} {symbol} for ${order['cummulativeQuoteQty']} total", "green")
+ return order
+
+ except BinanceAPIException as e:
+ cprint(f"β Binance API Error: {e}", "red")
+ return None
+ except BinanceOrderException as e:
+ cprint(f"β Order Error: {e}", "red")
+ return None
+ except Exception as e:
+ cprint(f"β Unexpected error in market buy: {str(e)}", "red")
+ return None
+
+def binance_market_sell(symbol, quantity, slippage_pct=0.02):
+ """Execute market sell order on Binance"""
+ try:
+ # Round quantity to Binance's precision requirements
+ quantity = round_quantity_to_precision(symbol, quantity)
+
+ cprint(f"π΄ Placing MARKET SELL: {quantity} {symbol} with {slippage_pct*100}% slippage tolerance", "red")
+
+ # Create market sell order
+ order = client.order_market_sell(
+ symbol=symbol,
+ quantity=quantity
+ )
+
+        cprint(f"✅ SELL ORDER FILLED: {order['executedQty']} {symbol} for ${order['cummulativeQuoteQty']} total", "red")
+ return order
+
+ except BinanceAPIException as e:
+ cprint(f"β Binance API Error: {e}", "red")
+ return None
+ except BinanceOrderException as e:
+ cprint(f"β Order Error: {e}", "red")
+ return None
+ except Exception as e:
+ cprint(f"β Unexpected error in market sell: {str(e)}", "red")
+ return None
+
+def binance_get_position(symbol):
+ """Get current position size for a symbol on Binance"""
+ try:
+ # Get account information
+ account = client.get_account()
+
+ # Find the asset in balances
+ asset = symbol.replace('USDT', '').replace('BUSD', '').replace('USDC', '') # Extract base asset
+
+ for balance in account['balances']:
+ if balance['asset'] == asset:
+ free_balance = float(balance['free'])
+ locked_balance = float(balance['locked'])
+ total_balance = free_balance + locked_balance
+ return total_balance
+
+ return 0.0 # No position found
+
+ except Exception as e:
+ cprint(f"β Error getting position for {symbol}: {str(e)}", "red")
+ return 0.0
+
+def binance_token_price(symbol):
+ """Get current price for a symbol on Binance"""
+ try:
+ ticker = client.get_symbol_ticker(symbol=symbol)
+ return float(ticker['price'])
+ except Exception as e:
+ cprint(f"β Error getting price for {symbol}: {str(e)}", "red")
+ return 0.0
+
+def binance_get_token_balance_usd(symbol):
+ """Get USD value of position for a symbol on Binance"""
+ try:
+ position_size = binance_get_position(symbol)
+ price = binance_token_price(symbol)
+ usd_value = position_size * price
+ return usd_value
+
+ except Exception as e:
+ cprint(f"β Error getting balance for {symbol}: {str(e)}", "red")
+ return 0.0
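+
+# Illustrative composition of the helpers above (numbers are made up):
+#   binance_get_position('SOLUSDT')          -> 0.8     (free + locked SOL)
+#   binance_token_price('SOLUSDT')           -> 150.0
+#   binance_get_token_balance_usd('SOLUSDT') -> 120.0   (0.8 * 150.0)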
+
+def binance_ai_entry(symbol, amount):
+ """AI agent entry function for Binance trading π€"""
+ cprint("π€ Moon Dev's AI Trading Agent initiating Binance position entry...", "white", "on_blue")
+
+ # amount passed in is the target allocation (up to 30% of usd_size)
+ target_size = amount
+
+ pos = binance_get_position(symbol)
+ price = binance_token_price(symbol)
+ pos_usd = pos * price
+
+ cprint(f"π― Target allocation: ${target_size:.2f} USD", "white", "on_blue")
+ cprint(f"π Current position: ${pos_usd:.2f} USD", "white", "on_blue")
+
+ # Check if we're already at or above target
+ if pos_usd >= (target_size * 0.97):
+ cprint("β Position already at or above target size!", "white", "on_blue")
+ return
+
+ # Calculate how much more we need to buy
+ size_needed = target_size - pos_usd
+ if size_needed <= 0:
+ cprint("π No additional size needed", "white", "on_blue")
+ return
+
+ # For order execution, we'll chunk into max_usd_order_size pieces
+ if size_needed > max_usd_order_size:
+ chunk_size = max_usd_order_size
+ else:
+ chunk_size = size_needed
+
+ # Convert USD amount to token quantity
+ chunk_quantity = chunk_size / price
+
+ cprint(f"π« Entry chunk size: {chunk_quantity:.6f} {symbol} (${chunk_size:.2f})", "white", "on_blue")
+
+ # Add retry limit to prevent infinite loops
+ max_retries = 5
+ retry_count = 0
+
+ while pos_usd < (target_size * 0.97):
+ cprint(f"π€ AI Agent executing Binance entry for {symbol}...", "white", "on_blue")
+ print(f"Position: {round(pos,6)} | Price: {round(price,4)} | USD Value: ${round(pos_usd,2)}")
+
+ try:
+ # Place market buy order
+ order = binance_market_buy(symbol, chunk_quantity, slippage)
+ if not order:
+ retry_count += 1
+ if retry_count >= max_retries:
+ cprint(f"β Max retries ({max_retries}) reached. Aborting entry for {symbol}.", "red")
+ break
+ cprint(f"β Order failed, retrying in 30 seconds... (attempt {retry_count}/{max_retries})", "white", "on_blue")
+ time.sleep(30)
+ continue
+
+ # Reset retry count on successful order
+ retry_count = 0
+
+ time.sleep(2) # Brief pause after order
+
+ # Update position info
+ pos = binance_get_position(symbol)
+ price = binance_token_price(symbol)
+ pos_usd = pos * price
+
+ # Break if we're at or above target
+ if pos_usd >= (target_size * 0.97):
+ break
+
+ # Recalculate needed size
+ size_needed = target_size - pos_usd
+ if size_needed <= 0:
+ break
+
+ # Determine next chunk size
+ if size_needed > max_usd_order_size:
+ chunk_size = max_usd_order_size
+ else:
+ chunk_size = size_needed
+
+ chunk_quantity = chunk_size / price
+
+ except Exception as e:
+ try:
+ cprint("π AI Agent retrying Binance order in 30 seconds...", "white", "on_blue")
+ time.sleep(30)
+
+ order = binance_market_buy(symbol, chunk_quantity, slippage)
+ if order:
+ time.sleep(2)
+ pos = binance_get_position(symbol)
+ price = binance_token_price(symbol)
+ pos_usd = pos * price
+
+ if pos_usd >= (target_size * 0.97):
+ break
+
+ size_needed = target_size - pos_usd
+ if size_needed <= 0:
+ break
+
+ if size_needed > max_usd_order_size:
+ chunk_size = max_usd_order_size
+ else:
+ chunk_size = size_needed
+
+ chunk_quantity = chunk_size / price
+
+ except:
+ cprint("β AI Agent encountered critical error, manual intervention needed", "white", "on_red")
+ return
+
+ cprint("β¨ AI Agent completed Binance position entry", "white", "on_blue")
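+
+# Illustrative usage (values are made up): with max_usd_order_size = 100, a call like
+#   binance_ai_entry('SOLUSDT', 300)
+# buys in $100 chunks until the position reaches ~97% of the $300 target, while
+#   binance_chunk_kill('SOLUSDT', max_usd_order_size, slippage)
+# unwinds it in chunks, respecting Binance's ~$5 NOTIONAL minimum per order.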
+
+def binance_chunk_kill(symbol, max_usd_order_size, slippage_pct=0.02):
+ """Kill a position in chunks on Binance"""
+ cprint(f"\nπͺ Moon Dev's AI Agent initiating Binance position exit...", "white", "on_cyan")
+
+ # Binance minimum order size (NOTIONAL filter)
+ MIN_ORDER_SIZE_USD = 5.0 # Most tokens require $5 minimum
+
+ try:
+ # Get current position
+ pos = binance_get_position(symbol)
+ price = binance_token_price(symbol)
+ current_usd_value = pos * price
+
+ cprint(f"π Initial position: {pos:.6f} {symbol} (${current_usd_value:.2f})", "white", "on_cyan")
+
+ # If position is very small (under $15), sell all at once
+ if current_usd_value < 15.0:
+ cprint(f"π‘ Small position (${current_usd_value:.2f}) - selling all at once to avoid NOTIONAL error", "yellow")
+ try:
+ order = binance_market_sell(symbol, pos, slippage_pct)
+ if order:
+ cprint("\n⨠Position successfully closed!", "white", "on_green")
+ return
+ else:
+ cprint("β Failed to close small position", "white", "on_red")
+ return
+ except Exception as e:
+ cprint(f"β Error closing small position: {str(e)}", "white", "on_red")
+ return
+
+ # Keep going until position is essentially zero
+ while current_usd_value > 1.0: # Keep going until less than $1
+ # Calculate chunk size - ensure it's above minimum
+ chunk_usd_value = max(MIN_ORDER_SIZE_USD, min(max_usd_order_size, current_usd_value / 3))
+ chunk_quantity = chunk_usd_value / price
+
+ cprint(f"\nπ Selling chunk of ${chunk_usd_value:.2f} (min ${MIN_ORDER_SIZE_USD:.2f} required)", "white", "on_cyan")
+
+ # Execute sell orders in chunks
+ for i in range(3):
+ try:
+ cprint(f"\nπ« Executing sell chunk {i+1}/3...", "white", "on_cyan")
+ order = binance_market_sell(symbol, chunk_quantity, slippage_pct)
+ if not order:
+ cprint(f"β Sell chunk {i+1}/3 failed", "white", "on_red")
+ else:
+                    cprint(f"✅ Sell chunk {i+1}/3 complete", "white", "on_green")
+ time.sleep(2) # Small delay between chunks
+ except Exception as e:
+ cprint(f"β Error in sell chunk: {str(e)}", "white", "on_red")
+
+ # Check remaining position
+ time.sleep(5) # Wait for order to settle
+ pos = binance_get_position(symbol)
+ price = binance_token_price(symbol)
+ current_usd_value = pos * price
+ cprint(f"\nπ Remaining position: {pos:.6f} {symbol} (${current_usd_value:.2f})", "white", "on_cyan")
+
+ if current_usd_value > 1.0:
+ cprint("π Position still open - continuing to close...", "white", "on_cyan")
+ time.sleep(2)
+
+ cprint("\n⨠Position successfully closed!", "white", "on_green")
+
+ except Exception as e:
+ cprint(f"β Error during Binance position exit: {str(e)}", "white", "on_red")
\ No newline at end of file