Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ dist/
*.egg-info/

.streamlit/
.ga-switch/

.vscode/
.idea/
Expand Down
83 changes: 47 additions & 36 deletions agentmain.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,31 +6,40 @@
elif hasattr(sys.stderr, 'reconfigure'): sys.stderr.reconfigure(errors='replace')
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from llmcore import LLMSession, ToolClient, ClaudeSession, MixinSession, NativeToolClient, NativeClaudeSession, NativeOAISession
from agent_loop import agent_runner_loop
from ga import GenericAgentHandler, smart_format, get_global_memory, format_error, consume_file
from ga_switch import get_service
from ga_switch.runtime_bridge import describe_runtime, load_clients, next_client, set_active_route as bridge_set_active_route

script_dir = os.path.dirname(os.path.abspath(__file__))
def load_tool_schema(suffix=''):
global TOOLS_SCHEMA
TS = open(os.path.join(script_dir, f'assets/tools_schema{suffix}.json'), 'r', encoding='utf-8').read()
with open(os.path.join(script_dir, f'assets/tools_schema{suffix}.json'), 'r', encoding='utf-8') as f: TS = f.read()
TOOLS_SCHEMA = json.loads(TS if os.name == 'nt' else TS.replace('powershell', 'bash'))
load_tool_schema()

lang_suffix = '_en' if os.environ.get('GA_LANG', '') == 'en' else ''
mem_dir = os.path.join(script_dir, 'memory')
if not os.path.exists(mem_dir): os.makedirs(mem_dir)
mem_txt = os.path.join(mem_dir, 'global_mem.txt')
if not os.path.exists(mem_txt): open(mem_txt, 'w', encoding='utf-8').write('# [Global Memory - L2]\n')
if not os.path.exists(mem_txt):
with open(mem_txt, 'w', encoding='utf-8') as f:
f.write('# [Global Memory - L2]\n')
mem_insight = os.path.join(mem_dir, 'global_mem_insight.txt')
if not os.path.exists(mem_insight):
t = os.path.join(script_dir, f'assets/global_mem_insight_template{lang_suffix}.txt')
open(mem_insight, 'w', encoding='utf-8').write(open(t, encoding='utf-8').read() if os.path.exists(t) else '')
template = ''
if os.path.exists(t):
with open(t, encoding='utf-8') as f:
template = f.read()
with open(mem_insight, 'w', encoding='utf-8') as f:
f.write(template)
cdp_cfg = os.path.join(script_dir, 'assets/tmwd_cdp_bridge/config.js')
if not os.path.exists(cdp_cfg):
try:
os.makedirs(os.path.dirname(cdp_cfg), exist_ok=True)
open(cdp_cfg, 'w', encoding='utf-8').write(f"const TID = '__ljq_{hex(random.randint(0, 99999999))[2:8]}';")
with open(cdp_cfg, 'w', encoding='utf-8') as f:
f.write(f"const TID = '__ljq_{hex(random.randint(0, 99999999))[2:8]}';")
except Exception as e: print(f'[WARN] CDP config init failed: {e} — advanced web features (tmwebdriver) will be unavailable.')

def get_system_prompt():
Expand All @@ -43,47 +52,49 @@ class GeneraticAgent:
def __init__(self):
script_dir = os.path.dirname(os.path.abspath(__file__))
os.makedirs(os.path.join(script_dir, 'temp'), exist_ok=True)
from llmcore import mykeys
llm_sessions = []
for k, cfg in mykeys.items():
if not any(x in k for x in ['api', 'config', 'cookie']): continue
try:
if 'native' in k and 'claude' in k: llm_sessions += [NativeToolClient(NativeClaudeSession(cfg=cfg))]
elif 'native' in k and 'oai' in k: llm_sessions += [NativeToolClient(NativeOAISession(cfg=cfg))]
elif 'claude' in k: llm_sessions += [ToolClient(ClaudeSession(cfg=cfg))]
elif 'oai' in k: llm_sessions += [ToolClient(LLMSession(cfg=cfg))]
elif 'mixin' in k: llm_sessions += [{'mixin_cfg': cfg}]
except: pass
for i, s in enumerate(llm_sessions):
if isinstance(s, dict) and 'mixin_cfg' in s:
try:
mixin = MixinSession(llm_sessions, s['mixin_cfg'])
if isinstance(mixin._sessions[0], (NativeClaudeSession, NativeOAISession)): llm_sessions[i] = NativeToolClient(mixin)
else: llm_sessions[i] = ToolClient(mixin)
except Exception as e: print(f'[WARN] Failed to init MixinSession with cfg {s["mixin_cfg"]}: {e}')
self.llmclients = llm_sessions
self.lock = threading.Lock()
self.task_dir = None
self.history = []
self.task_queue = queue.Queue()
self.is_running = False; self.stop_sig = False
self.llm_no = 0; self.inc_out = False
self.llm_no = 0; self.llmclient = None; self.llmclients = []
self.config_source = 'legacy'; self.config_meta = {}
self.ga_switch = get_service()
self.inc_out = False
self.handler = None; self.verbose = True
self.llmclient = self.llmclients[self.llm_no]
self._reload_clients(initial=True)

def next_llm(self, n=-1):
self.llm_no = ((self.llm_no + 1) if n < 0 else n) % len(self.llmclients)
lastc = self.llmclient
self.llmclient = self.llmclients[self.llm_no]
self.llmclient.backend.history = lastc.backend.history
self.llmclient.last_tools = ''
def _sync_tool_schema(self):
name = self.get_llm_name().lower()
if 'glm' in name or 'minimax' in name or 'kimi' in name: load_tool_schema('_cn')
else: load_tool_schema()
def list_llms(self): return [(i, self.get_llm_name(b), i == self.llm_no) for i, b in enumerate(self.llmclients)]
def get_llm_name(self, b=None):
b = self.llmclient if b is None else b
return f"{type(b.backend).__name__}/{b.backend.name}" if not isinstance(b, dict) else "BADCONFIG_MIXIN"

def _reload_clients(self, *, initial=False, preserve_history=True):
return load_clients(self, preserve_history=preserve_history, initial=initial)

def next_llm(self, n=-1):
return next_client(self, n)

def set_active_route(self, route_id_or_idx):
return bridge_set_active_route(self, route_id_or_idx)

def reload_llm_config(self, preserve_history=True):
if self.is_running:
raise RuntimeError('Cannot reload LLM config while agent is running.')
self._reload_clients(initial=False, preserve_history=preserve_history)
return self.describe_llms()

def describe_llms(self):
return describe_runtime(self)

def list_llms(self):
return [(item['idx'], f"{item['name']} [{item['backend_class']}/{item.get('provider_name') or self.llmclients[item['idx']].backend.name}]", item['active']) for item in self.describe_llms()]

def get_llm_name(self):
if self.llmclient is None:
return 'No LLM'
item = self.describe_llms()[self.llm_no]
return f"{item['name']} [{item['backend_class']}/{item.get('provider_name') or self.llmclient.backend.name}]"

def abort(self):
if not self.is_running: return
Expand Down
14 changes: 14 additions & 0 deletions ga_switch/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
import os
from functools import lru_cache


def get_default_db_path():
    """Return the default SQLite database path: ``<repo root>/.ga-switch/ga-switch.db``.

    The repo root is taken to be the parent directory of this package.
    """
    package_dir = os.path.dirname(os.path.abspath(__file__))
    repo_root = os.path.dirname(package_dir)
    return os.path.join(repo_root, ".ga-switch", "ga-switch.db")


@lru_cache(maxsize=8)
def get_service(db_path=None):
    """Return a (cached) ``GASwitchService`` bound to *db_path*.

    When *db_path* is falsy, the default database path is used.
    NOTE(review): the cache is keyed on the raw *db_path* argument, so
    ``get_service()`` and ``get_service(get_default_db_path())`` yield two
    distinct service instances over the same file — confirm this is intended.
    """
    # Imported lazily to avoid a hard dependency at package import time.
    from .service import GASwitchService

    resolved = db_path if db_path else get_default_db_path()
    return GASwitchService(db_path=resolved)
48 changes: 48 additions & 0 deletions ga_switch/diagnostics.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
from datetime import datetime, timezone


# Closed set of error categories produced by classify_error() below.
# "unknown" is the fallback when no status code or message token matches.
ERROR_KINDS = (
    "auth",
    "quota",
    "rate_limit",
    "timeout",
    "network",
    "server",
    "bad_request",
    "model_not_found",
    "unsupported_param",
    "unknown",
)


def utcnow_iso() -> str:
    """Current UTC time as a second-precision ISO-8601 string with a ``Z`` suffix."""
    now = datetime.now(timezone.utc).replace(microsecond=0)
    return now.isoformat().replace("+00:00", "Z")


def normalize_message(message, limit=2000) -> str:
    """Coerce *message* to a whitespace-stripped string capped at *limit* characters.

    ``None`` yields the empty string; any other value is passed through ``str()``.
    """
    if message is None:
        return ""
    return str(message).strip()[:limit]


def classify_error(*, status_code=None, message="", body="", exc_type="") -> str:
    """Classify a provider/API failure into one of ERROR_KINDS.

    Keyword-only arguments:
        status_code: HTTP status (int, numeric string, or None).
        message: exception or API error message text.
        body: raw response body text.
        exc_type: exception class name (e.g. ``"ConnectionError"``).

    Returns one of the strings in ERROR_KINDS, falling back to "unknown".
    Never raises: an unparseable *status_code* is treated as absent, so the
    classifier itself cannot fail while handling an upstream error (fix:
    previously ``int(status_code)`` raised on non-numeric input).
    """
    try:
        status = None if status_code is None else int(status_code)
    except (TypeError, ValueError):
        status = None

    # Lower-cased haystack of every textual signal for substring matching.
    hay = " ".join(x for x in (str(message or ""), str(body or ""), str(exc_type or "")) if x).lower()

    if status in (401, 403):
        return "auth"
    if status == 404 or "model_not_found" in hay or "model not found" in hay or "no such model" in hay:
        return "model_not_found"
    # Unsupported-parameter errors are detected before the generic 400 branch
    # so a 400 carrying an "unsupported ... param" message is not demoted.
    if "unsupported_param" in hay or ("unsupported" in hay and any(k in hay for k in ("param", "reasoning_effort", "reasoning.effort", "api_mode"))):
        return "unsupported_param"
    if status == 400:
        return "unsupported_param" if "unsupported" in hay else "bad_request"
    if status == 429:
        # 429 may mean hard quota/billing exhaustion or transient rate limiting;
        # tokens include CJK variants used by some providers.
        quota_tokens = ("insufficient_quota", "quota", "credit", "billing", "余额", "配额")
        return "quota" if any(token in hay for token in quota_tokens) else "rate_limit"
    if status is not None and status >= 500:
        return "server"
    if any(token in hay for token in ("timeout", "timed out", "readtimeout", "connecttimeout")):
        return "timeout"
    if any(token in hay for token in ("connectionerror", "proxyerror", "sslerror", "name or service not known", "connection reset", "dns", "proxy", "connection refused")):
        return "network"
    return "unknown"
59 changes: 59 additions & 0 deletions ga_switch/models.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
from dataclasses import dataclass, field
from typing import Any


# Backend kinds a provider may declare.  The "native_*" kinds presumably map
# to the native tool-calling client classes and the "*_text" kinds to the
# text-prompted ones — see is_native_backend_kind()/backend_family() below.
PROVIDER_BACKEND_KINDS = (
    "native_claude",
    "native_oai",
    "claude_text",
    "oai_text",
)

# Route kinds: "single" references one provider_id; "failover" carries a
# member_provider_ids list (ordering semantics defined by the route runner).
ROUTE_KINDS = ("single", "failover")


def is_native_backend_kind(kind: str) -> bool:
    """Return True when *kind* names a ``native_*`` backend kind.

    Falsy inputs (None, empty string) are treated as non-native.
    """
    normalized = str(kind) if kind else ""
    return normalized.startswith("native_")


def backend_family(kind: str) -> str:
    """Map a backend kind to its provider family.

    Any kind containing "claude" belongs to the "claude" family; everything
    else (including falsy input) defaults to "oai".
    """
    normalized = str(kind) if kind else ""
    return "claude" if "claude" in normalized else "oai"


@dataclass
class ProviderModel:
    """Configuration record for one upstream LLM provider.

    Mirrors a persisted ga-switch row; ``id`` is None until the record has
    been saved.
    """

    # Database primary key; None for unsaved instances.
    id: int | None = None
    # Human-readable provider name.
    name: str = ""
    # One of PROVIDER_BACKEND_KINDS.
    backend_kind: str = "oai_text"
    # API credentials and endpoint.
    apikey: str = ""
    apibase: str = ""
    # Model identifier sent to the provider.
    model: str = ""
    # API call style — presumably "chat_completions" vs. an alternative mode; confirm against service layer.
    api_mode: str = "chat_completions"
    temperature: float = 1.0
    max_tokens: int = 8192
    # Context window budget — assumed to be in tokens; TODO confirm.
    context_win: int = 24000
    proxy: str | None = None
    # Separate connect/read timeouts (seconds, presumably — verify against HTTP client).
    timeout: int = 5
    read_timeout: int = 30
    max_retries: int = 1
    # Optional reasoning/thinking controls; None means "provider default".
    reasoning_effort: str | None = None
    thinking_type: str | None = None
    thinking_budget_tokens: int | None = None
    stream: bool = True
    is_enabled: bool = True
    # Free-form provider-specific options.
    extra: dict[str, Any] = field(default_factory=dict)


@dataclass
class RouteModel:
    """A routing entry selecting which provider(s) serve requests.

    ``kind`` is one of ROUTE_KINDS: "single" uses ``provider_id``; "failover"
    uses ``member_provider_ids`` (ordering semantics defined by the caller).
    """

    # Database primary key; None for unsaved instances.
    id: int | None = None
    name: str = ""
    kind: str = "single"
    # Target provider for "single" routes.
    provider_id: int | None = None
    # Member providers for "failover" routes.
    member_provider_ids: list[int] = field(default_factory=list)
    is_enabled: bool = True
    # Whether this route is the default selection.
    is_default: bool = False
    # Free-form per-route configuration.
    config: dict[str, Any] = field(default_factory=dict)
Loading