 logger = get_logger(__name__)
 router = APIRouter(tags=["model-settings"])

-# 全局锁,用于确保配置更新的原子性
+# Global lock to ensure atomic configuration updates
 _config_lock = threading.Lock()

 class ModelSettingsVO(BaseModel):
32- """模型设置数据结构 (保持原始字段以兼容历史客户端)
33- 安全调整: GET 接口中 apiKey 字段现在直接返回掩码后的值,而非明文;。
34- 这样旧前端仍可显示(只是掩码),避免破坏性变更。
3532 """
36- modelPlatform : str = Field (..., description = "模型平台: doubao | openai" )
37- modelId : str = Field (..., description = "VLM模型ID" )
38- baseUrl : str = Field (..., description = "API基础URL" )
39- embeddingModelId : str = Field (..., description = "嵌入模型ID" )
40- apiKey : str = Field (..., description = "API密钥(更新请求需提供; 查询返回为掩码值)" )
33+ Model settings data structure (keeps original field names for backward compatibility).
34+ Security adjustment: In the GET API, the apiKey field now returns a masked value instead of the plaintext.
35+ This allows old frontends to still display (masked) without a breaking change.
36+ """
37+ modelPlatform : str = Field (..., description = "Model platform: doubao | openai" )
38+ modelId : str = Field (..., description = "VLM model ID" )
39+ baseUrl : str = Field (..., description = "API base URL" )
40+ embeddingModelId : str = Field (..., description = "Embedding model ID" )
41+ apiKey : str = Field (..., description = "API key (plaintext required in update request; masked in query response)" )


 class GetModelSettingsRequest(BaseModel):
-    """获取模型设置请求(空请求)"""
+    """Request body for fetching model settings (empty body)."""
     pass


 class GetModelSettingsResponse(BaseModel):
-    """获取模型设置响应 (apiKey 为掩码值)"""
+    """Response body for fetching model settings (apiKey is masked)."""
     config: ModelSettingsVO


 class UpdateModelSettingsRequest(BaseModel):
-    """更新模型设置请求 (继续使用原始模型, 接受明文 apiKey)"""
+    """Request body for updating model settings (accepts plaintext apiKey)."""
     config: ModelSettingsVO


 class UpdateModelSettingsResponse(BaseModel):
-    """更新模型设置响应"""
+    """Response body for updating model settings."""
     success: bool
     message: str

@@ -65,39 +66,39 @@ async def get_model_settings(
     _auth: str = auth_dependency
 ):
     """
-    获取当前模型配置
+    Get the current model configuration.
     """
     try:
         def _mask_api_key(raw: str) -> str:
-            # 直接硬编码:保留前4后2,中间 ***
+            # Fixed rule: keep first 4 and last 2 characters, mask the middle with ***
             if not raw:
                 return ""
             if len(raw) <= 6:  # 4 + 2
                 return raw[0] + "***" if len(raw) > 1 else "***"
             return f"{raw[:4]}***{raw[-2:]}"
-        # 从全局配置获取当前设置
+        # Retrieve current settings from global config
         global_config = GlobalConfig.get_instance()
         config = global_config.get_config()
         if not config:
             raise HTTPException(status_code=500, detail="配置未初始化")

-        # 获取VLM和嵌入模型配置
+        # Get VLM and embedding model configs
         vlm_config = config.get("vlm_model", {})
         embedding_config = config.get("embedding_model", {})

-        # 推断平台类型
+        # Infer platform type
         base_url = vlm_config.get("base_url", "")
         platform = vlm_config.get("provider", "")

-        # 构造响应 - 使用掩码
+        # Build the response using the masked API key
         masked_key = _mask_api_key(vlm_config.get("api_key", ""))
-        # 注意: apiKey 字段返回空串以兼容老客户端字段存在性,但不泄露明文
+        # Note: apiKey returns the masked string for backward compatibility (field presence kept)
         model_settings = ModelSettingsVO(
             modelPlatform=platform,
             modelId=vlm_config.get("model", ""),
             baseUrl=base_url,
             embeddingModelId=embedding_config.get("model", ""),
-            apiKey=masked_key  # 直接使用掩码后的 key,避免泄露明文
+            apiKey=masked_key
         )

         response = GetModelSettingsResponse(config=model_settings)
@@ -106,7 +107,7 @@ def _mask_api_key(raw: str) -> str:
     except HTTPException:
         raise
     except Exception as e:
-        logger.exception(f"获取模型设置失败: {e}")
+        logger.exception(f"Failed to get model settings: {e}")
         return convert_resp(code=500, status=500, message=f"获取模型设置失败: {str(e)}")

 @router.post("/api/model_settings/update")
@@ -115,12 +116,12 @@ async def update_model_settings(
     _auth: str = auth_dependency
 ):
     """
-    更新模型配置并重新初始化LLM客户端
+    Update model configuration and reinitialize LLM clients.
     """
     with _config_lock:
         try:
             def _is_masked_api_key(val: str) -> bool:
-                # 直接硬编码:包含 *** 且不以 *** 结尾 且长度>= 6
+                # Heuristic: contains ***, does not end with ***, and length >= 6
                 if not val:
                     return False
                 return ("***" in val) and not val.endswith("***") and len(val) >= 6
@@ -132,10 +133,10 @@ def _is_masked_api_key(val: str) -> bool:
             keep_original = _is_masked_api_key(incoming_key)

             if not incoming_key and not current_vlm_key:
-                # 没有任何真实 key
+                # No real key available at all
                 raise HTTPException(status_code=400, detail="api key cannot be empty")

-            # 如果是掩码表示不修改;否则使用新 key
+            # If masked -> keep original; else use new key
             final_api_key = current_vlm_key if keep_original else incoming_key

             if not final_api_key:
@@ -149,7 +150,7 @@ def _is_masked_api_key(val: str) -> bool:
             if not request.config.baseUrl:
                 raise HTTPException(status_code=400, detail="vlm model base url cannot be empty")

-            # 构造新的配置
+            # Construct new settings dict
             new_settings = {
                 "vlm_model": {
                     "base_url": request.config.baseUrl,
@@ -167,7 +168,7 @@ def _is_masked_api_key(val: str) -> bool:
                 }
             }

-            # 获取配置管理器
+            # Get config manager
             config_manager = GlobalConfig.get_instance().get_config_manager()

             if not config_manager:
@@ -180,16 +181,16 @@ def _is_masked_api_key(val: str) -> bool:
             config_manager.load_config(current_config_path)

             try:
-                # 重新初始化VLM客户端
+                # Reinitialize VLM client
                 vlm_success = GlobalVLMClient.get_instance().reinitialize()
-                logger.info("VLM客户端重新初始化成功")
+                logger.info("VLM client reinitialized successfully")
                 embedding_success = GlobalEmbeddingClient.get_instance().reinitialize()
-                logger.info("嵌入客户端重新初始化成功")
+                logger.info("Embedding client reinitialized successfully")
                 if not vlm_success or not embedding_success:
                     raise HTTPException(status_code=500, detail="internal error: reinitialize LLM clients failed")

             except Exception as e:
-                logger.error(f"重新初始化LLM客户端失败: {e}")
+                logger.error(f"Failed to reinitialize LLM client: {e}")
                 return convert_resp(
                     code=500,
                     status=500,
@@ -205,7 +206,7 @@ def _is_masked_api_key(val: str) -> bool:
         except HTTPException:
             raise
         except Exception as e:
-            logger.error(f"更新模型设置失败: {e}")
+            logger.error(f"Failed to update model settings: {e}")
             return convert_resp(
                 code=500,
                 status=500,
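For quick reference, here is a minimal standalone sketch of the masking round trip this change implements. It is reimplemented here for illustration only; the helpers mirror the nested _mask_api_key / _is_masked_api_key functions in the diff above, and the example key value is made up.

def mask_api_key(raw: str) -> str:
    # Keep the first 4 and last 2 characters, mask the middle with ***
    if not raw:
        return ""
    if len(raw) <= 6:  # 4 + 2
        return raw[0] + "***" if len(raw) > 1 else "***"
    return f"{raw[:4]}***{raw[-2:]}"

def is_masked_api_key(val: str) -> bool:
    # Heuristic: contains ***, does not end with ***, and length >= 6
    if not val:
        return False
    return ("***" in val) and not val.endswith("***") and len(val) >= 6

key = "sk-1234567890abcdef"        # made-up example key
masked = mask_api_key(key)         # -> "sk-1***ef"
assert is_masked_api_key(masked)   # masked value echoed back => keep the stored key
assert not is_masked_api_key(key)  # fresh plaintext key => replace the stored key

This is why a GET followed by an unmodified update does not wipe the stored key: the masked value round-trips through the client and is detected on update, so the original key is kept.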