Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ Click [Github Latest Release](https://github.com/volcengine/MineContext/releases

After the application launches, please follow the prompts to enter your API key. (Note: On the first run, the application needs to install the backend environment, which may take about two minutes).

We currently support services from Doubao, OpenAI, and custom models. This includes any **local models** or **third-party model** services that are compatible with the OpenAI API format.
We currently support services from Doubao, OpenAI, MiniMax, and custom models. This includes any **local models** or **third-party model** services that are compatible with the OpenAI API format.

We recommend using [LMStudio](https://lmstudio.ai/) to run local models. It provides a simple interface and powerful features to help you quickly deploy and manage them.

Expand Down Expand Up @@ -326,7 +326,7 @@ opencontext/

6. **LLM Integration** (`llm/`)

- Support for multiple LLM providers (OpenAI, Doubao)
- Support for multiple LLM providers (OpenAI, Doubao, MiniMax)
- VLM (Vision-Language Model) integration
- Embedding generation services

Expand Down Expand Up @@ -359,12 +359,12 @@ server:
debug: false

embedding_model:
provider: doubao # options: openai, doubao
provider: doubao # options: openai, doubao, minimax
api_key: your-api-key
model: doubao-embedding-vision-250615

vlm_model:
provider: doubao # options: openai, doubao
provider: doubao # options: openai, doubao, minimax
api_key: your-api-key
model: doubao-seed-1-6-flash-250828

Expand Down
8 changes: 4 additions & 4 deletions README_zh.md
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ MineContext 非常注重用户隐私,所有数据都默认保存在本地如

## 2. 输入您的 API 密钥

应用程序启动后(首次运行时需要安装后端环境,约需等待两分钟),请根据引导输入您的 API 密钥。目前我们支持豆包、OpenAI 以及自定义模型服务,包括任何兼容 OpenAI API 格式的**本地模型**或**第三方模型**服务。
应用程序启动后(首次运行时需要安装后端环境,约需等待两分钟),请根据引导输入您的 API 密钥。目前我们支持豆包、OpenAI、MiniMax 以及自定义模型服务,包括任何兼容 OpenAI API 格式的**本地模型**或**第三方模型**服务。
我们推荐使用 [LMStudio](https://lmstudio.ai/) 来运行本地模型,它提供了简单的界面和强大的功能,能够帮助您快速部署和管理本地模型。

**综合成本和性能,我们推荐使用豆包模型**,豆包模型的 API-Key 可以在 [API 管理界面](https://console.volcengine.com/ark/region:ark+cn-beijing/apiKey) 生成。
Expand Down Expand Up @@ -325,7 +325,7 @@ opencontext/

6. **LLM 集成** (`llm/`)

- 支持多个 LLM 提供商(OpenAI、豆包)
- 支持多个 LLM 提供商(OpenAI、豆包、MiniMax)
- VLM(视觉-语言模型)集成
- 嵌入生成服务

Expand Down Expand Up @@ -358,12 +358,12 @@ server:
debug: false

embedding_model:
provider: doubao # 选项:openai, doubao
provider: doubao # 选项:openai, doubao, minimax
api_key: your-api-key
model: doubao-embedding-vision-250615

vlm_model:
provider: doubao # 选项:openai, doubao
provider: doubao # 选项:openai, doubao, minimax
api_key: your-api-key
model: doubao-seed-1-6-flash-250828

Expand Down
4 changes: 4 additions & 0 deletions frontend/src/renderer/src/assets/images/settings/minimax.svg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
23 changes: 21 additions & 2 deletions frontend/src/renderer/src/pages/settings/constants.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -4,21 +4,25 @@
import { ReactNode } from 'react'
import openAI from '../../assets/images/settings/OpenAI.png'
import doubao from '../../assets/images/settings/doubao.png'
import minimax from '../../assets/images/settings/minimax.svg'
import custom from '../../assets/images/settings/custom.svg'

// Selectable model providers on the settings page. The string values are the
// provider identifiers sent to the backend (they must match the `provider`
// options accepted by the backend config, e.g. config.yaml).
export enum ModelTypeList {
Doubao = 'doubao',
OpenAI = 'openai',
MiniMax = 'minimax',
// 'custom' covers any OpenAI-API-compatible local or third-party service.
Custom = 'custom'
}

export enum embeddingModels {
DoubaoEmbeddingModelId = 'doubao-embedding-vision-250615',
OpenAIEmbeddingModelId = 'text-embedding-3-large'
OpenAIEmbeddingModelId = 'text-embedding-3-large',
MiniMaxEmbeddingModelId = 'embo-01'
}
export enum BaseUrl {
DoubaoUrl = 'https://ark.cn-beijing.volces.com/api/v3',
OpenAIUrl = 'https://api.openai.com/v1'
OpenAIUrl = 'https://api.openai.com/v1',
MiniMaxUrl = 'https://api.minimax.io/v1'
}
export interface OptionInfo {
value: string
Expand Down Expand Up @@ -70,6 +74,21 @@ export const ModelInfoList = [
}
]
},
{
icon: <img src={minimax} className="!max-w-none w-[24px] h-[24px]" />,
key: 'MiniMax',
value: 'minimax',
option: [
{
value: 'MiniMax-M2.7',
label: 'MiniMax-M2.7'
},
{
value: 'MiniMax-M2.7-highspeed',
label: 'MiniMax-M2.7-highspeed'
}
]
},
{
icon: <img src={custom} className="!max-w-none w-[18px] h-[18px]" />,
key: 'Custom',
Expand Down
39 changes: 27 additions & 12 deletions frontend/src/renderer/src/pages/settings/settings.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -157,14 +157,19 @@ const StandardFormItems: FC<StandardFormItemsProps> = (props) => {
You can get the API Key Here:
<Button
onClick={() => {
const url =
modelPlatform === ModelTypeList.Doubao
? 'https://www.volcengine.com/docs/82379/1541594'
: 'https://platform.openai.com/settings/organization/api-keys'
window.open(`${url}`)
const urlMap: Record<string, string> = {
[ModelTypeList.Doubao]: 'https://www.volcengine.com/docs/82379/1541594',
[ModelTypeList.MiniMax]: 'https://platform.minimaxi.com/user-center/basic-information/interface-key',
[ModelTypeList.OpenAI]: 'https://platform.openai.com/settings/organization/api-keys'
}
window.open(urlMap[modelPlatform] || urlMap[ModelTypeList.OpenAI])
}}
type="text">
{modelPlatform === ModelTypeList.Doubao ? 'Get Doubao API Key' : 'Get OpenAI API Key'}
{modelPlatform === ModelTypeList.Doubao
? 'Get Doubao API Key'
: modelPlatform === ModelTypeList.MiniMax
? 'Get MiniMax API Key'
: 'Get OpenAI API Key'}
</Button>
</div>
}
Expand Down Expand Up @@ -246,15 +251,22 @@ const Settings: FC<SettingsProps> = (props) => {
const formatData = Object.fromEntries(
Object.entries(data).map(([key, value]) => [key.replace(`${values.modelPlatform}-`, ''), value])
)
const getBaseUrl = (platform: string) => {
if (platform === ModelTypeList.Doubao) return BaseUrl.DoubaoUrl
if (platform === ModelTypeList.MiniMax) return BaseUrl.MiniMaxUrl
return BaseUrl.OpenAIUrl
}
const getEmbeddingModelId = (platform: string) => {
if (platform === ModelTypeList.Doubao) return embeddingModels.DoubaoEmbeddingModelId
if (platform === ModelTypeList.MiniMax) return embeddingModels.MiniMaxEmbeddingModelId
return embeddingModels.OpenAIEmbeddingModelId
}
const params = isCustom
? formatData
: {
...formatData,
baseUrl: values.modelPlatform === ModelTypeList.Doubao ? BaseUrl.DoubaoUrl : BaseUrl.OpenAIUrl,
embeddingModelId:
values.modelPlatform === ModelTypeList.Doubao
? embeddingModels.DoubaoEmbeddingModelId
: embeddingModels.OpenAIEmbeddingModelId
baseUrl: getBaseUrl(values.modelPlatform),
embeddingModelId: getEmbeddingModelId(values.modelPlatform)
}

updateModelSettings(params as unknown as ModelConfigProps)
Expand Down Expand Up @@ -299,7 +311,8 @@ const Settings: FC<SettingsProps> = (props) => {
initialValues={{
modelPlatform: ModelTypeList.Doubao,
[`${ModelTypeList.Doubao}-modelId`]: 'doubao-seed-1-6-flash-250828',
[`${ModelTypeList.OpenAI}-modelId`]: 'gpt-5-nano'
[`${ModelTypeList.OpenAI}-modelId`]: 'gpt-5-nano',
[`${ModelTypeList.MiniMax}-modelId`]: 'MiniMax-M2.7'
}}>
<FormItem label="Model platform" field={'modelPlatform'} requiredSymbol={false}>
<ModelRadio />
Expand All @@ -315,6 +328,8 @@ const Settings: FC<SettingsProps> = (props) => {
return <StandardFormItems modelPlatform={modelPlatform} prefix={ModelTypeList.Doubao} />
} else if (modelPlatform === ModelTypeList.OpenAI) {
return <StandardFormItems modelPlatform={modelPlatform} prefix={ModelTypeList.OpenAI} />
} else if (modelPlatform === ModelTypeList.MiniMax) {
return <StandardFormItems modelPlatform={modelPlatform} prefix={ModelTypeList.MiniMax} />
} else {
return null
}
Expand Down
91 changes: 85 additions & 6 deletions opencontext/llm/llm_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
class LLMProvider(Enum):
OPENAI = "openai"
DOUBAO = "doubao"
MINIMAX = "minimax"


class LLMType(Enum):
Expand Down Expand Up @@ -264,9 +265,42 @@ async def _openai_chat_completion_stream_async(self, messages: List[Dict[str, An
logger.error(f"OpenAI API async stream error: {e}")
raise

def _minimax_embedding(self, text: str, **kwargs) -> List[float]:
    """Fetch an embedding for ``text`` from MiniMax's native embedding API.

    MiniMax does not follow the OpenAI wire format:
      request : ``{"model": "embo-01", "texts": [...], "type": "db"|"query"}``
      response: ``{"vectors": [[...]], "total_tokens": N, "base_resp": {...}}``

    Args:
        text: The input text to embed.
        **kwargs: ``embedding_type`` selects MiniMax's "db" vs "query" mode
            (defaults to "db").

    Returns:
        The embedding vector for ``text``.

    Raises:
        ValueError: if MiniMax reports a non-zero status or returns no vectors.
        httpx.HTTPStatusError: on a non-2xx HTTP response.
    """
    import httpx

    endpoint = f"{self.base_url.rstrip('/')}/embeddings"
    auth_headers = {
        "Authorization": f"Bearer {self.api_key}",
        "Content-Type": "application/json",
    }
    request_body = {
        "model": self.model,
        "texts": [text],
        "type": kwargs.get("embedding_type", "db"),
    }

    response = httpx.post(endpoint, json=request_body, headers=auth_headers, timeout=self.timeout)
    response.raise_for_status()
    body = response.json()

    status = body.get("base_resp", {})
    if status.get("status_code", 0) != 0:
        raise ValueError(f"MiniMax embedding error: {status.get('status_msg', 'unknown')}")

    all_vectors = body.get("vectors", [])
    if not all_vectors:
        raise ValueError("MiniMax embedding returned empty vectors")
    return all_vectors[0]

def _request_embedding(self, text: str, **kwargs) -> List[float]:
try:
if self.provider != LLMProvider.DOUBAO.value:
response = None
if self.provider == LLMProvider.MINIMAX.value:
embedding = self._minimax_embedding(text)
elif self.provider != LLMProvider.DOUBAO.value:
response = self.client.embeddings.create(model=self.model, input=[text])
embedding = response.data[0].embedding
else:
Expand All @@ -276,7 +310,7 @@ def _request_embedding(self, text: str, **kwargs) -> List[float]:
embedding = response.data.embedding

# Record token usage
if hasattr(response, "usage") and response.usage:
if response and hasattr(response, "usage") and response.usage:
try:
from opencontext.monitoring import record_token_usage

Expand Down Expand Up @@ -311,9 +345,38 @@ def _request_embedding(self, text: str, **kwargs) -> List[float]:
logger.error(f"OpenAI API error during embedding: {e}")
raise

async def _minimax_embedding_async(self, text: str, **kwargs) -> List[float]:
    """Asynchronously fetch an embedding from MiniMax's native embedding API.

    Same non-OpenAI wire format as the sync variant: the request carries a
    ``texts`` list plus a ``type`` ("db"|"query"), and the response holds the
    vectors under ``"vectors"`` with status info in ``"base_resp"``.

    Args:
        text: The input text to embed.
        **kwargs: ``embedding_type`` selects "db" vs "query" (defaults to "db").

    Returns:
        The embedding vector for ``text``.

    Raises:
        ValueError: if MiniMax reports a non-zero status or returns no vectors.
        httpx.HTTPStatusError: on a non-2xx HTTP response.
    """
    import httpx

    endpoint = f"{self.base_url.rstrip('/')}/embeddings"
    auth_headers = {
        "Authorization": f"Bearer {self.api_key}",
        "Content-Type": "application/json",
    }
    request_body = {
        "model": self.model,
        "texts": [text],
        "type": kwargs.get("embedding_type", "db"),
    }

    async with httpx.AsyncClient(timeout=self.timeout) as http:
        response = await http.post(endpoint, json=request_body, headers=auth_headers)
        response.raise_for_status()
        body = response.json()

    status = body.get("base_resp", {})
    if status.get("status_code", 0) != 0:
        raise ValueError(f"MiniMax embedding error: {status.get('status_msg', 'unknown')}")

    all_vectors = body.get("vectors", [])
    if not all_vectors:
        raise ValueError("MiniMax embedding returned empty vectors")
    return all_vectors[0]

async def _request_embedding_async(self, text: str, **kwargs) -> List[float]:
try:
if self.provider == LLMProvider.DOUBAO.value:
response = None
if self.provider == LLMProvider.MINIMAX.value:
embedding = await self._minimax_embedding_async(text, **kwargs)
elif self.provider == LLMProvider.DOUBAO.value:
# Only ark has multimodal_embeddings
response = self.client.multimodal_embeddings.create(
model=self.model, input=[{"type": "text", "text": text}]
Expand All @@ -324,7 +387,7 @@ async def _request_embedding_async(self, text: str, **kwargs) -> List[float]:
embedding = response.data[0].embedding

# Record token usage
if hasattr(response, "usage") and response.usage:
if response and hasattr(response, "usage") and response.usage:
try:
from opencontext.monitoring import record_token_usage

Expand Down Expand Up @@ -406,7 +469,17 @@ def _extract_error_summary(error: Any) -> str:
if code in error_msg:
return msg

# 2. Check for OpenAI specific errors
# 2. Check for MiniMax specific errors
minimax_errors = {
"invalid_api_key": "Invalid MiniMax API key.",
"insufficient_balance": "Insufficient MiniMax account balance.",
}

for code, msg in minimax_errors.items():
if code in error_msg:
return msg

# 3. Check for OpenAI specific errors
openai_errors = {
"insufficient_quota": "Insufficient quota. Check your plan and billing details.",
"invalid_api_key": "Invalid API key provided.",
Expand Down Expand Up @@ -477,7 +550,13 @@ def _extract_error_summary(error: Any) -> str:

elif self.llm_type == LLMType.EMBEDDING:
# Test with a simple text
if self.provider == LLMProvider.DOUBAO.value:
if self.provider == LLMProvider.MINIMAX.value:
embedding = self._minimax_embedding("test")
if embedding and len(embedding) > 0:
return True, "Embedding model validation successful"
else:
return False, "Embedding model returned empty response"
elif self.provider == LLMProvider.DOUBAO.value:
response = self.client.multimodal_embeddings.create(
model=self.model, input=[{"type": "text", "text": "test"}]
)
Expand Down
9 changes: 9 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -44,13 +44,16 @@ dependencies = [
"python-multipart",
"playwright",
"volcengine-python-sdk[ark]",
"httpx",
]

[project.optional-dependencies]
dev = [
"black>=24.8.0",
"isort>=5.13.0",
"pre-commit>=3.6.0",
"pytest>=7.0.0",
"pytest-asyncio>=0.21.0",
]

[project.scripts]
Expand Down Expand Up @@ -90,6 +93,12 @@ extend-exclude = '''
)/
'''

# pytest - test configuration
[tool.pytest.ini_options]
markers = [
"integration: marks tests that require a real MiniMax API key (deselect with '-m \"not integration\"')",
]

# isort - import 语句排序工具
[tool.isort]
profile = "black"
Expand Down
Empty file added tests/__init__.py
Empty file.
Empty file added tests/llm/__init__.py
Empty file.
Loading