diff --git a/README.md b/README.md index 3fbe40f..e11e7da 100644 --- a/README.md +++ b/README.md @@ -484,6 +484,37 @@ docker run -p 3000:3000 my-mock-openai-api - `PORT` - Server port (default: 3000) - `HOST` - Server host (default: 0.0.0.0) - `VERBOSE` - Enable verbose logging (default: false) +- `MODEL_MAPPING_CONFIG` - Path to model mapping configuration file (default: model-mapping.json) + +### Model Mapping Configuration + +You can customize the model names displayed to users by creating a `model-mapping.json` file. This allows you to map internal model names to external names for better user experience. + +**Example model-mapping.json:** +```json +{ + "mock-gpt-thinking": "gpt-4o-mini", + "gpt-4-mock": "gpt-4-turbo", + "mock-gpt-markdown": "gpt-4o", + "gpt-4o-image": "dall-e-3", + "mock-claude-markdown": "claude-3-opus-20240229", + "gemini-1.5-pro": "gemini-2.0-pro-exp-2025-01-15", + "gemini-1.5-flash": "gemini-2.0-flash-exp-2025-01-15", + "gemini-pro": "gemini-pro-1.0", + "gemini-pro-vision": "gemini-pro-vision-1.0" +} +``` + +**CLI Usage:** +```bash +# Use custom model mapping configuration +npx mock-openai-api -c custom-mapping.json + +# Or set via environment variable +MODEL_MAPPING_CONFIG=custom-mapping.json npx mock-openai-api +``` + +The server will automatically load the configuration and display mapped model names in the console output and API responses. 
## 🧪 Testing diff --git a/README.zh.md b/README.zh.md index 6a3a4d3..0a715e8 100644 --- a/README.zh.md +++ b/README.zh.md @@ -326,6 +326,37 @@ docker run -p 3000:3000 mock-openai-api - `PORT` - 服务器端口(默认:3000) - `HOST` - 服务器主机(默认:0.0.0.0) +- `MODEL_MAPPING_CONFIG` - 模型映射配置文件路径(默认:model-mapping.json) + +### 模型映射配置 + +您可以通过创建 `model-mapping.json` 文件来自定义显示给用户的模型名称。这允许您将内部模型名称映射到外部名称,以提供更好的用户体验。 + +**示例 model-mapping.json:** +```json +{ + "mock-gpt-thinking": "gpt-4o-mini", + "gpt-4-mock": "gpt-4-turbo", + "mock-gpt-markdown": "gpt-4o", + "gpt-4o-image": "dall-e-3", + "mock-claude-markdown": "claude-3-opus-20240229", + "gemini-1.5-pro": "gemini-2.0-pro-exp-2025-01-15", + "gemini-1.5-flash": "gemini-2.0-flash-exp-2025-01-15", + "gemini-pro": "gemini-pro-1.0", + "gemini-pro-vision": "gemini-pro-vision-1.0" +} +``` + +**CLI 使用:** +```bash +# 使用自定义模型映射配置 +npx mock-openai-api -c custom-mapping.json + +# 或通过环境变量设置 +MODEL_MAPPING_CONFIG=custom-mapping.json npx mock-openai-api +``` + +服务器将自动加载配置并在控制台输出和 API 响应中显示映射后的模型名称。 ## 🧪 测试 diff --git a/custom-mapping.json b/custom-mapping.json new file mode 100644 index 0000000..0da4276 --- /dev/null +++ b/custom-mapping.json @@ -0,0 +1,11 @@ +{ + "mock-gpt-thinking": "custom-gpt-mini", + "gpt-4-mock": "custom-gpt-pro", + "mock-gpt-markdown": "custom-gpt-markdown", + "gpt-4o-image": "custom-dalle", + "mock-claude-markdown": "custom-claude-pro", + "gemini-1.5-pro": "custom-gemini-pro", + "gemini-1.5-flash": "custom-gemini-flash", + "gemini-pro": "custom-gemini-basic", + "gemini-pro-vision": "custom-gemini-vision" +} \ No newline at end of file diff --git a/model-mapping.json b/model-mapping.json new file mode 100644 index 0000000..e8e2c5c --- /dev/null +++ b/model-mapping.json @@ -0,0 +1,12 @@ +{ + "mock-gpt-thinking": "gpt-4o-mini", + "mock-gpt-thinking-tag": "gpt-4o-mini", + "gpt-4-mock": "gpt-4-turbo", + "mock-gpt-markdown": "gpt-4o", + "gpt-4o-image": "dall-e-3", + "mock-claude-markdown": "claude-3-opus-20240229", + "gemini-1.5-pro": 
"gemini-2.0-pro-exp-2025-01-15", + "gemini-1.5-flash": "gemini-2.0-flash-exp-2025-01-15", + "gemini-pro": "gemini-pro-1.0", + "gemini-pro-vision": "gemini-pro-vision-1.0" +} \ No newline at end of file diff --git a/src/cli.ts b/src/cli.ts index 60a1377..8801fe4 100644 --- a/src/cli.ts +++ b/src/cli.ts @@ -3,6 +3,7 @@ import { Command } from 'commander'; import app from './app'; import { version } from '../package.json' +import { loadModelMapping, getMappedModelName } from './config/modelMapping'; // 扩展全局对象类型 declare global { var verboseLogging: boolean; @@ -17,6 +18,7 @@ program .option('-p, --port ', 'Server port', '3000') .option('-H, --host
', 'Server host address', '0.0.0.0') .option('-v, --verbose', 'Enable request logging to console', false) + .option('-c, --config ', 'Path to model mapping config file', './model-mapping.json') .parse(); const options = program.opts(); @@ -27,6 +29,9 @@ const HOST = options.host || '0.0.0.0'; // 设置全局变量控制日志输出 global.verboseLogging = options.verbose; +// Load model mapping configuration +loadModelMapping(options.config); + app.listen(PORT, HOST, () => { console.log(`🚀 Mock OpenAI API server started successfully!`); console.log(`📍 Server address: http://${HOST}:${PORT}`); @@ -34,6 +39,7 @@ app.listen(PORT, HOST, () => { console.log(` • Port: ${PORT}`); console.log(` • Host: ${HOST}`); console.log(` • Verbose logging: ${options.verbose ? 'ENABLED' : 'DISABLED'}`); + console.log(` • Config file: ${options.config}`); console.log(` • Version: ${version}`); console.log(`📖 API Documentation:`); console.log(` • GET /health - Health check`); @@ -47,22 +53,22 @@ app.listen(PORT, HOST, () => { console.log(` • POST /v1beta/models/{model}:streamGenerateContent - Gemini streaming generation`); console.log(`\n✨ Available models:`); console.log(` OpenAI Compatible:`); - console.log(` - mock-gpt-thinking: Model supporting thought process`); - console.log(` - gpt-4-mock: Model supporting function calls`); - console.log(` - mock-gpt-markdown: Model outputting standard Markdown`); - console.log(` - gpt-4o-image: Model specifically for image generation`); + console.log(` - ${getMappedModelName('mock-gpt-thinking')}: Model supporting thought process`); + console.log(` - ${getMappedModelName('gpt-4-mock')}: Model supporting function calls`); + console.log(` - ${getMappedModelName('mock-gpt-markdown')}: Model outputting standard Markdown`); + console.log(` - ${getMappedModelName('gpt-4o-image')}: Model specifically for image generation`); console.log(` Anthropic Compatible:`); - console.log(` - mock-claude-markdown: Claude markdown sample model`); + console.log(` - 
${getMappedModelName('mock-claude-markdown')}: Claude markdown sample model`); console.log(` Gemini Compatible:`); - console.log(` - gemini-1.5-pro: Advanced multimodal AI model`); - console.log(` - gemini-1.5-flash: Fast and efficient model`); - console.log(` - gemini-pro: Versatile model for various tasks`); - console.log(` - gemini-pro-vision: Multimodal model for text and images`); + console.log(` - ${getMappedModelName('gemini-1.5-pro')}: Advanced multimodal AI model`); + console.log(` - ${getMappedModelName('gemini-1.5-flash')}: Fast and efficient model`); + console.log(` - ${getMappedModelName('gemini-pro')}: Versatile model for various tasks`); + console.log(` - ${getMappedModelName('gemini-pro-vision')}: Multimodal model for text and images`); console.log(`\n🔗 Usage example:`); console.log(` curl -X POST http://localhost:${PORT}/v1/chat/completions \\`); console.log(` -H "Content-Type: application/json" \\`); console.log(` -d '{`); - console.log(` "model": "gpt-4-mock",`); + console.log(` "model": "${getMappedModelName('gpt-4-mock')}",`); console.log(` "messages": [{"role": "user", "content": "Hello"}]`); console.log(` }'`); console.log(`\n💡 CLI Options:`); @@ -70,4 +76,5 @@ app.listen(PORT, HOST, () => { console.log(` • Use -v or --verbose to enable request logging`); console.log(` • Use -p to specify custom port`); console.log(` • Use -H to specify custom host address`); + console.log(` • Use -c to specify custom config file`); }); diff --git a/src/config/modelMapping.ts b/src/config/modelMapping.ts new file mode 100644 index 0000000..e5a478f --- /dev/null +++ b/src/config/modelMapping.ts @@ -0,0 +1,60 @@ +import fs from 'fs'; +import path from 'path'; + +interface ModelMappingConfig { + [originalModel: string]: string; +} + +let modelMapping: ModelMappingConfig = {}; +let configLoaded = false; + +const CONFIG_FILE_PATH = process.env.MODEL_MAPPING_CONFIG || path.join(process.cwd(), 'model-mapping.json'); + +export function loadModelMapping(configPath?: 
string): void { + if (configLoaded) { + return; + } + + const configFilePath = configPath || CONFIG_FILE_PATH; + + try { + if (fs.existsSync(configFilePath)) { + const configContent = fs.readFileSync(configFilePath, 'utf-8'); + const config = JSON.parse(configContent); + + if (typeof config === 'object' && config !== null) { + modelMapping = config; + console.log(`✅ Loaded model mapping configuration from ${configFilePath}`); + console.log(`📋 Model mappings: ${Object.keys(modelMapping).length} mappings configured`); + + if (Object.keys(modelMapping).length > 0) { + Object.entries(modelMapping).forEach(([original, mapped]) => { + console.log(` • ${original} → ${mapped}`); + }); + } + } + } else { + console.log(`ℹ️ No model mapping configuration found at ${configFilePath}`); + } + } catch (error) { + console.error(`❌ Failed to load model mapping configuration: ${error}`); + } + + configLoaded = true; +} + +export function getMappedModelName(originalModel: string): string { + return modelMapping[originalModel] || originalModel; +} + +export function getOriginalModelName(mappedModel: string): string | undefined { + return Object.keys(modelMapping).find(key => modelMapping[key] === mappedModel); +} + +export function getAllMappings(): ModelMappingConfig { + return { ...modelMapping }; +} + +export function hasMappings(): boolean { + return Object.keys(modelMapping).length > 0; +} \ No newline at end of file diff --git a/src/index.ts b/src/index.ts index 7dbc58b..e375d79 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,6 +1,7 @@ #!/usr/bin/env node import app from './app'; +import { loadModelMapping, getMappedModelName } from './config/modelMapping'; const PORT = process.env.PORT || 3000; const HOST = process.env.HOST || '0.0.0.0'; @@ -8,6 +9,9 @@ const HOST = process.env.HOST || '0.0.0.0'; // Enable verbose logging by default in development or when VERBOSE is set global.verboseLogging = process.env.NODE_ENV !== 'production' || process.env.VERBOSE === 'true'; +// Load 
model mapping configuration +loadModelMapping(); + app.listen(PORT, () => { console.log(`🚀 Mock OpenAI API server started successfully!`); console.log(`📍 Server address: http://${HOST}:${PORT}`); @@ -24,22 +28,22 @@ app.listen(PORT, () => { console.log(` • POST /v1beta/models/{model}:streamGenerateContent - Gemini streaming generation`); console.log(`\n✨ Available models:`); console.log(` OpenAI Compatible:`); - console.log(` - mock-gpt-thinking: Model supporting thought process`); - console.log(` - gpt-4-mock: Model supporting function calls with tool calls format`); - console.log(` - mock-gpt-markdown: Model outputting standard Markdown`); - console.log(` - gpt-4o-image: Model specifically for image generation`); + console.log(` - ${getMappedModelName('mock-gpt-thinking')}: Model supporting thought process`); + console.log(` - ${getMappedModelName('gpt-4-mock')}: Model supporting function calls with tool calls format`); + console.log(` - ${getMappedModelName('mock-gpt-markdown')}: Model outputting standard Markdown`); + console.log(` - ${getMappedModelName('gpt-4o-image')}: Model specifically for image generation`); console.log(` Anthropic Compatible:`); - console.log(` - mock-claude-markdown: Claude markdown sample model`); + console.log(` - ${getMappedModelName('mock-claude-markdown')}: Claude markdown sample model`); console.log(` Gemini Compatible:`); - console.log(` - gemini-1.5-pro: Advanced multimodal AI model`); - console.log(` - gemini-1.5-flash: Fast and efficient model`); - console.log(` - gemini-pro: Versatile model for various tasks`); - console.log(` - gemini-pro-vision: Multimodal model for text and images`); + console.log(` - ${getMappedModelName('gemini-1.5-pro')}: Advanced multimodal AI model`); + console.log(` - ${getMappedModelName('gemini-1.5-flash')}: Fast and efficient model`); + console.log(` - ${getMappedModelName('gemini-pro')}: Versatile model for various tasks`); + console.log(` - ${getMappedModelName('gemini-pro-vision')}: Multimodal 
model for text and images`); console.log(`\n🔗 Usage example:`); console.log(`  curl -X POST http://localhost:${PORT}/v1/chat/completions \\`); console.log(`  -H "Content-Type: application/json" \\`); console.log(`  -d '{`); - console.log(`      "model": "gpt-4-mock",`); + console.log(`      "model": "${getMappedModelName('gpt-4-mock')}",`); console.log(`      "messages": [{"role": "user", "content": "Hello"}]`); console.log(`    }'`); console.log(`\n💡 Use CLI for more options: npm run build && npx mock-openai-api --help`); diff --git a/src/services/openaiService.ts b/src/services/openaiService.ts index 6e0fd44..1a7a7c9 100644 --- a/src/services/openaiService.ts +++ b/src/services/openaiService.ts @@ -18,13 +18,14 @@ import { randomChoice, formatErrorResponse, } from "../utils/helpers"; +import { getMappedModelName } from "../config/modelMapping"; import { ImgData } from "../data/base64Img"; /** * Get model list */ export function getModels(): ModelsResponse { const models: Model[] = mockModels.map((mockModel) => ({ - id: mockModel.id, + id: getMappedModelName(mockModel.id), object: "model", created: getCurrentTimestamp(), owned_by: "mock-openai", diff --git a/src/utils/anthropicHelpers.ts b/src/utils/anthropicHelpers.ts index 5e0e437..c4ae037 100644 --- a/src/utils/anthropicHelpers.ts +++ b/src/utils/anthropicHelpers.ts @@ -1,6 +1,7 @@ import { MockModel } from "../types/index"; import { anthropicMockModels } from "../data/anthropicMockData"; import { ErrorResponse, StreamingEvent } from "../types/anthropic"; +import { getOriginalModelName } from "../config/modelMapping"; /** * Get current timestamp @@ -27,7 +28,8 @@ export function calculateTokens(text: string): number { * Find model by ID */ export function findModelById(modelId: string): MockModel | undefined { - return anthropicMockModels.find(model => model.id === modelId); + const originalModelId = getOriginalModelName(modelId) ?? modelId; + return anthropicMockModels.find(model => model.id === originalModelId); } /** diff --git 
a/src/utils/geminiHelpers.ts b/src/utils/geminiHelpers.ts index 225a27f..2d851bb 100644 --- a/src/utils/geminiHelpers.ts +++ b/src/utils/geminiHelpers.ts @@ -1,4 +1,5 @@ import { geminiMockModels } from '../data/geminiMockData'; +import { getOriginalModelName } from '../config/modelMapping'; /** * Get current timestamp @@ -29,7 +30,8 @@ export function generateModelName(): string { export function findGeminiModelById(modelId: string) { // Remove 'models/' prefix if present const cleanModelId = modelId.replace('models/', ''); - return geminiMockModels.find(model => model.id === cleanModelId); + const originalModelId = getOriginalModelName(cleanModelId) ?? cleanModelId; + return geminiMockModels.find(model => model.id === originalModelId); } /** diff --git a/src/utils/helpers.ts b/src/utils/helpers.ts index f82e6b6..578a974 100644 --- a/src/utils/helpers.ts +++ b/src/utils/helpers.ts @@ -1,5 +1,6 @@ import { MockModel, MockTestCase } from '../types'; import { mockModels } from '../data/mockData'; +import { getMappedModelName, getOriginalModelName } from '../config/modelMapping'; /** * Generate unique chat completion ID @@ -26,7 +27,22 @@ export function getCurrentTimestamp(): number { * Find model by ID */ export function findModelById(modelId: string): MockModel | undefined { - return mockModels.find(model => model.id === modelId); + // First check if it's a direct match with original model ID + let foundModel = mockModels.find(model => model.id === modelId); + + if (foundModel) { + return foundModel; + } + + // If not found, check if it's a mapped model name, get the original ID + const originalModelId = getOriginalModelName(modelId); + if (originalModelId) { + return mockModels.find(model => model.id === originalModelId); + } + + // Finally, try mapping the input and finding the model + const mappedModelId = getMappedModelName(modelId); + return mockModels.find(model => model.id === mappedModelId); } /**