Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions common/consts/OpenAIConst.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ export type OpenAIMessageRole = typeof OPENAI_MESSAGE_ROLES[keyof typeof OPENAI_
/**
 * Supported OpenAI model identifiers, keyed by a stable constant name.
 * Values are the literal model strings sent to the OpenAI API.
 */
export const OPENAI_MODEL = {
GPT_4_1_MINI: 'gpt-4.1-mini',
GPT_4_1: 'gpt-4.1',
GPT_5: 'gpt-5',
} as const;

/** Union of the model-string literals above (e.g. 'gpt-4.1' | 'gpt-5'). */
export type OpenAIModel = typeof OPENAI_MODEL[keyof typeof OPENAI_MODEL];
39 changes: 30 additions & 9 deletions common/services/OpenAIService.ts
Original file line number Diff line number Diff line change
Expand Up @@ -31,15 +31,8 @@ export default class OpenAIService implements OpenAIServiceType {
}

try {
const completion = await this.openaiClient.chat.completions.create({
model: options?.model || OPENAI_MODEL.GPT_4_1,
messages: messages.map(msg => ({
role: msg.role,
content: msg.content,
})),
max_completion_tokens: options?.maxTokens,
temperature: options?.temperature,
});
const completionParams = this.prepareCompletionParams(messages, options);
const completion = await this.openaiClient.chat.completions.create(completionParams);

const response = completion.choices?.[0]?.message?.content;

Expand All @@ -65,6 +58,34 @@ export default class OpenAIService implements OpenAIServiceType {
}
}

/**
* Prepares completion parameters with model-specific adjustments.
* @private
*/
/**
 * Prepares chat-completion parameters with model-specific adjustments.
 *
 * GPT-5 is a reasoning model and only supports the default temperature;
 * the OpenAI API rejects requests that set any other value. For GPT-5 we
 * therefore drop the caller-supplied `temperature` (the SDK omits
 * `undefined` fields from the request body). All other models receive the
 * caller's options unchanged.
 *
 * @param messages Conversation history forwarded to the API.
 * @param options  Optional overrides: model (default GPT_4_1), maxTokens
 *                 (mapped to max_completion_tokens), temperature.
 * @returns Parameter object for `chat.completions.create`.
 * @private
 */
private prepareCompletionParams(messages: OpenAIChatHistory, options?: OpenAIChatOptions) {
  const model = options?.model || OPENAI_MODEL.GPT_4_1;

  const baseParams = {
    model,
    messages: messages.map(msg => ({
      role: msg.role,
      content: msg.content,
    })),
    max_completion_tokens: options?.maxTokens,
    temperature: options?.temperature,
  };

  if (model === OPENAI_MODEL.GPT_5) {
    // GPT-5 rejects non-default temperature values with a 400 error;
    // strip the field so a caller-supplied temperature cannot break the call.
    return { ...baseParams, temperature: undefined };
  }

  return baseParams;
}

/**
* Creates a new conversation with a system message.
* @param systemPrompt The system message to set the AI's behavior
Expand Down
18 changes: 17 additions & 1 deletion docs/OpenAIService.md
Original file line number Diff line number Diff line change
Expand Up @@ -78,12 +78,28 @@ conversation = openaiService.addAssistantResponse(conversation, newResponse);

```typescript
const response = await openaiService.chat(messages, {
model: 'gpt-4', // 使用するモデル(デフォルト: 'gpt-3.5-turbo')
model: 'gpt-5', // 使用するモデル(デフォルト: 'gpt-4.1')
maxTokens: 1000, // 最大トークン数
temperature: 0.7 // 応答のランダム性(0-2)
});
```

利用可能なモデル:
- `OPENAI_MODEL.GPT_4_1_MINI`: 'gpt-4.1-mini'
- `OPENAI_MODEL.GPT_4_1`: 'gpt-4.1'
- `OPENAI_MODEL.GPT_5`: 'gpt-5'

```typescript
import { OPENAI_MODEL } from '@common/consts/OpenAIConst';

// GPT-5を使用する例
// 注意: GPT-5はtemperatureのデフォルト値のみサポートするため、指定しません
const response = await openaiService.chat(messages, {
model: OPENAI_MODEL.GPT_5,
maxTokens: 1000
});
```

## 型定義

### OpenAIMessageType
Expand Down
27 changes: 27 additions & 0 deletions tests/OpenAIService.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,16 @@ describe('OpenAIService', () => {
expect(response).toBe('This is a mock response from OpenAI. (using gpt-4.1)');
});

it('should support GPT-5 model', async () => {
  // Single-turn user message; the mock echoes back the model it was given.
  const history: OpenAIChatHistory = [
    { role: OPENAI_MESSAGE_ROLES.USER, content: 'Random question' },
  ];

  const result = await mockService.chat(history, { model: OPENAI_MODEL.GPT_5 });

  expect(result).toBe('This is a mock response from OpenAI. (using gpt-5)');
});

it('should allow custom default response', async () => {
mockService.setDefaultResponse('Custom mock response');

Expand Down Expand Up @@ -276,5 +286,22 @@ describe('OpenAIService', () => {
expect(typeof response).toBe('string');
expect(response.length).toBeGreaterThan(0);
}, 30000);

it('should handle GPT-5 model with real API', async () => {
  const messages: OpenAIChatHistory = [
    { role: OPENAI_MESSAGE_ROLES.USER, content: 'Say "test" and nothing else.' },
  ];

  // GPT-5 only supports the default temperature (the API returns a 400 for
  // any other value), so no temperature override is passed. The token budget
  // is generous because GPT-5's hidden reasoning tokens count against
  // max_completion_tokens; a tiny budget (e.g. 10) can leave the visible
  // reply empty and fail the length assertion below.
  const response = await service.chat(messages, {
    model: OPENAI_MODEL.GPT_5,
    maxTokens: 256,
  });

  console.log('OpenAI API GPT-5 response:', response);

  expect(typeof response).toBe('string');
  expect(response.length).toBeGreaterThan(0);
}, 30000);
});
});