feat!: Update ChatOpenAI default model to gpt-4o-mini (#507)
davidmigloz committed Jul 26, 2024
1 parent b1134bf commit c7b8ce9
Showing 27 changed files with 57 additions and 159 deletions.
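Because of the `feat!` prefix this is a breaking change: code that previously relied on the implicit `gpt-3.5-turbo` default now sends requests to `gpt-4o-mini` unless a model is set explicitly. Below is a minimal sketch, built only from the `ChatOpenAI`/`ChatOpenAIOptions` API that appears in this diff, of how a caller could pin a model and opt out of the new default; reading the key from `Platform.environment` mirrors the test setup further down, and both variables are illustrative only.

```dart
import 'dart:io';

import 'package:langchain_openai/langchain_openai.dart';

void main() {
  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];

  // Relies on the library default, which this commit changes
  // from 'gpt-3.5-turbo' to 'gpt-4o-mini'.
  final chatModel = ChatOpenAI(apiKey: openaiApiKey);

  // Pins the model explicitly, so the new default has no effect here.
  final pinnedChatModel = ChatOpenAI(
    apiKey: openaiApiKey,
    defaultOptions: const ChatOpenAIOptions(
      model: 'gpt-3.5-turbo',
      temperature: 0,
    ),
  );
}
```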
6 changes: 3 additions & 3 deletions docs/expression_language/cookbook/prompt_llm_parser.md
@@ -33,7 +33,7 @@ print(res);
// },
// finishReason: FinishReason.stop,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714835666,
// system_fingerprint: fp_3b956da36b
// },
@@ -74,7 +74,7 @@ print(res);
// },
// finishReason: FinishReason.stop,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714835734,
// system_fingerprint: fp_a450710239
// },
@@ -144,7 +144,7 @@ print(res);
// },
// finishReason: FinishReason.stop,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714835806,
// system_fingerprint: fp_3b956da36b
// },
2 changes: 1 addition & 1 deletion docs/expression_language/get_started.md
@@ -120,7 +120,7 @@ print(res2);
// },
// finishReason: FinishReason.stop,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714327251,
// system_fingerprint: fp_3b956da36b
// },
2 changes: 1 addition & 1 deletion docs/expression_language/interface.md
@@ -107,7 +107,7 @@ final res = await chain.batch(
{'topic': 'cats'},
],
options: [
- const ChatOpenAIOptions(model: 'gpt-3.5-turbo', temperature: 0.5),
+ const ChatOpenAIOptions(model: 'gpt-4o-mini', temperature: 0.5),
const ChatOpenAIOptions(model: 'gpt-4', temperature: 0.7),
],
);
2 changes: 1 addition & 1 deletion docs/expression_language/primitives/binding.md
@@ -57,7 +57,7 @@ final chain = Runnable.fromMap({
chatModel.bind(ChatOpenAIOptions(model: 'gpt-4-turbo')) |
outputParser,
'q2': prompt2 |
- chatModel.bind(ChatOpenAIOptions(model: 'gpt-3.5-turbo')) |
+ chatModel.bind(ChatOpenAIOptions(model: 'gpt-4o-mini')) |
outputParser,
});
8 changes: 4 additions & 4 deletions docs/expression_language/primitives/function.md
@@ -76,7 +76,7 @@ await chain.invoke('x raised to the third plus seven equals 12');
// },
// finishReason: FinishReason.stop,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714463309,
// system_fingerprint: fp_3b956da36b
// },
@@ -122,7 +122,7 @@ chain.stream('x raised to the third plus seven equals 12').listen((_){});
// },
// finishReason: FinishReason.unspecified,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714463766,
// system_fingerprint: fp_3b956da36b
// },
@@ -141,7 +141,7 @@ chain.stream('x raised to the third plus seven equals 12').listen((_){});
// },
// finishReason: FinishReason.unspecified,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714463766,
// system_fingerprint: fp_3b956da36b
// },
@@ -160,7 +160,7 @@ chain.stream('x raised to the third plus seven equals 12').listen((_){});
// },
// finishReason: FinishReason.unspecified,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714463766,
// system_fingerprint: fp_3b956da36b
// },
4 changes: 2 additions & 2 deletions docs/expression_language/streaming.md
@@ -49,7 +49,7 @@ print(chunks.first);
// },
// finishReason: FinishReason.unspecified,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714143945,
// system_fingerprint: fp_3b956da36b
// },
@@ -71,7 +71,7 @@ print(result);
// },
// finishReason: FinishReason.unspecified,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714143945,
// system_fingerprint: fp_3b956da36b
// },
2 changes: 1 addition & 1 deletion docs/modules/agents/agent_types/agent_types.md
@@ -8,7 +8,7 @@ response to the user. Here are the agents available in LangChain.

### OpenAI Functions

- Certain OpenAI models (like gpt-3.5-turbo-0613 and gpt-4-0613) have been
+ Certain OpenAI models (like `gpt-3.5-turbo` and `gpt-4`) have been
explicitly fine-tuned to detect when a function should be called and respond
with the inputs that should be passed to the function. The OpenAI Functions
Agent is designed to work with these models.
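For reference, a tool-calling agent on top of one of these models might be wired up roughly as follows. This is only a sketch: the `OpenAIFunctionsAgent`, `AgentExecutor`, and `CalculatorTool` names are assumptions about the langchain.dart agent API rather than part of this diff, so check the agents documentation for the exact constructors.

```dart
import 'dart:io';

import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  final openaiApiKey = Platform.environment['OPENAI_API_KEY'];
  final llm = ChatOpenAI(
    apiKey: openaiApiKey,
    defaultOptions: const ChatOpenAIOptions(
      model: 'gpt-4o-mini', // any function-calling-capable model works here
      temperature: 0,
    ),
  );
  // Agent and tool class names below are assumed, not taken from this commit.
  final agent = OpenAIFunctionsAgent.fromLLMAndTools(
    llm: llm,
    tools: [CalculatorTool()],
  );
  final executor = AgentExecutor(agent: agent);
  final res = await executor.run('What is 12.3 multiplied by 14.9?');
  print(res);
}
```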
2 changes: 1 addition & 1 deletion docs/modules/model_io/models/chat_models/chat_models.md
@@ -93,5 +93,5 @@ print(chatRes1.generations);
print(chatRes1.usage?.totalTokens);
// -> 36
print(chatRes1.modelOutput);
- // -> {id: chatcmpl-7QHTjpTCELFuGbxRaazFqvYtepXOc, created: 2023-06-11 17:41:11.000, model: gpt-3.5-turbo}
+ // -> {id: chatcmpl-7QHTjpTCELFuGbxRaazFqvYtepXOc, created: 2023-06-11 17:41:11.000, model: gpt-4o-mini}
```
@@ -32,7 +32,7 @@ Future<void> _promptTemplateLLM() async {
// },
// finishReason: FinishReason.stop,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714835666,
// system_fingerprint: fp_3b956da36b
// },
@@ -65,7 +65,7 @@ Future<void> _attachingStopSequences() async {
// },
// finishReason: FinishReason.stop,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714835734,
// system_fingerprint: fp_a450710239
// },
@@ -133,7 +133,7 @@ Future<void> _attachingToolCallInformation() async {
// },
// finishReason: FinishReason.stop,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714835806,
// system_fingerprint: fp_3b956da36b
// },
@@ -33,7 +33,7 @@ Future<void> _languageModels() async {
// },
// finishReason: FinishReason.unspecified,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714143945,
// system_fingerprint: fp_3b956da36b
// },
@@ -49,7 +49,7 @@ Future<void> _languageModels() async {
// },
// finishReason: FinishReason.unspecified,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714143945,
// system_fingerprint: fp_3b956da36b
// },
@@ -82,7 +82,7 @@ Future<void> _promptModelOutputParser() async {
// },
// finishReason: FinishReason.stop,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714327251,
// system_fingerprint: fp_3b956da36b
// },
@@ -96,7 +96,7 @@ Future<void> _runnableInterfaceBatchOptions() async {
{'topic': 'cats'},
],
options: [
- const ChatOpenAIOptions(model: 'gpt-3.5-turbo', temperature: 0.5),
+ const ChatOpenAIOptions(model: 'gpt-4o-mini', temperature: 0.5),
const ChatOpenAIOptions(model: 'gpt-4', temperature: 0.7),
],
);
@@ -63,7 +63,7 @@ Future<void> _differentModels() async {
chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4-turbo')) |
outputParser,
'q2': prompt2 |
- chatModel.bind(const ChatOpenAIOptions(model: 'gpt-3.5-turbo')) |
+ chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4o-mini')) |
outputParser,
});
final res = await chain.invoke({'name': 'David'});
@@ -73,7 +73,7 @@ Future<void> _function() async {
// },
// finishReason: FinishReason.stop,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714463309,
// system_fingerprint: fp_3b956da36b
// },
@@ -116,7 +116,7 @@ Future<void> _function() async {
// },
// finishReason: FinishReason.unspecified,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714463766,
// system_fingerprint: fp_3b956da36b
// },
@@ -135,7 +135,7 @@ Future<void> _function() async {
// },
// finishReason: FinishReason.unspecified,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714463766,
// system_fingerprint: fp_3b956da36b
// },
@@ -154,7 +154,7 @@ Future<void> _function() async {
// },
// finishReason: FinishReason.unspecified,
// metadata: {
- // model: gpt-3.5-turbo-0125,
+ // model: gpt-4o-mini,
// created: 1714463766,
// system_fingerprint: fp_3b956da36b
// },
@@ -12,7 +12,7 @@ import 'qa_with_structure.dart';
/// ```dart
/// final llm = ChatOpenAI(
/// apiKey: openaiApiKey,
- /// model: 'gpt-3.5-turbo-0613',
+ /// model: 'gpt-4o-mini',
/// temperature: 0,
/// );
/// final qaChain = OpenAIQAWithSourcesChain(llm: llm);
@@ -76,7 +76,7 @@ import 'types.dart';
/// final prompt2 = PromptTemplate.fromTemplate('How old are you {name}?');
/// final chain = Runnable.fromMap({
/// 'q1': prompt1 | chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4')) | outputParser,
- /// 'q2': prompt2| chatModel.bind(const ChatOpenAIOptions(model: 'gpt-3.5-turbo')) | outputParser,
+ /// 'q2': prompt2| chatModel.bind(const ChatOpenAIOptions(model: 'gpt-4o-mini')) | outputParser,
/// });
/// final res = await chain.invoke({'name': 'David'});
/// ```
@@ -239,7 +239,7 @@ class ChatOpenAI extends BaseChatModel<ChatOpenAIOptions> {
String get modelType => 'openai-chat';

/// The default model to use unless another is specified.
- static const defaultModel = 'gpt-3.5-turbo';
+ static const defaultModel = 'gpt-4o-mini';

@override
Future<ChatResult> invoke(
@@ -348,7 +348,6 @@ class ChatOpenAI extends BaseChatModel<ChatOpenAIOptions> {
final int tokensPerName;

switch (model) {
- case 'gpt-3.5-turbo-0613':
case 'gpt-3.5-turbo-16k-0613':
case 'gpt-4-0314':
case 'gpt-4-32k-0314':
// If there's a name, the role is omitted
tokensPerName = -1;
default:
- if (model.startsWith('gpt-3.5-turbo') || model.startsWith('gpt-4')) {
- // Returning num tokens assuming gpt-3.5-turbo-0613
+ if (model.startsWith('gpt-4o-mini') || model.startsWith('gpt-4')) {
+ // Returning num tokens assuming gpt-4
tokensPerMessage = 3;
tokensPerName = 1;
} else {
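For context on the hunk above: the per-message and per-name constants feed the usual OpenAI token-counting recipe, in which every message adds a fixed overhead on top of its encoded content and the reply is primed with a few extra tokens. The sketch below mirrors that calculation; the `countContentTokens` callback and the trailing `+ 3` priming constant are assumptions taken from OpenAI's published guidance rather than from this diff.

```dart
/// Rough estimate of prompt tokens for a chat request, mirroring the
/// per-message / per-name constants handled in the switch above.
int estimatePromptTokens(
  List<({String role, String content, String? name})> messages, {
  required int Function(String text) countContentTokens, // e.g. a tiktoken encoder
  int tokensPerMessage = 3, // value used for the gpt-4o-mini / gpt-4 family
  int tokensPerName = 1,
}) {
  var total = 0;
  for (final message in messages) {
    total += tokensPerMessage;
    total += countContentTokens(message.role);
    total += countContentTokens(message.content);
    final name = message.name;
    if (name != null) {
      total += tokensPerName + countContentTokens(name);
    }
  }
  // Every reply is primed with an assistant header (assumed constant from
  // OpenAI's token-counting guidance).
  return total + 3;
}
```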
@@ -125,7 +125,7 @@ Question: {question}
final chatModel = ChatOpenAI(
apiKey: openaiApiKey,
defaultOptions: const ChatOpenAIOptions(
- model: 'gpt-3.5-turbo',
+ model: 'gpt-4o-mini',
temperature: 0,
),
);
@@ -14,7 +14,7 @@ import 'package:test/test.dart';
void main() {
group('ChatOpenAI tests', () {
final openaiApiKey = Platform.environment['OPENAI_API_KEY'];
- const defaultModel = 'gpt-3.5-turbo';
+ const defaultModel = 'gpt-4o-mini';

test('Test ChatOpenAI parameters', () async {
final chat = ChatOpenAI(
@@ -208,7 +208,6 @@ void main() {

test('Test countTokens messages', () async {
final models = [
- 'gpt-3.5-turbo-0613',
'gpt-3.5-turbo-16k-0613',
'gpt-4-0314',
'gpt-4-0613',
@@ -27,7 +27,7 @@ void main() {

test('Test invoke OpenRouter API with different models', () async {
final models = [
- 'gpt-3.5-turbo',
+ 'gpt-4o-mini',
'gpt-4',
'google/gemini-pro',
'anthropic/claude-2',
@@ -57,7 +57,7 @@ void main() {

test('Test stream OpenRouter API with different models', () async {
final models = [
- 'gpt-3.5-turbo',
+ 'gpt-4o-mini',
'gpt-4',
// 'google/gemini-pro', // Not supported
'anthropic/claude-2',
@@ -88,7 +88,7 @@ void main() {

test('Test countTokens', () async {
final models = [
- 'gpt-3.5-turbo',
+ 'gpt-4o-mini',
'gpt-4',
'google/gemini-pro',
'anthropic/claude-2',
8 changes: 4 additions & 4 deletions packages/openai_dart/README.md
@@ -257,7 +257,7 @@ const tool = ChatCompletionTool(
final res1 = await client.createChatCompletion(
request: CreateChatCompletionRequest(
model: const ChatCompletionModel.model(
- ChatCompletionModels.gpt35Turbo,
+ ChatCompletionModels.gpt4oMini,
),
messages: [
ChatCompletionMessage.system(
@@ -333,7 +333,7 @@ const function = FunctionObject(
final res1 = await client.createChatCompletion(
request: CreateChatCompletionRequest(
- model: ChatCompletionModel.modelId('gpt-3.5-turbo'),
+ model: ChatCompletionModel.modelId('gpt-4o-mini'),
messages: [
ChatCompletionMessage.system(
content: 'You are a helpful assistant.',
@@ -355,7 +355,7 @@ final functionResult = getCurrentWeather(arguments['location'], arguments['unit']);
final res2 = await client.createChatCompletion(
request: CreateChatCompletionRequest(
- model: ChatCompletionModel.modelId('gpt-3.5-turbo'),
+ model: ChatCompletionModel.modelId('gpt-4o-mini'),
messages: [
ChatCompletionMessage.system(
content: 'You are a helpful assistant.',
@@ -480,7 +480,7 @@ Related guide: [Fine-tune models](https://platform.openai.com/docs/guides/fine-tuning)

```dart
const request = CreateFineTuningJobRequest(
- model: FineTuningModel.modelId('gpt-3.5-turbo'),
+ model: FineTuningModel.modelId('gpt-4o-mini'),
trainingFile: 'file-abc123',
validationFile: 'file-abc123',
hyperparameters: FineTuningJobHyperparameters(
@@ -122,7 +122,6 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest {
/// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
/// during tool use.
@JsonKey(name: 'parallel_tool_calls', includeIfNull: false)
- @Default(true)
bool? parallelToolCalls,

/// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
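With the client-side `@Default(true)` removed, `parallel_tool_calls` is omitted from the request unless the caller sets it, so the OpenAI server applies its own default. A caller who wants the flag sent explicitly can still do so; the sketch below reuses only constructors that appear elsewhere in this diff, with placeholder message content.

```dart
import 'package:openai_dart/openai_dart.dart';

final request = CreateChatCompletionRequest(
  model: ChatCompletionModel.modelId('gpt-4o-mini'),
  messages: [
    ChatCompletionMessage.system(
      content: 'You are a helpful assistant.',
    ),
  ],
  // Sent only because it is set explicitly; otherwise the field is now
  // left out of the JSON payload entirely.
  parallelToolCalls: true,
);
```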
@@ -72,7 +72,6 @@ class CreateRunRequest with _$CreateRunRequest {
/// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
/// during tool use.
@JsonKey(name: 'parallel_tool_calls', includeIfNull: false)
- @Default(true)
bool? parallelToolCalls,

/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
@@ -71,7 +71,6 @@ class CreateThreadAndRunRequest with _$CreateThreadAndRunRequest {
/// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
/// during tool use.
@JsonKey(name: 'parallel_tool_calls', includeIfNull: false)
- @Default(true)
bool? parallelToolCalls,

/// Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.