Skip to content

Commit

Permalink
refactor(openai_dart): Rename ChatCompletionFunction to FunctionObject (
Browse files Browse the repository at this point in the history
#206)

The following classes have been renamed to follow OpenAI naming:
- `ChatCompletionFunctionParameters` -> `FunctionParameters`
- `ChatCompletionFunction` -> `FunctionObject`
  • Loading branch information
davidmigloz committed Nov 9, 2023
1 parent dad60d2 commit 0f06df3
Show file tree
Hide file tree
Showing 15 changed files with 531 additions and 400 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ Future<void> _chatOpenAIJsonMode() async {
final prompt = PromptValue.chat([
ChatMessage.system(
"Extract the 'name' and 'origin' of any companies mentioned in the "
'following statement. Return a JSON list.',
'following statement. Return a JSON list.',
),
ChatMessage.human(
'Google was founded in the USA, while Deepmind was founded in the UK',
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -120,15 +120,15 @@ extension _AIChatMessageFunctionCallMapper on AIChatMessageFunctionCall {
}

extension ChatFunctionListMapper on List<ChatFunction> {
List<ChatCompletionFunction> toChatCompletionFunctions() {
return map((final function) => function.toChatCompletionFunction())
List<FunctionObject> toFunctionObjects() {
return map((final function) => function.toFunctionObject())
.toList(growable: false);
}
}

extension _ChatFunctionMapper on ChatFunction {
ChatCompletionFunction toChatCompletionFunction() {
return ChatCompletionFunction(
FunctionObject toFunctionObject() {
return FunctionObject(
name: name,
description: description,
parameters: parameters ?? {},
Expand Down
4 changes: 2 additions & 2 deletions packages/langchain_openai/lib/src/chat_models/openai.dart
Original file line number Diff line number Diff line change
Expand Up @@ -242,7 +242,7 @@ class ChatOpenAI extends BaseChatModel<ChatOpenAIOptions> {
final ChatOpenAIOptions? options,
}) async {
final messagesDtos = messages.toChatCompletionMessages();
final functionsDtos = options?.functions?.toChatCompletionFunctions();
final functionsDtos = options?.functions?.toFunctionObjects();
final functionCall = options?.functionCall?.toChatCompletionFunctionCall();
final resFormat = responseFormat?.toChatCompletionResponseFormat();

Expand Down Expand Up @@ -276,7 +276,7 @@ class ChatOpenAI extends BaseChatModel<ChatOpenAIOptions> {
final ChatOpenAIOptions? options,
}) {
final messagesDtos = input.toChatMessages().toChatCompletionMessages();
final functionsDtos = options?.functions?.toChatCompletionFunctions();
final functionsDtos = options?.functions?.toFunctionObjects();
final functionCall = options?.functionCall?.toChatCompletionFunctionCall();

return _client
Expand Down
125 changes: 119 additions & 6 deletions packages/openai_dart/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@ Dart client for [OpenAI](https://platform.openai.com/docs/api-reference) API.

## Features

- Generated from the official OpenAI [OpenAPI specification](https://github.com/openai/openai-openapi) (low maintenance effort)
- Fully documented and tested
- Generated from the official OpenAI [OpenAPI specification](https://github.com/openai/openai-openapi)
- Fully type-safe, documented and tested
- Authentication with organization support
- Custom base URL and headers support (e.g. HTTP proxies)
- Custom HTTP client support (e.g. SOCKS5 proxies or advanced use cases)
Expand Down Expand Up @@ -105,10 +105,121 @@ await for (final res in stream) {
}
```

**Function calling:**
**JSON mode:**

```dart
const function = ChatCompletionFunction(
final res = await client.createChatCompletion(
request: CreateChatCompletionRequest(
model: ChatCompletionModel.enumeration(
ChatCompletionModels.gpt41106Preview,
),
messages: [
ChatCompletionMessage(
role: ChatCompletionMessageRole.system,
content:
'You are a helpful assistant. That extracts names from text '
'and returns them in a JSON array.',
),
ChatCompletionMessage(
role: ChatCompletionMessageRole.user,
content: 'John, Mary, and Peter.',
),
],
temperature: 0,
responseFormat: ChatCompletionResponseFormat(
type: ChatCompletionResponseFormatType.jsonObject,
),
),
);
// { "names": ["John", "Mary", "Peter"] }
```

**Tools:**

```dart
const function = FunctionObject(
name: 'get_current_weather',
description: 'Get the current weather in a given location',
parameters: {
'type': 'object',
'properties': {
'location': {
'type': 'string',
'description': 'The city and state, e.g. San Francisco, CA',
},
'unit': {
'type': 'string',
'description': 'The unit of temperature to return',
'enum': ['celsius', 'fahrenheit'],
},
},
'required': ['location'],
},
);
const tool = ChatCompletionTool(
type: ChatCompletionToolType.function,
function: function,
);
final res1 = await client.createChatCompletion(
request: CreateChatCompletionRequest(
model: const ChatCompletionModel.enumeration(
ChatCompletionModels.gpt35Turbo,
),
messages: [
ChatCompletionMessage(
role: ChatCompletionMessageRole.system,
content: 'You are a helpful assistant.',
),
ChatCompletionMessage(
role: ChatCompletionMessageRole.user,
content: 'What’s the weather like in Boston right now?',
),
],
tools: [tool],
toolChoice: ChatCompletionToolChoiceOption.chatCompletionNamedToolChoice(
ChatCompletionNamedToolChoice(
type: ChatCompletionNamedToolChoiceType.function,
function: ChatCompletionFunctionCallOption(name: function.name),
),
),
),
);
final toolCall = res1.choices.first.message.toolCalls!.first;
final functionCall = toolCall.function;
final arguments = json.decode(functionCall.arguments) as Map<String, dynamic>;
final functionResult = getCurrentWeather(arguments['location'], arguments['unit']);
final res2 = await client.createChatCompletion(
request: CreateChatCompletionRequest(
model: ChatCompletionModel.string('gpt-3.5-turbo'),
messages: [
ChatCompletionMessage(
role: ChatCompletionMessageRole.system,
content: 'You are a helpful assistant.',
),
ChatCompletionMessage(
role: ChatCompletionMessageRole.user,
content: 'What’s the weather like in Boston right now?',
),
ChatCompletionMessage(
role: ChatCompletionMessageRole.tool,
toolCallId: toolCall.id,
content: json.encode(functionResult),
),
],
tools: [tool],
),
);
final answer = res2.choices.first.message.content;
// The weather in Boston right now is sunny with a temperature of 22°C
```

**Function calling:** (deprecated in favor of tools)

```dart
const function = FunctionObject(
name: 'get_current_weather',
description: 'Get the current weather in a given location',
parameters: {
Expand Down Expand Up @@ -322,9 +433,11 @@ Given a prompt and/or an input image, the model will generate a new image.
```dart
final res = await client.createImage(
request: CreateImageRequest(
model: CreateImageRequestModel.enumeration(ImageModels.dallE3),
prompt: 'A cute baby sea otter',
n: 2,
size: ImageSize.v256x256,
quality: ImageQuality.hd,
size: ImageSize.v1024x1792,
style: ImageStyle.natural,
),
);
print(res.data.first.url);
Expand Down
4 changes: 4 additions & 0 deletions packages/openai_dart/example/openai_dart_example.dart
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,11 @@ Future<void> _fineTuning(final OpenAIClient client) async {
Future<void> _images(final OpenAIClient client) async {
final res = await client.createImage(
request: const CreateImageRequest(
model: CreateImageRequestModel.enumeration(ImageModels.dallE3),
prompt: 'A cute baby sea otter',
quality: ImageQuality.hd,
size: ImageSize.v1024x1792,
style: ImageStyle.natural,
),
);
print(res.data.first.url);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ class ChatCompletionTool with _$ChatCompletionTool {
required ChatCompletionToolType type,

/// A function that the model may call.
required ChatCompletionFunction function,
required FunctionObject function,
}) = _ChatCompletionTool;

/// Object construction from a JSON representation
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,11 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest {
@Default(0.0)
double? presencePenalty,

/// An object specifying the format that the model must output. Used to enable JSON mode.
/// An object specifying the format that the model must output.
///
/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
///
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in increased latency and appearance of a "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
@JsonKey(name: 'response_format', includeIfNull: false)
ChatCompletionResponseFormat? responseFormat,

Expand Down Expand Up @@ -107,7 +111,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest {
/// Deprecated in favor of `tools`.
///
/// A list of functions the model may generate JSON inputs for.
@JsonKey(includeIfNull: false) List<ChatCompletionFunction>? functions,
@JsonKey(includeIfNull: false) List<FunctionObject>? functions,
}) = _CreateChatCompletionRequest;

/// Object construction from a JSON representation
Expand Down Expand Up @@ -315,17 +319,17 @@ class _ChatCompletionModelConverter
// CLASS: ChatCompletionResponseFormat
// ==========================================

/// An object specifying the format that the model must output. Used to enable JSON mode.
/// An object specifying the format that the model must output.
///
/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
///
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in increased latency and appearance of a "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
@freezed
class ChatCompletionResponseFormat with _$ChatCompletionResponseFormat {
const ChatCompletionResponseFormat._();

/// Factory constructor for ChatCompletionResponseFormat
const factory ChatCompletionResponseFormat({
/// Setting to `json_object` enables JSON mode. This guarantees that the message the model generates is valid JSON.
///
/// Note that your system prompt must still instruct the model to produce JSON, and to help ensure you don't forget, the API will throw an error if the string `JSON` does not appear in your system message. Also note that the message content may be partial (i.e. cut off) if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
///
/// Must be one of `text` or `json_object`.
@Default(ChatCompletionResponseFormatType.text)
ChatCompletionResponseFormatType type,
Expand Down Expand Up @@ -579,10 +583,6 @@ class _ChatCompletionFunctionCallConverter
// ENUM: ChatCompletionResponseFormatType
// ==========================================

/// Setting to `json_object` enables JSON mode. This guarantees that the message the model generates is valid JSON.
///
/// Note that your system prompt must still instruct the model to produce JSON, and to help ensure you don't forget, the API will throw an error if the string `JSON` does not appear in your system message. Also note that the message content may be partial (i.e. cut off) if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
///
/// Must be one of `text` or `json_object`.
enum ChatCompletionResponseFormatType {
@JsonValue('text')
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,16 +5,16 @@
part of open_a_i_schema;

// ==========================================
// CLASS: ChatCompletionFunction
// CLASS: FunctionObject
// ==========================================

/// A function that the model may call.
@freezed
class ChatCompletionFunction with _$ChatCompletionFunction {
const ChatCompletionFunction._();
class FunctionObject with _$FunctionObject {
const FunctionObject._();

/// Factory constructor for ChatCompletionFunction
const factory ChatCompletionFunction({
/// Factory constructor for FunctionObject
const factory FunctionObject({
/// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
required String name,

Expand All @@ -24,12 +24,12 @@ class ChatCompletionFunction with _$ChatCompletionFunction {
  /// The parameters the function accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
///
/// To describe a function that accepts no parameters, provide the value `{"type": "object", "properties": {}}`.
required ChatCompletionFunctionParameters parameters,
}) = _ChatCompletionFunction;
required FunctionParameters parameters,
}) = _FunctionObject;

/// Object construction from a JSON representation
factory ChatCompletionFunction.fromJson(Map<String, dynamic> json) =>
_$ChatCompletionFunctionFromJson(json);
factory FunctionObject.fromJson(Map<String, dynamic> json) =>
_$FunctionObjectFromJson(json);

/// List of all property names of schema
static const List<String> propertyNames = [
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,10 @@
part of open_a_i_schema;

// ==========================================
// TYPE: ChatCompletionFunctionParameters
// TYPE: FunctionParameters
// ==========================================

/// The parameters the function accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
///
/// To describe a function that accepts no parameters, provide the value `{"type": "object", "properties": {}}`.
typedef ChatCompletionFunctionParameters = Map<String, dynamic>;
typedef FunctionParameters = Map<String, dynamic>;
4 changes: 2 additions & 2 deletions packages/openai_dart/lib/src/generated/schema/schema.dart
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,9 @@ part 'create_chat_completion_request.dart';
part 'chat_completion_message.dart';
part 'chat_completion_message_role.dart';
part 'chat_completion_message_function_call.dart';
part 'chat_completion_function.dart';
part 'chat_completion_function_parameters.dart';
part 'chat_completion_function_call_option.dart';
part 'function_object.dart';
part 'function_parameters.dart';
part 'chat_completion_tool.dart';
part 'chat_completion_named_tool_choice.dart';
part 'chat_completion_message_tool_calls.dart';
Expand Down

0 comments on commit 0f06df3

Please sign in to comment.