Revamp OpenAI Client (#697)

* Working model gen, not correct package

* Ktor API done

* Clean-up, and add mapping from java.net.URI to String

* Generate OpenAPI, Operation interfaces, and Remove Api Suffix

* Clean-up generation

* Remove redundant fixes

* Add support for streaming

* Remove redundant new lines in data class, fix JsName edge-case

* Build similar smart constructor

* Put back project

* Clean-up

* A few changes to make progress on compilation

* Xef builds with new generated client

* Fixes from main

* Fixed assistant BETA headers

* Update enum

* Update enum as discussed with @raulraja

* Value class serialization for running evaluator/TestExample

* Add debugging tasks and README.md

* Small fixes, WIP deserialize enum

* Fix paths

* Apply spotless formatting

* Fix vision examples

* Update oneOf KSerializer

* Apply spotless formatting

* Add beta headers to assistant calls

* Fix (query) parameter generation in api.mustache

* Update oneOf names to CaseInnerType

* Automatically add stream flag to JSON

* Cleanup

* Move generated source to build folder

* Report all errors on oneOf

* Make sure the client is generated when the module is required as a dependency

* Attempt to fix build in CI for new OpenAI client (#709)

* Attempt to fix build in CI

* Attempt to fix build in CI

* Attempt to fix build in CI

* Attempt to fix build in CI

* Attempt to fix build in CI

* Attempt to fix build in CI

* Attempt to fix build in CI

* Attempt to fix build/release in CI

---------

Co-authored-by: raulraja <raulraja@gmail.com>
nomisRev and raulraja committed Apr 1, 2024
1 parent e8a62e9 commit dc55080
Showing 354 changed files with 10,965 additions and 14,867 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/create-version-tag.yml
@@ -23,7 +23,7 @@ jobs:
      - name: Build
        uses: gradle/gradle-build-action@v3
        with:
-          arguments: build
+          arguments: :xef-openai-client-generator:openaiClientGenerate build

      - name: Read version
        id: read-version
2 changes: 1 addition & 1 deletion .github/workflows/publish-development-version.yml
@@ -34,7 +34,7 @@ jobs:
      - name: Assemble
        uses: gradle/gradle-build-action@v3
        with:
-          arguments: assemble
+          arguments: :xef-openai-client-generator:openaiClientGenerate assemble

      - name: Upload reports
        if: failure()
2 changes: 1 addition & 1 deletion .github/workflows/publish.yml
@@ -32,7 +32,7 @@ jobs:
      - name: Assemble
        uses: gradle/gradle-build-action@v3
        with:
-          arguments: assemble
+          arguments: :xef-openai-client-generator:openaiClientGenerate assemble

      - name: Upload reports
        if: failure()
4 changes: 3 additions & 1 deletion .gitignore
@@ -148,4 +148,6 @@ kotlin-js-store/

.env

-*.bin
+*.bin
+model.log
+operations.log
6 changes: 6 additions & 0 deletions build.gradle.kts
@@ -1,5 +1,8 @@
@file:Suppress("DSL_SCOPE_VIOLATION")

+import org.jetbrains.kotlin.gradle.tasks.KotlinCompile
+
+
plugins {
  base
  alias(libs.plugins.kotlin.multiplatform) apply false
@@ -25,10 +28,12 @@ fun Project.configureBuildAndTestTask(taskName: String, moduleType: ModulePlatfo
    doLast {
      when (moduleType) {
        ModulePlatformType.SINGLE -> {
+         project.exec { commandLine(gradleCommand, ":xef-openai-client-generator:openaiClientGenerate") }
          val excludedModules = includeOrNotModulesToCommand(multiPlatformModules, platform, false)
          project.exec { commandLine(gradleCommand, "build", *excludedModules) }
        }
        ModulePlatformType.MULTI -> {
+         project.exec { commandLine(gradleCommand, ":xef-openai-client-generator:openaiClientGenerate") }
          val includedModules = includeOrNotModulesToCommand(multiPlatformModules, platform, true)
          project.exec { commandLine(gradleCommand, *includedModules) }
        }
@@ -59,3 +64,4 @@ fun getGradleCommand(platform: String): String {

configureBuildAndTestTask("buildAndTestMultip", ModulePlatformType.MULTI)
configureBuildAndTestTask("buildAndTestSinglep", ModulePlatformType.SINGLE)
+
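Note: the aggregate tasks above shell out to run the generator task before each build, mirroring the CI workflow changes earlier in this commit. A hypothetical alternative, consistent with the KotlinCompile import this commit adds, would declare the dependency inside a consuming module's build script so that a bare gradle build also regenerates the client. This is a sketch under that assumption, not what the commit actually does:

import org.jetbrains.kotlin.gradle.tasks.KotlinCompile

// Sketch only: the task path comes from this diff; the wiring itself is an assumption.
tasks.withType<KotlinCompile>().configureEach {
  dependsOn(":xef-openai-client-generator:openaiClientGenerate")
}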
2 changes: 2 additions & 0 deletions core/build.gradle.kts
@@ -76,6 +76,8 @@ kotlin {
        implementation(libs.bundles.ktor.client)
        implementation(libs.klogging)
        implementation(libs.uuid)
+       implementation(libs.ktor.client.logging)
+       implementation(libs.klogging)
      }
    }
    val commonTest by getting {
50 changes: 26 additions & 24 deletions core/src/commonMain/kotlin/com/xebia/functional/xef/AI.kt
@@ -1,13 +1,10 @@
package com.xebia.functional.xef

-import ai.xef.openai.CustomModel
-import ai.xef.openai.OpenAIModel
-import com.xebia.functional.openai.apis.ChatApi
-import com.xebia.functional.openai.apis.ImagesApi
-import com.xebia.functional.openai.models.CreateChatCompletionRequestModel
+import com.xebia.functional.openai.generated.api.Chat
+import com.xebia.functional.openai.generated.api.Images
+import com.xebia.functional.openai.generated.model.CreateChatCompletionRequestModel
import com.xebia.functional.xef.conversation.AiDsl
import com.xebia.functional.xef.conversation.Conversation
-import com.xebia.functional.xef.llm.fromEnvironment
import com.xebia.functional.xef.prompt.Prompt
import kotlin.coroutines.cancellation.CancellationException
import kotlin.reflect.KClass
import kotlin.reflect.KClass
@@ -29,14 +26,14 @@ sealed interface AI {

  fun <A : Any> chat(
    target: KType,
-   model: OpenAIModel<CreateChatCompletionRequestModel>,
-   api: ChatApi,
+   model: CreateChatCompletionRequestModel,
+   api: Chat,
    conversation: Conversation,
    enumSerializer: ((case: String) -> A)?,
    caseSerializers: List<KSerializer<A>>,
    serializer: () -> KSerializer<A>,
- ): Chat<A> =
-   Chat(
+ ): DefaultAI<A> =
+   DefaultAI(
      target = target,
      model = model,
      api = api,
@@ -47,15 +44,15 @@ sealed interface AI {
    )

  fun images(
-   api: ImagesApi = fromEnvironment(::ImagesApi),
-   chatApi: ChatApi = fromEnvironment(::ChatApi)
- ): Images = Images(api, chatApi)
+   config: Config = Config(),
+ ): Images = OpenAI(config).images

  @PublishedApi
  internal suspend inline fun <reified A : Any> invokeEnum(
-   prompt: Prompt<CreateChatCompletionRequestModel>,
+   prompt: Prompt,
    target: KType = typeOf<A>(),
-   api: ChatApi = fromEnvironment(::ChatApi),
+   config: Config = Config(),
+   api: Chat = OpenAI(config).chat,
    conversation: Conversation = Conversation()
  ): A =
    chat(
@@ -91,14 +88,16 @@ sealed interface AI {
    context: String,
    model: CreateChatCompletionRequestModel = CreateChatCompletionRequestModel.gpt_4_1106_preview,
    target: KType = typeOf<E>(),
-   api: ChatApi = fromEnvironment(::ChatApi),
+   config: Config = Config(),
+   api: Chat = OpenAI(config).chat,
    conversation: Conversation = Conversation()
  ): E where E : PromptClassifier, E : Enum<E> {
    val value = enumValues<E>().firstOrNull() ?: error("No enum values found")
    return invoke(
      prompt = value.template(input, output, context),
      model = model,
      target = target,
+     config = config,
      api = api,
      conversation = conversation
    )
@@ -109,31 +108,34 @@ sealed interface AI {
    prompt: String,
    target: KType = typeOf<A>(),
    model: CreateChatCompletionRequestModel = CreateChatCompletionRequestModel.gpt_4_1106_preview,
-   api: ChatApi = fromEnvironment(::ChatApi),
+   config: Config = Config(),
+   api: Chat = OpenAI(config).chat,
    conversation: Conversation = Conversation()
- ): A = chat(Prompt(CustomModel(model.value), prompt), target, api, conversation)
+ ): A = chat(Prompt(model, prompt), target, config, api, conversation)

  @AiDsl
  suspend inline operator fun <reified A : Any> invoke(
-   prompt: Prompt<CreateChatCompletionRequestModel>,
+   prompt: Prompt,
    target: KType = typeOf<A>(),
-   api: ChatApi = fromEnvironment(::ChatApi),
+   config: Config = Config(),
+   api: Chat = OpenAI(config).chat,
    conversation: Conversation = Conversation()
- ): A = chat(prompt, target, api, conversation)
+ ): A = chat(prompt, target, config, api, conversation)

  @OptIn(InternalSerializationApi::class, ExperimentalSerializationApi::class)
  @AiDsl
  suspend inline fun <reified A : Any> chat(
-   prompt: Prompt<CreateChatCompletionRequestModel>,
+   prompt: Prompt,
    target: KType = typeOf<A>(),
-   api: ChatApi = fromEnvironment(::ChatApi),
+   config: Config = Config(),
+   api: Chat = OpenAI(config).chat,
    conversation: Conversation = Conversation()
  ): A {
    val kind =
      (target.classifier as? KClass<*>)?.serializer()?.descriptor?.kind
        ?: error("Cannot find SerialKind for $target")
    return when (kind) {
-     SerialKind.ENUM -> invokeEnum<A>(prompt, target, api, conversation)
+     SerialKind.ENUM -> invokeEnum<A>(prompt, target, config, api, conversation)
      else -> {
        chat(
          target = target,
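Taken together, these signature changes replace the old ChatApi/fromEnvironment plumbing with a single Config value and the generated Chat handle. A minimal usage sketch, assuming the entry points above are exposed so they can be called as AI(...) (as the call-site syntax in this diff suggests) and that OPENAI_TOKEN is set in the environment; Movie and demo are hypothetical names:

import com.xebia.functional.xef.AI
import kotlinx.serialization.Serializable

@Serializable data class Movie(val title: String, val director: String, val year: Int)

suspend fun demo() {
  // The reified type parameter drives serializer selection inside chat(...);
  // enum targets are routed through invokeEnum instead.
  val movie: Movie = AI("Recommend one classic sci-fi movie")
  println(movie)
}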
74 changes: 0 additions & 74 deletions core/src/commonMain/kotlin/com/xebia/functional/xef/Audio.kt

This file was deleted.

83 changes: 83 additions & 0 deletions core/src/commonMain/kotlin/com/xebia/functional/xef/Config.kt
@@ -0,0 +1,83 @@
package com.xebia.functional.xef

import arrow.core.nonEmptyListOf
import com.xebia.functional.openai.Config as OpenAIConfig
import com.xebia.functional.openai.generated.api.OpenAI
import com.xebia.functional.xef.env.getenv
import io.ktor.client.*
import io.ktor.client.engine.*
import io.ktor.client.plugins.*
import io.ktor.client.plugins.contentnegotiation.*
import io.ktor.client.plugins.logging.*
import io.ktor.client.request.*
import io.ktor.http.*
import io.ktor.serialization.kotlinx.json.*
import kotlinx.serialization.json.Json

data class Config(
  val baseUrl: String = getenv(HOST_ENV_VAR) ?: "https://api.openai.com/v1/",
  val token: String? = null,
  val org: String? = getenv(ORG_ENV_VAR),
  val json: Json = Json {
    ignoreUnknownKeys = true
    prettyPrint = true
    isLenient = true
    explicitNulls = false
    useArrayPolymorphism = true
  },
  val streamingPrefix: String = "data:",
  val streamingDelimiter: String = "data: [DONE]"
)

private const val ORG_ENV_VAR = "OPENAI_ORG"
private const val HOST_ENV_VAR = "OPENAI_HOST"
private const val KEY_ENV_VAR = "OPENAI_TOKEN"

/**
 * Constructor that mimics the behavior of "ApiClient", but without the additional layer in
 * between. Just a simple fun on top of the generated API.
 */
fun OpenAI(
  config: Config = Config(),
  httpClientEngine: HttpClientEngine? = null,
  httpClientConfig: ((HttpClientConfig<*>) -> Unit)? = null,
  logRequests: Boolean = false
): OpenAI {
  val token =
    config.token
      ?: getenv(KEY_ENV_VAR)
      ?: throw AIError.Env.OpenAI(nonEmptyListOf("missing $KEY_ENV_VAR env var"))
  val clientConfig: HttpClientConfig<*>.() -> Unit = {
    install(ContentNegotiation) { json(config.json) }
    install(HttpTimeout) {
      requestTimeoutMillis = 45 * 1000
      connectTimeoutMillis = 45 * 1000
      socketTimeoutMillis = 45 * 1000
    }
    install(HttpRequestRetry) {
      maxRetries = 5
      retryIf { _, response -> !response.status.isSuccess() }
      retryOnExceptionIf { _, _ -> true }
      delayMillis { retry -> retry * 1000L }
    }
    install(Logging) { level = if (logRequests) LogLevel.ALL else LogLevel.NONE }
    httpClientConfig?.invoke(this)
    defaultRequest {
      url(config.baseUrl)
      config.org?.let { headers.append("org", it) }
      bearerAuth(token)
    }
  }
  val client = httpClientEngine?.let { HttpClient(it, clientConfig) } ?: HttpClient(clientConfig)
  return OpenAI(
    client,
    OpenAIConfig(
      baseUrl = config.baseUrl,
      token = token,
      org = config.org,
      json = config.json,
      streamingPrefix = config.streamingPrefix,
      streamingDelimiter = config.streamingDelimiter
    )
  )
}
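For reference, a hedged sketch of calling this constructor directly. The base URL and token below are placeholders (any OpenAI-compatible endpoint would do); omitting token falls back to the OPENAI_TOKEN environment variable, and logRequests = true turns on the Logging plugin installed above:

import com.xebia.functional.xef.Config
import com.xebia.functional.xef.OpenAI

// Sketch only: baseUrl and token are illustrative, not a real endpoint or key.
val openAI =
  OpenAI(
    config = Config(baseUrl = "https://my-proxy.example.com/v1/", token = "sk-placeholder"),
    logRequests = true
  )
val chat = openAI.chat // the same handle AI.kt obtains via OpenAI(config).chat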
