Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitattributes
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
.devcontainer/** linguist-vendored=true
src/types/generated/** linguist-generated=true
22 changes: 22 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,28 @@

All notable changes to this project will be documented in this file.

## [0.4.0-rc.1](https://github.com/inference-gateway/typescript-sdk/compare/v0.3.5-rc.1...v0.4.0-rc.1) (2025-03-31)

### ✨ Features

* Add reasoning_content field to chunk message in OpenAPI specification ([4de08ed](https://github.com/inference-gateway/typescript-sdk/commit/4de08ed46f6078f77838bd9c4bae5e46eb12476c))

## [0.3.5-rc.1](https://github.com/inference-gateway/typescript-sdk/compare/v0.3.4...v0.3.5-rc.1) (2025-03-31)

### ♻️ Improvements

* Update type exports and add type generation task ([919679e](https://github.com/inference-gateway/typescript-sdk/commit/919679eac8142e25b5abcefd63ae00bc187f2a67))

### 🐛 Bug Fixes

* Correct regex pattern for release candidate branches in configuration ([33db013](https://github.com/inference-gateway/typescript-sdk/commit/33db013392c8a1a15cc5a3bebb0f4c6d58a73d41))
* Update release configuration to correctly match release candidate branches ([03d91e1](https://github.com/inference-gateway/typescript-sdk/commit/03d91e1d94d1fc11e50a535ba131ef2ca089653e))

### 🔧 Miscellaneous

* Remove unnecessary line from .gitattributes ([66407b4](https://github.com/inference-gateway/typescript-sdk/commit/66407b4cba0bf96af457dbb66818f48da3a4abda))
* Update .gitattributes to mark generated types as linguist-generated ([67f3d68](https://github.com/inference-gateway/typescript-sdk/commit/67f3d682ba1e131f9e416c45e097c76dfeec4bf6))

## [0.3.4](https://github.com/inference-gateway/typescript-sdk/compare/v0.3.3...v0.3.4) (2025-03-31)

### ♻️ Improvements
Expand Down
8 changes: 7 additions & 1 deletion Taskfile.yml
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
---
version: "3"
version: '3'

tasks:
oas-download:
Expand All @@ -21,3 +21,9 @@ tasks:
desc: Run tests
cmds:
- npm run test

generate-types:
desc: Generate TypeScript types from OpenAPI specification
cmds:
- npx openapi-typescript openapi.yaml --enum --enum-values --dedupe-enums=true --root-types -o src/types/generated/index.ts
- npx prettier --write src/types/generated/index.ts
3 changes: 3 additions & 0 deletions openapi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -904,6 +904,9 @@ components:
content:
type: string
description: The contents of the chunk message.
reasoning_content:
type: string
description: The reasoning content of the chunk message.
tool_calls:
type: array
items:
Expand Down
2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "@inference-gateway/sdk",
"version": "0.3.4",
"version": "0.4.0-rc.1",
"description": "An SDK written in Typescript for the [Inference Gateway](https://github.com/inference-gateway/inference-gateway).",
"main": "dist/src/index.js",
"types": "dist/src/index.d.ts",
Expand Down
58 changes: 34 additions & 24 deletions src/client.ts
Original file line number Diff line number Diff line change
@@ -1,13 +1,24 @@
import {
Error as ApiError,
ChatCompletionMessageToolCall,
ChatCompletionRequest,
ChatCompletionResponse,
ChatCompletionStreamCallbacks,
ChatCompletionStreamResponse,
ListModelsResponse,
import type {
Provider,
} from './types';
SchemaChatCompletionMessageToolCall,
SchemaCreateChatCompletionRequest,
SchemaCreateChatCompletionResponse,
SchemaCreateChatCompletionStreamResponse,
SchemaError,
SchemaListModelsResponse,
} from './types/generated';
import { ChatCompletionToolType } from './types/generated';

/**
 * Callbacks invoked while consuming a streaming chat-completion response.
 * All handlers are optional; any that are absent are simply skipped.
 */
interface ChatCompletionStreamCallbacks {
  // Invoked once when the HTTP stream has been opened successfully.
  onOpen?: () => void;
  // Invoked for every parsed SSE chunk, before the more specific handlers.
  onChunk?: (chunk: SchemaCreateChatCompletionStreamResponse) => void;
  // Invoked with the delta text content of a chunk, when present.
  onContent?: (content: string) => void;
  // Invoked when a chunk's delta carries a tool call.
  onTool?: (toolCall: SchemaChatCompletionMessageToolCall) => void;
  // Invoked when the stream ends; receives null on the '[DONE]' sentinel.
  // NOTE(review): only the null case is visible here — confirm whether a
  // final non-null response is ever passed.
  onFinish?: (
    response: SchemaCreateChatCompletionStreamResponse | null
  ) => void;
  // Invoked when parsing a chunk (or a handler) throws, with the error
  // message wrapped in a SchemaError.
  onError?: (error: SchemaError) => void;
}

export interface ClientOptions {
baseURL?: string;
Expand Down Expand Up @@ -90,7 +101,7 @@ export class InferenceGatewayClient {
});

if (!response.ok) {
const error = (await response.json()) as ApiError;
const error: SchemaError = await response.json();
throw new Error(
error.error || `HTTP error! status: ${response.status}`
);
Expand All @@ -105,12 +116,12 @@ export class InferenceGatewayClient {
/**
* Lists the currently available models.
*/
async listModels(provider?: Provider): Promise<ListModelsResponse> {
async listModels(provider?: Provider): Promise<SchemaListModelsResponse> {
const query: Record<string, string> = {};
if (provider) {
query.provider = provider;
}
return this.request<ListModelsResponse>(
return this.request<SchemaListModelsResponse>(
'/models',
{ method: 'GET' },
query
Expand All @@ -121,14 +132,14 @@ export class InferenceGatewayClient {
* Creates a chat completion.
*/
async createChatCompletion(
request: ChatCompletionRequest,
request: SchemaCreateChatCompletionRequest,
provider?: Provider
): Promise<ChatCompletionResponse> {
): Promise<SchemaCreateChatCompletionResponse> {
const query: Record<string, string> = {};
if (provider) {
query.provider = provider;
}
return this.request<ChatCompletionResponse>(
return this.request<SchemaCreateChatCompletionResponse>(
'/chat/completions',
{
method: 'POST',
Expand All @@ -142,7 +153,7 @@ export class InferenceGatewayClient {
* Creates a streaming chat completion.
*/
async streamChatCompletion(
request: ChatCompletionRequest,
request: SchemaCreateChatCompletionRequest,
callbacks: ChatCompletionStreamCallbacks,
provider?: Provider
): Promise<void> {
Expand Down Expand Up @@ -186,7 +197,7 @@ export class InferenceGatewayClient {
});

if (!response.ok) {
const error = (await response.json()) as ApiError;
const error: SchemaError = await response.json();
throw new Error(
error.error || `HTTP error! status: ${response.status}`
);
Expand Down Expand Up @@ -215,14 +226,13 @@ export class InferenceGatewayClient {
const data = line.slice(5).trim();

if (data === '[DONE]') {
callbacks.onFinish?.(
null as unknown as ChatCompletionStreamResponse
);
callbacks.onFinish?.(null);
return;
}

try {
const chunk = JSON.parse(data) as ChatCompletionStreamResponse;
const chunk: SchemaCreateChatCompletionStreamResponse =
JSON.parse(data);
callbacks.onChunk?.(chunk);

const content = chunk.choices[0]?.delta?.content;
Expand All @@ -232,9 +242,9 @@ export class InferenceGatewayClient {

const toolCalls = chunk.choices[0]?.delta?.tool_calls;
if (toolCalls && toolCalls.length > 0) {
const toolCall: ChatCompletionMessageToolCall = {
const toolCall: SchemaChatCompletionMessageToolCall = {
id: toolCalls[0].id || '',
type: 'function',
type: ChatCompletionToolType.function,
function: {
name: toolCalls[0].function?.name || '',
arguments: toolCalls[0].function?.arguments || '',
Expand All @@ -249,7 +259,7 @@ export class InferenceGatewayClient {
}
}
} catch (error) {
const apiError: ApiError = {
const apiError: SchemaError = {
error: (error as Error).message || 'Unknown error',
};
callbacks.onError?.(apiError);
Expand Down
2 changes: 1 addition & 1 deletion src/index.ts
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
export * from './client';
export * from './types';
export * from './types/generated';
Loading