1- import type { OpenRouterChatSettings } from './types/openrouter-chat-settings' ;
1+ import type { OpenRouterChatSettings } from './types/openrouter-chat-settings'
22
33import {
44 convertReadableStreamToArray ,
55 StreamingTestServer ,
6- } from '@ai-sdk/provider-utils/test' ;
7- import { describe , expect , it } from 'vitest' ;
6+ } from '@ai-sdk/provider-utils/test'
7+ import { describe , expect , it } from 'bun:test'
88
9- import { OpenRouterChatLanguageModel } from './openrouter-chat-language-model' ;
9+ import { OpenRouterChatLanguageModel } from './openrouter-chat-language-model'
1010
1111describe ( 'OpenRouter Streaming Usage Accounting' , ( ) => {
1212 const server = new StreamingTestServer (
13- 'https://api.openrouter.ai/chat/completions' ,
14- ) ;
13+ 'https://api.openrouter.ai/chat/completions'
14+ )
1515
16- server . setupTestEnvironment ( ) ;
16+ server . setupTestEnvironment ( )
1717
1818 function prepareStreamResponse ( includeUsage = true ) {
1919 server . responseChunks = [
2020 `data: {"id":"test-id","model":"test-model","choices":[{"delta":{"content":"Hello"},"index":0}]}\n\n` ,
2121 `data: {"choices":[{"finish_reason":"stop","index":0}]}\n\n` ,
22- ] ;
22+ ]
2323
2424 if ( includeUsage ) {
2525 server . responseChunks . push (
26- `data: {"usage":{"prompt_tokens":10,"prompt_tokens_details":{"cached_tokens":5},"completion_tokens":20,"completion_tokens_details":{"reasoning_tokens":8},"total_tokens":30,"cost":0.0015},"choices":[]}\n\n` ,
27- ) ;
26+ `data: {"usage":{"prompt_tokens":10,"prompt_tokens_details":{"cached_tokens":5},"completion_tokens":20,"completion_tokens_details":{"reasoning_tokens":8},"total_tokens":30,"cost":0.0015},"choices":[]}\n\n`
27+ )
2828 }
2929
30- server . responseChunks . push ( 'data: [DONE]\n\n' ) ;
30+ server . responseChunks . push ( 'data: [DONE]\n\n' )
3131 }
3232
3333 it ( 'should include stream_options.include_usage in request when enabled' , async ( ) => {
34- prepareStreamResponse ( ) ;
34+ prepareStreamResponse ( )
3535
3636 // Create model with usage accounting enabled
3737 const settings : OpenRouterChatSettings = {
3838 usage : { include : true } ,
39- } ;
39+ }
4040
4141 const model = new OpenRouterChatLanguageModel ( 'test-model' , settings , {
4242 provider : 'openrouter.chat' ,
4343 url : ( ) => 'https://api.openrouter.ai/chat/completions' ,
4444 headers : ( ) => ( { } ) ,
4545 compatibility : 'strict' ,
4646 fetch : global . fetch ,
47- } ) ;
47+ } )
4848
4949 // Call the model with streaming
5050 await model . doStream ( {
@@ -57,32 +57,32 @@ describe('OpenRouter Streaming Usage Accounting', () => {
5757 ] ,
5858 maxTokens : 100 ,
5959 inputFormat : 'messages' ,
60- } ) ;
60+ } )
6161
6262 // Verify stream options
63- const requestBody = await server . getRequestBodyJson ( ) ;
64- expect ( requestBody ) . toBeDefined ( ) ;
65- expect ( requestBody . stream ) . toBe ( true ) ;
63+ const requestBody = await server . getRequestBodyJson ( )
64+ expect ( requestBody ) . toBeDefined ( )
65+ expect ( requestBody . stream ) . toBe ( true )
6666 expect ( requestBody . stream_options ) . toEqual ( {
6767 include_usage : true ,
68- } ) ;
69- } ) ;
68+ } )
69+ } )
7070
7171 it ( 'should include provider-specific metadata in finish event when usage accounting is enabled' , async ( ) => {
72- prepareStreamResponse ( true ) ;
72+ prepareStreamResponse ( true )
7373
7474 // Create model with usage accounting enabled
7575 const settings : OpenRouterChatSettings = {
7676 usage : { include : true } ,
77- } ;
77+ }
7878
7979 const model = new OpenRouterChatLanguageModel ( 'test-model' , settings , {
8080 provider : 'openrouter.chat' ,
8181 url : ( ) => 'https://api.openrouter.ai/chat/completions' ,
8282 headers : ( ) => ( { } ) ,
8383 compatibility : 'strict' ,
8484 fetch : global . fetch ,
85- } ) ;
85+ } )
8686
8787 // Call the model with streaming
8888 const result = await model . doStream ( {
@@ -95,46 +95,46 @@ describe('OpenRouter Streaming Usage Accounting', () => {
9595 ] ,
9696 maxTokens : 100 ,
9797 inputFormat : 'messages' ,
98- } ) ;
98+ } )
9999
100100 // Read all chunks from the stream
101- const chunks = await convertReadableStreamToArray ( result . stream ) ;
101+ const chunks = await convertReadableStreamToArray ( result . stream )
102102
103103 // Find the finish chunk
104- const finishChunk = chunks . find ( ( chunk ) => chunk . type === 'finish' ) ;
105- expect ( finishChunk ) . toBeDefined ( ) ;
104+ const finishChunk = chunks . find ( ( chunk ) => chunk . type === 'finish' )
105+ expect ( finishChunk ) . toBeDefined ( )
106106
107107 // Verify metadata is included
108- expect ( finishChunk ?. providerMetadata ) . toBeDefined ( ) ;
109- const openrouterData = finishChunk ?. providerMetadata ?. openrouter ;
110- expect ( openrouterData ) . toBeDefined ( ) ;
108+ expect ( finishChunk ?. providerMetadata ) . toBeDefined ( )
109+ const openrouterData = finishChunk ?. providerMetadata ?. openrouter
110+ expect ( openrouterData ) . toBeDefined ( )
111111
112- const usage = openrouterData ?. usage ;
112+ const usage = openrouterData ?. usage
113113 expect ( usage ) . toMatchObject ( {
114114 promptTokens : 10 ,
115115 completionTokens : 20 ,
116116 totalTokens : 30 ,
117117 cost : 0.0015 ,
118118 promptTokensDetails : { cachedTokens : 5 } ,
119119 completionTokensDetails : { reasoningTokens : 8 } ,
120- } ) ;
121- } ) ;
120+ } )
121+ } )
122122
123123 it ( 'should not include provider-specific metadata when usage accounting is disabled' , async ( ) => {
124- prepareStreamResponse ( false ) ;
124+ prepareStreamResponse ( false )
125125
126126 // Create model with usage accounting disabled
127127 const settings : OpenRouterChatSettings = {
128128 // No usage property
129- } ;
129+ }
130130
131131 const model = new OpenRouterChatLanguageModel ( 'test-model' , settings , {
132132 provider : 'openrouter.chat' ,
133133 url : ( ) => 'https://api.openrouter.ai/chat/completions' ,
134134 headers : ( ) => ( { } ) ,
135135 compatibility : 'strict' ,
136136 fetch : global . fetch ,
137- } ) ;
137+ } )
138138
139139 // Call the model with streaming
140140 const result = await model . doStream ( {
@@ -147,16 +147,16 @@ describe('OpenRouter Streaming Usage Accounting', () => {
147147 ] ,
148148 maxTokens : 100 ,
149149 inputFormat : 'messages' ,
150- } ) ;
150+ } )
151151
152152 // Read all chunks from the stream
153- const chunks = await convertReadableStreamToArray ( result . stream ) ;
153+ const chunks = await convertReadableStreamToArray ( result . stream )
154154
155155 // Find the finish chunk
156- const finishChunk = chunks . find ( ( chunk ) => chunk . type === 'finish' ) ;
157- expect ( finishChunk ) . toBeDefined ( ) ;
156+ const finishChunk = chunks . find ( ( chunk ) => chunk . type === 'finish' )
157+ expect ( finishChunk ) . toBeDefined ( )
158158
159159 // Verify that provider metadata is not included
160- expect ( finishChunk ?. providerMetadata ?. openrouter ) . toBeUndefined ( ) ;
161- } ) ;
162- } ) ;
160+ expect ( finishChunk ?. providerMetadata ?. openrouter ) . toBeUndefined ( )
161+ } )
162+ } )