@@ -972,103 +972,90 @@ describeIntegration("IpcMain sendMessage integration tests", () => {
         const { env, workspaceId, cleanup } = await setupWorkspace(provider);

         try {
-          // Phase 1: Send large messages until context error occurs
+          // Phase 1: Build up large conversation history to exceed context limit
+          // HACK: Use HistoryService directly to populate history without API calls.
+          // This is a test-only shortcut. Real application code should NEVER bypass IPC.
+          const historyService = new HistoryService(env.config);
+
           // gpt-4o-mini has ~128k token context window
-          // Each chunk is ~10k tokens (40k chars / 4 chars per token)
-          const largeChunk = "A".repeat(40000);
-          let contextError: unknown = null;
-
-          // Send up to 20 large messages (200k tokens total)
-          // Should exceed 128k context limit and trigger error
-          for (let i = 0; i < 20; i++) {
-            const result = await sendMessageWithModel(
-              env.mockIpcRenderer,
-              workspaceId,
-              largeChunk,
-              provider,
-              model,
-              { disableAutoTruncation: true }
-            );
+          // Create ~50k chars per message (~12.5k tokens)
+          const messageSize = 50_000;
+          const largeText = "A".repeat(messageSize);

-            if (!result.success) {
-              contextError = result.error;
-              break;
-            }
+          // Need ~150k tokens to exceed 128k context limit
+          // 12 messages × 12.5k tokens = 150k tokens
+          const messageCount = 12;

-            // Wait for stream completion or error
-            const collector = createEventCollector(env.sentEvents, workspaceId);
-
-            // Poll for either stream-end or stream-error
-            const startTime = Date.now();
-            let foundEvent = false;
-            while (Date.now() - startTime < 60000 && !foundEvent) {
-              collector.collect(); // Collect new events from sentEvents
-
-              const streamEnd = collector.getEvents().find(
-                (e) => "type" in e && e.type === "stream-end"
-              );
-              const streamError = collector.getEvents().find(
-                (e) => "type" in e && e.type === "stream-error"
-              );
-
-              if (streamError && "error" in streamError) {
-                contextError = streamError.error;
-                foundEvent = true;
-                break;
-              }
-
-              if (streamEnd) {
-                foundEvent = true;
-                break;
-              }
-
-              await new Promise((resolve) => setTimeout(resolve, 100));
-            }
-
-            if (!foundEvent) {
-              throw new Error("Timeout waiting for stream-end or stream-error");
-            }
-
-            // If we got a stream error, break out of the loop
-            if (contextError) {
-              break;
-            }
-
-            assertStreamSuccess(collector);
-            env.sentEvents.length = 0; // Clear events for next iteration
+          // Build conversation history with alternating user/assistant messages
+          for (let i = 0; i < messageCount; i++) {
+            const isUser = i % 2 === 0;
+            const role = isUser ? "user" : "assistant";
+            const message = createCmuxMessage(`history-msg-${i}`, role, largeText, {});
+
+            const result = await historyService.appendToHistory(workspaceId, message);
+            expect(result.success).toBe(true);
           }

-          // Verify we hit a context error
-          expect(contextError).not.toBeNull();
+          // Now send a new message with auto-truncation disabled - should trigger error
+          const result = await sendMessageWithModel(
+            env.mockIpcRenderer,
+            workspaceId,
+            "This should trigger a context error",
+            provider,
+            model,
+            { disableAutoTruncation: true }
+          );
+
+          // IPC call itself should succeed (errors come through stream events)
+          expect(result.success).toBe(true);
+
+          // Wait for either stream-end or stream-error
+          const collector = createEventCollector(env.sentEvents, workspaceId);
+          await Promise.race([
+            collector.waitForEvent("stream-end", 10000),
+            collector.waitForEvent("stream-error", 10000),
+          ]);
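+          // Whichever terminal event arrives first settles the race; with truncation disabled we expect stream-error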
+
+          // Should have received an error event with a context-exceeded error
+          expect(collector.hasError()).toBe(true);
+
           // Check that error message contains context-related keywords
-          const errorStr = JSON.stringify(contextError).toLowerCase();
-          expect(
-            errorStr.includes("context") ||
-              errorStr.includes("length") ||
-              errorStr.includes("exceed") ||
-              errorStr.includes("token")
-          ).toBe(true);
+          const errorEvents = collector
+            .getEvents()
+            .filter((e) => "type" in e && e.type === "stream-error");
+          expect(errorEvents.length).toBeGreaterThan(0);
+
+          const errorEvent = errorEvents[0];
+          if (errorEvent && "error" in errorEvent) {
+            const errorStr = String(errorEvent.error).toLowerCase();
+            expect(
+              errorStr.includes("context") ||
+                errorStr.includes("length") ||
+                errorStr.includes("exceed") ||
+                errorStr.includes("token")
+            ).toBe(true);
+          }
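+          // Substring checks keep the assertion provider-agnostic: providers word context-limit errors differently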

           // Phase 2: Send message with auto-truncation enabled (should succeed)
           env.sentEvents.length = 0;
           const successResult = await sendMessageWithModel(
             env.mockIpcRenderer,
             workspaceId,
-            "Final message after auto truncation",
+            "This should succeed with auto-truncation",
             provider,
             model
             // disableAutoTruncation defaults to false (auto-truncation enabled)
           );
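+          // With truncation enabled, the oversized history is trimmed to fit the context window before sending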

           expect(successResult.success).toBe(true);
-          const collector = createEventCollector(env.sentEvents, workspaceId);
-          await collector.waitForEvent("stream-end", 60000);
-          assertStreamSuccess(collector);
+          const successCollector = createEventCollector(env.sentEvents, workspaceId);
+          await successCollector.waitForEvent("stream-end", 30000);
+          assertStreamSuccess(successCollector);
         } finally {
           await cleanup();
         }
       },
-      180000 // 3 minute timeout for heavy test with multiple API calls
+      60000 // 1 minute timeout (much faster since we don't make many API calls)
     );
   });
 });