18 changes: 16 additions & 2 deletions src/components/ThinkingSlider.tsx
@@ -1,10 +1,12 @@
import React, { useEffect, useId } from "react";
import styled from "@emotion/styled";
import type { ThinkingLevel } from "@/types/thinking";
import type { ThinkingLevel, ThinkingLevelOn } from "@/types/thinking";
import { useThinkingLevel } from "@/hooks/useThinkingLevel";
import { TooltipWrapper, Tooltip } from "./Tooltip";
import { formatKeybind, KEYBINDS } from "@/utils/ui/keybinds";
import { getThinkingPolicyForModel } from "@/utils/thinking/policy";
import { updatePersistedState } from "@/hooks/usePersistedState";
import { getLastThinkingByModelKey } from "@/constants/storage";

const ThinkingSliderContainer = styled.div`
display: flex;
@@ -193,6 +195,16 @@ export const ThinkingSliderComponent: React.FC<ThinkingControlProps> = ({ modelS

const value = thinkingLevelToValue(thinkingLevel);

const handleThinkingLevelChange = (newLevel: ThinkingLevel) => {
setThinkingLevel(newLevel);
// Also save to lastThinkingByModel for Ctrl+Shift+T toggle memory
// Only save active levels (not "off") - matches useAIViewKeybinds logic
if (newLevel !== "off") {
const lastThinkingKey = getLastThinkingByModelKey(modelString);
updatePersistedState(lastThinkingKey, newLevel as ThinkingLevelOn);
}
};

return (
<TooltipWrapper>
<ThinkingSliderContainer>
@@ -203,7 +215,9 @@ export const ThinkingSliderComponent: React.FC<ThinkingControlProps> = ({ modelS
max="3"
step="1"
value={value}
onChange={(e) => setThinkingLevel(valueToThinkingLevel(parseInt(e.target.value)))}
onChange={(e) =>
handleThinkingLevelChange(valueToThinkingLevel(parseInt(e.target.value)))
}
id={sliderId}
role="slider"
aria-valuemin={0}
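For context, the value persisted above is what the Ctrl+Shift+T toggle is expected to read back when thinking is re-enabled. The sketch below is a hypothetical consumer side, not part of this PR: readPersistedState is a placeholder for whatever read accessor usePersistedState exposes, the "medium" fallback is assumed, and the real wiring lives in useAIViewKeybinds.

// Hypothetical sketch of the toggle side (not in this diff).
// readPersistedState and the "medium" fallback are assumptions; the actual
// logic in useAIViewKeybinds may differ.
import type { ThinkingLevel, ThinkingLevelOn } from "@/types/thinking";
import { getLastThinkingByModelKey } from "@/constants/storage";

declare function readPersistedState<T>(key: string, fallback: T): T; // assumed accessor

export function toggleThinking(modelString: string, current: ThinkingLevel): ThinkingLevel {
  if (current !== "off") {
    // Turning thinking off; the last active level stays persisted for next time.
    return "off";
  }
  // Turning thinking back on: restore the last active level saved by the slider
  // (or by this toggle), falling back to an assumed default if nothing is stored.
  const key = getLastThinkingByModelKey(modelString);
  return readPersistedState<ThinkingLevelOn>(key, "medium" as ThinkingLevelOn);
}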
175 changes: 175 additions & 0 deletions src/utils/messages/retryEligibility.test.ts
@@ -0,0 +1,175 @@
import { describe, it, expect } from "@jest/globals";
import { hasInterruptedStream } from "./retryEligibility";
import type { DisplayedMessage } from "@/types/message";

describe("hasInterruptedStream", () => {
it("returns false for empty messages", () => {
expect(hasInterruptedStream([])).toBe(false);
});

it("returns true for stream-error message", () => {
const messages: DisplayedMessage[] = [
{
type: "user",
id: "user-1",
historyId: "user-1",
content: "Hello",
historySequence: 1,
},
{
type: "stream-error",
id: "error-1",
historyId: "assistant-1",
error: "Connection failed",
errorType: "network",
historySequence: 2,
},
];
expect(hasInterruptedStream(messages)).toBe(true);
});

it("returns true for partial assistant message", () => {
const messages: DisplayedMessage[] = [
{
type: "user",
id: "user-1",
historyId: "user-1",
content: "Hello",
historySequence: 1,
},
{
type: "assistant",
id: "assistant-1",
historyId: "assistant-1",
content: "Incomplete response",
historySequence: 2,
streamSequence: 0,
isStreaming: false,
isPartial: true,
isLastPartOfMessage: true,
isCompacted: false,
},
];
expect(hasInterruptedStream(messages)).toBe(true);
});

it("returns true for partial tool message", () => {
const messages: DisplayedMessage[] = [
{
type: "user",
id: "user-1",
historyId: "user-1",
content: "Hello",
historySequence: 1,
},
{
type: "tool",
id: "tool-1",
historyId: "assistant-1",
toolName: "bash",
toolCallId: "call-1",
args: { script: "echo test" },
status: "interrupted",
isPartial: true,
historySequence: 2,
streamSequence: 0,
isLastPartOfMessage: true,
},
];
expect(hasInterruptedStream(messages)).toBe(true);
});

it("returns true for partial reasoning message", () => {
const messages: DisplayedMessage[] = [
{
type: "user",
id: "user-1",
historyId: "user-1",
content: "Hello",
historySequence: 1,
},
{
type: "reasoning",
id: "reasoning-1",
historyId: "assistant-1",
content: "Let me think...",
historySequence: 2,
streamSequence: 0,
isStreaming: false,
isPartial: true,
isLastPartOfMessage: true,
},
];
expect(hasInterruptedStream(messages)).toBe(true);
});

it("returns false for completed messages", () => {
const messages: DisplayedMessage[] = [
{
type: "user",
id: "user-1",
historyId: "user-1",
content: "Hello",
historySequence: 1,
},
{
type: "assistant",
id: "assistant-1",
historyId: "assistant-1",
content: "Complete response",
historySequence: 2,
streamSequence: 0,
isStreaming: false,
isPartial: false,
isLastPartOfMessage: true,
isCompacted: false,
},
];
expect(hasInterruptedStream(messages)).toBe(false);
});

it("returns true when last message is user message (app restarted during slow model)", () => {
const messages: DisplayedMessage[] = [
{
type: "user",
id: "user-1",
historyId: "user-1",
content: "Hello",
historySequence: 1,
},
{
type: "assistant",
id: "assistant-1",
historyId: "assistant-1",
content: "Complete response",
historySequence: 2,
streamSequence: 0,
isStreaming: false,
isPartial: false,
isLastPartOfMessage: true,
isCompacted: false,
},
{
type: "user",
id: "user-2",
historyId: "user-2",
content: "Another question",
historySequence: 3,
},
];
expect(hasInterruptedStream(messages)).toBe(true);
});

it("returns true when user message has no response (slow model scenario)", () => {
const messages: DisplayedMessage[] = [
{
type: "user",
id: "user-1",
historyId: "user-1",
content: "Hello",
historySequence: 1,
},
];
expect(hasInterruptedStream(messages)).toBe(true);
});
});
8 changes: 8 additions & 0 deletions src/utils/messages/retryEligibility.ts
@@ -8,6 +8,13 @@ import type { DisplayedMessage } from "@/types/message";
* - useResumeManager: To determine if workspace is eligible for auto-retry
*
* This ensures DRY - both use the same logic for what constitutes a retryable state.
*
* Returns true if:
* 1. Last message is a stream-error
* 2. Last message is a partial assistant/tool/reasoning message
* 3. Last message is a user message (indicating we sent it but never got a response)
* - This handles app restarts during slow model responses (models can take 30-60s to first token)
* - User messages are only at the end when response hasn't started/completed
*/
export function hasInterruptedStream(messages: DisplayedMessage[]): boolean {
if (messages.length === 0) return false;
@@ -16,6 +23,7 @@ export function hasInterruptedStream(messages: DisplayedMessage[]): boolean {

return (
lastMessage.type === "stream-error" || // Stream errored out
lastMessage.type === "user" || // No response received yet (e.g., app restarted during slow model)
(lastMessage.type === "assistant" && lastMessage.isPartial === true) ||
(lastMessage.type === "tool" && lastMessage.isPartial === true) ||
(lastMessage.type === "reasoning" && lastMessage.isPartial === true)
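As the doc comment notes, hasInterruptedStream is shared by the retry UI and useResumeManager. A minimal usage sketch of a caller is below, assuming the caller already has the workspace's DisplayedMessage[] in hand; scheduleAutoRetry is a hypothetical helper and the real hook may be structured differently.

// Hypothetical consumer: decide whether a workspace is eligible for auto-retry.
// scheduleAutoRetry is a placeholder; the actual useResumeManager wiring is not shown here.
import { hasInterruptedStream } from "@/utils/messages/retryEligibility";
import type { DisplayedMessage } from "@/types/message";

declare function scheduleAutoRetry(workspaceId: string): void; // assumed helper

function maybeResume(workspaceId: string, messages: DisplayedMessage[]): void {
  if (hasInterruptedStream(messages)) {
    // Covers a stream-error, a partial assistant/tool/reasoning message, and a
    // trailing user message that never received a response (e.g. app restart mid-stream).
    scheduleAutoRetry(workspaceId);
  }
}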