Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .roo/rules/rules.md
Original file line number Diff line number Diff line change
Expand Up @@ -177,3 +177,8 @@ Also when adding content to the end of files prefer to use the new append_file t
- **ALWAYS verify the current working directory before executing commands**
- Either run "pwd" first to verify the directory, or do a "cd" to the correct absolute directory before running commands
- When running tests, do not "cd" to the pkg directory and then run the test. This screws up the cwd and you never recover. Run the test from the project root instead.

### Testing / Compiling Go Code

No need to run a `go build` or a `go run` to just check if the Go code compiles. VSCode's errors/problems cover this well.
If there are no Go errors in VSCode, you can assume the code compiles fine.
4 changes: 2 additions & 2 deletions cmd/server/main-server.go
Original file line number Diff line number Diff line change
Expand Up @@ -129,15 +129,15 @@ func sendTelemetryWrapper() {
defer func() {
panichandler.PanicHandler("sendTelemetryWrapper", recover())
}()
ctx, cancelFn := context.WithTimeout(context.Background(), 10*time.Second)
ctx, cancelFn := context.WithTimeout(context.Background(), 15*time.Second)
defer cancelFn()
beforeSendActivityUpdate(ctx)
client, err := wstore.DBGetSingleton[*waveobj.Client](ctx)
if err != nil {
log.Printf("[error] getting client data for telemetry: %v\n", err)
return
}
err = wcloud.SendAllTelemetry(ctx, client.OID)
err = wcloud.SendAllTelemetry(client.OID)
if err != nil {
log.Printf("[error] sending telemetry: %v\n", err)
}
Expand Down
65 changes: 43 additions & 22 deletions frontend/app/aipanel/aimessage.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -11,16 +11,41 @@ import { getFileIcon } from "./ai-utils";
import { WaveUIMessage, WaveUIMessagePart } from "./aitypes";
import { WaveAIModel } from "./waveai-model";

const AIThinking = memo(({ message = "AI is thinking..." }: { message?: string }) => (
<div className="flex items-center gap-2">
<div className="animate-pulse flex items-center">
<i className="fa fa-circle text-[10px]"></i>
<i className="fa fa-circle text-[10px] mx-1"></i>
<i className="fa fa-circle text-[10px]"></i>
const AIThinking = memo(({ message = "AI is thinking...", reasoningText }: { message?: string; reasoningText?: string }) => {
const scrollRef = useRef<HTMLDivElement>(null);

useEffect(() => {
if (scrollRef.current && reasoningText) {
scrollRef.current.scrollTop = scrollRef.current.scrollHeight;
}
}, [reasoningText]);

const displayText = reasoningText ? (() => {
const lastDoubleNewline = reasoningText.lastIndexOf("\n\n");
return lastDoubleNewline !== -1 ? reasoningText.substring(lastDoubleNewline + 2) : reasoningText;
})() : "";

return (
<div className="flex flex-col gap-1">
<div className="flex items-center gap-2">
<div className="animate-pulse flex items-center">
<i className="fa fa-circle text-[10px]"></i>
<i className="fa fa-circle text-[10px] mx-1"></i>
<i className="fa fa-circle text-[10px]"></i>
</div>
{message && <span className="text-sm text-gray-400">{message}</span>}
</div>
{displayText && (
<div
ref={scrollRef}
className="text-sm text-gray-500 overflow-y-auto max-h-[2lh] max-w-[600px] pl-9"
>
{displayText}
</div>
)}
</div>
{message && <span className="text-sm text-gray-400">{message}</span>}
</div>
));
);
});

AIThinking.displayName = "AIThinking";

Expand Down Expand Up @@ -428,35 +453,31 @@ const groupMessageParts = (parts: WaveUIMessagePart[]): MessagePart[] => {
return grouped;
};

const getThinkingMessage = (parts: WaveUIMessagePart[], isStreaming: boolean, role: string): string | null => {
const getThinkingMessage = (parts: WaveUIMessagePart[], isStreaming: boolean, role: string): { message: string; reasoningText?: string } | null => {
if (!isStreaming || role !== "assistant") {
return null;
}

// Check if there are any pending-approval tool calls - this takes priority
const hasPendingApprovals = parts.some(
(part) => part.type === "data-tooluse" && part.data?.approval === "needs-approval"
);

if (hasPendingApprovals) {
return "Waiting for Tool Approvals...";
return { message: "Waiting for Tool Approvals..." };
}

const lastPart = parts[parts.length - 1];

// Check if the last part is a reasoning part
if (lastPart?.type === "reasoning") {
return "AI is thinking...";
const reasoningContent = lastPart.text || "";
return { message: "AI is thinking...", reasoningText: reasoningContent };
}

// Only hide thinking indicator if the last part is text and not empty
// (this means text is actively streaming)
if (lastPart?.type === "text" && lastPart.text) {
return null;
}

// For all other cases (including finish-step, tooluse, etc.), show dots
return "";
return { message: "" };
};

export const AIMessage = memo(({ message, isStreaming }: AIMessageProps) => {
Expand All @@ -466,7 +487,7 @@ export const AIMessage = memo(({ message, isStreaming }: AIMessageProps) => {
(part): part is WaveUIMessagePart & { type: "data-userfile" } => part.type === "data-userfile"
);

const thinkingMessage = getThinkingMessage(parts, isStreaming, message.role);
const thinkingData = getThinkingMessage(parts, isStreaming, message.role);
const groupedParts = groupMessageParts(displayParts);

return (
Expand All @@ -477,7 +498,7 @@ export const AIMessage = memo(({ message, isStreaming }: AIMessageProps) => {
message.role === "user" ? "py-2 bg-accent-800 text-white max-w-[calc(100%-20px)]" : null
)}
>
{displayParts.length === 0 && !isStreaming && !thinkingMessage ? (
{displayParts.length === 0 && !isStreaming && !thinkingData ? (
<div className="whitespace-pre-wrap break-words">(no text content)</div>
) : (
<>
Expand All @@ -490,9 +511,9 @@ export const AIMessage = memo(({ message, isStreaming }: AIMessageProps) => {
</div>
)
)}
{thinkingMessage != null && (
{thinkingData != null && (
<div className="mt-2">
<AIThinking message={thinkingMessage} />
<AIThinking message={thinkingData.message} reasoningText={thinkingData.reasoningText} />
</div>
)}
</>
Expand Down
4 changes: 2 additions & 2 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

103 changes: 90 additions & 13 deletions pkg/aiusechat/openai/openai-backend.go
Original file line number Diff line number Diff line change
Expand Up @@ -221,6 +221,47 @@ type openaiResponseFunctionCallArgumentsDoneEvent struct {
Arguments string `json:"arguments"`
}

// openaiResponseReasoningSummaryPartAddedEvent is the decoded payload of the
// "response.reasoning_summary_part.added" SSE event. It signals the start of a
// new reasoning-summary part; the handler looks up the reasoning block by
// ItemId and, for every part after the first, emits a "\n\n" separator to the
// UI stream before the part's text deltas arrive.
type openaiResponseReasoningSummaryPartAddedEvent struct {
	Type           string                     `json:"type"`            // event type discriminator
	SequenceNumber int                        `json:"sequence_number"` // ordering index within the SSE stream
	ItemId         string                     `json:"item_id"`         // id of the reasoning output item this part belongs to
	OutputIndex    int                        `json:"output_index"`
	SummaryIndex   int                        `json:"summary_index"` // index of this summary part within the item
	Part           openaiReasoningSummaryPart `json:"part"`          // initial part content (may be empty at "added" time)
}

// openaiResponseReasoningSummaryPartDoneEvent mirrors the
// "response.reasoning_summary_part.done" SSE event. The event handler
// currently acknowledges this event without decoding it (the case returns
// immediately), so this type exists for schema completeness / future use.
type openaiResponseReasoningSummaryPartDoneEvent struct {
	Type           string                     `json:"type"`            // event type discriminator
	SequenceNumber int                        `json:"sequence_number"` // ordering index within the SSE stream
	ItemId         string                     `json:"item_id"`         // id of the reasoning output item this part belongs to
	OutputIndex    int                        `json:"output_index"`
	SummaryIndex   int                        `json:"summary_index"` // index of this summary part within the item
	Part           openaiReasoningSummaryPart `json:"part"`          // final, complete part content
}

// openaiReasoningSummaryPart is a single part of a reasoning summary. It is
// embedded in the summary part added/done events and is also the element type
// of openaiOutputItem.Summary.
type openaiReasoningSummaryPart struct {
	Type string `json:"type"` // part type discriminator
	Text string `json:"text"` // summary text content
}

// openaiResponseReasoningSummaryTextDeltaEvent is the decoded payload of the
// "response.reasoning_summary_text.delta" SSE event. The handler resolves the
// reasoning block via ItemId and forwards Delta to the UI with
// sse.AiMsgReasoningDelta, streaming the summary text incrementally.
type openaiResponseReasoningSummaryTextDeltaEvent struct {
	Type           string `json:"type"`            // event type discriminator
	SequenceNumber int    `json:"sequence_number"` // ordering index within the SSE stream
	ItemId         string `json:"item_id"`         // id of the reasoning output item being streamed
	OutputIndex    int    `json:"output_index"`
	SummaryIndex   int    `json:"summary_index"` // index of the summary part this delta extends
	Delta          string `json:"delta"`         // incremental summary text to append
}

// openaiResponseReasoningSummaryTextDoneEvent mirrors the
// "response.reasoning_summary_text.done" SSE event, which carries the full
// accumulated summary text. The event handler currently acknowledges this
// event without decoding it (all text has already arrived via deltas), so this
// type exists for schema completeness / future use.
type openaiResponseReasoningSummaryTextDoneEvent struct {
	Type           string `json:"type"`            // event type discriminator
	SequenceNumber int    `json:"sequence_number"` // ordering index within the SSE stream
	ItemId         string `json:"item_id"`         // id of the reasoning output item
	OutputIndex    int    `json:"output_index"`
	SummaryIndex   int    `json:"summary_index"` // index of the completed summary part
	Text           string `json:"text"`          // complete summary text for this part
}

// ---------- OpenAI Response Structure Types ----------

type openaiResponse struct {
Expand Down Expand Up @@ -256,12 +297,12 @@ type openaiResponse struct {
}

type openaiOutputItem struct {
Id string `json:"id"`
Type string `json:"type"`
Status string `json:"status,omitempty"`
Content []OpenAIMessageContent `json:"content,omitempty"`
Role string `json:"role,omitempty"`
Summary []string `json:"summary,omitempty"`
Id string `json:"id"`
Type string `json:"type"`
Status string `json:"status,omitempty"`
Content []OpenAIMessageContent `json:"content,omitempty"`
Role string `json:"role,omitempty"`
Summary []openaiReasoningSummaryPart `json:"summary,omitempty"`

// tools (type="function_call")
Name string `json:"name,omitempty"`
Expand Down Expand Up @@ -320,10 +361,11 @@ const (
)

type openaiBlockState struct {
kind openaiBlockKind
localID string // For SSE streaming to UI
toolCallID string // For function calls
toolName string // For function calls
kind openaiBlockKind
localID string // For SSE streaming to UI
toolCallID string // For function calls
toolName string // For function calls
summaryCount int // For reasoning: number of summary parts seen
}

type openaiStreamingState struct {
Expand Down Expand Up @@ -635,11 +677,12 @@ func handleOpenAIEvent(

switch ev.Item.Type {
case "reasoning":
// Handle reasoning item for UI streaming
// Create reasoning block - emit start immediately
id := uuid.New().String()
state.blockMap[ev.Item.Id] = &openaiBlockState{
kind: openaiBlockReasoning,
localID: id,
kind: openaiBlockReasoning,
localID: id,
summaryCount: 0,
}
_ = sse.AiMsgReasoningStart(id)
case "message":
Expand Down Expand Up @@ -836,6 +879,40 @@ func handleOpenAIEvent(
case "response.output_text.annotation.added":
return nil, nil

case "response.reasoning_summary_part.added":
var ev openaiResponseReasoningSummaryPartAddedEvent
if err := json.Unmarshal([]byte(data), &ev); err != nil {
_ = sse.AiMsgError(err.Error())
return &uctypes.WaveStopReason{Kind: uctypes.StopKindError, ErrorType: "decode", ErrorText: err.Error()}, nil
}

if st := state.blockMap[ev.ItemId]; st != nil && st.kind == openaiBlockReasoning {
if st.summaryCount > 0 {
// Not the first summary part, emit separator
_ = sse.AiMsgReasoningDelta(st.localID, "\n\n")
}
st.summaryCount++
}
return nil, nil

case "response.reasoning_summary_part.done":
return nil, nil

case "response.reasoning_summary_text.delta":
var ev openaiResponseReasoningSummaryTextDeltaEvent
if err := json.Unmarshal([]byte(data), &ev); err != nil {
_ = sse.AiMsgError(err.Error())
return &uctypes.WaveStopReason{Kind: uctypes.StopKindError, ErrorType: "decode", ErrorText: err.Error()}, nil
}

if st := state.blockMap[ev.ItemId]; st != nil && st.kind == openaiBlockReasoning {
_ = sse.AiMsgReasoningDelta(st.localID, ev.Delta)
}
return nil, nil

case "response.reasoning_summary_text.done":
return nil, nil

default:
logutil.DevPrintf("OpenAI: unknown event: %s, data: %s", eventName, data)
return nil, nil
Expand Down
4 changes: 4 additions & 0 deletions pkg/aiusechat/openai/openai-convertmessage.go
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,7 @@ func debugPrintReq(req *OpenAIRequest, endpoint string) {
if len(toolNames) > 0 {
log.Printf("tools: %s\n", strings.Join(toolNames, ","))
}
// log.Printf("reasoning %v\n", req.Reasoning)

log.Printf("inputs (%d):", len(req.Input))
for idx, input := range req.Input {
Expand Down Expand Up @@ -234,6 +235,9 @@ func buildOpenAIHTTPRequest(ctx context.Context, inputs []any, chatOpts uctypes.
reqBody.Reasoning = &ReasoningType{
Effort: opts.ThinkingLevel, // low, medium, high map directly
}
if opts.Model == "gpt-5" {
reqBody.Reasoning.Summary = "auto"
}
Comment on lines +238 to +240
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Broaden the gpt‑5 model match before enabling auto summary.

OpenAI’s released model IDs almost always ship with suffixed variants (e.g., gpt-5.1, gpt-5.1-mini). With the strict equality check, those common IDs won’t request the reasoning summary, so the UI never receives the new reasoning deltas. Please loosen the check (e.g., strings.HasPrefix(opts.Model, "gpt-5")) or otherwise cover the known variants so the feature is exercised across the gpt-5 family.

-		if opts.Model == "gpt-5" {
+		if strings.HasPrefix(opts.Model, "gpt-5") {
 			reqBody.Reasoning.Summary = "auto"
 		}
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
if opts.Model == "gpt-5" {
reqBody.Reasoning.Summary = "auto"
}
if strings.HasPrefix(opts.Model, "gpt-5") {
reqBody.Reasoning.Summary = "auto"
}
🤖 Prompt for AI Agents
In pkg/aiusechat/openai/openai-convertmessage.go around lines 238 to 240, the
code currently enables auto reasoning summary only when opts.Model == "gpt-5",
which misses common suffixed variants; change the check to detect model name
prefixes (e.g., use strings.HasPrefix(opts.Model, "gpt-5")) so any gpt-5.*
variant will enable reqBody.Reasoning.Summary = "auto", and ensure the strings
package is imported if not already.

}

// Set temperature if provided
Expand Down
8 changes: 5 additions & 3 deletions pkg/wcloud/wcloud.go
Original file line number Diff line number Diff line change
Expand Up @@ -211,7 +211,7 @@ func sendTEvents(clientId string) (int, error) {
return totalEvents, nil
}

func SendAllTelemetry(ctx context.Context, clientId string) error {
func SendAllTelemetry(clientId string) error {
defer func() {
ctx, cancelFn := context.WithTimeout(context.Background(), 2*time.Second)
defer cancelFn()
Expand All @@ -225,14 +225,16 @@ func SendAllTelemetry(ctx context.Context, clientId string) error {
if err != nil {
return err
}
err = sendTelemetry(ctx, clientId)
err = sendTelemetry(clientId)
if err != nil {
return err
}
return nil
}

func sendTelemetry(ctx context.Context, clientId string) error {
func sendTelemetry(clientId string) error {
ctx, cancelFn := context.WithTimeout(context.Background(), WCloudDefaultTimeout)
defer cancelFn()
activity, err := telemetry.GetNonUploadedActivity(ctx)
if err != nil {
return fmt.Errorf("cannot get activity: %v", err)
Expand Down
4 changes: 2 additions & 2 deletions pkg/wshrpc/wshserver/wshserver.go
Original file line number Diff line number Diff line change
Expand Up @@ -899,7 +899,7 @@ func (ws WshServer) SendTelemetryCommand(ctx context.Context) error {
if err != nil {
return fmt.Errorf("getting client data for telemetry: %v", err)
}
return wcloud.SendAllTelemetry(ctx, client.OID)
return wcloud.SendAllTelemetry(client.OID)
}

func (ws *WshServer) WaveAIEnableTelemetryCommand(ctx context.Context) error {
Expand Down Expand Up @@ -936,7 +936,7 @@ func (ws *WshServer) WaveAIEnableTelemetryCommand(ctx context.Context) error {
}

// Immediately send telemetry to cloud
err = wcloud.SendAllTelemetry(ctx, client.OID)
err = wcloud.SendAllTelemetry(client.OID)
if err != nil {
log.Printf("error sending telemetry after enabling: %v", err)
}
Expand Down
Loading