Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
958a776
feat: add file tools and grep search functionality
minpeter Jan 7, 2026
72b9458
feat: 추가된 모든 파일 커밋
minpeter Jan 7, 2026
32225b3
Add option to show finish reason in stream renderer
minpeter Jan 7, 2026
6021ba3
Increase max output tokens and integrate message history for
minpeter Jan 7, 2026
6b90e28
Add MessageHistory class for managing chat message state
minpeter Jan 7, 2026
1cffbe2
Refactor agent implementation and file structure
minpeter Jan 7, 2026
10a54b4
Refactor command tool imports and restructure shell module
minpeter Jan 7, 2026
362cc15
Upgrade @ai-sdk/openai-compatible to v2.0.4 and update related
minpeter Jan 7, 2026
efb42fd
Refactor command handling to use new command system
minpeter Jan 7, 2026
49f87f3
Make registerCommand exported and add render command
minpeter Jan 7, 2026
627bdf1
Refactor project structure to use agent manager and modularize command
minpeter Jan 7, 2026
b18cad9
docs: update README with correct command /model and remove non-existe…
minpeter Jan 7, 2026
1b7230a
Implement clear command and update agent functionality
minpeter Jan 7, 2026
dd7256f
Update message history context with improved type safety and function…
minpeter Jan 7, 2026
a541ed7
Add unit tests for trimTrailingNewlines function and implement newline
minpeter Jan 7, 2026
cbcab5e
Add just-bash for shell command execution
minpeter Jan 7, 2026
1d1052f
Refactor tools structure and add tool execution approval
minpeter Jan 7, 2026
2eaa7af
Implement interactive code changes for chat interface and stream rend…
minpeter Jan 7, 2026
686d70d
Update message history context and environment configuration
minpeter Jan 7, 2026
4045f56
feat: add DEBUG_SHOW_FINISH_REASON environment variable and option\n\…
minpeter Jan 7, 2026
47f98e6
Update system prompt to reflect all available tools
minpeter Jan 7, 2026
d2e8508
Add additional changes from working directory
minpeter Jan 7, 2026
192dcb4
Add tool approval functionality and interaction handling
minpeter Jan 7, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 2 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -87,11 +87,7 @@ You: /help
Available commands:
/help - Show this help message
/clear - Clear current conversation
/save - Save current conversation
/load <id> - Load a saved conversation
/list - List all saved conversations
/delete <id> - Delete a saved conversation
/models - List and select available AI models
/model - Show and select available AI models
/render - Render conversation as raw prompt text
/quit - Exit the program

Expand Down Expand Up @@ -132,7 +128,7 @@ code-editing-agent/

## Model

Uses `LGAI-EXAONE/K-EXAONE-236B-A23B` via FriendliAI serverless endpoints by default. Use `/models` command to switch models.
Uses `LGAI-EXAONE/K-EXAONE-236B-A23B` via FriendliAI serverless endpoints by default. Use `/model` command to switch models.

## License

Expand Down
13 changes: 11 additions & 2 deletions bun.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
"node": ">=18"
},
"dependencies": {
"@ai-sdk/openai-compatible": "^2.0.4",
"@friendliai/ai-provider": "^1.0.3",
"@t3-oss/env-core": "^0.13.10",
"ai": "^6.0.7",
Expand Down
221 changes: 48 additions & 173 deletions src/agent.ts
Original file line number Diff line number Diff line change
@@ -1,187 +1,62 @@
import type { TextStreamPart } from "ai";
import {
type LanguageModel,
type ModelMessage,
stepCountIs,
streamText,
} from "ai";
import { createFriendli } from "@friendliai/ai-provider";
import type { ModelMessage } from "ai";
import { ToolLoopAgent, wrapLanguageModel } from "ai";
import { SYSTEM_PROMPT } from "./context/system-prompt";
import { env } from "./env";
import { SYSTEM_PROMPT } from "./prompts/system";
import type { tools } from "./tools/index";
import { tools as agentTools } from "./tools/index";
import {
printAIPrefix,
printChunk,
printNewline,
printReasoningChunk,
printReasoningEnd,
printReasoningPrefix,
printTool,
} from "./utils/colors";
import { withRetry } from "./utils/retry";

type StreamChunk = TextStreamPart<typeof tools>;

interface StreamState {
hasStartedText: boolean;
hasStartedReasoning: boolean;
}

function endReasoningIfNeeded(state: StreamState): void {
if (state.hasStartedReasoning) {
printReasoningEnd();
state.hasStartedReasoning = false;
}
}

function endTextIfNeeded(state: StreamState): void {
if (state.hasStartedText) {
printNewline();
state.hasStartedText = false;
}
}
import { trimLeadingNewlinesMiddleware } from "./middleware/trim-leading-newlines";
import { tools } from "./tools";

const DEFAULT_MODEL_ID = "Qwen/Qwen3-235B-A22B-Instruct-2507";
const OUTPUT_TOKEN_MAX = 32_000;

const friendli = createFriendli({
apiKey: env.FRIENDLI_TOKEN,
includeUsage: true,
});

const createAgent = (modelId: string) =>
new ToolLoopAgent({
model: wrapLanguageModel({
model: friendli(modelId),
middleware: trimLeadingNewlinesMiddleware,
}),
instructions: SYSTEM_PROMPT,
tools: {
...tools,
},
maxOutputTokens: OUTPUT_TOKEN_MAX,
providerOptions: {
friendli: {
chat_template_kwargs: {
enable_thinking: true,
},
},
},
});

function handleReasoningDelta(chunk: StreamChunk, state: StreamState): void {
if (chunk.type !== "reasoning-delta") {
return;
}
if (!state.hasStartedReasoning) {
printReasoningPrefix();
state.hasStartedReasoning = true;
}
printReasoningChunk(chunk.text);
}
class AgentManager {
private modelId: string = DEFAULT_MODEL_ID;

function handleTextDelta(chunk: StreamChunk, state: StreamState): void {
if (chunk.type !== "text-delta") {
return;
}
endReasoningIfNeeded(state);
if (!state.hasStartedText) {
printAIPrefix();
state.hasStartedText = true;
getModelId(): string {
return this.modelId;
}
printChunk(chunk.text);
}

function handleToolCall(chunk: StreamChunk, state: StreamState): void {
if (chunk.type !== "tool-call") {
return;
setModelId(modelId: string): void {
this.modelId = modelId;
}
endReasoningIfNeeded(state);
endTextIfNeeded(state);
printTool(chunk.toolName, chunk.input);
}

function logDebugChunk(chunk: StreamChunk, chunkCount: number): void {
const skipTypes = ["text-delta", "reasoning-delta", "tool-result"];
if (!skipTypes.includes(chunk.type)) {
console.log(`[DEBUG] #${chunkCount} type: ${chunk.type}`);
getInstructions(): string {
return SYSTEM_PROMPT;
}
}

function logDebugError(chunk: StreamChunk): void {
if (chunk.type === "error") {
console.log("[DEBUG] Error:", chunk.error);
getTools() {
return tools;
}
}

function logDebugFinish(chunk: StreamChunk): void {
if (chunk.type === "finish") {
console.log(`[DEBUG] Finish reason: ${chunk.finishReason}`);
stream(messages: ModelMessage[]) {
const agent = createAgent(this.modelId);
return agent.stream({ messages });
}
}

const DEFAULT_MAX_STEPS = 255;

export class Agent {
private model: LanguageModel;
private conversation: ModelMessage[] = [];
private readonly maxSteps: number;

constructor(model: LanguageModel, maxSteps = DEFAULT_MAX_STEPS) {
this.model = model;
this.maxSteps = maxSteps;
}

getModel(): LanguageModel {
return this.model;
}

setModel(model: LanguageModel): void {
this.model = model;
}

getConversation(): ModelMessage[] {
return [...this.conversation];
}

loadConversation(messages: ModelMessage[]): void {
this.conversation = [...messages];
}

clearConversation(): void {
this.conversation = [];
}

async chat(userInput: string): Promise<void> {
this.conversation.push({
role: "user",
content: userInput,
});

await withRetry(async () => {
await this.executeStreamingChat();
});
}

private async executeStreamingChat(): Promise<void> {
const result = streamText({
model: this.model,
system: SYSTEM_PROMPT,
messages: this.conversation,
tools: agentTools,
stopWhen: stepCountIs(this.maxSteps),
providerOptions: {
friendliai: {
// enable_thinking for hybrid reasoning models
chat_template_kwargs: {
enable_thinking: true,
},
},
},
});

const state: StreamState = {
hasStartedText: false,
hasStartedReasoning: false,
};

let chunkCount = 0;
const debug = env.DEBUG_CHUNK_LOG;

for await (const chunk of result.fullStream) {
chunkCount++;

if (debug) {
logDebugChunk(chunk, chunkCount);
logDebugError(chunk);
logDebugFinish(chunk);
}

handleReasoningDelta(chunk, state);
handleTextDelta(chunk, state);
handleToolCall(chunk, state);
}

endReasoningIfNeeded(state);
endTextIfNeeded(state);

const response = await result.response;
if (debug) {
console.log(`[DEBUG] Total chunks: ${chunkCount}`);
console.log(`[DEBUG] Response messages: ${response.messages.length}`);
}
this.conversation.push(...response.messages);
}
}
export const agentManager = new AgentManager();
16 changes: 16 additions & 0 deletions src/commands/clear.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
import type { MessageHistory } from "../context/message-history";
import type { Command, CommandContext, CommandResult } from "./types";

/**
 * Factory for the `/clear` command, bound to a specific message history.
 *
 * @param messageHistory - The conversation store this command will wipe.
 * @returns A `Command` whose `execute` empties the history and reports success.
 */
export const createClearCommand = (
  messageHistory: MessageHistory
): Command => {
  const execute = (_context: CommandContext): CommandResult => {
    // Drop every stored message, then tell the caller it worked.
    messageHistory.clear();
    const result: CommandResult = {
      success: true,
      message: "Conversation history cleared.",
    };
    return result;
  };

  return {
    name: "clear",
    description: "Clear current conversation history",
    execute,
  };
};
18 changes: 18 additions & 0 deletions src/commands/help.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
import type { Command, CommandResult } from "./types";

/**
 * Factory for the `/help` command.
 *
 * Takes a getter (rather than the registry itself) so the listing always
 * reflects commands registered after this command was created.
 *
 * @param getCommands - Returns the current name → Command registry.
 * @returns A `Command` whose `execute` lists every registered command.
 */
export const createHelpCommand = (
  getCommands: () => Map<string, Command>
): Command => ({
  name: "help",
  description: "Show available commands",
  execute: (): CommandResult => {
    // One line per registered command, in registry insertion order.
    const lines: string[] = [];
    for (const cmd of getCommands().values()) {
      lines.push(` /${cmd.name} - ${cmd.description}`);
    }

    return {
      success: true,
      message: `Available commands:\n${lines.join("\n")}`,
    };
  },
});
Loading