Skip to content

Commit 81d89f2

Browse files
bwl and claude committed
Add forest write and node synthesize commands
Two new LLM-powered content generation features: 1. forest write <topic> - Generate comprehensive articles (2000-3000 words) on any topic - Uses GPT-5 with configurable reasoning effort (low/medium/high) - Configurable verbosity (brief/medium/high) - Automatically saves as new node with auto-linking - Preview mode available with --preview flag - Supports --max-tokens to control output length 2. forest node synthesize <id1> <id2> [...] - Synthesize new article from 2+ existing notes - Reads all source nodes and generates coherent synthesis - Automatically creates connections to source material - Same configuration options as write command Both commands: - Support JSON output mode - Use OpenAI API (requires OPENAI_API_KEY) - Create properly tagged nodes with embeddings - Auto-link into the graph by default (can disable with --no-auto-link) - Show token usage and estimated cost Example usage: forest write "Knowledge graphs and semantic networks" forest write "Systems thinking" --reasoning high --verbosity high forest node synthesize abc123 def456 --preview 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
1 parent b179ccf commit 81d89f2

3 files changed

Lines changed: 693 additions & 0 deletions

File tree

src/cli/commands/write.ts

Lines changed: 187 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,187 @@
1+
import { writeArticleCore, WriteModel, WriteReasoningEffort, WriteVerbosity } from '../../core/write';
2+
import { createNodeCore } from '../../core/nodes';
3+
import { formatId, handleError } from '../shared/utils';
4+
import { COMMAND_TLDR, emitTldrAndExit } from '../tldr';
5+
6+
// Type of the 'clerc' CLI framework module; the command factory receives it
// as a parameter (dependency injection) rather than importing it directly.
type ClercModule = typeof import('clerc');

// Raw `forest write` flags as parsed by clerc. String-valued flags are
// validated and normalized later by validateModel / validateReasoning /
// validateVerbosity, which fall back to defaults on invalid input.
type WriteFlags = {
  // Model name; see validateModel for accepted values (default: gpt-5).
  model?: string;
  // Reasoning effort: minimal | low | medium | high (default: high).
  reasoning?: string;
  // Output verbosity: low | medium | high (default: high).
  verbosity?: string;
  // When true, print the article without saving it as a node.
  preview?: boolean;
  // Auto-link the created node into the graph (defaults to true).
  autoLink?: boolean;
  // Hard cap on output tokens; when unset the core derives it from verbosity.
  maxTokens?: number;
  // '--tldr' or '--tldr=json': emit command metadata and exit early.
  tldr?: string;
};
17+
18+
export function createWriteCommand(clerc: ClercModule) {
19+
return clerc.defineCommand(
20+
{
21+
name: 'write',
22+
description: 'Use GPT-5 to write a comprehensive article on any topic',
23+
parameters: ['<topic>'],
24+
flags: {
25+
model: {
26+
type: String,
27+
description: 'Model to use: gpt-5, gpt-5-mini, or gpt-4o (default: gpt-5)',
28+
},
29+
reasoning: {
30+
type: String,
31+
description: 'Reasoning effort: minimal, low, medium, high (default: high)',
32+
},
33+
verbosity: {
34+
type: String,
35+
description: 'Output verbosity: low, medium, high (default: high)',
36+
},
37+
preview: {
38+
type: Boolean,
39+
description: 'Preview article without saving as a new node',
40+
},
41+
autoLink: {
42+
type: Boolean,
43+
description: 'Auto-link the new node to related nodes (default: true)',
44+
default: true,
45+
},
46+
maxTokens: {
47+
type: Number,
48+
description: 'Maximum output tokens (default: auto-set by verbosity - low:4096, medium:8192, high:16384)',
49+
},
50+
tldr: {
51+
type: String,
52+
description: 'Output command metadata for agent consumption (--tldr or --tldr=json)',
53+
},
54+
},
55+
},
56+
async ({ parameters, flags }: { parameters: { topic?: string }; flags: WriteFlags }) => {
57+
try {
58+
// Handle TLDR request first
59+
if (flags.tldr !== undefined) {
60+
const jsonMode = flags.tldr === 'json';
61+
emitTldrAndExit(COMMAND_TLDR.write, jsonMode);
62+
}
63+
await runWrite(parameters.topic, flags);
64+
} catch (error) {
65+
handleError(error);
66+
}
67+
},
68+
);
69+
}
70+
71+
async function runWrite(topic: string | undefined, flags: WriteFlags) {
72+
if (!topic || topic.trim().length === 0) {
73+
console.error('✖ Provide a topic to write about.');
74+
console.error(' Usage: forest write "topic description"');
75+
console.error(' Example: forest write "the role of mycorrhizal networks in forest ecology"');
76+
process.exitCode = 1;
77+
return;
78+
}
79+
80+
// Validate model and reasoning options
81+
const model = validateModel(flags.model);
82+
const reasoning = validateReasoning(flags.reasoning);
83+
const verbosity = validateVerbosity(flags.verbosity);
84+
85+
console.log('');
86+
console.log('Writing configuration:');
87+
console.log(` Topic: ${topic}`);
88+
console.log(` Model: ${model}`);
89+
console.log(` Reasoning: ${reasoning}`);
90+
console.log(` Verbosity: ${verbosity}`);
91+
console.log('');
92+
console.log('Calling OpenAI API to generate article...');
93+
console.log('(This may take 30-60 seconds for high reasoning)');
94+
console.log('');
95+
96+
// Call write core
97+
const result = await writeArticleCore(topic, {
98+
model,
99+
reasoning,
100+
verbosity,
101+
maxTokens: flags.maxTokens,
102+
});
103+
104+
// Display results
105+
console.log('='.repeat(80));
106+
console.log(`ARTICLE GENERATED`);
107+
console.log('='.repeat(80));
108+
console.log('');
109+
console.log(`Title: ${result.title}`);
110+
console.log('');
111+
console.log('Tags:', result.suggestedTags.join(', '));
112+
console.log('');
113+
console.log('Body Preview (first 500 chars):');
114+
console.log('-'.repeat(80));
115+
console.log(result.body.slice(0, 500) + (result.body.length > 500 ? '...' : ''));
116+
console.log('-'.repeat(80));
117+
console.log('');
118+
console.log('Metadata:');
119+
console.log(` Model: ${result.model}`);
120+
console.log(` Reasoning effort: ${result.reasoningEffort}`);
121+
console.log(` Verbosity: ${result.verbosity}`);
122+
console.log(` Tokens used: ${result.tokensUsed.reasoning} reasoning + ${result.tokensUsed.output} output`);
123+
console.log(` Estimated cost: $${result.cost.toFixed(4)}`);
124+
console.log(` Article length: ${result.body.length} characters, ~${Math.round(result.body.split(/\s+/).length)} words`);
125+
console.log('');
126+
127+
// If preview mode, stop here
128+
if (flags.preview) {
129+
console.log('Preview mode - article not saved.');
130+
console.log('');
131+
console.log('Full article:');
132+
console.log('='.repeat(80));
133+
console.log(result.body);
134+
console.log('='.repeat(80));
135+
return;
136+
}
137+
138+
// Save as new node
139+
console.log('Saving article as new node...');
140+
const autoLink = typeof flags.autoLink === 'boolean' ? flags.autoLink : true;
141+
142+
const nodeResult = await createNodeCore({
143+
title: result.title,
144+
body: result.body,
145+
tags: result.suggestedTags,
146+
autoLink,
147+
});
148+
149+
console.log('');
150+
console.log(`✔ Created article node: ${nodeResult.node.title}`);
151+
console.log(` id: ${formatId(nodeResult.node.id)}`);
152+
console.log(` tags: ${nodeResult.node.tags.join(', ')}`);
153+
if (autoLink) {
154+
console.log(` edges: ${nodeResult.linking.edgesCreated} accepted, ${nodeResult.linking.suggestionsCreated} suggested`);
155+
}
156+
console.log('');
157+
}
158+
159+
function validateModel(modelFlag: string | undefined): WriteModel {
160+
if (!modelFlag) return 'gpt-5';
161+
const normalized = modelFlag.toLowerCase();
162+
if (normalized === 'gpt-5' || normalized === 'gpt-5-mini' || normalized === 'gpt-4o') {
163+
return normalized as WriteModel;
164+
}
165+
console.error(`⚠ Invalid model "${modelFlag}", using default: gpt-5`);
166+
return 'gpt-5';
167+
}
168+
169+
function validateReasoning(reasoningFlag: string | undefined): WriteReasoningEffort {
170+
if (!reasoningFlag) return 'high'; // Default to high for quality articles
171+
const normalized = reasoningFlag.toLowerCase();
172+
if (['minimal', 'low', 'medium', 'high'].includes(normalized)) {
173+
return normalized as WriteReasoningEffort;
174+
}
175+
console.error(`⚠ Invalid reasoning effort "${reasoningFlag}", using default: high`);
176+
return 'high';
177+
}
178+
179+
function validateVerbosity(verbosityFlag: string | undefined): WriteVerbosity {
180+
if (!verbosityFlag) return 'high'; // Default to high for comprehensive articles
181+
const normalized = verbosityFlag.toLowerCase();
182+
if (['low', 'medium', 'high'].includes(normalized)) {
183+
return normalized as WriteVerbosity;
184+
}
185+
console.error(`⚠ Invalid verbosity "${verbosityFlag}", using default: high`);
186+
return 'high';
187+
}

0 commit comments

Comments
 (0)