From ffa39a93788846bc3e7fe43dc764e53e2564c031 Mon Sep 17 00:00:00 2001
From: openhands
Date: Mon, 3 Nov 2025 15:06:37 +0000
Subject: [PATCH 1/7] Add SDK persistence directory structure documentation and move to top-level

- Add information about persistence directory containing JSON files similar to trajectory.json from V0
- Move Persistence guide from Conversation Features to top-level in SDK Guides
- Include directory structure example showing how conversations are stored
---
 docs.json                        |  2 +-
 sdk/guides/convo-persistence.mdx | 14 ++++++++++++++
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/docs.json b/docs.json
index 586529e7..30b5ff77 100644
--- a/docs.json
+++ b/docs.json
@@ -182,6 +182,7 @@
         "sdk/guides/custom-tools",
         "sdk/guides/mcp",
         "sdk/guides/skill",
+        "sdk/guides/convo-persistence",
         "sdk/guides/context-condenser",
         "sdk/guides/agent-delegation",
         "sdk/guides/security",
@@ -208,7 +209,6 @@
     {
       "group": "Conversation Features",
       "pages": [
-        "sdk/guides/convo-persistence",
         "sdk/guides/convo-pause-and-resume",
         "sdk/guides/convo-send-message-while-running",
         "sdk/guides/convo-async"
diff --git a/sdk/guides/convo-persistence.mdx b/sdk/guides/convo-persistence.mdx
index b126c121..831bc73c 100644
--- a/sdk/guides/convo-persistence.mdx
+++ b/sdk/guides/convo-persistence.mdx
@@ -176,6 +176,20 @@ The conversation state includes comprehensive information that allows seamless r
 
 For the complete implementation details, see the [ConversationState class](https://github.com/OpenHands/software-agent-sdk/blob/main/openhands-sdk/openhands/sdk/conversation/state.py) in the source code.
 
+### Persistence Directory Structure
+
+When you set a `persistence_dir`, your conversation will be persisted to a directory containing JSON files that mirror the `trajectory.json` file structure from OpenHands V0. Each conversation is stored as a separate JSON file named with its conversation ID.
+
+**Directory structure:**
+```
+.conversations/
+├── <conversation_id>.json
+├── <conversation_id>.json
+└── ...
+```
+
+Each JSON file contains the serialized conversation state with all events, messages, and metadata - similar to how `trajectory.json` worked in V0. This makes it easy to inspect, debug, or migrate conversation data between systems.
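Because each conversation is a single JSON file in this layout, it can be inspected with nothing but the standard library. A minimal sketch, assuming the flat `.conversations/` layout described above (later commits in this series move to per-conversation subdirectories); the key listing is deliberately schema-agnostic since the exact fields are defined by the SDK's serialization:

```python
import json
from pathlib import Path

# Flat layout described above: one JSON file per conversation ID.
persistence_root = Path(".conversations")

for state_file in sorted(persistence_root.glob("*.json")):
    state = json.loads(state_file.read_text())
    # Top-level keys depend on the SDK's serialization format, so list them
    # instead of assuming specific field names.
    print(f"{state_file.stem}: {sorted(state)}")
```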
+ ## Next Steps - **[Pause and Resume](/sdk/guides/convo-pause-and-resume)** - Control execution flow From a2dcfc15b659b7a922ca7ef20372f6f16565166c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 3 Nov 2025 15:07:37 +0000 Subject: [PATCH 2/7] docs: sync code blocks from agent-sdk examples Synced from agent-sdk ref: main --- sdk/getting-started.mdx | 4 +++ sdk/guides/agent-custom.mdx | 4 +++ sdk/guides/agent-server/docker-sandbox.mdx | 4 +++ sdk/guides/agent-server/local-server.mdx | 4 +++ sdk/guides/agent-stuck-detector.mdx | 4 +++ sdk/guides/context-condenser.mdx | 4 +++ sdk/guides/convo-async.mdx | 4 +++ sdk/guides/convo-pause-and-resume.mdx | 4 +++ sdk/guides/convo-persistence.mdx | 4 +++ .../convo-send-message-while-running.mdx | 4 +++ sdk/guides/custom-tools.mdx | 4 +++ sdk/guides/hello-world.mdx | 4 +++ sdk/guides/llm-image-input.mdx | 4 +++ sdk/guides/llm-reasoning.mdx | 10 +++++++- sdk/guides/llm-registry.mdx | 17 +++++++------ sdk/guides/llm-routing.mdx | 4 +++ sdk/guides/mcp.mdx | 4 +++ sdk/guides/metrics.mdx | 25 +++++++++++++------ sdk/guides/secrets.mdx | 4 +++ sdk/guides/skill.mdx | 4 +++ 20 files changed, 103 insertions(+), 17 deletions(-) diff --git a/sdk/getting-started.mdx b/sdk/getting-started.mdx index 23651fcc..aa60f225 100644 --- a/sdk/getting-started.mdx +++ b/sdk/getting-started.mdx @@ -101,6 +101,10 @@ conversation = Conversation(agent=agent, workspace=cwd) # Send a message and let the agent run conversation.send_message("Write 3 facts about the current project into FACTS.txt.") conversation.run() + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` Run the example: diff --git a/sdk/guides/agent-custom.mdx b/sdk/guides/agent-custom.mdx index ff50bcf5..8136a780 100644 --- a/sdk/guides/agent-custom.mdx +++ b/sdk/guides/agent-custom.mdx @@ -146,6 +146,10 @@ print("\nCreated files:") for file_path in workspace_dir.rglob("*"): if file_path.is_file(): print(f" - {file_path.relative_to(workspace_dir)}") + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example diff --git a/sdk/guides/agent-server/docker-sandbox.mdx b/sdk/guides/agent-server/docker-sandbox.mdx index 34472da4..6e2d53d5 100644 --- a/sdk/guides/agent-server/docker-sandbox.mdx +++ b/sdk/guides/agent-server/docker-sandbox.mdx @@ -121,6 +121,10 @@ with DockerWorkspace( conversation.send_message("Great! 
Now delete that file.") conversation.run() logger.info("✅ Second task completed!") + + # Report cost (must be before conversation.close()) + cost = conversation.conversation_stats.get_combined_metrics().accumulated_cost + print(f"EXAMPLE_COST: {cost}") finally: print("\n🧹 Cleaning up conversation...") conversation.close() diff --git a/sdk/guides/agent-server/local-server.mdx b/sdk/guides/agent-server/local-server.mdx index 99bc73b1..00cbc26e 100644 --- a/sdk/guides/agent-server/local-server.mdx +++ b/sdk/guides/agent-server/local-server.mdx @@ -253,6 +253,10 @@ with ManagedAPIServer(port=8001) as server: if isinstance(event, ConversationStateUpdateEvent): logger.info(f" - {event}") + # Report cost (must be before conversation.close()) + cost = conversation.conversation_stats.get_combined_metrics().accumulated_cost + print(f"EXAMPLE_COST: {cost}") + finally: # Clean up print("\n🧹 Cleaning up conversation...") diff --git a/sdk/guides/agent-stuck-detector.mdx b/sdk/guides/agent-stuck-detector.mdx index 5a02f7ce..1f268576 100644 --- a/sdk/guides/agent-stuck-detector.mdx +++ b/sdk/guides/agent-stuck-detector.mdx @@ -85,6 +85,10 @@ print("=" * 100) print("Conversation finished. Got the following LLM messages:") for i, message in enumerate(llm_messages): print(f"Message {i}: {str(message)[:200]}") + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example diff --git a/sdk/guides/context-condenser.mdx b/sdk/guides/context-condenser.mdx index f25d388f..d0a9f978 100644 --- a/sdk/guides/context-condenser.mdx +++ b/sdk/guides/context-condenser.mdx @@ -194,6 +194,10 @@ print(f"Total LLM messages collected: {len(llm_messages)}") print("\nThe condenser automatically summarized older conversation history") print("when the conversation exceeded the configured max_size threshold.") print("This helps manage context length while preserving important information.") + +# Report cost +cost = conversation.conversation_stats.get_combined_metrics().accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example diff --git a/sdk/guides/convo-async.mdx b/sdk/guides/convo-async.mdx index 82b0a5d1..bf077cd6 100644 --- a/sdk/guides/convo-async.mdx +++ b/sdk/guides/convo-async.mdx @@ -104,6 +104,10 @@ async def main(): for i, message in enumerate(llm_messages): print(f"Message {i}: {str(message)[:200]}") + # Report cost + cost = llm.metrics.accumulated_cost + print(f"EXAMPLE_COST: {cost}") + if __name__ == "__main__": asyncio.run(main()) diff --git a/sdk/guides/convo-pause-and-resume.mdx b/sdk/guides/convo-pause-and-resume.mdx index d6462177..dd84e86c 100644 --- a/sdk/guides/convo-pause-and-resume.mdx +++ b/sdk/guides/convo-pause-and-resume.mdx @@ -105,6 +105,10 @@ print(f"Status before resume: {conversation.state.agent_status}") conversation.run() print(f"Final status: {conversation.state.agent_status}") + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example diff --git a/sdk/guides/convo-persistence.mdx b/sdk/guides/convo-persistence.mdx index 831bc73c..7f2a7c0d 100644 --- a/sdk/guides/convo-persistence.mdx +++ b/sdk/guides/convo-persistence.mdx @@ -110,6 +110,10 @@ conversation = Conversation( print("Sending message to deserialized conversation...") conversation.send_message("Hey what did you create? 
Return an agent finish action") conversation.run() + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example diff --git a/sdk/guides/convo-send-message-while-running.mdx b/sdk/guides/convo-send-message-while-running.mdx index ef32f817..a3613645 100644 --- a/sdk/guides/convo-send-message-while-running.mdx +++ b/sdk/guides/convo-send-message-while-running.mdx @@ -152,6 +152,10 @@ if os.path.exists(document_path): os.remove(document_path) else: print("WARNING: Document.txt was not created") + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example diff --git a/sdk/guides/custom-tools.mdx b/sdk/guides/custom-tools.mdx index a78e5492..6ca26ff6 100644 --- a/sdk/guides/custom-tools.mdx +++ b/sdk/guides/custom-tools.mdx @@ -228,6 +228,10 @@ print("=" * 100) print("Conversation finished. Got the following LLM messages:") for i, message in enumerate(llm_messages): print(f"Message {i}: {str(message)[:200]}") + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example diff --git a/sdk/guides/hello-world.mdx b/sdk/guides/hello-world.mdx index 7e13f53f..1b1a39db 100644 --- a/sdk/guides/hello-world.mdx +++ b/sdk/guides/hello-world.mdx @@ -39,6 +39,10 @@ conversation = Conversation(agent=agent, workspace=cwd) # Send a message and let the agent run conversation.send_message("Write 3 facts about the current project into FACTS.txt.") conversation.run() + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example diff --git a/sdk/guides/llm-image-input.mdx b/sdk/guides/llm-image-input.mdx index 034e15da..c048f688 100644 --- a/sdk/guides/llm-image-input.mdx +++ b/sdk/guides/llm-image-input.mdx @@ -110,6 +110,10 @@ print("=" * 100) print("Conversation finished. Got the following LLM messages:") for i, message in enumerate(llm_messages): print(f"Message {i}: {str(message)[:200]}") + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example diff --git a/sdk/guides/llm-reasoning.mdx b/sdk/guides/llm-reasoning.mdx index 7fc3f7bd..e59c3bf5 100644 --- a/sdk/guides/llm-reasoning.mdx +++ b/sdk/guides/llm-reasoning.mdx @@ -82,6 +82,10 @@ conversation.send_message( ) conversation.run() print("✅ Done!") + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example @@ -157,7 +161,7 @@ logger = get_logger(__name__) api_key = os.getenv("LLM_API_KEY") or os.getenv("OPENAI_API_KEY") assert api_key, "Set LLM_API_KEY or OPENAI_API_KEY in your environment." -model = os.getenv("LLM_MODEL", "openhands/gpt-5-codex") +model = "openhands/gpt-5-mini-2025-08-07" # Use a model that supports Responses API base_url = os.getenv("LLM_BASE_URL") llm = LLM( @@ -203,6 +207,10 @@ print("Conversation finished. Got the following LLM messages:") for i, message in enumerate(llm_messages): ms = str(message) print(f"Message {i}: {ms[:200]}{'...' 
if len(ms) > 200 else ''}") + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example diff --git a/sdk/guides/llm-registry.mdx b/sdk/guides/llm-registry.mdx index d7f417f9..0d5c8af2 100644 --- a/sdk/guides/llm-registry.mdx +++ b/sdk/guides/llm-registry.mdx @@ -88,18 +88,19 @@ same_llm = llm_registry.get("agent") print(f"Same LLM instance: {llm is same_llm}") # Demonstrate requesting a completion directly from an LLM -completion_response = llm.completion( +resp = llm.completion( messages=[ Message(role="user", content=[TextContent(text="Say hello in one word.")]) ] ) -# Access the response content -raw_response = completion_response.raw_response -if raw_response.choices and raw_response.choices[0].message: # type: ignore - content = raw_response.choices[0].message.content # type: ignore - print(f"Direct completion response: {content}") -else: - print("No response content available") +# Access the response content via OpenHands LLMResponse +msg = resp.message +texts = [c.text for c in msg.content if isinstance(c, TextContent)] +print(f"Direct completion response: {texts[0] if texts else str(msg)}") + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example diff --git a/sdk/guides/llm-routing.mdx b/sdk/guides/llm-routing.mdx index 57a959bc..78779b9c 100644 --- a/sdk/guides/llm-routing.mdx +++ b/sdk/guides/llm-routing.mdx @@ -108,6 +108,10 @@ print("=" * 100) print("Conversation finished. Got the following LLM messages:") for i, message in enumerate(llm_messages): print(f"Message {i}: {str(message)[:200]}") + +# Report cost +cost = conversation.conversation_stats.get_combined_metrics().accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example diff --git a/sdk/guides/mcp.mdx b/sdk/guides/mcp.mdx index 77a12dc4..a2fe824a 100644 --- a/sdk/guides/mcp.mdx +++ b/sdk/guides/mcp.mdx @@ -101,6 +101,10 @@ print("=" * 100) print("Conversation finished. Got the following LLM messages:") for i, message in enumerate(llm_messages): print(f"Message {i}: {str(message)[:200]}") + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example diff --git a/sdk/guides/metrics.mdx b/sdk/guides/metrics.mdx index 2023d718..895a450f 100644 --- a/sdk/guides/metrics.mdx +++ b/sdk/guides/metrics.mdx @@ -97,6 +97,10 @@ assert llm.metrics is not None print( f"Conversation finished. 
Final LLM metrics with details: {llm.metrics.model_dump()}" ) + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example @@ -219,18 +223,19 @@ same_llm = llm_registry.get("agent") print(f"Same LLM instance: {llm is same_llm}") # Demonstrate requesting a completion directly from an LLM -completion_response = llm.completion( +resp = llm.completion( messages=[ Message(role="user", content=[TextContent(text="Say hello in one word.")]) ] ) -# Access the response content -raw_response = completion_response.raw_response -if raw_response.choices and raw_response.choices[0].message: # type: ignore - content = raw_response.choices[0].message.content # type: ignore - print(f"Direct completion response: {content}") -else: - print("No response content available") +# Access the response content via OpenHands LLMResponse +msg = resp.message +texts = [c.text for c in msg.content if isinstance(c, TextContent)] +print(f"Direct completion response: {texts[0] if texts else str(msg)}") + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example @@ -377,6 +382,10 @@ print( tablefmt="github", ) ) + +# Report cost +cost = conversation.conversation_stats.get_combined_metrics().accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example diff --git a/sdk/guides/secrets.mdx b/sdk/guides/secrets.mdx index e2185463..77ade9c0 100644 --- a/sdk/guides/secrets.mdx +++ b/sdk/guides/secrets.mdx @@ -66,6 +66,10 @@ conversation.run() conversation.send_message("just echo $SECRET_FUNCTION_TOKEN") conversation.run() + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example diff --git a/sdk/guides/skill.mdx b/sdk/guides/skill.mdx index 33e0a485..f1ec4bc1 100644 --- a/sdk/guides/skill.mdx +++ b/sdk/guides/skill.mdx @@ -116,6 +116,10 @@ print("=" * 100) print("Conversation finished. Got the following LLM messages:") for i, message in enumerate(llm_messages): print(f"Message {i}: {str(message)[:200]}") + +# Report cost +cost = llm.metrics.accumulated_cost +print(f"EXAMPLE_COST: {cost}") ``` ```bash Running the Example From 9c38acf721a652cd943ac0e85c05f44e14b965ab Mon Sep 17 00:00:00 2001 From: openhands Date: Mon, 3 Nov 2025 15:12:46 +0000 Subject: [PATCH 3/7] Update persistence directory structure to show subdirectories per conversation - Clarify structure is .conversations// with each conversation in its own subdirectory - Update directory tree to show state.json files within conversation subdirectories --- sdk/guides/convo-persistence.mdx | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/sdk/guides/convo-persistence.mdx b/sdk/guides/convo-persistence.mdx index 7f2a7c0d..265a453e 100644 --- a/sdk/guides/convo-persistence.mdx +++ b/sdk/guides/convo-persistence.mdx @@ -182,17 +182,19 @@ For the complete implementation details, see the [ConversationState class](https ### Persistence Directory Structure -When you set a `persistence_dir`, your conversation will be persisted to a directory containing JSON files that mirror the `trajectory.json` file structure from OpenHands V0. Each conversation is stored as a separate JSON file named with its conversation ID. +When you set a `persistence_dir`, your conversation will be persisted to a directory containing JSON files that mirror the `trajectory.json` file structure from OpenHands V0. 
The structure follows the pattern `.conversations//`, where each conversation has its own subdirectory. **Directory structure:** ``` .conversations/ -├── .json -├── .json +├── / +│ └── state.json +├── / +│ └── state.json └── ... ``` -Each JSON file contains the serialized conversation state with all events, messages, and metadata - similar to how `trajectory.json` worked in V0. This makes it easy to inspect, debug, or migrate conversation data between systems. +Each `state.json` file contains the serialized conversation state with all events, messages, and metadata - similar to how `trajectory.json` worked in V0. This makes it easy to inspect, debug, or migrate conversation data between systems. ## Next Steps From 101b9e8c88bf8bc361d3effdb679c26931a6e6f1 Mon Sep 17 00:00:00 2001 From: all-hands-bot Date: Mon, 3 Nov 2025 15:13:25 +0000 Subject: [PATCH 4/7] sync(openapi): agent-sdk/main eb2ca5d --- openapi/agent-sdk.json | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/openapi/agent-sdk.json b/openapi/agent-sdk.json index 88682d80..b1e65f8f 100644 --- a/openapi/agent-sdk.json +++ b/openapi/agent-sdk.json @@ -2084,7 +2084,7 @@ "llm_response_id": { "type": "string", "title": "Llm Response Id", - "description": "Groups related actions from same LLM response. This helps in tracking and managing results of parallel function calling from the same LLM response." + "description": "Completion or Response ID of the LLM response that generated this eventE.g., Can be used to group related actions from same LLM response. This helps in tracking and managing results of parallel function calling from the same LLM response." }, "security_risk": { "$ref": "#/components/schemas/SecurityRisk", @@ -3059,10 +3059,18 @@ ], "title": "Summary Offset", "description": "An optional offset to the start of the resulting view indicating where the summary should be inserted." + }, + "llm_response_id": { + "type": "string", + "title": "Llm Response Id", + "description": "Completion or Response ID of the LLM response that generated this event" } }, "additionalProperties": false, "type": "object", + "required": [ + "llm_response_id" + ], "title": "Condensation", "description": "This action indicates a condensation of the conversation history is happening." }, @@ -3241,9 +3249,9 @@ "$ref": "#/components/schemas/ConversationStats-Output", "description": "Conversation statistics for tracking LLM metrics" }, - "secrets_manager": { - "$ref": "#/components/schemas/SecretsManager-Output", - "description": "Manager for handling secrets and sensitive data" + "secret_registry": { + "$ref": "#/components/schemas/SecretRegistry-Output", + "description": "Registry for handling secrets and sensitive data" }, "title": { "anyOf": [ @@ -4801,6 +4809,18 @@ "$ref": "#/components/schemas/Message", "description": "The exact LLM message for this message event" }, + "llm_response_id": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Llm Response Id", + "description": "Completion or Response ID of the LLM response that generated this eventIf the source != 'agent', this field is None" + }, "activated_skills": { "items": { "type": "string" @@ -5279,7 +5299,7 @@ "title": "ResponseLatency", "description": "Metric tracking the round-trip time per completion call." 
}, - "SecretsManager-Output": { + "SecretRegistry-Output": { "properties": { "secret_sources": { "additionalProperties": { @@ -5304,8 +5324,8 @@ } }, "type": "object", - "title": "SecretsManager", - "description": "Manages secrets and injects them into bash commands when needed.\n\nThe secrets manager stores a mapping of secret keys to SecretSources\nthat retrieve the actual secret values. When a bash command is about to be\nexecuted, it scans the command for any secret keys and injects the corresponding\nenvironment variables.\n\nSecret sources will redact / encrypt their sensitive values as appropriate when\nserializing, depending on the content of the context. If a context is present\nand contains a 'cipher' object, this is used for encryption. If it contains a\nboolean 'expose_secrets' flag set to True, secrets are dunped in plain text.\nOtherwise secrets are redacted.\n\nAdditionally, it tracks the latest exported values to enable consistent masking\neven when callable secrets fail on subsequent calls." + "title": "SecretRegistry", + "description": "Manages secrets and injects them into bash commands when needed.\n\nThe secret registry stores a mapping of secret keys to SecretSources\nthat retrieve the actual secret values. When a bash command is about to be\nexecuted, it scans the command for any secret keys and injects the corresponding\nenvironment variables.\n\nSecret sources will redact / encrypt their sensitive values as appropriate when\nserializing, depending on the content of the context. If a context is present\nand contains a 'cipher' object, this is used for encryption. If it contains a\nboolean 'expose_secrets' flag set to True, secrets are dunped in plain text.\nOtherwise secrets are redacted.\n\nAdditionally, it tracks the latest exported values to enable consistent masking\neven when callable secrets fail on subsequent calls." }, "SecurityAnalyzerBase": { "properties": { From a9fedb52ef4b0c6b4199db3102516f3b8c0bfafa Mon Sep 17 00:00:00 2001 From: openhands Date: Mon, 3 Nov 2025 16:54:11 +0000 Subject: [PATCH 5/7] Fix persistence directory structure documentation - Update directory structure to show accurate file organization from SDK source - Change default path from .conversations/ to workspace/conversations/ - Show base_state.json (not state.json) as the main state file - Document events/ subdirectory with individual event files - Add event file naming pattern (event-{idx:05d}-{event-id}.json) - Clarify that events are stored separately for granular access Co-authored-by: openhands --- sdk/guides/convo-persistence.mdx | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/sdk/guides/convo-persistence.mdx b/sdk/guides/convo-persistence.mdx index 265a453e..8cca2fb9 100644 --- a/sdk/guides/convo-persistence.mdx +++ b/sdk/guides/convo-persistence.mdx @@ -182,19 +182,28 @@ For the complete implementation details, see the [ConversationState class](https ### Persistence Directory Structure -When you set a `persistence_dir`, your conversation will be persisted to a directory containing JSON files that mirror the `trajectory.json` file structure from OpenHands V0. The structure follows the pattern `.conversations//`, where each conversation has its own subdirectory. +When you set a `persistence_dir`, your conversation will be persisted to a directory structure where each conversation has its own subdirectory. By default, the persistence directory is `workspace/conversations/` (unless you specify a custom path). 
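To make the default-versus-custom path rule concrete, here is a tiny hypothetical helper (not part of the SDK) that resolves where a given conversation's data lives under either the default `workspace/conversations/` root or a custom `persistence_dir`:

```python
from pathlib import Path


def conversation_dir(conversation_id: str, persistence_dir: str | None = None) -> Path:
    """Resolve a conversation's storage directory per the layout described above."""
    # Default root unless a custom persistence_dir was configured.
    root = Path(persistence_dir) if persistence_dir else Path("workspace/conversations")
    # Each conversation gets its own subdirectory named by its ID.
    return root / conversation_id
```

For example, `conversation_dir("8f2c0d")` (an illustrative ID) would point at `workspace/conversations/8f2c0d/`.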
**Directory structure:**
```
workspace/conversations/
├── <conversation_id>/
│   ├── base_state.json      # Base conversation state
│   └── events/              # Event files directory
│       ├── event-00000-<event_id>.json
│       ├── event-00001-<event_id>.json
│       └── ...
├── <conversation_id>/
│   ├── base_state.json
│   └── events/
│       └── ...
```

Each conversation directory contains:
- **`base_state.json`**: The core conversation state including agent configuration, execution status, statistics, and metadata
- **`events/`**: A subdirectory containing individual event files, each named with a sequential index and event ID (e.g., `event-00000-abc123.json`)

This structure provides granular access to conversation history, making it easy to inspect individual events, debug agent behavior, or process conversation data programmatically.

## Next Steps

From 85ec7ac472bd4f308ac126ed089182bac2571159 Mon Sep 17 00:00:00 2001
From: openhands
Date: Mon, 3 Nov 2025 16:55:31 +0000
Subject: [PATCH 6/7] Clarify relationship between events directory and trajectory.json

- Explain that events/ directory contains the same data as trajectory.json from V0
- Highlight the benefit of splitting into individual files (performance, granular access)
- Mention memory efficiency advantage

Co-authored-by: openhands
---
 sdk/guides/convo-persistence.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sdk/guides/convo-persistence.mdx b/sdk/guides/convo-persistence.mdx
index 8cca2fb9..edaf5ded 100644
--- a/sdk/guides/convo-persistence.mdx
+++ b/sdk/guides/convo-persistence.mdx
@@ -203,7 +203,7 @@ Each conversation directory contains:
 - **`base_state.json`**: The core conversation state including agent configuration, execution status, statistics, and metadata
 - **`events/`**: A subdirectory containing individual event files, each named with a sequential index and event ID (e.g., `event-00000-abc123.json`)
 
-This structure provides granular access to conversation history, making it easy to inspect individual events, debug agent behavior, or process conversation data programmatically.
+The collection of event files in the `events/` directory represents the same trajectory data you would find in the `trajectory.json` file from OpenHands V0, but split into individual files for better performance and granular access. This structure makes it easy to inspect individual events, debug agent behavior, or process conversation data programmatically without loading the entire trajectory into memory.
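Since event files carry zero-padded indices, the persisted trajectory can be streamed file by file. A minimal stdlib-only sketch, assuming the default `workspace/conversations/` root described above; the key listing inside each event is intentionally schema-agnostic because the exact fields are defined by the SDK's serialization:

```python
import json
from pathlib import Path

conversations_root = Path("workspace/conversations")  # default root described above

for convo_dir in sorted(p for p in conversations_root.iterdir() if p.is_dir()):
    base_state = json.loads((convo_dir / "base_state.json").read_text())
    print(f"{convo_dir.name}: {len(base_state)} top-level state fields")

    # Zero-padded indices make lexicographic order match chronological order,
    # so events can be processed one file at a time instead of loading a
    # single monolithic trajectory.json.
    for event_file in sorted((convo_dir / "events").glob("event-*.json")):
        event = json.loads(event_file.read_text())
        print(f"  {event_file.name}: keys={sorted(event)[:5]}")
```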
 ## Next Steps

From 6e9235e82b90f4a2ac34d7670f777899952329cf Mon Sep 17 00:00:00 2001
From: Xingyao Wang
Date: Tue, 4 Nov 2025 01:26:59 +0800
Subject: [PATCH 7/7] Apply suggestion from @xingyaoww

---
 sdk/guides/convo-persistence.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sdk/guides/convo-persistence.mdx b/sdk/guides/convo-persistence.mdx
index edaf5ded..c7ec8c51 100644
--- a/sdk/guides/convo-persistence.mdx
+++ b/sdk/guides/convo-persistence.mdx
@@ -203,7 +203,7 @@ Each conversation directory contains:
 - **`base_state.json`**: The core conversation state including agent configuration, execution status, statistics, and metadata
 - **`events/`**: A subdirectory containing individual event files, each named with a sequential index and event ID (e.g., `event-00000-abc123.json`)
 
-The collection of event files in the `events/` directory represents the same trajectory data you would find in the `trajectory.json` file from OpenHands V0, but split into individual files for better performance and granular access. This structure makes it easy to inspect individual events, debug agent behavior, or process conversation data programmatically without loading the entire trajectory into memory.
+The collection of event files in the `events/` directory represents the same trajectory data you would find in the `trajectory.json` file from OpenHands V0, but split into individual files for better performance and granular access.
 
 ## Next Steps
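Every synced example in this series ends with the same cost-reporting pattern. As a wrap-up, here is a small sketch that folds both variants into one helper; it assumes only the two accessors that appear verbatim in the examples above (`llm.metrics.accumulated_cost` and `conversation.conversation_stats.get_combined_metrics().accumulated_cost`):

```python
def report_cost(llm=None, conversation=None):
    """Print accumulated LLM cost using the two patterns from the synced examples."""
    if conversation is not None:
        # Multi-LLM setups (routing, condenser, delegation): aggregate across
        # every LLM the conversation used.
        cost = conversation.conversation_stats.get_combined_metrics().accumulated_cost
    else:
        # Single-LLM setups: read metrics directly off the LLM instance.
        assert llm is not None and llm.metrics is not None
        cost = llm.metrics.accumulated_cost
    print(f"EXAMPLE_COST: {cost}")
```

Call `report_cost(llm=llm)` at the end of a single-LLM script, or `report_cost(conversation=conversation)` before `conversation.close()` in the server and routing examples, matching where the patches above place the print.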