Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
76 changes: 75 additions & 1 deletion Gradata/src/gradata/_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -1369,7 +1369,81 @@ def _cloud_sync_session(
)

except Exception as e:
_log.debug("Cloud sync failed (non-fatal): %s", e)
_log.warning("Cloud sync failed (non-fatal): %s", e, exc_info=True)


def cloud_sync_tick(brain_dir: str | Path, session_number: int) -> None:
    """Hook-safe cloud sync that doesn't require an instantiated Brain.

    Reads lessons from lessons.md and session corrections from system.db,
    then runs the same telemetry path as ``brain_end_session()``.

    Called by the Stop hook so cloud sync actually fires from Claude Code
    sessions — Claude Code never calls ``brain.end_session()`` directly.
    Never raises.
    """
    try:
        import json as _json
        import sqlite3
        from pathlib import Path as _Path

        root = _Path(brain_dir)
        if not root.is_dir():
            return

        # Lessons: best-effort parse of lessons.md; empty list on any failure.
        lessons: list[Lesson] = []
        md_file = root / "lessons.md"
        if md_file.is_file():
            try:
                from gradata.enhancements.self_improvement._confidence import (
                    parse_lessons,
                )

                lessons = parse_lessons(md_file.read_text(encoding="utf-8"))
            except Exception as e:
                _log.debug("cloud_sync_tick: parse_lessons failed: %s", e)

        # Corrections: pull this session's CORRECTION events out of system.db.
        corrections: list[dict] = []
        sqlite_file = root / "system.db"
        if sqlite_file.is_file() and session_number:
            try:
                with sqlite3.connect(sqlite_file) as conn:
                    cursor = conn.execute(
                        "SELECT data_json FROM events WHERE type = 'CORRECTION' AND session = ?",
                        (session_number,),
                    )
                    for (raw,) in cursor.fetchall():
                        try:
                            record = _json.loads(raw) if isinstance(raw, str) else raw
                        except (TypeError, _json.JSONDecodeError):
                            # Malformed row — skip it rather than abort sync.
                            continue
                        if isinstance(record, dict):
                            corrections.append(record)
            except sqlite3.Error as e:
                _log.debug("cloud_sync_tick: db read failed: %s", e)

        # _cloud_sync_session only touches `.dir` and `.db_path` on brain, so
        # a minimal attribute holder stands in for a full Brain — no
        # migrations, no FTS init. `db_path` may not exist for a fresh brain
        # that has only lessons.md; downstream `compute_metrics` already
        # tolerates that with a None-path short-circuit, so it is passed
        # through unguarded. Sync still completes and `last_sync_at` updates.
        class _MiniBrain:
            dir = root
            db_path = sqlite_file

        _cloud_sync_session(
            _MiniBrain(),  # type: ignore[arg-type]
            session_number,
            lessons,
            corrections,
            {},
        )
    except Exception as e:
        _log.warning("cloud_sync_tick failed: %s", e, exc_info=True)


def _parse_toml_cloud(config_path: Path) -> dict:
Expand Down
79 changes: 79 additions & 0 deletions Gradata/src/gradata/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -1023,6 +1023,61 @@ def cmd_rule(args):
print(f"error: unknown rule subcommand: {sub}", file=sys.stderr)


def cmd_skill_export(args):
    """Export graduated rules as an Anthropic Claude Skill folder.

    Produces ``<output-dir>/<slug>/SKILL.md`` ready to drop into
    ``.claude/skills/`` or any Skills-aware harness.
    """
    from gradata.enhancements.skill_export import export_skill, write_skill

    # Resolve the brain ONCE and derive both the root and the lessons path
    # from it, so rules and meta-principles always read from the same brain.
    # Previously brain_root came from _resolve_brain_root() while
    # lessons_path came from _get_brain(), which could point at two
    # different directories (cross-brain export).
    lessons_path: Path | None = None
    try:
        brain = _get_brain(args)
        brain_root = Path(brain.dir)
        lessons_path = brain._find_lessons_path()
    except Exception:
        # Brain couldn't be opened: fall back to the path-only resolver and
        # export without meta-principles, matching the old failure behavior.
        brain_root = _resolve_brain_root(args)
        lessons_path = None

    name = args.name.strip()
    if not name:
        print("error: skill name required", file=sys.stderr)
        return

    output_dir = getattr(args, "output_dir", None)
    if output_dir:
        # --output-dir: materialize the Skill folder on disk.
        skill_md = write_skill(
            brain_root,
            name=name,
            output_dir=Path(output_dir),
            description=getattr(args, "description", None),
            category=getattr(args, "category", None),
            include_meta=not getattr(args, "no_meta", False),
            lessons_path=lessons_path,
        )
        print(f"Wrote skill to {skill_md}")
        return

    # No output dir: render SKILL.md to stdout (no trailing newline added).
    text = export_skill(
        brain_root,
        name=name,
        description=getattr(args, "description", None),
        category=getattr(args, "category", None),
        include_meta=not getattr(args, "no_meta", False),
        lessons_path=lessons_path,
    )
    print(text, end="")


def cmd_skill(args):
    """Dispatch `gradata skill <subcommand>`."""
    sub = getattr(args, "skill_cmd", None)
    if sub != "export":
        # Unknown subcommand: report on stderr and bail.
        print(f"error: unknown skill subcommand: {sub}", file=sys.stderr)
        return
    cmd_skill_export(args)


def cmd_hooks(args):
"""Manage Claude Code hook integration."""
action = args.action
Expand Down Expand Up @@ -1239,6 +1294,29 @@ def main():
"--limit", type=int, default=500, help="Max events per page (1..1000)"
)

# skill — export graduated rules as an Anthropic Claude Skill folder
p_skill = sub.add_parser("skill", help="Export brain as a Claude Skill folder")
skill_sub = p_skill.add_subparsers(dest="skill_cmd", required=True)
p_skill_export = skill_sub.add_parser(
"export", help="Export graduated rules as a Claude Skill (SKILL.md)"
)
p_skill_export.add_argument("name", help="Skill name (becomes folder name + frontmatter name)")
p_skill_export.add_argument(
"--output-dir",
"-o",
help="Write Skill folder under this dir (default: print SKILL.md to stdout)",
)
p_skill_export.add_argument(
"--description",
help="Frontmatter description (default: auto-generated from rule categories)",
)
p_skill_export.add_argument("--category", help="Only include rules in this category")
p_skill_export.add_argument(
"--no-meta",
action="store_true",
help="Skip injectable meta-principles section",
)

# rule — user-declared rules (fast-track to RULE tier, try hook install)
p_rule = sub.add_parser("rule", help="Manage user-declared rules")
rule_sub = p_rule.add_subparsers(dest="rule_cmd", required=True)
Expand Down Expand Up @@ -1279,6 +1357,7 @@ def main():
commands["demo"] = cmd_demo
commands["hooks"] = cmd_hooks
commands["rule"] = cmd_rule
commands["skill"] = cmd_skill
commands["seed"] = cmd_seed
commands["mine"] = cmd_mine
commands["cloud"] = cmd_cloud
Expand Down
4 changes: 3 additions & 1 deletion Gradata/src/gradata/cloud/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,8 +129,10 @@ def sync(self) -> dict:
return {"status": "not_connected"}

try:
# Backend route: POST /api/v1/sync (see cloud/app/routes/sync.py).
# DEFAULT_ENDPOINT already includes /api/v1 so we append /sync only.
return self._post(
"/brains/sync",
"/sync",
{
"brain_id": self._brain_id,
"manifest": self._read_local_manifest(),
Expand Down
20 changes: 15 additions & 5 deletions Gradata/src/gradata/cloud/sync.py
Original file line number Diff line number Diff line change
Expand Up @@ -201,11 +201,16 @@ def _post(self, path: str, payload: dict, timeout: float = 10.0) -> dict | None:
with urllib.request.urlopen(req, timeout=timeout) as resp:
body = resp.read().decode()
return json.loads(body) if body else {}
except (urllib.error.URLError, urllib.error.HTTPError, OSError) as e:
log.debug("cloud POST %s failed: %s", path, e)
except urllib.error.HTTPError as e:
# Surface HTTP errors at WARNING — silent 4xx/5xx is how the
# 'last_sync never updates' bug hid for months.
log.warning("cloud POST %s failed: HTTP %s %s", path, e.code, e.reason)
return None
except (urllib.error.URLError, OSError) as e:
log.warning("cloud POST %s failed (network): %s", path, e)
return None
except json.JSONDecodeError:
log.debug("cloud response non-JSON for %s", path)
log.warning("cloud response non-JSON for %s", path)
return {}

def sync_metrics(self, payload: TelemetryPayload) -> bool:
Expand All @@ -215,7 +220,10 @@ def sync_metrics(self, payload: TelemetryPayload) -> bool:
"""
if not self.enabled:
return False
result = self._post("/telemetry/metrics", asdict(payload))
# Backend mounts the metrics router under /api/v1 (see
# cloud/app/main.py → app.include_router(router, prefix="/api/v1")
# and cloud/app/routes/metrics.py → @router.post("/telemetry/metrics")).
result = self._post("/api/v1/telemetry/metrics", asdict(payload))
Comment on lines +223 to +226
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Critical: Double /api/v1 prefix causes incorrect URL construction.

The api_base (default: https://api.gradata.ai/api/v1) already includes /api/v1, but the path /api/v1/telemetry/metrics also includes it. This results in:

https://api.gradata.ai/api/v1/api/v1/telemetry/metrics

The path should be /telemetry/metrics (without the /api/v1 prefix) since api_base already contains the version segment.

🐛 Proposed fix
-        # Backend mounts the metrics router under /api/v1 (see
-        # cloud/app/main.py → app.include_router(router, prefix="/api/v1")
-        # and cloud/app/routes/metrics.py → `@router.post`("/telemetry/metrics")).
-        result = self._post("/api/v1/telemetry/metrics", asdict(payload))
+        # Backend mounts the metrics router under /api/v1 (see
+        # cloud/app/main.py → app.include_router(router, prefix="/api/v1")
+        # and cloud/app/routes/metrics.py → `@router.post`("/telemetry/metrics")).
+        # api_base already includes /api/v1, so we only append the route path.
+        result = self._post("/telemetry/metrics", asdict(payload))
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
# Backend mounts the metrics router under /api/v1 (see
# cloud/app/main.py → app.include_router(router, prefix="/api/v1")
# and cloud/app/routes/metrics.py → @router.post("/telemetry/metrics")).
result = self._post("/api/v1/telemetry/metrics", asdict(payload))
# Backend mounts the metrics router under /api/v1 (see
# cloud/app/main.py → app.include_router(router, prefix="/api/v1")
# and cloud/app/routes/metrics.py → `@router.post`("/telemetry/metrics")).
# api_base already includes /api/v1, so we only append the route path.
result = self._post("/telemetry/metrics", asdict(payload))
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@Gradata/src/gradata/cloud/sync.py` around lines 223 - 226, The POST call in
sync.py is constructing a double "/api/v1" by calling
self._post("/api/v1/telemetry/metrics", asdict(payload)) even though api_base
already includes "/api/v1"; update the call to use the path "/telemetry/metrics"
(or otherwise join api_base and path without duplicating the version segment) so
the request URL becomes {api_base}/telemetry/metrics; change the invocation in
the method where result = self._post(..., asdict(payload)) accordingly and keep
using the same _post method and payload variable.

if result is not None:
self.config.last_sync_at = payload.sent_at
save_config(self.brain_dir, self.config)
Expand All @@ -231,7 +239,9 @@ def contribute_corpus(self, anonymized_patterns: list[dict]) -> bool:
"""
if not self.enabled or not self.config.contribute_corpus:
return False
result = self._post("/corpus/contribute", {"patterns": anonymized_patterns})
# Backend mounts the corpus router under /api/v1 (same prefix as
# telemetry — see cloud/app/main.py).
result = self._post("/api/v1/corpus/contribute", {"patterns": anonymized_patterns})
Comment on lines +242 to +244
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Same double-prefix issue for corpus contribution path.

🐛 Proposed fix
-        # Backend mounts the corpus router under /api/v1 (same prefix as
-        # telemetry — see cloud/app/main.py).
-        result = self._post("/api/v1/corpus/contribute", {"patterns": anonymized_patterns})
+        # Backend mounts the corpus router under /api/v1 (same prefix as
+        # telemetry — see cloud/app/main.py).
+        # api_base already includes /api/v1, so we only append the route path.
+        result = self._post("/corpus/contribute", {"patterns": anonymized_patterns})
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
# Backend mounts the corpus router under /api/v1 (same prefix as
# telemetry — see cloud/app/main.py).
result = self._post("/api/v1/corpus/contribute", {"patterns": anonymized_patterns})
# Backend mounts the corpus router under /api/v1 (same prefix as
# telemetry — see cloud/app/main.py).
# api_base already includes /api/v1, so we only append the route path.
result = self._post("/corpus/contribute", {"patterns": anonymized_patterns})
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@Gradata/src/gradata/cloud/sync.py` around lines 242 - 244, The call to
self._post is using a duplicated API prefix ("/api/v1/corpus/contribute") even
though the backend already mounts the router under /api/v1; change the endpoint
passed to self._post in the code that assigns result (the line calling
self._post("/api/v1/corpus/contribute", {"patterns": anonymized_patterns})) to
use the route without the "/api/v1" prefix (e.g. "/corpus/contribute") so the
request path is correct.

return result is not None


Expand Down
Loading
Loading