-
Notifications
You must be signed in to change notification settings - Fork 0
revert(autoresearch): undo 6 defeaturing knob-cuts from PR #136 #141
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -164,12 +164,10 @@ def _read_brain_prompt(brain_dir: Path) -> str | None: | |
| result.append(line) | ||
| i += 1 | ||
| text = "\n".join(result) | ||
| # Strip lower-priority sections (Active guidance, Current disposition). | ||
| # Non-negotiables are the hardest constraints and are sufficient for session | ||
| # context; the guidance/disposition sections are ~140 tokens of softer context | ||
| # that the JIT hook covers per-prompt when relevant. Saves ~140 tok/session. | ||
| # Opt back in with GRADATA_WISDOM_FULL=1 for ablation. | ||
| if os.environ.get("GRADATA_WISDOM_FULL", "0") != "1": | ||
| # Active guidance + Current disposition sections kept by default — they | ||
| # carry softer behavioral context the model needs at session start. Set | ||
| # GRADATA_WISDOM_FULL=0 to strip them (ablation only). | ||
| if os.environ.get("GRADATA_WISDOM_FULL", "1") != "1": | ||
| for marker in ("Active guidance", "Current disposition"): | ||
| idx = text.find(marker) | ||
| if idx != -1: | ||
|
|
@@ -184,10 +182,7 @@ def _read_brain_prompt(brain_dir: Path) -> str | None: | |
| count=1, | ||
| ) | ||
| # Limit to first GRADATA_WISDOM_MAX_RULES non-negotiable rules. | ||
| # Reduced 11→9→6→3: keep only the top-3 "Never" attribution/data/booking rules | ||
| # which address the highest-stakes errors. Mid-tier rules fire via JIT when | ||
| # contextually relevant and are retrievable via brain.search(). Saves ~59 tok. | ||
| wisdom_max_rules = int(os.environ.get("GRADATA_WISDOM_MAX_RULES", "3")) | ||
| wisdom_max_rules = int(os.environ.get("GRADATA_WISDOM_MAX_RULES", "9")) | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Harden the environment-variable parsing: Line 185 uses a raw `int()` on `GRADATA_WISDOM_MAX_RULES`, which raises `ValueError` on malformed input and silently accepts negative values. Proposed defensive parse + clamp:
- wisdom_max_rules = int(os.environ.get("GRADATA_WISDOM_MAX_RULES", "9"))
+ raw_max_rules = os.environ.get("GRADATA_WISDOM_MAX_RULES", "9").strip()
+ try:
+     wisdom_max_rules = max(0, int(raw_max_rules))
+ except ValueError:
+     wisdom_max_rules = 9
Based on learnings: 🤖 Prompt for AI Agents |
||
| if wisdom_max_rules > 0: | ||
| rule_lines = [ln for ln in text.split("\n") if ln.startswith("- ")] | ||
| if len(rule_lines) > wisdom_max_rules: | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -443,7 +443,7 @@ def test_implicit_feedback_detects_negation(tmp_path, monkeypatch): | |
| monkeypatch.setenv("GRADATA_BRAIN_DIR", str(tmp_path)) | ||
| with patch("gradata.hooks.implicit_feedback.emit_hook_event") as mock_emit: | ||
| result = feedback_main({"message": "No, that's wrong. Do it differently."}) | ||
| assert result is None | ||
| assert result == {"result": "[fb:neg]"} | ||
| event_types = [call.args[0] for call in mock_emit.call_args_list] | ||
| assert "IMPLICIT_FEEDBACK" in event_types | ||
| signals = mock_emit.call_args_list[0].args[2]["signals"] | ||
|
|
@@ -454,7 +454,7 @@ def test_implicit_feedback_detects_reminder(tmp_path, monkeypatch): | |
| monkeypatch.setenv("GRADATA_BRAIN_DIR", str(tmp_path)) | ||
| with patch("gradata.hooks.implicit_feedback.emit_hook_event") as mock_emit: | ||
| result = feedback_main({"message": "I told you to always plan first before building."}) | ||
| assert result is None | ||
| assert result == {"result": "[fb:rem]"} | ||
| event_types = [call.args[0] for call in mock_emit.call_args_list] | ||
| assert "IMPLICIT_FEEDBACK" in event_types | ||
| signals = mock_emit.call_args_list[0].args[2]["signals"] | ||
|
|
@@ -465,7 +465,7 @@ def test_implicit_feedback_detects_challenge(tmp_path, monkeypatch): | |
| monkeypatch.setenv("GRADATA_BRAIN_DIR", str(tmp_path)) | ||
| with patch("gradata.hooks.implicit_feedback.emit_hook_event") as mock_emit: | ||
| result = feedback_main({"message": "Are you sure that's correct? It doesn't look right."}) | ||
| assert result is None | ||
| assert result is not None and "chal" in result["result"] | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. 🧹 Nitpick | 🔵 Trivial Tighten these assertions to prevent false-positive passes. Line 468 and Line 486 currently allow broad outputs, so conflicting or extra tags can slip through unnoticed. Prefer exact expected payloads for deterministic regression checks. Suggested test tightening:
- assert result is not None and "chal" in result["result"]
+ assert result == {"result": "[fb:chal]"}
@@
- assert result is not None and result["result"].startswith("[fb:")
+ assert result == {"result": "[fb:rem,chal]"}Also applies to: 486-486 🤖 Prompt for AI Agents |
||
| event_types = [call.args[0] for call in mock_emit.call_args_list] | ||
| assert "IMPLICIT_FEEDBACK" in event_types | ||
| signals = mock_emit.call_args_list[0].args[2]["signals"] | ||
|
|
@@ -483,7 +483,7 @@ def test_implicit_feedback_emits_event(tmp_path): | |
| patch("gradata.hooks.implicit_feedback.emit_hook_event") as mock_emit, | ||
| ): | ||
| result = feedback_main({"message": "I told you not to do that, are you sure?"}) | ||
| assert result is None | ||
| assert result is not None and result["result"].startswith("[fb:") | ||
| event_types = [call.args[0] for call in mock_emit.call_args_list] | ||
| assert "IMPLICIT_FEEDBACK" in event_types | ||
|
|
||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Resolve conflicting approval + negative signals before emitting/returning feedback. Line 205 currently returns all detected signal types, but the current flow can classify a single message as both negative and approval (e.g., challenge phrasing containing “that’s correct”). That can emit OUTPUT_ACCEPTED (Line 188) and also return negative feedback in the same turn, which is contradictory. Suggested fix:
🤖 Prompt for AI Agents