CLAUDE.md (1 addition & 1 deletion)
@@ -114,7 +114,7 @@ The API enforces that nobody marks their own work as done:
Ask these questions:

1. **Does this weaken chain-of-custody?** If a single session could now both implement and verify/report, the change is WRONG.
- 2. **Does this give agents destructive capabilities?** Destructive operations (DELETE, archive) must stay at `role: :user`. Agents and orchestrators should not be able to delete projects, budgets, or resolve anomalies via MCP tools.
+ 2. **Does this give agents destructive capabilities?** ALL destructive operations (any DELETE, archive, budget/token corrections, cost anomaly resolution, tenant audit key rotation) must stay at `role: :user`. Constructive and metadata work-breakdown operations (create/update epics, stories, dependencies, imports, backfills for pre-loopctl work) are at `role: :orchestrator` so an autonomous orchestrator can compose a project and record state without human intervention. Agents (`role: :agent`) can never write work-breakdown data — only read it. The rule of thumb: if the operation REMOVES data, it requires `:user`.
3. **Does this collapse trust boundaries?** The role hierarchy exists so that agents can't self-promote. Lowering a role requirement is fine for read operations and for operations the role logically needs (orchestrators creating projects). It's wrong for operations that serve as a security gate.
4. **Does this affect RLS?** New tables must use `ENABLE ROW LEVEL SECURITY` (not `FORCE`) since the production role (`schema_admin`) is the table owner without BYPASSRLS.
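
To make the rule of thumb in question 2 concrete, here is a minimal sketch of such a role gate. Everything in it (module name, tool atoms, rank values) is hypothetical and for illustration only, not code from this PR or from loopctl's actual MCP layer:

```elixir
defmodule RoleGateSketch do
  # Hypothetical rank map: a higher rank may do everything a lower rank may.
  @role_rank %{agent: 1, orchestrator: 2, user: 3}

  # Destructive: REMOVES data, so it stays gated at :user.
  def authorize(:delete_project, role), do: require_role(role, :user)

  # Constructive work-breakdown: orchestrators may compose projects.
  def authorize(:create_epic, role), do: require_role(role, :orchestrator)

  # Reads are the only work-breakdown access agents get.
  def authorize(:list_stories, role), do: require_role(role, :agent)

  # Map.fetch keeps unknown role atoms from slipping past the comparison
  # (in raw term order, any atom would compare greater than an integer).
  defp require_role(actual, required) do
    with {:ok, have} <- Map.fetch(@role_rank, actual),
         {:ok, need} <- Map.fetch(@role_rank, required),
         true <- have >= need do
      :ok
    else
      _ -> {:error, :forbidden}
    end
  end
end
```

The shape is the point: the operation's destructiveness, not the caller's convenience, picks the minimum role.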

lib/loopctl/import_export.ex (87 additions & 5 deletions)
@@ -62,6 +62,7 @@ defmodule Loopctl.ImportExport do
epic_deps_data = Map.get(data, "epic_dependencies", [])

with :ok <- validate_payload_structure(epics_data),
{:ok, epics_data} <- normalize_numbers(epics_data),
:ok <- validate_no_duplicate_numbers(epics_data),
:ok <- check_no_existing_conflicts(tenant_id, project_id, epics_data) do
execute_fresh_import(
@@ -108,6 +109,7 @@ defmodule Loopctl.ImportExport do
epic_deps_data = Map.get(data, "epic_dependencies", [])

with :ok <- validate_payload_structure(epics_data),
{:ok, epics_data} <- normalize_numbers(epics_data),
:ok <- validate_no_duplicate_numbers(epics_data) do
execute_merge_import(
tenant_id,
@@ -870,6 +872,53 @@ defmodule Loopctl.ImportExport do
"e.g. [{\"id\": \"AC-1\", \"description\": \"Feature works\"}]"}
end

# Normalizes epic numbers to integers and story numbers to strings so the
# merge lookup keys (pulled from DB rows) match the payload. Clients often
# serialize these as the "wrong" JSON type (integer epic numbers as strings,
# or string story numbers as integers) and the merge path used to silently
# treat those as "epic does not exist" -> insert -> unique constraint 422.
defp normalize_numbers(epics_data) do
epics_data
|> Enum.with_index()
|> Enum.reduce_while({:ok, []}, fn {epic, index}, {:ok, acc} ->
case normalize_epic_number(epic["number"]) do
{:ok, n} ->
stories = normalize_story_numbers(Map.get(epic, "stories", []))
normalized_epic = epic |> Map.put("number", n) |> Map.put("stories", stories)
{:cont, {:ok, [normalized_epic | acc]}}

:error ->
{:halt,
{:error, :validation,
"epics[#{index}].number: must be a positive integer (got #{inspect(epic["number"])})"}}
end
end)
|> case do
{:ok, rev} -> {:ok, Enum.reverse(rev)}
other -> other
end
end

defp normalize_epic_number(n) when is_integer(n) and n > 0, do: {:ok, n}

defp normalize_epic_number(n) when is_binary(n) do
case Integer.parse(n) do
{int, ""} when int > 0 -> {:ok, int}
_ -> :error
end
end

defp normalize_epic_number(_), do: :error

defp normalize_story_numbers(stories) when is_list(stories) do
Enum.map(stories, fn
%{"number" => n} = story when is_integer(n) -> Map.put(story, "number", to_string(n))
story -> story
end)
end

defp normalize_story_numbers(other), do: other
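
# Illustration (hypothetical payload, not part of the module):
#
#   normalize_numbers([%{"number" => "3", "stories" => [%{"number" => 7}]}])
#   #=> {:ok, [%{"number" => 3, "stories" => [%{"number" => "7"}]}]}
#
# whereas a non-numeric epic number halts the pipeline:
#
#   normalize_numbers([%{"number" => "abc"}])
#   #=> {:error, :validation, "epics[0].number: must be a positive integer (got \"abc\")"}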

defp validate_no_duplicate_numbers(epics_data) do
# Check duplicate epic numbers
epic_numbers = Enum.map(epics_data, & &1["number"])
@@ -1428,12 +1477,45 @@
end)
end)

-    error_details =
-      Enum.map_join(errors, "; ", fn {field, messages} ->
-        "#{path}.#{field}: #{Enum.join(messages, ", ")}"
-      end)
+    case translate_domain_error(changeset, path) do
+      nil -> format_raw_changeset_errors(errors, path)
+      domain_message -> domain_message
+    end
+  end

-    error_details
+  defp format_raw_changeset_errors(errors, path) do
+    Enum.map_join(errors, "; ", fn {field, messages} ->
+      "#{path}.#{field}: #{Enum.join(messages, ", ")}"
+    end)
  end

# Converts opaque changeset errors into actionable domain messages.
# Ecto reports a composite unique_constraint violation on the FIRST field of
# the constraint (e.g. `tenant_id` for `[:tenant_id, :project_id, :number]`),
# which reads nonsensically to API clients. Translate those into "Epic N
# already exists -- use merge=true".
defp translate_domain_error(%Ecto.Changeset{data: %mod{}} = changeset, path)
when mod in [Epic, Story] do
if unique_number_violation?(changeset) do
number = Ecto.Changeset.get_field(changeset, :number)
entity = if mod == Epic, do: "Epic", else: "Story"

"#{path}: #{entity} #{number} already exists in this project. " <>
"Use `merge=true` to update the existing record or pick a new number."
end
end

defp translate_domain_error(_changeset, _path), do: nil

# Epic/Story unique_constraint is on (tenant_id, project_id, number), and
# Ecto auto-names the index with "_number_index". Match on that substring in
# `constraint_name` so future unique constraints on other fields don't
# trigger a misleading "X already exists" translation.
defp unique_number_violation?(changeset) do
Enum.any?(changeset.errors, fn {_field, {_msg, opts}} ->
Keyword.get(opts, :constraint) == :unique and
(Keyword.get(opts, :constraint_name) || "") |> String.contains?("number")
end)
end
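
# Illustration (hypothetical values; the error shape is how Ecto reports a
# composite unique_constraint violation):
#
#   changeset.errors
#   #=> [tenant_id: {"has already been taken",
#   #     [constraint: :unique, constraint_name: "epics_tenant_id_project_id_number_index"]}]
#
# which translate_domain_error/2 renders as:
#
#   "epics[2]: Epic 4 already exists in this project. Use `merge=true` to
#   update the existing record or pick a new number."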

defp handle_import_error(_step, reason) when is_binary(reason) do
lib/loopctl/progress.ex (263 additions)
@@ -951,6 +951,269 @@ defmodule Loopctl.Progress do
unwrap_verification_transaction(multi)
end

@doc """
Backfills a story's status when the work was completed outside loopctl.

Sets both `agent_status` and `verified_status` to fully-done values in one
shot, BUT only for stories that never entered loopctl's dispatch lifecycle:
`agent_status` still `:pending`, no `assigned_agent_id`, no implementer or
verifier dispatch ID, and `verified_status` not already `:verified` or
`:rejected`. This structural guard is what keeps backfill from being a
chain-of-custody shortcut: if a story was ever dispatched to an agent, the
normal report/review/verify flow must be used, NOT backfill.

Records a provenance marker in `metadata.backfill` and writes an audit
event with `action: "backfilled"` (source: `"pre_loopctl"`) so the trust
chain is legible: the work is marked done but explicitly labeled as
pre-existing rather than loopctl-verified. Emits `story.backfilled` on
the webhook channel.

## Parameters

* `tenant_id` — the tenant UUID
* `story_id` — the story UUID
* `params` — map with `reason` (required, string), optional `evidence_url`, `pr_number`
* `opts` — keyword list with `:actor_id`, `:actor_label`

## Returns

* `{:ok, %Story{}}` on success
* `{:error, :not_found}` — story not in tenant
* `{:error, :reason_required}` — `reason` missing or blank
* `{:error, :reason_too_long}` — `reason` longer than 2,000 bytes
* `{:error, :invalid_pr_number}` — `pr_number` not a positive integer (or a string parsing to one)
* `{:error, :invalid_evidence_url}` / `{:error, :evidence_url_too_long}` — `evidence_url` rejected
* `{:error, :already_verified}` — story already `:verified` and the incoming params differ; a retry with identical backfill params returns `{:ok, %Story{}}` instead
* `{:error, :story_rejected}` — story is `:rejected`; investigate instead of papering over
* `{:error, :story_has_dispatch_lineage}` — story shows dispatch lineage (non-`:pending` `agent_status`, an `assigned_agent_id`, or an implementer/verifier dispatch ID); use the normal verify flow
* `{:error, %Ecto.Changeset{}}` — persistence error surfaced from Multi step
"""
@spec backfill_story(Ecto.UUID.t(), Ecto.UUID.t(), map(), keyword()) ::
{:ok, Story.t()}
| {:error,
:not_found
| :reason_required
| :reason_too_long
| :invalid_pr_number
| :invalid_evidence_url
| :evidence_url_too_long
| :already_verified
| :story_rejected
| :story_has_dispatch_lineage
| Ecto.Changeset.t()}
def backfill_story(tenant_id, story_id, params, opts \\ []) do
actor_id = Keyword.get(opts, :actor_id)
actor_label = Keyword.get(opts, :actor_label)
reason = params |> Map.get("reason") |> normalize_string()

with :ok <- validate_backfill_reason(reason),
{:ok, pr_number} <- cast_pr_number(Map.get(params, "pr_number")),
{:ok, evidence_url} <-
validate_evidence_url(params |> Map.get("evidence_url") |> normalize_string()) do
do_backfill(tenant_id, story_id, reason, evidence_url, pr_number, actor_id, actor_label)
end
end
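
# Example call (hypothetical IDs and params):
#
#   Progress.backfill_story(tenant_id, story_id,
#     %{"reason" => "Shipped before loopctl adoption", "pr_number" => "42"},
#     actor_id: api_key_id, actor_label: "ops@example.com"
#   )
#   #=> {:ok, %Story{agent_status: :reported_done, verified_status: :verified}}
#
# Retrying the same call after a timeout is safe: the idempotency check below
# returns {:ok, story} when the stored backfill metadata matches the params.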

defp validate_backfill_reason(nil), do: {:error, :reason_required}

defp validate_backfill_reason(reason) when byte_size(reason) > 2_000,
do: {:error, :reason_too_long}

defp validate_backfill_reason(_reason), do: :ok

defp cast_pr_number(nil), do: {:ok, nil}
defp cast_pr_number(n) when is_integer(n) and n > 0, do: {:ok, n}

defp cast_pr_number(n) when is_binary(n) do
case Integer.parse(n) do
{int, ""} when int > 0 -> {:ok, int}
_ -> {:error, :invalid_pr_number}
end
end

defp cast_pr_number(_), do: {:error, :invalid_pr_number}

defp validate_evidence_url(nil), do: {:ok, nil}

defp validate_evidence_url(url) when is_binary(url) do
cond do
byte_size(url) > 2_000 ->
{:error, :evidence_url_too_long}

not String.match?(url, ~r{\Ahttps?://}) ->
{:error, :invalid_evidence_url}

# Reject userinfo (user:password@) — common place for leaked tokens
String.match?(url, ~r{\Ahttps?://[^/]*@}) ->
{:error, :invalid_evidence_url}

true ->
{:ok, url}
end
end
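
# For example (hypothetical URLs):
#
#   validate_evidence_url("https://github.com/acme/repo/pull/42")
#   #=> {:ok, "https://github.com/acme/repo/pull/42"}
#
#   validate_evidence_url("ftp://example.com/evidence")      #=> {:error, :invalid_evidence_url}
#   validate_evidence_url("https://ci-token@example.com/x")  #=> {:error, :invalid_evidence_url}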

defp do_backfill(tenant_id, story_id, reason, evidence_url, pr_number, actor_id, actor_label) do
multi =
Multi.new()
|> Multi.run(:lock, fn _repo, _changes -> lock_story(tenant_id, story_id) end)
|> Multi.run(:guard, fn _repo, %{lock: story} -> guard_backfillable(story) end)
|> Multi.run(:story, fn _repo, %{lock: story} ->
apply_backfill_status(story, reason, evidence_url, pr_number)
end)
|> Audit.log_in_multi(:audit, fn %{story: updated, lock: old} ->
%{
tenant_id: tenant_id,
entity_type: "story",
entity_id: updated.id,
action: "backfilled",
actor_type: "api_key",
actor_id: actor_id,
actor_label: actor_label,
old_state: %{
"agent_status" => to_string(old.agent_status),
"verified_status" => to_string(old.verified_status)
},
new_state: %{
"agent_status" => to_string(updated.agent_status),
"verified_status" => to_string(updated.verified_status),
"source" => "pre_loopctl",
"reason" => reason,
"evidence_url" => evidence_url,
"pr_number" => pr_number
}
}
end)
|> EventGenerator.generate_events(:webhook_events, fn %{story: updated} ->
%{
tenant_id: tenant_id,
event_type: "story.backfilled",
project_id: updated.project_id,
payload: %{
"event" => "story.backfilled",
"story_id" => updated.id,
"project_id" => updated.project_id,
"epic_id" => updated.epic_id,
"source" => "pre_loopctl",
"actor_id" => actor_id,
"actor_label" => actor_label,
"reason" => reason,
"evidence_url" => evidence_url,
"pr_number" => pr_number,
"timestamp" => DateTime.to_iso8601(DateTime.utc_now())
}
}
end)

case AdminRepo.transaction(multi) do
{:ok, %{story: updated}} ->
handle_backfill_success(updated)

{:error, :guard, :already_verified, %{lock: story}} ->
idempotent_check(
story,
tenant_id,
story_id,
reason,
evidence_url,
pr_number,
actor_id,
actor_label
)

{:error, _step, err, _changes} ->
{:error, err}
end
end

# Idempotency: if the story is already verified AND its existing backfill
# metadata matches the incoming params, treat the retry as success and
# return 200 instead of 422. This makes backfill_story safe to retry after
# a client-side timeout.
defp idempotent_check(
story,
_tenant_id,
_story_id,
reason,
evidence_url,
pr_number,
_actor_id,
_actor_label
) do
existing = get_in(story.metadata, ["backfill"]) || %{}

same_payload? =
Map.get(existing, "reason") == reason and
Map.get(existing, "evidence_url") == evidence_url and
Map.get(existing, "pr_number") == pr_number

if same_payload? do
{:ok, story}
else
{:error, :already_verified}
end
end

defp handle_backfill_success(story), do: {:ok, story}

# Backfill is ONLY for stories that never entered loopctl's dispatch lifecycle.
# The refusal conditions are:
#
# - verified (nothing to do)
# - rejected (investigate, don't paper over)
# - agent_status is past :pending (contract/claim/start/report all indicate
# an agent engaged with this story)
# - any dispatch lineage field is set (implementer_dispatch_id,
# verifier_dispatch_id). These are the "ever-dispatched" markers that
# force_unclaim_story does NOT clear — relying on assigned_agent_id alone
# is bypassable via force_unclaim → backfill.
defp guard_backfillable(%{verified_status: :verified}), do: {:error, :already_verified}
defp guard_backfillable(%{verified_status: :rejected}), do: {:error, :story_rejected}

defp guard_backfillable(%{agent_status: status}) when status != :pending,
do: {:error, :story_has_dispatch_lineage}

defp guard_backfillable(%{assigned_agent_id: agent_id}) when not is_nil(agent_id),
do: {:error, :story_has_dispatch_lineage}

defp guard_backfillable(%{implementer_dispatch_id: id}) when not is_nil(id),
do: {:error, :story_has_dispatch_lineage}

defp guard_backfillable(%{verifier_dispatch_id: id}) when not is_nil(id),
do: {:error, :story_has_dispatch_lineage}

defp guard_backfillable(_story), do: {:ok, :ok}

defp apply_backfill_status(story, reason, evidence_url, pr_number) do
now = DateTime.utc_now()

backfill_meta = %{
"reason" => reason,
"evidence_url" => evidence_url,
"pr_number" => pr_number,
"backfilled_at" => DateTime.to_iso8601(now)
}

new_metadata = Map.put(story.metadata || %{}, "backfill", backfill_meta)

story
|> Ecto.Changeset.change(%{
agent_status: :reported_done,
verified_status: :verified,
reported_done_at: story.reported_done_at || now,
verified_at: now,
metadata: new_metadata
})
|> AdminRepo.update()
end

defp normalize_string(nil), do: nil
defp normalize_string(""), do: nil

defp normalize_string(s) when is_binary(s) do
case String.trim(s) do
"" -> nil
trimmed -> trimmed
end
end

defp normalize_string(_), do: nil

@doc """
Rejects a story: orchestrator marks it as failing verification.
