Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
189 changes: 175 additions & 14 deletions docs/api/openapi.json
Original file line number Diff line number Diff line change
Expand Up @@ -7187,6 +7187,72 @@
]
}
},
"/api/v1/release-sources/poll-now-all": {
"post": {
"tags": [
"Releases"
],
"summary": "Trigger a manual poll for *every* enabled release source.",
"description": "Walks the enabled sources and enqueues one `PollReleaseSource` task per\nsource. Disabled sources are skipped silently. Per-source enqueue\nfailures don't fail the request — they're logged and reported in the\nresponse counts so the admin can spot a partial failure without\nre-checking each row.",
"operationId": "poll_release_sources_now_all",
"responses": {
"202": {
"description": "Poll tasks enqueued",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/PollAllNowResponse"
}
}
}
},
"403": {
"description": "PluginsManage permission required"
}
},
"security": [
{
"jwt_bearer": []
},
{
"api_key": []
}
]
}
},
"/api/v1/release-sources/reset-all": {
"post": {
"tags": [
"Releases"
],
"summary": "Reset *every* release source to a clean slate.",
"description": "Loops over all sources (enabled and disabled — when you're nuking the\nledger, skipping disabled rows would leave a confusing partial state)\nand applies the per-source reset: delete every owned `release_ledger`\nrow + clear the transient poll state (`etag`, `last_polled_at`,\n`last_error`, `last_error_at`, `last_summary`). User-managed fields\n(`enabled`, `cron_schedule`, `display_name`, `config`) are preserved.\n\nPer-source failures don't fail the whole request — they're counted in\n`failed` and logged. Does *not* auto-enqueue any polls; the admin can\nfollow up with `poll-now-all` if they want immediate re-fetch.\n\n**Destructive and not undoable** — the UI confirms before calling.",
"operationId": "reset_all_release_sources",
"responses": {
"200": {
"description": "Sources reset",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ResetAllReleaseSourcesResponse"
}
}
}
},
"403": {
"description": "PluginsManage permission required"
}
},
"security": [
{
"jwt_bearer": []
},
{
"api_key": []
}
]
}
},
"/api/v1/release-sources/{source_id}": {
"patch": {
"tags": [
Expand Down Expand Up @@ -29777,13 +29843,15 @@
"createdAt"
],
"properties": {
"chapter": {
"chapters": {
"type": [
"number",
"array",
"null"
],
"format": "double",
"description": "Decimal supports `12.5` etc."
"items": {
"$ref": "#/components/schemas/ReleaseSpanDto"
},
"description": "Full chapter coverage as a normalized span list. Decimals supported\n(`c12.5` → `[{start: 12.5, end: 12.5}]`). `null` when the upstream\ntitle carried no chapter info."
},
"confidence": {
"type": "number",
Expand Down Expand Up @@ -29870,12 +29938,15 @@
"type": "string",
"description": "`announced` | `dismissed` | `marked_acquired` | `hidden`."
},
"volume": {
"volumes": {
"type": [
"integer",
"array",
"null"
],
"format": "int32"
"items": {
"$ref": "#/components/schemas/ReleaseSpanDto"
},
"description": "Full volume coverage as a normalized span list. `null` semantics\nmirror [`Self::chapters`]."
}
}
},
Expand Down Expand Up @@ -32318,6 +32389,38 @@
}
}
},
"PollAllNowResponse": {
"type": "object",
"description": "Response shape from the `poll-now-all` endpoint.\n\nReports how many enabled sources had a poll task enqueued in this call.\nDisabled sources are skipped silently. `coalesced` counts sources whose\nexisting in-flight task absorbed the request (no new task was created).\n`failed` counts sources where the enqueue itself errored — those are\nlogged server-side; the response stays 202 to avoid having one bad\nsource block the rest.",
"required": [
"considered",
"enqueued",
"coalesced",
"failed"
],
"properties": {
"coalesced": {
"type": "integer",
"description": "Sources whose pending/running poll absorbed the request.",
"minimum": 0
},
"considered": {
"type": "integer",
"description": "Total enabled sources considered.",
"minimum": 0
},
"enqueued": {
"type": "integer",
"description": "Sources for which a fresh poll task was enqueued.",
"minimum": 0
},
"failed": {
"type": "integer",
"description": "Sources whose enqueue failed (see server logs).",
"minimum": 0
}
}
},
"PollNowResponse": {
"type": "object",
"description": "Response shape from the `poll-now` endpoint.\n\n`status` is `enqueued` after a successful enqueue. The `message` carries\nthe task ID for follow-up (`tasks.id`); the task runs asynchronously, so\nthis response does not reflect poll outcome.",
Expand Down Expand Up @@ -33195,13 +33298,15 @@
"createdAt"
],
"properties": {
"chapter": {
"chapters": {
"type": [
"number",
"array",
"null"
],
"format": "double",
"description": "Decimal supports `12.5` etc."
"items": {
"$ref": "#/components/schemas/ReleaseSpanDto"
},
"description": "Full chapter coverage as a normalized span list. Decimals supported\n(`c12.5` → `[{start: 12.5, end: 12.5}]`). `null` when the upstream\ntitle carried no chapter info."
},
"confidence": {
"type": "number",
Expand Down Expand Up @@ -33288,12 +33393,15 @@
"type": "string",
"description": "`announced` | `dismissed` | `marked_acquired` | `hidden`."
},
"volume": {
"volumes": {
"type": [
"integer",
"array",
"null"
],
"format": "int32"
"items": {
"$ref": "#/components/schemas/ReleaseSpanDto"
},
"description": "Full volume coverage as a normalized span list. `null` semantics\nmirror [`Self::chapters`]."
}
}
},
Expand Down Expand Up @@ -33479,6 +33587,26 @@
}
}
},
"ReleaseSpanDto": {
"type": "object",
"description": "Inclusive numeric span. Single values are encoded as `start == end`\n(`{ start: 5, end: 5 }`). The release ledger surfaces volume / chapter\ncoverage as a list of these so disjoint compilations (`v01-04 + v06-09`)\nsurvive end-to-end.",
"required": [
"start",
"end"
],
"properties": {
"end": {
"type": "number",
"format": "double",
"example": 9.0
},
"start": {
"type": "number",
"format": "double",
"example": 1.0
}
}
},
"ReplaceBookMetadataRequest": {
"type": "object",
"description": "PUT request for full replacement of book metadata\n\nAll metadata fields will be replaced with the values in this request.\nOmitting a field (or setting it to null) will clear that field.",
Expand Down Expand Up @@ -34092,6 +34220,39 @@
}
}
},
"ResetAllReleaseSourcesResponse": {
"type": "object",
"description": "Response shape from the `reset-all` endpoint.\n\nReports how many sources were reset across the whole table. Unlike\n`poll-now-all`, this *includes* disabled sources — if you're nuking\nthe ledger, partial coverage would be misleading. Per-source failures\ndon't fail the request; they're counted in `failed` and logged.",
"required": [
"considered",
"reset",
"deletedLedgerEntries",
"failed"
],
"properties": {
"considered": {
"type": "integer",
"description": "Total sources considered (enabled + disabled).",
"minimum": 0
},
"deletedLedgerEntries": {
"type": "integer",
"format": "int64",
"description": "Aggregate count of `release_ledger` rows deleted across every\nsource that was successfully reset.",
"minimum": 0
},
"failed": {
"type": "integer",
"description": "Sources where reset failed (see server logs).",
"minimum": 0
},
"reset": {
"type": "integer",
"description": "Sources reset (ledger wiped + transient state cleared).",
"minimum": 0
}
}
},
"ResetReleaseSourceResponse": {
"type": "object",
"description": "Response shape from the `reset` endpoint.\n\nReturns the number of ledger rows removed so callers can show a\nconfirmation toast. The source's transient poll state (etag,\nlast_polled_at, last_error, last_summary) is also cleared, but those\nare not counted here.",
Expand Down
8 changes: 8 additions & 0 deletions migration/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -163,6 +163,10 @@ mod m20260505_000077_add_release_sources_last_summary;
mod m20260505_000078_add_release_ledger_media_url;
// Release tracking: server-wide default cron schedule for release-source polling
mod m20260505_000079_seed_release_tracking_default_cron;
// Release tracking: per-row span lists (chapters/volumes) for compilation bundles
mod m20260508_000080_add_release_ledger_span_columns;
// Release tracking: parallel plugin_uuid FK column for cascade-on-plugin-delete
mod m20260508_000081_add_release_sources_plugin_uuid_fk;

pub struct Migrator;

Expand Down Expand Up @@ -296,6 +300,10 @@ impl MigratorTrait for Migrator {
Box::new(m20260505_000078_add_release_ledger_media_url::Migration),
// Release tracking: server-wide default cron schedule
Box::new(m20260505_000079_seed_release_tracking_default_cron::Migration),
// Release tracking: per-row span lists for compilation bundles
Box::new(m20260508_000080_add_release_ledger_span_columns::Migration),
// Release tracking: parallel plugin_uuid FK with cascade-on-plugin-delete
Box::new(m20260508_000081_add_release_sources_plugin_uuid_fk::Migration),
]
}
}
120 changes: 120 additions & 0 deletions migration/src/m20260508_000080_add_release_ledger_span_columns.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
//! Add `chapters` + `volumes` JSON span columns to `release_ledger`.
//!
//! The existing `chapter` (f64) and `volume` (i32) scalars can only hold a
//! single value per release. Real Nyaa compilations frequently cover ranges
//! and even disjoint spans (`v01-04 + v06-09`) — we silently squashed those
//! to the start value, which both mislabeled the inbox and broke the
//! "auto-ignore when fully owned" decision.
//!
//! After this migration:
//! - `chapters` is a JSON array of `[{ "start": Number, "end": Number }, ...]`
//! describing every chapter the release covers. `null` when the upstream
//! title carries no chapter info at all.
//! - `volumes` mirrors the shape, with integer-valued spans.
//! - The legacy `chapter` / `volume` scalars stay around as the *primary
//! value* used for SQL `ORDER BY` (cheap, indexable, no DB-specific
//! JSON-path syntax). The repo derives them as `max(span.end)` on insert
//! so "release covering content up to N" sorts by N.
//!
//! Backfill maps every existing single-value row into a one-element span
//! list so the new columns are populated for the historic ledger before the
//! ingestion path stops writing scalars directly.

use sea_orm::{ConnectionTrait, DbBackend, Statement};
use sea_orm_migration::prelude::*;

/// Marker type for this schema migration. `DeriveMigrationName` derives the
/// migration's name from the source file name, so it stays in sync with the
/// `m20260508_000080_…` module registered in `migration/src/lib.rs`.
#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    /// Add nullable `chapters` / `volumes` JSON columns to `release_ledger`
    /// and backfill them from the legacy scalar `chapter` / `volume` values
    /// (each non-NULL scalar becomes a one-element `[{start, end}]` span).
    ///
    /// # Errors
    /// Propagates any [`DbErr`] from the ALTER TABLE or backfill UPDATEs.
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // One ADD COLUMN per ALTER TABLE: SQLite only accepts a single
        // column addition per statement, so keep them split for portability.
        for col in ["chapters", "volumes"] {
            manager
                .alter_table(
                    Table::alter()
                        .table(Alias::new("release_ledger"))
                        .add_column(ColumnDef::new(Alias::new(col)).json_binary())
                        .to_owned(),
                )
                .await?;
        }

        // Backfill: turn every existing scalar value into a one-element span.
        // Postgres gets the jsonb_build_* helpers (native jsonb, numeric
        // typing preserved); everything else falls back to json_array /
        // json_object, which round-trip our integer / float values
        // faithfully through SQLite's typeless JSON.
        // NOTE(review): the fallback assumes json_array/json_object exist —
        // true for SQLite's JSON1 and MySQL; verify if another backend is
        // ever added.
        let db = manager.get_connection();
        let backend = db.get_database_backend();
        let (arr_fn, obj_fn) = match backend {
            DbBackend::Postgres => ("jsonb_build_array", "jsonb_build_object"),
            _ => ("json_array", "json_object"),
        };
        for scalar in ["chapter", "volume"] {
            // Target column is the plural of the scalar: chapter -> chapters.
            // Tag the statement with the *actual* backend — the previous code
            // hard-coded DbBackend::Sqlite in the fallback arm, mislabeling
            // the statement on any non-Postgres, non-SQLite backend.
            db.execute(Statement::from_string(
                backend,
                format!(
                    "UPDATE release_ledger \
                     SET {scalar}s = {arr_fn}({obj_fn}('start', {scalar}, 'end', {scalar})) \
                     WHERE {scalar} IS NOT NULL"
                ),
            ))
            .await?;
        }
        Ok(())
    }

    /// Drop the span columns again. The legacy `chapter` / `volume` scalars
    /// are never modified by `up`, so no data restoration is required.
    ///
    /// # Errors
    /// Propagates any [`DbErr`] from the ALTER TABLE statements.
    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Reverse creation order; one DROP COLUMN per ALTER for the same
        // portability reason as in `up`.
        for col in ["volumes", "chapters"] {
            manager
                .alter_table(
                    Table::alter()
                        .table(Alias::new("release_ledger"))
                        .drop_column(Alias::new(col))
                        .to_owned(),
                )
                .await?;
        }
        Ok(())
    }
}
Loading
Loading