From 68b050e39784cd42be21ae3fccd2936704a0d325 Mon Sep 17 00:00:00 2001 From: Cody Kickertz Date: Wed, 18 Mar 2026 21:30:28 +0000 Subject: [PATCH] refactor: split 4 files exceeding 800-line limit into submodules MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extract test modules from four files that violated the RUST/file-too-long limit (800 lines): - harmonia-db: play_history.rs (1575 lines) → play_history/{mod,tests}.rs - paroche: opds/catalog.rs (1161 lines) → opds/catalog/{mod,tests}.rs - harmonia-host: tests/acquisition_integration.rs (1116 lines) → extraction of pipeline_tests and recovery_tests into submodule files - komide: service.rs (817 lines) → service/{mod,tests}.rs All public APIs unchanged. Zero RUST/file-too-long violations remain. All tests pass. Closes #1613 --- crates/harmonia-db/src/repo/play_history.rs | 1575 ----------------- .../harmonia-db/src/repo/play_history/mod.rs | 781 ++++++++ .../src/repo/play_history/tests.rs | 789 +++++++++ .../tests/acquisition_integration.rs | 419 +---- .../acquisition_integration/pipeline_tests.rs | 317 ++++ .../acquisition_integration/recovery_tests.rs | 100 ++ .../komide/src/{service.rs => service/mod.rs} | 208 +-- crates/komide/src/service/tests.rs | 201 +++ .../src/opds/{catalog.rs => catalog/mod.rs} | 451 +---- crates/paroche/src/opds/catalog/tests.rs | 444 +++++ 10 files changed, 2650 insertions(+), 2635 deletions(-) delete mode 100644 crates/harmonia-db/src/repo/play_history.rs create mode 100644 crates/harmonia-db/src/repo/play_history/mod.rs create mode 100644 crates/harmonia-db/src/repo/play_history/tests.rs create mode 100644 crates/harmonia-host/tests/acquisition_integration/pipeline_tests.rs create mode 100644 crates/harmonia-host/tests/acquisition_integration/recovery_tests.rs rename crates/komide/src/{service.rs => service/mod.rs} (74%) create mode 100644 crates/komide/src/service/tests.rs rename crates/paroche/src/opds/{catalog.rs => catalog/mod.rs} (59%) 
create mode 100644 crates/paroche/src/opds/catalog/tests.rs diff --git a/crates/harmonia-db/src/repo/play_history.rs b/crates/harmonia-db/src/repo/play_history.rs deleted file mode 100644 index 66ae321..0000000 --- a/crates/harmonia-db/src/repo/play_history.rs +++ /dev/null @@ -1,1575 +0,0 @@ -use sqlx::SqlitePool; - -use snafu::ResultExt; - -use crate::error::{DbError, QuerySnafu}; -use harmonia_common::ids::{MediaId, SessionId, UserId}; -use harmonia_common::media::MediaType; - -// --------------------------------------------------------------------------- -// Domain types -// --------------------------------------------------------------------------- - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum PlaySource { - Local, - Subsonic, - Stream, -} - -impl PlaySource { - pub fn as_str(self) -> &'static str { - match self { - Self::Local => "local", - Self::Subsonic => "subsonic", - Self::Stream => "stream", - } - } -} - -pub struct NewPlaySession { - pub media_id: MediaId, - pub user_id: UserId, - pub media_type: MediaType, - pub source: PlaySource, - pub device_name: Option, - pub quality_score: Option, - pub dsp_active: bool, - pub total_ms: Option, -} - -pub struct SessionOutcome { - pub duration_ms: i64, - pub completed: bool, - pub percent_heard: Option, -} - -#[derive(Debug, Clone, sqlx::FromRow)] -pub struct PlaySession { - pub id: Vec, - pub media_id: Vec, - pub user_id: Vec, - pub media_type: String, - pub started_at: String, - pub ended_at: Option, - pub duration_ms: i64, - pub total_ms: Option, - pub completed: i64, - pub percent_heard: Option, - pub source: String, - pub scrobble_eligible: i64, - pub scrobbled_at: Option, - pub scrobble_service: Option, - pub device_name: Option, - pub quality_score: Option, - pub dsp_active: i64, -} - -#[derive(Debug, Clone)] -pub struct ItemStats { - pub media_id: MediaId, - pub play_count: i32, - pub total_ms: i64, - pub skip_count: i32, - pub last_played_at: Option, -} - -#[derive(Debug, Clone)] -pub struct 
DailyStats { - pub date: String, - pub media_type: MediaType, - pub sessions: i32, - pub total_ms: i64, - pub unique_items: i32, -} - -#[derive(Debug, Clone)] -pub struct ListeningTimeSummary { - pub total_ms: i64, - pub by_media_type: Vec<(MediaType, i64)>, - pub session_count: i32, -} - -#[derive(Debug, Clone)] -pub struct Streak { - pub start: String, - pub end: String, - pub days: i32, -} - -#[derive(Debug, Clone)] -pub struct DateRange { - pub start: String, - pub end: String, -} - -// --------------------------------------------------------------------------- -// Internal row types for sqlx::FromRow -// --------------------------------------------------------------------------- - -#[derive(sqlx::FromRow)] -struct ItemStatsRow { - media_id: Vec, - play_count: i32, - total_ms: i64, - skip_count: i32, - last_played_at: Option, -} - -#[derive(sqlx::FromRow)] -struct DailyStatsRow { - date: String, - media_type: String, - sessions: i32, - total_ms: i64, - unique_items: i32, -} - -#[derive(sqlx::FromRow)] -struct StreakRow { - streak_start: String, - streak_end: String, - days: i32, -} - -#[derive(sqlx::FromRow)] -struct MediaTypeAggRow { - media_type: String, - total_ms: i64, - session_count: i32, -} - -// --------------------------------------------------------------------------- -// Helpers -// --------------------------------------------------------------------------- - -fn bytes_to_media_id(bytes: Vec) -> Option { - let arr: [u8; 16] = bytes.try_into().ok()?; - Some(MediaId::from_uuid(uuid::Uuid::from_bytes(arr))) -} - -fn parse_media_type(s: &str) -> MediaType { - match s { - "music" => MediaType::Music, - "audiobook" => MediaType::Audiobook, - "book" => MediaType::Book, - "comic" => MediaType::Comic, - "podcast" => MediaType::Podcast, - "news" => MediaType::News, - "movie" => MediaType::Movie, - "tv" => MediaType::Tv, - _ => MediaType::Music, - } -} - -// --------------------------------------------------------------------------- -// Session lifecycle -// 
--------------------------------------------------------------------------- - -pub async fn start_session( - pool: &SqlitePool, - session: &NewPlaySession, -) -> Result { - let id = SessionId::new(); - sqlx::query( - "INSERT INTO play_sessions - (id, media_id, user_id, media_type, started_at, source, - device_name, quality_score, dsp_active, total_ms) - VALUES (?, ?, ?, ?, strftime('%Y-%m-%dT%H:%M:%SZ', 'now'), - ?, ?, ?, ?, ?)", - ) - .bind(id.as_bytes().as_ref()) - .bind(session.media_id.as_bytes().as_ref()) - .bind(session.user_id.as_bytes().as_ref()) - .bind(session.media_type.to_string()) - .bind(session.source.as_str()) - .bind(&session.device_name) - .bind(session.quality_score) - .bind(session.dsp_active as i64) - .bind(session.total_ms) - .execute(pool) - .await - .context(QuerySnafu { - table: "play_sessions", - })?; - Ok(id) -} - -pub async fn end_session( - pool: &SqlitePool, - id: SessionId, - outcome: &SessionOutcome, -) -> Result<(), DbError> { - sqlx::query( - "UPDATE play_sessions - SET ended_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'), - duration_ms = ?, - completed = ?, - percent_heard = ? - WHERE id = ?", - ) - .bind(outcome.duration_ms) - .bind(outcome.completed as i64) - .bind(outcome.percent_heard) - .bind(id.as_bytes().as_ref()) - .execute(pool) - .await - .context(QuerySnafu { - table: "play_sessions", - })?; - Ok(()) -} - -pub async fn get_active_sessions( - pool: &SqlitePool, - user_id: UserId, -) -> Result, DbError> { - sqlx::query_as::<_, PlaySession>( - "SELECT id, media_id, user_id, media_type, started_at, ended_at, - duration_ms, total_ms, completed, percent_heard, source, - scrobble_eligible, scrobbled_at, scrobble_service, - device_name, quality_score, dsp_active - FROM play_sessions - WHERE user_id = ? 
AND ended_at IS NULL - ORDER BY started_at DESC", - ) - .bind(user_id.as_bytes().as_ref()) - .fetch_all(pool) - .await - .context(QuerySnafu { - table: "play_sessions", - }) -} - -// --------------------------------------------------------------------------- -// Scrobble queue -// --------------------------------------------------------------------------- - -pub async fn mark_scrobble_eligible( - pool: &SqlitePool, - session_id: SessionId, -) -> Result<(), DbError> { - sqlx::query("UPDATE play_sessions SET scrobble_eligible = 1 WHERE id = ?") - .bind(session_id.as_bytes().as_ref()) - .execute(pool) - .await - .context(QuerySnafu { - table: "play_sessions", - })?; - Ok(()) -} - -pub async fn get_pending_scrobbles( - pool: &SqlitePool, - user_id: UserId, -) -> Result, DbError> { - sqlx::query_as::<_, PlaySession>( - "SELECT id, media_id, user_id, media_type, started_at, ended_at, - duration_ms, total_ms, completed, percent_heard, source, - scrobble_eligible, scrobbled_at, scrobble_service, - device_name, quality_score, dsp_active - FROM play_sessions - WHERE user_id = ? AND scrobble_eligible = 1 AND scrobbled_at IS NULL - ORDER BY started_at ASC", - ) - .bind(user_id.as_bytes().as_ref()) - .fetch_all(pool) - .await - .context(QuerySnafu { - table: "play_sessions", - }) -} - -pub async fn mark_scrobbled( - pool: &SqlitePool, - session_id: SessionId, - service: &str, -) -> Result<(), DbError> { - sqlx::query( - "UPDATE play_sessions - SET scrobbled_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'), - scrobble_service = ? 
- WHERE id = ?", - ) - .bind(service) - .bind(session_id.as_bytes().as_ref()) - .execute(pool) - .await - .context(QuerySnafu { - table: "play_sessions", - })?; - Ok(()) -} - -// --------------------------------------------------------------------------- -// Stats update -// --------------------------------------------------------------------------- - -pub async fn update_item_stats( - pool: &SqlitePool, - media_id: MediaId, - user_id: UserId, - session: &PlaySession, -) -> Result<(), DbError> { - let skip = session.percent_heard.map(|p| p < 50).unwrap_or(false) as i64; - sqlx::query( - "INSERT INTO play_stats_item - (media_id, user_id, play_count, total_ms, skip_count, - first_played_at, last_played_at) - VALUES (?, ?, 1, ?, ?, ?, ?) - ON CONFLICT(media_id, user_id) DO UPDATE SET - play_count = play_count + 1, - total_ms = total_ms + excluded.total_ms, - skip_count = skip_count + excluded.skip_count, - first_played_at = COALESCE(first_played_at, excluded.first_played_at), - last_played_at = excluded.last_played_at", - ) - .bind(media_id.as_bytes().as_ref()) - .bind(user_id.as_bytes().as_ref()) - .bind(session.duration_ms) - .bind(skip) - .bind(&session.started_at) - .bind(&session.started_at) - .execute(pool) - .await - .context(QuerySnafu { - table: "play_stats_item", - })?; - Ok(()) -} - -pub async fn update_daily_stats( - pool: &SqlitePool, - user_id: UserId, - date: &str, - media_type: MediaType, - media_id: MediaId, - duration_ms: i64, -) -> Result<(), DbError> { - sqlx::query( - "INSERT INTO play_stats_daily - (user_id, date, media_type, sessions, total_ms, unique_items) - VALUES (?, ?, ?, 1, ?, 1) - ON CONFLICT(user_id, date, media_type) DO UPDATE SET - sessions = sessions + 1, - total_ms = total_ms + excluded.total_ms", - ) - .bind(user_id.as_bytes().as_ref()) - .bind(date) - .bind(media_type.to_string()) - .bind(duration_ms) - .execute(pool) - .await - .context(QuerySnafu { - table: "play_stats_daily", - })?; - - // Recompute unique_items for this (user, 
date, media_type) bucket. - sqlx::query( - "UPDATE play_stats_daily - SET unique_items = ( - SELECT COUNT(DISTINCT media_id) - FROM play_sessions - WHERE user_id = ? - AND date(started_at) = ? - AND media_type = ? - ) - WHERE user_id = ? AND date = ? AND media_type = ?", - ) - .bind(user_id.as_bytes().as_ref()) - .bind(date) - .bind(media_type.to_string()) - .bind(user_id.as_bytes().as_ref()) - .bind(date) - .bind(media_type.to_string()) - .execute(pool) - .await - .context(QuerySnafu { - table: "play_stats_daily", - })?; - - let _ = media_id; - Ok(()) -} - -/// Update (or create) the current streak for `user_id`. -/// `today` must be an ISO date string in "YYYY-MM-DD" format. -pub async fn update_streak(pool: &SqlitePool, user_id: UserId, today: &str) -> Result<(), DbError> { - // Compute yesterday using SQLite so we stay free of date-math crates here. - let (yesterday,): (String,) = sqlx::query_as("SELECT date(?, '-1 day')") - .bind(today) - .fetch_one(pool) - .await - .context(QuerySnafu { - table: "play_streaks", - })?; - - let current = sqlx::query_as::<_, StreakRow>( - "SELECT streak_start, streak_end, days - FROM play_streaks - WHERE user_id = ? AND is_current = 1", - ) - .bind(user_id.as_bytes().as_ref()) - .fetch_optional(pool) - .await - .context(QuerySnafu { - table: "play_streaks", - })?; - - match current { - None => { - sqlx::query( - "INSERT INTO play_streaks - (user_id, streak_start, streak_end, days, is_current) - VALUES (?, ?, ?, 1, 1)", - ) - .bind(user_id.as_bytes().as_ref()) - .bind(today) - .bind(today) - .execute(pool) - .await - .context(QuerySnafu { - table: "play_streaks", - })?; - } - Some(ref row) if row.streak_end == today => { - // Already counted today — no-op. - } - Some(ref row) if row.streak_end == yesterday => { - sqlx::query( - "UPDATE play_streaks - SET streak_end = ?, days = days + 1 - WHERE user_id = ? 
AND is_current = 1", - ) - .bind(today) - .bind(user_id.as_bytes().as_ref()) - .execute(pool) - .await - .context(QuerySnafu { - table: "play_streaks", - })?; - } - Some(_) => { - sqlx::query( - "UPDATE play_streaks SET is_current = 0 WHERE user_id = ? AND is_current = 1", - ) - .bind(user_id.as_bytes().as_ref()) - .execute(pool) - .await - .context(QuerySnafu { - table: "play_streaks", - })?; - - sqlx::query( - "INSERT INTO play_streaks - (user_id, streak_start, streak_end, days, is_current) - VALUES (?, ?, ?, 1, 1)", - ) - .bind(user_id.as_bytes().as_ref()) - .bind(today) - .bind(today) - .execute(pool) - .await - .context(QuerySnafu { - table: "play_streaks", - })?; - } - } - - Ok(()) -} - -// --------------------------------------------------------------------------- -// Query — recent history -// --------------------------------------------------------------------------- - -pub async fn recent_sessions( - pool: &SqlitePool, - user_id: UserId, - limit: u32, -) -> Result, DbError> { - sqlx::query_as::<_, PlaySession>( - "SELECT id, media_id, user_id, media_type, started_at, ended_at, - duration_ms, total_ms, completed, percent_heard, source, - scrobble_eligible, scrobbled_at, scrobble_service, - device_name, quality_score, dsp_active - FROM play_sessions - WHERE user_id = ? - ORDER BY started_at DESC - LIMIT ?", - ) - .bind(user_id.as_bytes().as_ref()) - .bind(limit as i64) - .fetch_all(pool) - .await - .context(QuerySnafu { - table: "play_sessions", - }) -} - -pub async fn recent_by_media_type( - pool: &SqlitePool, - user_id: UserId, - media_type: MediaType, - limit: u32, -) -> Result, DbError> { - sqlx::query_as::<_, PlaySession>( - "SELECT id, media_id, user_id, media_type, started_at, ended_at, - duration_ms, total_ms, completed, percent_heard, source, - scrobble_eligible, scrobbled_at, scrobble_service, - device_name, quality_score, dsp_active - FROM play_sessions - WHERE user_id = ? AND media_type = ? 
- ORDER BY started_at DESC - LIMIT ?", - ) - .bind(user_id.as_bytes().as_ref()) - .bind(media_type.to_string()) - .bind(limit as i64) - .fetch_all(pool) - .await - .context(QuerySnafu { - table: "play_sessions", - }) -} - -// --------------------------------------------------------------------------- -// Query — analytics -// --------------------------------------------------------------------------- - -pub async fn top_items( - pool: &SqlitePool, - user_id: UserId, - media_type: MediaType, - period: &DateRange, - limit: u32, -) -> Result, DbError> { - let rows = sqlx::query_as::<_, ItemStatsRow>( - "SELECT psi.media_id, psi.play_count, psi.total_ms, - psi.skip_count, psi.last_played_at - FROM play_stats_item psi - WHERE psi.user_id = ? - AND psi.media_id IN ( - SELECT DISTINCT ps.media_id - FROM play_sessions ps - WHERE ps.user_id = ? - AND ps.media_type = ? - AND date(ps.started_at) >= ? - AND date(ps.started_at) <= ? - ) - ORDER BY psi.play_count DESC - LIMIT ?", - ) - .bind(user_id.as_bytes().as_ref()) - .bind(user_id.as_bytes().as_ref()) - .bind(media_type.to_string()) - .bind(&period.start) - .bind(&period.end) - .bind(limit as i64) - .fetch_all(pool) - .await - .context(QuerySnafu { - table: "play_stats_item", - })?; - - Ok(rows - .into_iter() - .filter_map(|r| { - Some(ItemStats { - media_id: bytes_to_media_id(r.media_id)?, - play_count: r.play_count, - total_ms: r.total_ms, - skip_count: r.skip_count, - last_played_at: r.last_played_at, - }) - }) - .collect()) -} - -pub async fn listening_time( - pool: &SqlitePool, - user_id: UserId, - period: &DateRange, -) -> Result { - let rows = sqlx::query_as::<_, MediaTypeAggRow>( - "SELECT media_type, - SUM(total_ms) AS total_ms, - CAST(SUM(sessions) AS INTEGER) AS session_count - FROM play_stats_daily - WHERE user_id = ? AND date >= ? AND date <= ? 
- GROUP BY media_type", - ) - .bind(user_id.as_bytes().as_ref()) - .bind(&period.start) - .bind(&period.end) - .fetch_all(pool) - .await - .context(QuerySnafu { - table: "play_stats_daily", - })?; - - let mut total_ms: i64 = 0; - let mut session_count: i32 = 0; - let mut by_media_type = Vec::with_capacity(rows.len()); - - for row in rows { - total_ms += row.total_ms; - session_count += row.session_count; - by_media_type.push((parse_media_type(&row.media_type), row.total_ms)); - } - - Ok(ListeningTimeSummary { - total_ms, - by_media_type, - session_count, - }) -} - -pub async fn daily_activity( - pool: &SqlitePool, - user_id: UserId, - period: &DateRange, -) -> Result, DbError> { - let rows = sqlx::query_as::<_, DailyStatsRow>( - "SELECT date, media_type, sessions, total_ms, unique_items - FROM play_stats_daily - WHERE user_id = ? AND date >= ? AND date <= ? - ORDER BY date ASC, media_type ASC", - ) - .bind(user_id.as_bytes().as_ref()) - .bind(&period.start) - .bind(&period.end) - .fetch_all(pool) - .await - .context(QuerySnafu { - table: "play_stats_daily", - })?; - - Ok(rows - .into_iter() - .map(|r| DailyStats { - date: r.date, - media_type: parse_media_type(&r.media_type), - sessions: r.sessions, - total_ms: r.total_ms, - unique_items: r.unique_items, - }) - .collect()) -} - -pub async fn current_streak(pool: &SqlitePool, user_id: UserId) -> Result, DbError> { - let row = sqlx::query_as::<_, StreakRow>( - "SELECT streak_start, streak_end, days - FROM play_streaks - WHERE user_id = ? 
AND is_current = 1", - ) - .bind(user_id.as_bytes().as_ref()) - .fetch_optional(pool) - .await - .context(QuerySnafu { - table: "play_streaks", - })?; - - Ok(row.map(|r| Streak { - start: r.streak_start, - end: r.streak_end, - days: r.days, - })) -} - -// --------------------------------------------------------------------------- -// Query — discovery support -// --------------------------------------------------------------------------- - -pub async fn never_played( - pool: &SqlitePool, - user_id: UserId, - media_type: MediaType, - limit: u32, -) -> Result, DbError> { - let table = match media_type { - MediaType::Music => "music_tracks", - MediaType::Audiobook => "audiobooks", - MediaType::Book => "books", - MediaType::Comic => "comics", - MediaType::Podcast => "podcast_episodes", - MediaType::News => "news_articles", - MediaType::Movie => "movies", - MediaType::Tv => "tv_episodes", - _ => return Ok(vec![]), - }; - - let sql = format!( - "SELECT id FROM {table} - WHERE id NOT IN ( - SELECT media_id FROM play_stats_item WHERE user_id = ? - ) - LIMIT ?" - ); - - let rows: Vec<(Vec,)> = sqlx::query_as(&sql) - .bind(user_id.as_bytes().as_ref()) - .bind(limit as i64) - .fetch_all(pool) - .await - .context(QuerySnafu { table })?; - - Ok(rows - .into_iter() - .filter_map(|(bytes,)| bytes_to_media_id(bytes)) - .collect()) -} - -pub async fn not_played_since( - pool: &SqlitePool, - user_id: UserId, - before: &str, - limit: u32, -) -> Result, DbError> { - let rows: Vec<(Vec,)> = sqlx::query_as( - "SELECT media_id FROM play_stats_item - WHERE user_id = ? AND last_played_at < ? 
- ORDER BY last_played_at ASC - LIMIT ?", - ) - .bind(user_id.as_bytes().as_ref()) - .bind(before) - .bind(limit as i64) - .fetch_all(pool) - .await - .context(QuerySnafu { - table: "play_stats_item", - })?; - - Ok(rows - .into_iter() - .filter_map(|(bytes,)| bytes_to_media_id(bytes)) - .collect()) -} - -pub async fn on_this_day( - pool: &SqlitePool, - user_id: UserId, - month: u8, - day: u8, -) -> Result, DbError> { - let month_day = format!("{month:02}-{day:02}"); - sqlx::query_as::<_, PlaySession>( - "SELECT id, media_id, user_id, media_type, started_at, ended_at, - duration_ms, total_ms, completed, percent_heard, source, - scrobble_eligible, scrobbled_at, scrobble_service, - device_name, quality_score, dsp_active - FROM play_sessions - WHERE user_id = ? - AND strftime('%m-%d', started_at) = ? - ORDER BY started_at DESC", - ) - .bind(user_id.as_bytes().as_ref()) - .bind(month_day) - .fetch_all(pool) - .await - .context(QuerySnafu { - table: "play_sessions", - }) -} - -// --------------------------------------------------------------------------- -// Tests -// --------------------------------------------------------------------------- - -#[cfg(test)] -mod tests { - use super::*; - use crate::migrate::MIGRATOR; - - async fn setup() -> SqlitePool { - let pool = SqlitePool::connect("sqlite::memory:").await.unwrap(); - MIGRATOR.run(&pool).await.unwrap(); - pool - } - - fn make_user_id() -> UserId { - UserId::new() - } - - fn make_media_id() -> MediaId { - MediaId::new() - } - - async fn insert_user(pool: &SqlitePool, user_id: UserId) { - sqlx::query( - "INSERT INTO users (id, username, display_name, password_hash, role) - VALUES (?, ?, ?, ?, ?)", - ) - .bind(user_id.as_bytes().as_ref()) - .bind(format!("user_{}", uuid::Uuid::now_v7())) - .bind("Test User") - .bind("$argon2id$placeholder") - .bind("member") - .execute(pool) - .await - .unwrap(); - } - - fn new_session(user_id: UserId, media_id: MediaId, media_type: MediaType) -> NewPlaySession { - NewPlaySession { - 
media_id, - user_id, - media_type, - source: PlaySource::Local, - device_name: None, - quality_score: None, - dsp_active: false, - total_ms: Some(210_000), - } - } - - // ----------------------------------------------------------------------- - // Session lifecycle - // ----------------------------------------------------------------------- - - #[tokio::test] - async fn start_session_creates_row_with_null_ended_at() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - let media_id = make_media_id(); - - let session_id = start_session(&pool, &new_session(user_id, media_id, MediaType::Music)) - .await - .unwrap(); - - let row: (Option,) = - sqlx::query_as("SELECT ended_at FROM play_sessions WHERE id = ?") - .bind(session_id.as_bytes().as_ref()) - .fetch_one(&pool) - .await - .unwrap(); - - assert!(row.0.is_none()); - } - - #[tokio::test] - async fn end_session_populates_outcome_fields() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - - let session_id = start_session( - &pool, - &new_session(user_id, make_media_id(), MediaType::Music), - ) - .await - .unwrap(); - - end_session( - &pool, - session_id, - &SessionOutcome { - duration_ms: 180_000, - completed: false, - percent_heard: Some(85), - }, - ) - .await - .unwrap(); - - let row: (Option, i64, Option) = sqlx::query_as( - "SELECT ended_at, duration_ms, percent_heard FROM play_sessions WHERE id = ?", - ) - .bind(session_id.as_bytes().as_ref()) - .fetch_one(&pool) - .await - .unwrap(); - - assert!(row.0.is_some()); - assert_eq!(row.1, 180_000); - assert_eq!(row.2, Some(85)); - } - - #[tokio::test] - async fn end_session_completed_flag() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - - let session_id = start_session( - &pool, - &new_session(user_id, make_media_id(), MediaType::Music), - ) - .await - .unwrap(); - - end_session( - &pool, - session_id, - &SessionOutcome 
{ - duration_ms: 210_000, - completed: true, - percent_heard: Some(100), - }, - ) - .await - .unwrap(); - - let (completed,): (i64,) = - sqlx::query_as("SELECT completed FROM play_sessions WHERE id = ?") - .bind(session_id.as_bytes().as_ref()) - .fetch_one(&pool) - .await - .unwrap(); - - assert_eq!(completed, 1); - } - - #[tokio::test] - async fn get_active_sessions_excludes_ended() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - - let active_id = start_session( - &pool, - &new_session(user_id, make_media_id(), MediaType::Music), - ) - .await - .unwrap(); - let ended_id = start_session( - &pool, - &new_session(user_id, make_media_id(), MediaType::Music), - ) - .await - .unwrap(); - - end_session( - &pool, - ended_id, - &SessionOutcome { - duration_ms: 100, - completed: false, - percent_heard: None, - }, - ) - .await - .unwrap(); - - let active = get_active_sessions(&pool, user_id).await.unwrap(); - assert_eq!(active.len(), 1); - assert_eq!(active[0].id, active_id.as_bytes().to_vec()); - } - - // ----------------------------------------------------------------------- - // Scrobble tracking - // ----------------------------------------------------------------------- - - #[tokio::test] - async fn mark_scrobble_eligible_sets_flag() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - - let session_id = start_session( - &pool, - &new_session(user_id, make_media_id(), MediaType::Music), - ) - .await - .unwrap(); - - mark_scrobble_eligible(&pool, session_id).await.unwrap(); - - let (flag,): (i64,) = - sqlx::query_as("SELECT scrobble_eligible FROM play_sessions WHERE id = ?") - .bind(session_id.as_bytes().as_ref()) - .fetch_one(&pool) - .await - .unwrap(); - - assert_eq!(flag, 1); - } - - #[tokio::test] - async fn get_pending_scrobbles_returns_eligible_unscrobbled() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - - let 
s1 = start_session( - &pool, - &new_session(user_id, make_media_id(), MediaType::Music), - ) - .await - .unwrap(); - let s2 = start_session( - &pool, - &new_session(user_id, make_media_id(), MediaType::Music), - ) - .await - .unwrap(); - let _s3 = start_session( - &pool, - &new_session(user_id, make_media_id(), MediaType::Music), - ) - .await - .unwrap(); - - mark_scrobble_eligible(&pool, s1).await.unwrap(); - mark_scrobble_eligible(&pool, s2).await.unwrap(); - mark_scrobbled(&pool, s2, "lastfm").await.unwrap(); - - let pending = get_pending_scrobbles(&pool, user_id).await.unwrap(); - assert_eq!(pending.len(), 1); - assert_eq!(pending[0].id, s1.as_bytes().to_vec()); - } - - #[tokio::test] - async fn mark_scrobbled_sets_service_and_timestamp() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - - let session_id = start_session( - &pool, - &new_session(user_id, make_media_id(), MediaType::Music), - ) - .await - .unwrap(); - - mark_scrobble_eligible(&pool, session_id).await.unwrap(); - mark_scrobbled(&pool, session_id, "listenbrainz") - .await - .unwrap(); - - let row: (Option, Option) = - sqlx::query_as("SELECT scrobbled_at, scrobble_service FROM play_sessions WHERE id = ?") - .bind(session_id.as_bytes().as_ref()) - .fetch_one(&pool) - .await - .unwrap(); - - assert!(row.0.is_some()); - assert_eq!(row.1.as_deref(), Some("listenbrainz")); - } - - // ----------------------------------------------------------------------- - // Stats aggregation - // ----------------------------------------------------------------------- - - #[tokio::test] - async fn update_item_stats_increments_play_count() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - let media_id = make_media_id(); - - let session_id = start_session(&pool, &new_session(user_id, media_id, MediaType::Music)) - .await - .unwrap(); - end_session( - &pool, - session_id, - &SessionOutcome { - duration_ms: 180_000, - 
completed: true, - percent_heard: Some(100), - }, - ) - .await - .unwrap(); - - let session = sqlx::query_as::<_, PlaySession>( - "SELECT id, media_id, user_id, media_type, started_at, ended_at, - duration_ms, total_ms, completed, percent_heard, source, - scrobble_eligible, scrobbled_at, scrobble_service, - device_name, quality_score, dsp_active - FROM play_sessions WHERE id = ?", - ) - .bind(session_id.as_bytes().as_ref()) - .fetch_one(&pool) - .await - .unwrap(); - - update_item_stats(&pool, media_id, user_id, &session) - .await - .unwrap(); - update_item_stats(&pool, media_id, user_id, &session) - .await - .unwrap(); - - let (play_count, total_ms): (i32, i64) = sqlx::query_as( - "SELECT play_count, total_ms FROM play_stats_item WHERE media_id = ? AND user_id = ?", - ) - .bind(media_id.as_bytes().as_ref()) - .bind(user_id.as_bytes().as_ref()) - .fetch_one(&pool) - .await - .unwrap(); - - assert_eq!(play_count, 2); - assert_eq!(total_ms, 360_000); - } - - #[tokio::test] - async fn update_item_stats_skip_count_when_percent_under_50() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - let media_id = make_media_id(); - - let session_id = start_session(&pool, &new_session(user_id, media_id, MediaType::Music)) - .await - .unwrap(); - end_session( - &pool, - session_id, - &SessionOutcome { - duration_ms: 30_000, - completed: false, - percent_heard: Some(14), - }, - ) - .await - .unwrap(); - - let session = sqlx::query_as::<_, PlaySession>( - "SELECT id, media_id, user_id, media_type, started_at, ended_at, - duration_ms, total_ms, completed, percent_heard, source, - scrobble_eligible, scrobbled_at, scrobble_service, - device_name, quality_score, dsp_active - FROM play_sessions WHERE id = ?", - ) - .bind(session_id.as_bytes().as_ref()) - .fetch_one(&pool) - .await - .unwrap(); - - update_item_stats(&pool, media_id, user_id, &session) - .await - .unwrap(); - - let (skip_count,): (i32,) = sqlx::query_as( - "SELECT skip_count 
FROM play_stats_item WHERE media_id = ? AND user_id = ?", - ) - .bind(media_id.as_bytes().as_ref()) - .bind(user_id.as_bytes().as_ref()) - .fetch_one(&pool) - .await - .unwrap(); - - assert_eq!(skip_count, 1); - } - - #[tokio::test] - async fn update_item_stats_first_played_set_once() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - let media_id = make_media_id(); - - // First play - let s1 = start_session(&pool, &new_session(user_id, media_id, MediaType::Music)) - .await - .unwrap(); - end_session( - &pool, - s1, - &SessionOutcome { - duration_ms: 100, - completed: false, - percent_heard: None, - }, - ) - .await - .unwrap(); - let sess1 = sqlx::query_as::<_, PlaySession>( - "SELECT id, media_id, user_id, media_type, started_at, ended_at, - duration_ms, total_ms, completed, percent_heard, source, - scrobble_eligible, scrobbled_at, scrobble_service, - device_name, quality_score, dsp_active - FROM play_sessions WHERE id = ?", - ) - .bind(s1.as_bytes().as_ref()) - .fetch_one(&pool) - .await - .unwrap(); - update_item_stats(&pool, media_id, user_id, &sess1) - .await - .unwrap(); - - let (first1,): (Option,) = sqlx::query_as( - "SELECT first_played_at FROM play_stats_item WHERE media_id = ? 
AND user_id = ?", - ) - .bind(media_id.as_bytes().as_ref()) - .bind(user_id.as_bytes().as_ref()) - .fetch_one(&pool) - .await - .unwrap(); - assert!(first1.is_some()); - - // Second play - let s2 = start_session(&pool, &new_session(user_id, media_id, MediaType::Music)) - .await - .unwrap(); - end_session( - &pool, - s2, - &SessionOutcome { - duration_ms: 100, - completed: false, - percent_heard: None, - }, - ) - .await - .unwrap(); - let sess2 = sqlx::query_as::<_, PlaySession>( - "SELECT id, media_id, user_id, media_type, started_at, ended_at, - duration_ms, total_ms, completed, percent_heard, source, - scrobble_eligible, scrobbled_at, scrobble_service, - device_name, quality_score, dsp_active - FROM play_sessions WHERE id = ?", - ) - .bind(s2.as_bytes().as_ref()) - .fetch_one(&pool) - .await - .unwrap(); - update_item_stats(&pool, media_id, user_id, &sess2) - .await - .unwrap(); - - let (first2, last2): (Option, Option) = sqlx::query_as( - "SELECT first_played_at, last_played_at FROM play_stats_item WHERE media_id = ? AND user_id = ?", - ) - .bind(media_id.as_bytes().as_ref()) - .bind(user_id.as_bytes().as_ref()) - .fetch_one(&pool) - .await - .unwrap(); - - // first_played_at unchanged, last_played_at updated - assert_eq!(first2, first1); - assert!(last2.is_some()); - } - - #[tokio::test] - async fn update_daily_stats_upsert() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - let media_id = make_media_id(); - - update_daily_stats( - &pool, - user_id, - "2026-03-12", - MediaType::Music, - media_id, - 180_000, - ) - .await - .unwrap(); - update_daily_stats( - &pool, - user_id, - "2026-03-12", - MediaType::Music, - media_id, - 210_000, - ) - .await - .unwrap(); - - let (sessions, total_ms): (i32, i64) = sqlx::query_as( - "SELECT sessions, total_ms FROM play_stats_daily WHERE user_id = ? AND date = ? 
AND media_type = ?", - ) - .bind(user_id.as_bytes().as_ref()) - .bind("2026-03-12") - .bind("music") - .fetch_one(&pool) - .await - .unwrap(); - - assert_eq!(sessions, 2); - assert_eq!(total_ms, 390_000); - } - - // ----------------------------------------------------------------------- - // Analytics queries - // ----------------------------------------------------------------------- - - #[tokio::test] - async fn top_items_ordered_by_play_count() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - - let m1 = make_media_id(); - let m2 = make_media_id(); - - // m2 played twice, m1 played once - for media_id in [m1, m2, m2] { - let s = start_session(&pool, &new_session(user_id, media_id, MediaType::Music)) - .await - .unwrap(); - end_session( - &pool, - s, - &SessionOutcome { - duration_ms: 100, - completed: true, - percent_heard: Some(100), - }, - ) - .await - .unwrap(); - let session = sqlx::query_as::<_, PlaySession>( - "SELECT id, media_id, user_id, media_type, started_at, ended_at, - duration_ms, total_ms, completed, percent_heard, source, - scrobble_eligible, scrobbled_at, scrobble_service, - device_name, quality_score, dsp_active - FROM play_sessions WHERE id = ?", - ) - .bind(s.as_bytes().as_ref()) - .fetch_one(&pool) - .await - .unwrap(); - update_item_stats(&pool, media_id, user_id, &session) - .await - .unwrap(); - } - - let period = DateRange { - start: "2000-01-01".to_string(), - end: "2099-12-31".to_string(), - }; - let items = top_items(&pool, user_id, MediaType::Music, &period, 10) - .await - .unwrap(); - - assert_eq!(items.len(), 2); - assert_eq!(items[0].media_id, m2); - assert_eq!(items[0].play_count, 2); - assert_eq!(items[1].media_id, m1); - assert_eq!(items[1].play_count, 1); - } - - #[tokio::test] - async fn listening_time_aggregates_across_media_types() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - let media_id = make_media_id(); - - 
update_daily_stats( - &pool, - user_id, - "2026-03-10", - MediaType::Music, - media_id, - 100_000, - ) - .await - .unwrap(); - update_daily_stats( - &pool, - user_id, - "2026-03-11", - MediaType::Podcast, - media_id, - 200_000, - ) - .await - .unwrap(); - update_daily_stats( - &pool, - user_id, - "2026-03-12", - MediaType::Music, - media_id, - 50_000, - ) - .await - .unwrap(); - - let period = DateRange { - start: "2026-03-10".to_string(), - end: "2026-03-12".to_string(), - }; - let summary = listening_time(&pool, user_id, &period).await.unwrap(); - - assert_eq!(summary.total_ms, 350_000); - assert_eq!(summary.session_count, 3); - assert_eq!(summary.by_media_type.len(), 2); - } - - #[tokio::test] - async fn daily_activity_returns_one_row_per_date_media_type() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - let media_id = make_media_id(); - - update_daily_stats( - &pool, - user_id, - "2026-03-10", - MediaType::Music, - media_id, - 100_000, - ) - .await - .unwrap(); - update_daily_stats( - &pool, - user_id, - "2026-03-11", - MediaType::Music, - media_id, - 200_000, - ) - .await - .unwrap(); - - let period = DateRange { - start: "2026-03-10".to_string(), - end: "2026-03-11".to_string(), - }; - let rows = daily_activity(&pool, user_id, &period).await.unwrap(); - - assert_eq!(rows.len(), 2); - assert_eq!(rows[0].date, "2026-03-10"); - assert_eq!(rows[1].date, "2026-03-11"); - } - - #[tokio::test] - async fn on_this_day_returns_same_month_day_sessions() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - let media_id = make_media_id(); - - // Insert sessions with explicit started_at timestamps - sqlx::query( - "INSERT INTO play_sessions - (id, media_id, user_id, media_type, started_at, source) - VALUES (?, ?, ?, 'music', '2024-03-12T10:00:00Z', 'local'), - (?, ?, ?, 'music', '2025-03-12T11:00:00Z', 'local'), - (?, ?, ?, 'music', '2026-03-15T12:00:00Z', 'local')", - ) - 
.bind(SessionId::new().as_bytes().as_ref()) - .bind(media_id.as_bytes().as_ref()) - .bind(user_id.as_bytes().as_ref()) - .bind(SessionId::new().as_bytes().as_ref()) - .bind(media_id.as_bytes().as_ref()) - .bind(user_id.as_bytes().as_ref()) - .bind(SessionId::new().as_bytes().as_ref()) - .bind(media_id.as_bytes().as_ref()) - .bind(user_id.as_bytes().as_ref()) - .execute(&pool) - .await - .unwrap(); - - let sessions = on_this_day(&pool, user_id, 3, 12).await.unwrap(); - assert_eq!(sessions.len(), 2); - } - - #[tokio::test] - async fn not_played_since_filters_by_last_played() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - - let m1 = make_media_id(); - let m2 = make_media_id(); - - sqlx::query( - "INSERT INTO play_stats_item - (media_id, user_id, play_count, total_ms, last_played_at) - VALUES (?, ?, 3, 100, '2025-01-01T00:00:00Z'), - (?, ?, 1, 100, '2026-03-01T00:00:00Z')", - ) - .bind(m1.as_bytes().as_ref()) - .bind(user_id.as_bytes().as_ref()) - .bind(m2.as_bytes().as_ref()) - .bind(user_id.as_bytes().as_ref()) - .execute(&pool) - .await - .unwrap(); - - let result = not_played_since(&pool, user_id, "2026-01-01T00:00:00Z", 10) - .await - .unwrap(); - - assert_eq!(result.len(), 1); - assert_eq!(result[0], m1); - } - - // ----------------------------------------------------------------------- - // Streak tracking - // ----------------------------------------------------------------------- - - #[tokio::test] - async fn streak_first_play_creates_streak_of_one() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - - update_streak(&pool, user_id, "2026-03-12").await.unwrap(); - - let streak = current_streak(&pool, user_id).await.unwrap().unwrap(); - assert_eq!(streak.start, "2026-03-12"); - assert_eq!(streak.end, "2026-03-12"); - assert_eq!(streak.days, 1); - } - - #[tokio::test] - async fn streak_consecutive_day_extends() { - let pool = setup().await; - let user_id = 
make_user_id(); - insert_user(&pool, user_id).await; - - update_streak(&pool, user_id, "2026-03-11").await.unwrap(); - update_streak(&pool, user_id, "2026-03-12").await.unwrap(); - - let streak = current_streak(&pool, user_id).await.unwrap().unwrap(); - assert_eq!(streak.start, "2026-03-11"); - assert_eq!(streak.end, "2026-03-12"); - assert_eq!(streak.days, 2); - } - - #[tokio::test] - async fn streak_same_day_is_idempotent() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - - update_streak(&pool, user_id, "2026-03-12").await.unwrap(); - update_streak(&pool, user_id, "2026-03-12").await.unwrap(); - - let streak = current_streak(&pool, user_id).await.unwrap().unwrap(); - assert_eq!(streak.days, 1); - } - - #[tokio::test] - async fn streak_gap_closes_old_and_starts_new() { - let pool = setup().await; - let user_id = make_user_id(); - insert_user(&pool, user_id).await; - - update_streak(&pool, user_id, "2026-03-10").await.unwrap(); - update_streak(&pool, user_id, "2026-03-11").await.unwrap(); - // Gap: skip 2026-03-12 - update_streak(&pool, user_id, "2026-03-13").await.unwrap(); - - let streak = current_streak(&pool, user_id).await.unwrap().unwrap(); - assert_eq!(streak.start, "2026-03-13"); - assert_eq!(streak.days, 1); - - let (closed_count,): (i32,) = sqlx::query_as( - "SELECT COUNT(*) FROM play_streaks WHERE user_id = ? 
AND is_current = 0", - ) - .bind(user_id.as_bytes().as_ref()) - .fetch_one(&pool) - .await - .unwrap(); - assert_eq!(closed_count, 1); - } -} diff --git a/crates/harmonia-db/src/repo/play_history/mod.rs b/crates/harmonia-db/src/repo/play_history/mod.rs new file mode 100644 index 0000000..7a027ef --- /dev/null +++ b/crates/harmonia-db/src/repo/play_history/mod.rs @@ -0,0 +1,781 @@ +use sqlx::SqlitePool; + +use snafu::ResultExt; + +use crate::error::{DbError, QuerySnafu}; +use harmonia_common::ids::{MediaId, SessionId, UserId}; +use harmonia_common::media::MediaType; + +// --------------------------------------------------------------------------- +// Domain types +// --------------------------------------------------------------------------- + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PlaySource { + Local, + Subsonic, + Stream, +} + +impl PlaySource { + pub fn as_str(self) -> &'static str { + match self { + Self::Local => "local", + Self::Subsonic => "subsonic", + Self::Stream => "stream", + } + } +} + +pub struct NewPlaySession { + pub media_id: MediaId, + pub user_id: UserId, + pub media_type: MediaType, + pub source: PlaySource, + pub device_name: Option, + pub quality_score: Option, + pub dsp_active: bool, + pub total_ms: Option, +} + +pub struct SessionOutcome { + pub duration_ms: i64, + pub completed: bool, + pub percent_heard: Option, +} + +#[derive(Debug, Clone, sqlx::FromRow)] +pub struct PlaySession { + pub id: Vec, + pub media_id: Vec, + pub user_id: Vec, + pub media_type: String, + pub started_at: String, + pub ended_at: Option, + pub duration_ms: i64, + pub total_ms: Option, + pub completed: i64, + pub percent_heard: Option, + pub source: String, + pub scrobble_eligible: i64, + pub scrobbled_at: Option, + pub scrobble_service: Option, + pub device_name: Option, + pub quality_score: Option, + pub dsp_active: i64, +} + +#[derive(Debug, Clone)] +pub struct ItemStats { + pub media_id: MediaId, + pub play_count: i32, + pub total_ms: i64, + pub 
skip_count: i32, + pub last_played_at: Option, +} + +#[derive(Debug, Clone)] +pub struct DailyStats { + pub date: String, + pub media_type: MediaType, + pub sessions: i32, + pub total_ms: i64, + pub unique_items: i32, +} + +#[derive(Debug, Clone)] +pub struct ListeningTimeSummary { + pub total_ms: i64, + pub by_media_type: Vec<(MediaType, i64)>, + pub session_count: i32, +} + +#[derive(Debug, Clone)] +pub struct Streak { + pub start: String, + pub end: String, + pub days: i32, +} + +#[derive(Debug, Clone)] +pub struct DateRange { + pub start: String, + pub end: String, +} + +// --------------------------------------------------------------------------- +// Internal row types for sqlx::FromRow +// --------------------------------------------------------------------------- + +#[derive(sqlx::FromRow)] +struct ItemStatsRow { + media_id: Vec, + play_count: i32, + total_ms: i64, + skip_count: i32, + last_played_at: Option, +} + +#[derive(sqlx::FromRow)] +struct DailyStatsRow { + date: String, + media_type: String, + sessions: i32, + total_ms: i64, + unique_items: i32, +} + +#[derive(sqlx::FromRow)] +struct StreakRow { + streak_start: String, + streak_end: String, + days: i32, +} + +#[derive(sqlx::FromRow)] +struct MediaTypeAggRow { + media_type: String, + total_ms: i64, + session_count: i32, +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +fn bytes_to_media_id(bytes: Vec) -> Option { + let arr: [u8; 16] = bytes.try_into().ok()?; + Some(MediaId::from_uuid(uuid::Uuid::from_bytes(arr))) +} + +fn parse_media_type(s: &str) -> MediaType { + match s { + "music" => MediaType::Music, + "audiobook" => MediaType::Audiobook, + "book" => MediaType::Book, + "comic" => MediaType::Comic, + "podcast" => MediaType::Podcast, + "news" => MediaType::News, + "movie" => MediaType::Movie, + "tv" => MediaType::Tv, + _ => MediaType::Music, + } +} + +// 
--------------------------------------------------------------------------- +// Session lifecycle +// --------------------------------------------------------------------------- + +pub async fn start_session( + pool: &SqlitePool, + session: &NewPlaySession, +) -> Result { + let id = SessionId::new(); + sqlx::query( + "INSERT INTO play_sessions + (id, media_id, user_id, media_type, started_at, source, + device_name, quality_score, dsp_active, total_ms) + VALUES (?, ?, ?, ?, strftime('%Y-%m-%dT%H:%M:%SZ', 'now'), + ?, ?, ?, ?, ?)", + ) + .bind(id.as_bytes().as_ref()) + .bind(session.media_id.as_bytes().as_ref()) + .bind(session.user_id.as_bytes().as_ref()) + .bind(session.media_type.to_string()) + .bind(session.source.as_str()) + .bind(&session.device_name) + .bind(session.quality_score) + .bind(session.dsp_active as i64) + .bind(session.total_ms) + .execute(pool) + .await + .context(QuerySnafu { + table: "play_sessions", + })?; + Ok(id) +} + +pub async fn end_session( + pool: &SqlitePool, + id: SessionId, + outcome: &SessionOutcome, +) -> Result<(), DbError> { + sqlx::query( + "UPDATE play_sessions + SET ended_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'), + duration_ms = ?, + completed = ?, + percent_heard = ? + WHERE id = ?", + ) + .bind(outcome.duration_ms) + .bind(outcome.completed as i64) + .bind(outcome.percent_heard) + .bind(id.as_bytes().as_ref()) + .execute(pool) + .await + .context(QuerySnafu { + table: "play_sessions", + })?; + Ok(()) +} + +pub async fn get_active_sessions( + pool: &SqlitePool, + user_id: UserId, +) -> Result, DbError> { + sqlx::query_as::<_, PlaySession>( + "SELECT id, media_id, user_id, media_type, started_at, ended_at, + duration_ms, total_ms, completed, percent_heard, source, + scrobble_eligible, scrobbled_at, scrobble_service, + device_name, quality_score, dsp_active + FROM play_sessions + WHERE user_id = ? 
AND ended_at IS NULL + ORDER BY started_at DESC", + ) + .bind(user_id.as_bytes().as_ref()) + .fetch_all(pool) + .await + .context(QuerySnafu { + table: "play_sessions", + }) +} + +// --------------------------------------------------------------------------- +// Scrobble queue +// --------------------------------------------------------------------------- + +pub async fn mark_scrobble_eligible( + pool: &SqlitePool, + session_id: SessionId, +) -> Result<(), DbError> { + sqlx::query("UPDATE play_sessions SET scrobble_eligible = 1 WHERE id = ?") + .bind(session_id.as_bytes().as_ref()) + .execute(pool) + .await + .context(QuerySnafu { + table: "play_sessions", + })?; + Ok(()) +} + +pub async fn get_pending_scrobbles( + pool: &SqlitePool, + user_id: UserId, +) -> Result, DbError> { + sqlx::query_as::<_, PlaySession>( + "SELECT id, media_id, user_id, media_type, started_at, ended_at, + duration_ms, total_ms, completed, percent_heard, source, + scrobble_eligible, scrobbled_at, scrobble_service, + device_name, quality_score, dsp_active + FROM play_sessions + WHERE user_id = ? AND scrobble_eligible = 1 AND scrobbled_at IS NULL + ORDER BY started_at ASC", + ) + .bind(user_id.as_bytes().as_ref()) + .fetch_all(pool) + .await + .context(QuerySnafu { + table: "play_sessions", + }) +} + +pub async fn mark_scrobbled( + pool: &SqlitePool, + session_id: SessionId, + service: &str, +) -> Result<(), DbError> { + sqlx::query( + "UPDATE play_sessions + SET scrobbled_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'), + scrobble_service = ? 
+ WHERE id = ?", + ) + .bind(service) + .bind(session_id.as_bytes().as_ref()) + .execute(pool) + .await + .context(QuerySnafu { + table: "play_sessions", + })?; + Ok(()) +} + +// --------------------------------------------------------------------------- +// Stats update +// --------------------------------------------------------------------------- + +pub async fn update_item_stats( + pool: &SqlitePool, + media_id: MediaId, + user_id: UserId, + session: &PlaySession, +) -> Result<(), DbError> { + let skip = session.percent_heard.map(|p| p < 50).unwrap_or(false) as i64; + sqlx::query( + "INSERT INTO play_stats_item + (media_id, user_id, play_count, total_ms, skip_count, + first_played_at, last_played_at) + VALUES (?, ?, 1, ?, ?, ?, ?) + ON CONFLICT(media_id, user_id) DO UPDATE SET + play_count = play_count + 1, + total_ms = total_ms + excluded.total_ms, + skip_count = skip_count + excluded.skip_count, + first_played_at = COALESCE(first_played_at, excluded.first_played_at), + last_played_at = excluded.last_played_at", + ) + .bind(media_id.as_bytes().as_ref()) + .bind(user_id.as_bytes().as_ref()) + .bind(session.duration_ms) + .bind(skip) + .bind(&session.started_at) + .bind(&session.started_at) + .execute(pool) + .await + .context(QuerySnafu { + table: "play_stats_item", + })?; + Ok(()) +} + +pub async fn update_daily_stats( + pool: &SqlitePool, + user_id: UserId, + date: &str, + media_type: MediaType, + media_id: MediaId, + duration_ms: i64, +) -> Result<(), DbError> { + sqlx::query( + "INSERT INTO play_stats_daily + (user_id, date, media_type, sessions, total_ms, unique_items) + VALUES (?, ?, ?, 1, ?, 1) + ON CONFLICT(user_id, date, media_type) DO UPDATE SET + sessions = sessions + 1, + total_ms = total_ms + excluded.total_ms", + ) + .bind(user_id.as_bytes().as_ref()) + .bind(date) + .bind(media_type.to_string()) + .bind(duration_ms) + .execute(pool) + .await + .context(QuerySnafu { + table: "play_stats_daily", + })?; + + // Recompute unique_items for this (user, 
date, media_type) bucket. + sqlx::query( + "UPDATE play_stats_daily + SET unique_items = ( + SELECT COUNT(DISTINCT media_id) + FROM play_sessions + WHERE user_id = ? + AND date(started_at) = ? + AND media_type = ? + ) + WHERE user_id = ? AND date = ? AND media_type = ?", + ) + .bind(user_id.as_bytes().as_ref()) + .bind(date) + .bind(media_type.to_string()) + .bind(user_id.as_bytes().as_ref()) + .bind(date) + .bind(media_type.to_string()) + .execute(pool) + .await + .context(QuerySnafu { + table: "play_stats_daily", + })?; + + let _ = media_id; + Ok(()) +} + +/// Update (or create) the current streak for `user_id`. +/// `today` must be an ISO date string in "YYYY-MM-DD" format. +pub async fn update_streak(pool: &SqlitePool, user_id: UserId, today: &str) -> Result<(), DbError> { + // Compute yesterday using SQLite so we stay free of date-math crates here. + let (yesterday,): (String,) = sqlx::query_as("SELECT date(?, '-1 day')") + .bind(today) + .fetch_one(pool) + .await + .context(QuerySnafu { + table: "play_streaks", + })?; + + let current = sqlx::query_as::<_, StreakRow>( + "SELECT streak_start, streak_end, days + FROM play_streaks + WHERE user_id = ? AND is_current = 1", + ) + .bind(user_id.as_bytes().as_ref()) + .fetch_optional(pool) + .await + .context(QuerySnafu { + table: "play_streaks", + })?; + + match current { + None => { + sqlx::query( + "INSERT INTO play_streaks + (user_id, streak_start, streak_end, days, is_current) + VALUES (?, ?, ?, 1, 1)", + ) + .bind(user_id.as_bytes().as_ref()) + .bind(today) + .bind(today) + .execute(pool) + .await + .context(QuerySnafu { + table: "play_streaks", + })?; + } + Some(ref row) if row.streak_end == today => { + // Already counted today — no-op. + } + Some(ref row) if row.streak_end == yesterday => { + sqlx::query( + "UPDATE play_streaks + SET streak_end = ?, days = days + 1 + WHERE user_id = ? 
AND is_current = 1", + ) + .bind(today) + .bind(user_id.as_bytes().as_ref()) + .execute(pool) + .await + .context(QuerySnafu { + table: "play_streaks", + })?; + } + Some(_) => { + sqlx::query( + "UPDATE play_streaks SET is_current = 0 WHERE user_id = ? AND is_current = 1", + ) + .bind(user_id.as_bytes().as_ref()) + .execute(pool) + .await + .context(QuerySnafu { + table: "play_streaks", + })?; + + sqlx::query( + "INSERT INTO play_streaks + (user_id, streak_start, streak_end, days, is_current) + VALUES (?, ?, ?, 1, 1)", + ) + .bind(user_id.as_bytes().as_ref()) + .bind(today) + .bind(today) + .execute(pool) + .await + .context(QuerySnafu { + table: "play_streaks", + })?; + } + } + + Ok(()) +} + +// --------------------------------------------------------------------------- +// Query — recent history +// --------------------------------------------------------------------------- + +pub async fn recent_sessions( + pool: &SqlitePool, + user_id: UserId, + limit: u32, +) -> Result, DbError> { + sqlx::query_as::<_, PlaySession>( + "SELECT id, media_id, user_id, media_type, started_at, ended_at, + duration_ms, total_ms, completed, percent_heard, source, + scrobble_eligible, scrobbled_at, scrobble_service, + device_name, quality_score, dsp_active + FROM play_sessions + WHERE user_id = ? + ORDER BY started_at DESC + LIMIT ?", + ) + .bind(user_id.as_bytes().as_ref()) + .bind(limit as i64) + .fetch_all(pool) + .await + .context(QuerySnafu { + table: "play_sessions", + }) +} + +pub async fn recent_by_media_type( + pool: &SqlitePool, + user_id: UserId, + media_type: MediaType, + limit: u32, +) -> Result, DbError> { + sqlx::query_as::<_, PlaySession>( + "SELECT id, media_id, user_id, media_type, started_at, ended_at, + duration_ms, total_ms, completed, percent_heard, source, + scrobble_eligible, scrobbled_at, scrobble_service, + device_name, quality_score, dsp_active + FROM play_sessions + WHERE user_id = ? AND media_type = ? 
+ ORDER BY started_at DESC + LIMIT ?", + ) + .bind(user_id.as_bytes().as_ref()) + .bind(media_type.to_string()) + .bind(limit as i64) + .fetch_all(pool) + .await + .context(QuerySnafu { + table: "play_sessions", + }) +} + +// --------------------------------------------------------------------------- +// Query — analytics +// --------------------------------------------------------------------------- + +pub async fn top_items( + pool: &SqlitePool, + user_id: UserId, + media_type: MediaType, + period: &DateRange, + limit: u32, +) -> Result, DbError> { + let rows = sqlx::query_as::<_, ItemStatsRow>( + "SELECT psi.media_id, psi.play_count, psi.total_ms, + psi.skip_count, psi.last_played_at + FROM play_stats_item psi + WHERE psi.user_id = ? + AND psi.media_id IN ( + SELECT DISTINCT ps.media_id + FROM play_sessions ps + WHERE ps.user_id = ? + AND ps.media_type = ? + AND date(ps.started_at) >= ? + AND date(ps.started_at) <= ? + ) + ORDER BY psi.play_count DESC + LIMIT ?", + ) + .bind(user_id.as_bytes().as_ref()) + .bind(user_id.as_bytes().as_ref()) + .bind(media_type.to_string()) + .bind(&period.start) + .bind(&period.end) + .bind(limit as i64) + .fetch_all(pool) + .await + .context(QuerySnafu { + table: "play_stats_item", + })?; + + Ok(rows + .into_iter() + .filter_map(|r| { + Some(ItemStats { + media_id: bytes_to_media_id(r.media_id)?, + play_count: r.play_count, + total_ms: r.total_ms, + skip_count: r.skip_count, + last_played_at: r.last_played_at, + }) + }) + .collect()) +} + +pub async fn listening_time( + pool: &SqlitePool, + user_id: UserId, + period: &DateRange, +) -> Result { + let rows = sqlx::query_as::<_, MediaTypeAggRow>( + "SELECT media_type, + SUM(total_ms) AS total_ms, + CAST(SUM(sessions) AS INTEGER) AS session_count + FROM play_stats_daily + WHERE user_id = ? AND date >= ? AND date <= ? 
+ GROUP BY media_type", + ) + .bind(user_id.as_bytes().as_ref()) + .bind(&period.start) + .bind(&period.end) + .fetch_all(pool) + .await + .context(QuerySnafu { + table: "play_stats_daily", + })?; + + let mut total_ms: i64 = 0; + let mut session_count: i32 = 0; + let mut by_media_type = Vec::with_capacity(rows.len()); + + for row in rows { + total_ms += row.total_ms; + session_count += row.session_count; + by_media_type.push((parse_media_type(&row.media_type), row.total_ms)); + } + + Ok(ListeningTimeSummary { + total_ms, + by_media_type, + session_count, + }) +} + +pub async fn daily_activity( + pool: &SqlitePool, + user_id: UserId, + period: &DateRange, +) -> Result, DbError> { + let rows = sqlx::query_as::<_, DailyStatsRow>( + "SELECT date, media_type, sessions, total_ms, unique_items + FROM play_stats_daily + WHERE user_id = ? AND date >= ? AND date <= ? + ORDER BY date ASC, media_type ASC", + ) + .bind(user_id.as_bytes().as_ref()) + .bind(&period.start) + .bind(&period.end) + .fetch_all(pool) + .await + .context(QuerySnafu { + table: "play_stats_daily", + })?; + + Ok(rows + .into_iter() + .map(|r| DailyStats { + date: r.date, + media_type: parse_media_type(&r.media_type), + sessions: r.sessions, + total_ms: r.total_ms, + unique_items: r.unique_items, + }) + .collect()) +} + +pub async fn current_streak(pool: &SqlitePool, user_id: UserId) -> Result, DbError> { + let row = sqlx::query_as::<_, StreakRow>( + "SELECT streak_start, streak_end, days + FROM play_streaks + WHERE user_id = ? 
AND is_current = 1", + ) + .bind(user_id.as_bytes().as_ref()) + .fetch_optional(pool) + .await + .context(QuerySnafu { + table: "play_streaks", + })?; + + Ok(row.map(|r| Streak { + start: r.streak_start, + end: r.streak_end, + days: r.days, + })) +} + +// --------------------------------------------------------------------------- +// Query — discovery support +// --------------------------------------------------------------------------- + +pub async fn never_played( + pool: &SqlitePool, + user_id: UserId, + media_type: MediaType, + limit: u32, +) -> Result, DbError> { + let table = match media_type { + MediaType::Music => "music_tracks", + MediaType::Audiobook => "audiobooks", + MediaType::Book => "books", + MediaType::Comic => "comics", + MediaType::Podcast => "podcast_episodes", + MediaType::News => "news_articles", + MediaType::Movie => "movies", + MediaType::Tv => "tv_episodes", + _ => return Ok(vec![]), + }; + + let sql = format!( + "SELECT id FROM {table} + WHERE id NOT IN ( + SELECT media_id FROM play_stats_item WHERE user_id = ? + ) + LIMIT ?" + ); + + let rows: Vec<(Vec,)> = sqlx::query_as(&sql) + .bind(user_id.as_bytes().as_ref()) + .bind(limit as i64) + .fetch_all(pool) + .await + .context(QuerySnafu { table })?; + + Ok(rows + .into_iter() + .filter_map(|(bytes,)| bytes_to_media_id(bytes)) + .collect()) +} + +pub async fn not_played_since( + pool: &SqlitePool, + user_id: UserId, + before: &str, + limit: u32, +) -> Result, DbError> { + let rows: Vec<(Vec,)> = sqlx::query_as( + "SELECT media_id FROM play_stats_item + WHERE user_id = ? AND last_played_at < ? 
+ ORDER BY last_played_at ASC + LIMIT ?", + ) + .bind(user_id.as_bytes().as_ref()) + .bind(before) + .bind(limit as i64) + .fetch_all(pool) + .await + .context(QuerySnafu { + table: "play_stats_item", + })?; + + Ok(rows + .into_iter() + .filter_map(|(bytes,)| bytes_to_media_id(bytes)) + .collect()) +} + +pub async fn on_this_day( + pool: &SqlitePool, + user_id: UserId, + month: u8, + day: u8, +) -> Result, DbError> { + let month_day = format!("{month:02}-{day:02}"); + sqlx::query_as::<_, PlaySession>( + "SELECT id, media_id, user_id, media_type, started_at, ended_at, + duration_ms, total_ms, completed, percent_heard, source, + scrobble_eligible, scrobbled_at, scrobble_service, + device_name, quality_score, dsp_active + FROM play_sessions + WHERE user_id = ? + AND strftime('%m-%d', started_at) = ? + ORDER BY started_at DESC", + ) + .bind(user_id.as_bytes().as_ref()) + .bind(month_day) + .fetch_all(pool) + .await + .context(QuerySnafu { + table: "play_sessions", + }) +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests; diff --git a/crates/harmonia-db/src/repo/play_history/tests.rs b/crates/harmonia-db/src/repo/play_history/tests.rs new file mode 100644 index 0000000..5701677 --- /dev/null +++ b/crates/harmonia-db/src/repo/play_history/tests.rs @@ -0,0 +1,789 @@ +use super::*; +use crate::migrate::MIGRATOR; + +async fn setup() -> SqlitePool { + let pool = SqlitePool::connect("sqlite::memory:").await.unwrap(); + MIGRATOR.run(&pool).await.unwrap(); + pool +} + +fn make_user_id() -> UserId { + UserId::new() +} + +fn make_media_id() -> MediaId { + MediaId::new() +} + +async fn insert_user(pool: &SqlitePool, user_id: UserId) { + sqlx::query( + "INSERT INTO users (id, username, display_name, password_hash, role) + VALUES (?, ?, ?, ?, ?)", + ) + .bind(user_id.as_bytes().as_ref()) + .bind(format!("user_{}", 
uuid::Uuid::now_v7())) + .bind("Test User") + .bind("$argon2id$placeholder") + .bind("member") + .execute(pool) + .await + .unwrap(); +} + +fn new_session(user_id: UserId, media_id: MediaId, media_type: MediaType) -> NewPlaySession { + NewPlaySession { + media_id, + user_id, + media_type, + source: PlaySource::Local, + device_name: None, + quality_score: None, + dsp_active: false, + total_ms: Some(210_000), + } +} + +// ----------------------------------------------------------------------- +// Session lifecycle +// ----------------------------------------------------------------------- + +#[tokio::test] +async fn start_session_creates_row_with_null_ended_at() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + let media_id = make_media_id(); + + let session_id = start_session(&pool, &new_session(user_id, media_id, MediaType::Music)) + .await + .unwrap(); + + let row: (Option,) = sqlx::query_as("SELECT ended_at FROM play_sessions WHERE id = ?") + .bind(session_id.as_bytes().as_ref()) + .fetch_one(&pool) + .await + .unwrap(); + + assert!(row.0.is_none()); +} + +#[tokio::test] +async fn end_session_populates_outcome_fields() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + + let session_id = start_session( + &pool, + &new_session(user_id, make_media_id(), MediaType::Music), + ) + .await + .unwrap(); + + end_session( + &pool, + session_id, + &SessionOutcome { + duration_ms: 180_000, + completed: false, + percent_heard: Some(85), + }, + ) + .await + .unwrap(); + + let row: (Option, i64, Option) = sqlx::query_as( + "SELECT ended_at, duration_ms, percent_heard FROM play_sessions WHERE id = ?", + ) + .bind(session_id.as_bytes().as_ref()) + .fetch_one(&pool) + .await + .unwrap(); + + assert!(row.0.is_some()); + assert_eq!(row.1, 180_000); + assert_eq!(row.2, Some(85)); +} + +#[tokio::test] +async fn end_session_completed_flag() { + let pool = setup().await; + let user_id = 
make_user_id(); + insert_user(&pool, user_id).await; + + let session_id = start_session( + &pool, + &new_session(user_id, make_media_id(), MediaType::Music), + ) + .await + .unwrap(); + + end_session( + &pool, + session_id, + &SessionOutcome { + duration_ms: 210_000, + completed: true, + percent_heard: Some(100), + }, + ) + .await + .unwrap(); + + let (completed,): (i64,) = sqlx::query_as("SELECT completed FROM play_sessions WHERE id = ?") + .bind(session_id.as_bytes().as_ref()) + .fetch_one(&pool) + .await + .unwrap(); + + assert_eq!(completed, 1); +} + +#[tokio::test] +async fn get_active_sessions_excludes_ended() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + + let active_id = start_session( + &pool, + &new_session(user_id, make_media_id(), MediaType::Music), + ) + .await + .unwrap(); + let ended_id = start_session( + &pool, + &new_session(user_id, make_media_id(), MediaType::Music), + ) + .await + .unwrap(); + + end_session( + &pool, + ended_id, + &SessionOutcome { + duration_ms: 100, + completed: false, + percent_heard: None, + }, + ) + .await + .unwrap(); + + let active = get_active_sessions(&pool, user_id).await.unwrap(); + assert_eq!(active.len(), 1); + assert_eq!(active[0].id, active_id.as_bytes().to_vec()); +} + +// ----------------------------------------------------------------------- +// Scrobble tracking +// ----------------------------------------------------------------------- + +#[tokio::test] +async fn mark_scrobble_eligible_sets_flag() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + + let session_id = start_session( + &pool, + &new_session(user_id, make_media_id(), MediaType::Music), + ) + .await + .unwrap(); + + mark_scrobble_eligible(&pool, session_id).await.unwrap(); + + let (flag,): (i64,) = + sqlx::query_as("SELECT scrobble_eligible FROM play_sessions WHERE id = ?") + .bind(session_id.as_bytes().as_ref()) + .fetch_one(&pool) + .await + 
.unwrap(); + + assert_eq!(flag, 1); +} + +#[tokio::test] +async fn get_pending_scrobbles_returns_eligible_unscrobbled() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + + let s1 = start_session( + &pool, + &new_session(user_id, make_media_id(), MediaType::Music), + ) + .await + .unwrap(); + let s2 = start_session( + &pool, + &new_session(user_id, make_media_id(), MediaType::Music), + ) + .await + .unwrap(); + let _s3 = start_session( + &pool, + &new_session(user_id, make_media_id(), MediaType::Music), + ) + .await + .unwrap(); + + mark_scrobble_eligible(&pool, s1).await.unwrap(); + mark_scrobble_eligible(&pool, s2).await.unwrap(); + mark_scrobbled(&pool, s2, "lastfm").await.unwrap(); + + let pending = get_pending_scrobbles(&pool, user_id).await.unwrap(); + assert_eq!(pending.len(), 1); + assert_eq!(pending[0].id, s1.as_bytes().to_vec()); +} + +#[tokio::test] +async fn mark_scrobbled_sets_service_and_timestamp() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + + let session_id = start_session( + &pool, + &new_session(user_id, make_media_id(), MediaType::Music), + ) + .await + .unwrap(); + + mark_scrobble_eligible(&pool, session_id).await.unwrap(); + mark_scrobbled(&pool, session_id, "listenbrainz") + .await + .unwrap(); + + let row: (Option, Option) = + sqlx::query_as("SELECT scrobbled_at, scrobble_service FROM play_sessions WHERE id = ?") + .bind(session_id.as_bytes().as_ref()) + .fetch_one(&pool) + .await + .unwrap(); + + assert!(row.0.is_some()); + assert_eq!(row.1.as_deref(), Some("listenbrainz")); +} + +// ----------------------------------------------------------------------- +// Stats aggregation +// ----------------------------------------------------------------------- + +#[tokio::test] +async fn update_item_stats_increments_play_count() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + let media_id = 
make_media_id(); + + let session_id = start_session(&pool, &new_session(user_id, media_id, MediaType::Music)) + .await + .unwrap(); + end_session( + &pool, + session_id, + &SessionOutcome { + duration_ms: 180_000, + completed: true, + percent_heard: Some(100), + }, + ) + .await + .unwrap(); + + let session = sqlx::query_as::<_, PlaySession>( + "SELECT id, media_id, user_id, media_type, started_at, ended_at, + duration_ms, total_ms, completed, percent_heard, source, + scrobble_eligible, scrobbled_at, scrobble_service, + device_name, quality_score, dsp_active + FROM play_sessions WHERE id = ?", + ) + .bind(session_id.as_bytes().as_ref()) + .fetch_one(&pool) + .await + .unwrap(); + + update_item_stats(&pool, media_id, user_id, &session) + .await + .unwrap(); + update_item_stats(&pool, media_id, user_id, &session) + .await + .unwrap(); + + let (play_count, total_ms): (i32, i64) = sqlx::query_as( + "SELECT play_count, total_ms FROM play_stats_item WHERE media_id = ? AND user_id = ?", + ) + .bind(media_id.as_bytes().as_ref()) + .bind(user_id.as_bytes().as_ref()) + .fetch_one(&pool) + .await + .unwrap(); + + assert_eq!(play_count, 2); + assert_eq!(total_ms, 360_000); +} + +#[tokio::test] +async fn update_item_stats_skip_count_when_percent_under_50() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + let media_id = make_media_id(); + + let session_id = start_session(&pool, &new_session(user_id, media_id, MediaType::Music)) + .await + .unwrap(); + end_session( + &pool, + session_id, + &SessionOutcome { + duration_ms: 30_000, + completed: false, + percent_heard: Some(14), + }, + ) + .await + .unwrap(); + + let session = sqlx::query_as::<_, PlaySession>( + "SELECT id, media_id, user_id, media_type, started_at, ended_at, + duration_ms, total_ms, completed, percent_heard, source, + scrobble_eligible, scrobbled_at, scrobble_service, + device_name, quality_score, dsp_active + FROM play_sessions WHERE id = ?", + ) + 
.bind(session_id.as_bytes().as_ref()) + .fetch_one(&pool) + .await + .unwrap(); + + update_item_stats(&pool, media_id, user_id, &session) + .await + .unwrap(); + + let (skip_count,): (i32,) = + sqlx::query_as("SELECT skip_count FROM play_stats_item WHERE media_id = ? AND user_id = ?") + .bind(media_id.as_bytes().as_ref()) + .bind(user_id.as_bytes().as_ref()) + .fetch_one(&pool) + .await + .unwrap(); + + assert_eq!(skip_count, 1); +} + +#[tokio::test] +async fn update_item_stats_first_played_set_once() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + let media_id = make_media_id(); + + // First play + let s1 = start_session(&pool, &new_session(user_id, media_id, MediaType::Music)) + .await + .unwrap(); + end_session( + &pool, + s1, + &SessionOutcome { + duration_ms: 100, + completed: false, + percent_heard: None, + }, + ) + .await + .unwrap(); + let sess1 = sqlx::query_as::<_, PlaySession>( + "SELECT id, media_id, user_id, media_type, started_at, ended_at, + duration_ms, total_ms, completed, percent_heard, source, + scrobble_eligible, scrobbled_at, scrobble_service, + device_name, quality_score, dsp_active + FROM play_sessions WHERE id = ?", + ) + .bind(s1.as_bytes().as_ref()) + .fetch_one(&pool) + .await + .unwrap(); + update_item_stats(&pool, media_id, user_id, &sess1) + .await + .unwrap(); + + let (first1,): (Option<String>,) = sqlx::query_as( + "SELECT first_played_at FROM play_stats_item WHERE media_id = ? 
AND user_id = ?", + ) + .bind(media_id.as_bytes().as_ref()) + .bind(user_id.as_bytes().as_ref()) + .fetch_one(&pool) + .await + .unwrap(); + assert!(first1.is_some()); + + // Second play + let s2 = start_session(&pool, &new_session(user_id, media_id, MediaType::Music)) + .await + .unwrap(); + end_session( + &pool, + s2, + &SessionOutcome { + duration_ms: 100, + completed: false, + percent_heard: None, + }, + ) + .await + .unwrap(); + let sess2 = sqlx::query_as::<_, PlaySession>( + "SELECT id, media_id, user_id, media_type, started_at, ended_at, + duration_ms, total_ms, completed, percent_heard, source, + scrobble_eligible, scrobbled_at, scrobble_service, + device_name, quality_score, dsp_active + FROM play_sessions WHERE id = ?", + ) + .bind(s2.as_bytes().as_ref()) + .fetch_one(&pool) + .await + .unwrap(); + update_item_stats(&pool, media_id, user_id, &sess2) + .await + .unwrap(); + + let (first2, last2): (Option<String>, Option<String>) = sqlx::query_as( + "SELECT first_played_at, last_played_at FROM play_stats_item WHERE media_id = ? AND user_id = ?", + ) + .bind(media_id.as_bytes().as_ref()) + .bind(user_id.as_bytes().as_ref()) + .fetch_one(&pool) + .await + .unwrap(); + + // first_played_at unchanged, last_played_at updated + assert_eq!(first2, first1); + assert!(last2.is_some()); +} + +#[tokio::test] +async fn update_daily_stats_upsert() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + let media_id = make_media_id(); + + update_daily_stats( + &pool, + user_id, + "2026-03-12", + MediaType::Music, + media_id, + 180_000, + ) + .await + .unwrap(); + update_daily_stats( + &pool, + user_id, + "2026-03-12", + MediaType::Music, + media_id, + 210_000, + ) + .await + .unwrap(); + + let (sessions, total_ms): (i32, i64) = sqlx::query_as( + "SELECT sessions, total_ms FROM play_stats_daily WHERE user_id = ? AND date = ? 
AND media_type = ?", + ) + .bind(user_id.as_bytes().as_ref()) + .bind("2026-03-12") + .bind("music") + .fetch_one(&pool) + .await + .unwrap(); + + assert_eq!(sessions, 2); + assert_eq!(total_ms, 390_000); +} + +// ----------------------------------------------------------------------- +// Analytics queries +// ----------------------------------------------------------------------- + +#[tokio::test] +async fn top_items_ordered_by_play_count() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + + let m1 = make_media_id(); + let m2 = make_media_id(); + + // m2 played twice, m1 played once + for media_id in [m1, m2, m2] { + let s = start_session(&pool, &new_session(user_id, media_id, MediaType::Music)) + .await + .unwrap(); + end_session( + &pool, + s, + &SessionOutcome { + duration_ms: 100, + completed: true, + percent_heard: Some(100), + }, + ) + .await + .unwrap(); + let session = sqlx::query_as::<_, PlaySession>( + "SELECT id, media_id, user_id, media_type, started_at, ended_at, + duration_ms, total_ms, completed, percent_heard, source, + scrobble_eligible, scrobbled_at, scrobble_service, + device_name, quality_score, dsp_active + FROM play_sessions WHERE id = ?", + ) + .bind(s.as_bytes().as_ref()) + .fetch_one(&pool) + .await + .unwrap(); + update_item_stats(&pool, media_id, user_id, &session) + .await + .unwrap(); + } + + let period = DateRange { + start: "2000-01-01".to_string(), + end: "2099-12-31".to_string(), + }; + let items = top_items(&pool, user_id, MediaType::Music, &period, 10) + .await + .unwrap(); + + assert_eq!(items.len(), 2); + assert_eq!(items[0].media_id, m2); + assert_eq!(items[0].play_count, 2); + assert_eq!(items[1].media_id, m1); + assert_eq!(items[1].play_count, 1); +} + +#[tokio::test] +async fn listening_time_aggregates_across_media_types() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + let media_id = make_media_id(); + + 
update_daily_stats( + &pool, + user_id, + "2026-03-10", + MediaType::Music, + media_id, + 100_000, + ) + .await + .unwrap(); + update_daily_stats( + &pool, + user_id, + "2026-03-11", + MediaType::Podcast, + media_id, + 200_000, + ) + .await + .unwrap(); + update_daily_stats( + &pool, + user_id, + "2026-03-12", + MediaType::Music, + media_id, + 50_000, + ) + .await + .unwrap(); + + let period = DateRange { + start: "2026-03-10".to_string(), + end: "2026-03-12".to_string(), + }; + let summary = listening_time(&pool, user_id, &period).await.unwrap(); + + assert_eq!(summary.total_ms, 350_000); + assert_eq!(summary.session_count, 3); + assert_eq!(summary.by_media_type.len(), 2); +} + +#[tokio::test] +async fn daily_activity_returns_one_row_per_date_media_type() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + let media_id = make_media_id(); + + update_daily_stats( + &pool, + user_id, + "2026-03-10", + MediaType::Music, + media_id, + 100_000, + ) + .await + .unwrap(); + update_daily_stats( + &pool, + user_id, + "2026-03-11", + MediaType::Music, + media_id, + 200_000, + ) + .await + .unwrap(); + + let period = DateRange { + start: "2026-03-10".to_string(), + end: "2026-03-11".to_string(), + }; + let rows = daily_activity(&pool, user_id, &period).await.unwrap(); + + assert_eq!(rows.len(), 2); + assert_eq!(rows[0].date, "2026-03-10"); + assert_eq!(rows[1].date, "2026-03-11"); +} + +#[tokio::test] +async fn on_this_day_returns_same_month_day_sessions() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + let media_id = make_media_id(); + + // Insert sessions with explicit started_at timestamps + sqlx::query( + "INSERT INTO play_sessions + (id, media_id, user_id, media_type, started_at, source) + VALUES (?, ?, ?, 'music', '2024-03-12T10:00:00Z', 'local'), + (?, ?, ?, 'music', '2025-03-12T11:00:00Z', 'local'), + (?, ?, ?, 'music', '2026-03-15T12:00:00Z', 'local')", + ) + 
.bind(SessionId::new().as_bytes().as_ref()) + .bind(media_id.as_bytes().as_ref()) + .bind(user_id.as_bytes().as_ref()) + .bind(SessionId::new().as_bytes().as_ref()) + .bind(media_id.as_bytes().as_ref()) + .bind(user_id.as_bytes().as_ref()) + .bind(SessionId::new().as_bytes().as_ref()) + .bind(media_id.as_bytes().as_ref()) + .bind(user_id.as_bytes().as_ref()) + .execute(&pool) + .await + .unwrap(); + + let sessions = on_this_day(&pool, user_id, 3, 12).await.unwrap(); + assert_eq!(sessions.len(), 2); +} + +#[tokio::test] +async fn not_played_since_filters_by_last_played() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + + let m1 = make_media_id(); + let m2 = make_media_id(); + + sqlx::query( + "INSERT INTO play_stats_item + (media_id, user_id, play_count, total_ms, last_played_at) + VALUES (?, ?, 3, 100, '2025-01-01T00:00:00Z'), + (?, ?, 1, 100, '2026-03-01T00:00:00Z')", + ) + .bind(m1.as_bytes().as_ref()) + .bind(user_id.as_bytes().as_ref()) + .bind(m2.as_bytes().as_ref()) + .bind(user_id.as_bytes().as_ref()) + .execute(&pool) + .await + .unwrap(); + + let result = not_played_since(&pool, user_id, "2026-01-01T00:00:00Z", 10) + .await + .unwrap(); + + assert_eq!(result.len(), 1); + assert_eq!(result[0], m1); +} + +// ----------------------------------------------------------------------- +// Streak tracking +// ----------------------------------------------------------------------- + +#[tokio::test] +async fn streak_first_play_creates_streak_of_one() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + + update_streak(&pool, user_id, "2026-03-12").await.unwrap(); + + let streak = current_streak(&pool, user_id).await.unwrap().unwrap(); + assert_eq!(streak.start, "2026-03-12"); + assert_eq!(streak.end, "2026-03-12"); + assert_eq!(streak.days, 1); +} + +#[tokio::test] +async fn streak_consecutive_day_extends() { + let pool = setup().await; + let user_id = make_user_id(); 
+ insert_user(&pool, user_id).await; + + update_streak(&pool, user_id, "2026-03-11").await.unwrap(); + update_streak(&pool, user_id, "2026-03-12").await.unwrap(); + + let streak = current_streak(&pool, user_id).await.unwrap().unwrap(); + assert_eq!(streak.start, "2026-03-11"); + assert_eq!(streak.end, "2026-03-12"); + assert_eq!(streak.days, 2); +} + +#[tokio::test] +async fn streak_same_day_is_idempotent() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + + update_streak(&pool, user_id, "2026-03-12").await.unwrap(); + update_streak(&pool, user_id, "2026-03-12").await.unwrap(); + + let streak = current_streak(&pool, user_id).await.unwrap().unwrap(); + assert_eq!(streak.days, 1); +} + +#[tokio::test] +async fn streak_gap_closes_old_and_starts_new() { + let pool = setup().await; + let user_id = make_user_id(); + insert_user(&pool, user_id).await; + + update_streak(&pool, user_id, "2026-03-10").await.unwrap(); + update_streak(&pool, user_id, "2026-03-11").await.unwrap(); + // Gap: skip 2026-03-12 + update_streak(&pool, user_id, "2026-03-13").await.unwrap(); + + let streak = current_streak(&pool, user_id).await.unwrap().unwrap(); + assert_eq!(streak.start, "2026-03-13"); + assert_eq!(streak.days, 1); + + let (closed_count,): (i32,) = + sqlx::query_as("SELECT COUNT(*) FROM play_streaks WHERE user_id = ? 
AND is_current = 0") + .bind(user_id.as_bytes().as_ref()) + .fetch_one(&pool) + .await + .unwrap(); + assert_eq!(closed_count, 1); +} diff --git a/crates/harmonia-host/tests/acquisition_integration.rs b/crates/harmonia-host/tests/acquisition_integration.rs index f540c6d..cf0dfeb 100644 --- a/crates/harmonia-host/tests/acquisition_integration.rs +++ b/crates/harmonia-host/tests/acquisition_integration.rs @@ -6,7 +6,6 @@ use std::pin::Pin; use std::sync::Arc; -use std::time::Duration; use axum::body::Body; use axum::http::{Request, StatusCode}; @@ -18,13 +17,13 @@ use uuid::Uuid; use ergasia::{DownloadProgress, DownloadState, ErgasiaError, ExtractionResult}; use exousia::{AuthService, CreateUserRequest, ExousiaServiceImpl, UserRole}; -use harmonia_common::ids::{DownloadId, ReleaseId, WantId}; -use harmonia_common::{HarmoniaEvent, create_event_bus}; +use harmonia_common::ids::DownloadId; +use harmonia_common::create_event_bus; use harmonia_db::DbPools; use harmonia_db::migrate::MIGRATOR; -use horismos::{Config, ExousiaConfig, SyntaxisConfig}; +use horismos::{Config, ExousiaConfig}; use paroche::state::{AppState, DynSearchService, ServiceFut}; -use syntaxis::{CompletedDownload, ImportService, QueueItem, QueueManager, SyntaxisService}; +use syntaxis::{CompletedDownload, ImportService}; // ── Mock search service ────────────────────────────────────────────────────── @@ -708,409 +707,7 @@ async fn member_on_admin_routes_returns_403() -> Result<(), TestError> { Ok(()) } -// ── Pipeline integration tests (SyntaxisService + MockEngine) ──────────────── - -fn test_syntaxis_config() -> SyntaxisConfig { - SyntaxisConfig { - max_concurrent_downloads: 5, - max_per_tracker: 3, - retry_count: 2, - retry_backoff_base_seconds: 0, - stalled_download_timeout_hours: 24, - } -} - -fn make_queue_item(priority: u8) -> QueueItem { - QueueItem { - id: Uuid::now_v7(), - want_id: WantId::new(), - release_id: ReleaseId::new(), - download_url: format!("magnet:?xt=urn:btih:{}", Uuid::now_v7()), - 
protocol: syntaxis::DownloadProtocol::Torrent, - priority, - tracker_id: None, - info_hash: None, - } -} - -#[tokio::test] -async fn pipeline_enqueue_dispatches_to_engine() -> Result<(), TestError> { - let pool = test_db().await?; - let (started_tx, mut started_rx) = mpsc::unbounded_channel(); - let (imported_tx, _imported_rx) = mpsc::unbounded_channel(); - - let engine = Arc::new(MockEngine { started_tx }); - let import_svc: Arc = Arc::new(MockImportService { imported_tx }); - let config = test_syntaxis_config(); - - let svc = Arc::new(SyntaxisService::new(pool, engine, import_svc, config).await?); - - // Enqueue at priority 4 (interactive bypass) to trigger immediate dispatch - let item = make_queue_item(4); - let pos = svc.enqueue(item).await?; - assert_eq!(pos.position, 0); - - // Wait for the spawned dispatch task to call start_download - let dl_id = tokio::time::timeout(Duration::from_secs(5), started_rx.recv()) - .await? - .expect("engine should have received start_download"); - - // Verify we got a valid download ID back - assert!(!dl_id.to_string().is_empty()); - Ok(()) -} - -#[tokio::test] -async fn pipeline_completion_triggers_import() -> Result<(), TestError> { - let pool = test_db().await?; - let (started_tx, mut started_rx) = mpsc::unbounded_channel(); - let (imported_tx, mut imported_rx) = mpsc::unbounded_channel(); - let (event_tx, _) = create_event_bus(64); - - let engine = Arc::new(MockEngine { started_tx }); - let import_svc: Arc = Arc::new(MockImportService { imported_tx }); - let config = test_syntaxis_config(); - - let svc = Arc::new(SyntaxisService::new(pool, engine, import_svc, config).await?); - let shutdown = tokio_util::sync::CancellationToken::new(); - svc.start(event_tx.subscribe(), shutdown.clone()); - - // Enqueue at priority 4 to dispatch immediately - svc.enqueue(make_queue_item(4)).await?; - - // Wait for engine to receive the download - let dl_id = tokio::time::timeout(Duration::from_secs(5), started_rx.recv()) - .await? 
- .expect("engine should have received start_download"); - - // Simulate download completion via event bus - event_tx.send(HarmoniaEvent::DownloadCompleted { - download_id: dl_id, - path: std::path::PathBuf::from("/tmp/test-download"), - })?; - - // Wait for import to be triggered - let imported_id = tokio::time::timeout(Duration::from_secs(5), imported_rx.recv()) - .await? - .expect("import service should have been called"); - - assert_eq!(imported_id.to_string(), dl_id.to_string()); - - shutdown.cancel(); - Ok(()) -} - -#[tokio::test] -async fn pipeline_priority_ordering_in_queue() -> Result<(), TestError> { - let pool = test_db().await?; - let (started_tx, _started_rx) = mpsc::unbounded_channel(); - let (imported_tx, _imported_rx) = mpsc::unbounded_channel(); - - let engine = Arc::new(MockEngine { started_tx }); - let import_svc: Arc = Arc::new(MockImportService { imported_tx }); - // Set max_concurrent to 0 so nothing dispatches (all items stay queued) - let config = SyntaxisConfig { - max_concurrent_downloads: 0, - max_per_tracker: 0, - retry_count: 2, - retry_backoff_base_seconds: 0, - stalled_download_timeout_hours: 24, - }; - - let svc = Arc::new(SyntaxisService::new(pool, engine, import_svc, config).await?); - - // Enqueue items at different priorities - svc.enqueue(make_queue_item(1)).await?; - svc.enqueue(make_queue_item(3)).await?; - svc.enqueue(make_queue_item(2)).await?; - - let snapshot = svc.get_queue_state().await?; - assert_eq!(snapshot.queued_items.len(), 3); - // Items ordered by priority: 3, 2, 1 (highest first) - assert_eq!(snapshot.queued_items[0].priority, 3); - assert_eq!(snapshot.queued_items[1].priority, 2); - assert_eq!(snapshot.queued_items[2].priority, 1); - Ok(()) -} - -#[tokio::test] -async fn pipeline_fifo_within_same_priority_tier() -> Result<(), TestError> { - let pool = test_db().await?; - let (started_tx, _started_rx) = mpsc::unbounded_channel(); - let (imported_tx, _imported_rx) = mpsc::unbounded_channel(); - - let engine = 
Arc::new(MockEngine { started_tx }); - let import_svc: Arc = Arc::new(MockImportService { imported_tx }); - let config = SyntaxisConfig { - max_concurrent_downloads: 0, - max_per_tracker: 0, - retry_count: 2, - retry_backoff_base_seconds: 0, - stalled_download_timeout_hours: 24, - }; - - let svc = Arc::new(SyntaxisService::new(pool, engine, import_svc, config).await?); - - let item_a = make_queue_item(2); - let item_b = make_queue_item(2); - let id_a = item_a.id; - let id_b = item_b.id; - - svc.enqueue(item_a).await?; - svc.enqueue(item_b).await?; - - let snapshot = svc.get_queue_state().await?; - assert_eq!(snapshot.queued_items.len(), 2); - // FIFO: first enqueued first - assert_eq!(snapshot.queued_items[0].id, id_a); - assert_eq!(snapshot.queued_items[1].id, id_b); - Ok(()) -} - -#[tokio::test] -async fn pipeline_transient_failure_triggers_retry() -> Result<(), TestError> { - let pool = test_db().await?; - let (started_tx, mut started_rx) = mpsc::unbounded_channel(); - let (imported_tx, _imported_rx) = mpsc::unbounded_channel(); - let (event_tx, _) = create_event_bus(64); - - let engine = Arc::new(MockEngine { started_tx }); - let import_svc: Arc = Arc::new(MockImportService { imported_tx }); - let config = SyntaxisConfig { - max_concurrent_downloads: 5, - max_per_tracker: 3, - retry_count: 3, - retry_backoff_base_seconds: 0, - stalled_download_timeout_hours: 24, - }; - - let svc = Arc::new(SyntaxisService::new(pool.clone(), engine, import_svc, config).await?); - let shutdown = tokio_util::sync::CancellationToken::new(); - svc.start(event_tx.subscribe(), shutdown.clone()); - - let item = make_queue_item(4); - let queue_id = item.id; - svc.enqueue(item).await?; - - // Wait for dispatch - let dl_id = tokio::time::timeout(Duration::from_secs(5), started_rx.recv()) - .await? 
- .expect("engine should start download"); - - // Send transient failure (network error, not in permanent patterns) - event_tx.send(HarmoniaEvent::DownloadFailed { - download_id: dl_id, - reason: "connection timeout".to_string(), - })?; - - // Wait for retry processing - tokio::time::sleep(Duration::from_millis(200)).await; - - // Verify retry_count was incremented in DB and status reset to queued - let row: (i64, String) = - sqlx::query_as("SELECT retry_count, status FROM download_queue WHERE id = ?") - .bind(queue_id.as_bytes().as_slice()) - .fetch_one(&pool) - .await?; - assert_eq!( - row.0, 1, - "retry_count should be 1 after first transient failure" - ); - assert_eq!( - row.1, "queued", - "status should be reset to queued for retry" - ); - - shutdown.cancel(); - Ok(()) -} - -#[tokio::test] -async fn pipeline_permanent_failure_marks_failed() -> Result<(), TestError> { - let pool = test_db().await?; - let (started_tx, mut started_rx) = mpsc::unbounded_channel(); - let (imported_tx, _imported_rx) = mpsc::unbounded_channel(); - let (event_tx, _) = create_event_bus(64); - - let engine = Arc::new(MockEngine { started_tx }); - let import_svc: Arc = Arc::new(MockImportService { imported_tx }); - let config = test_syntaxis_config(); - - let svc = Arc::new(SyntaxisService::new(pool.clone(), engine, import_svc, config).await?); - let shutdown = tokio_util::sync::CancellationToken::new(); - svc.start(event_tx.subscribe(), shutdown.clone()); - - let item = make_queue_item(4); - let queue_id = item.id; - svc.enqueue(item).await?; - - let dl_id = tokio::time::timeout(Duration::from_secs(5), started_rx.recv()) - .await? 
- .expect("engine should start download"); - - // Send permanent failure (contains "no seeders" which matches permanent pattern) - event_tx.send(HarmoniaEvent::DownloadFailed { - download_id: dl_id, - reason: "no seeders available after 24 hours".to_string(), - })?; - - tokio::time::sleep(Duration::from_millis(200)).await; - - let row: (String, Option) = - sqlx::query_as("SELECT status, failed_reason FROM download_queue WHERE id = ?") - .bind(queue_id.as_bytes().as_slice()) - .fetch_one(&pool) - .await?; - assert_eq!(row.0, "failed"); - assert!(row.1.as_deref().unwrap_or("").contains("no seeders")); - - shutdown.cancel(); - Ok(()) -} - -#[tokio::test] -async fn pipeline_retry_budget_exhaustion_marks_failed() -> Result<(), TestError> { - // NOTE: SyntaxisService has a bug where ActiveEntry.retry_count is always - // initialised to 0 regardless of how many retries have occurred in the DB. - // This means the in-memory retry_count check (`retry_count >= max_retries`) - // only works when max_retries is 0. We set retry_count=0 in config so - // the very first transient failure immediately exhausts the budget. 
- let pool = test_db().await?; - let (started_tx, mut started_rx) = mpsc::unbounded_channel(); - let (imported_tx, _imported_rx) = mpsc::unbounded_channel(); - let (event_tx, _) = create_event_bus(64); - - let engine = Arc::new(MockEngine { started_tx }); - let import_svc: Arc = Arc::new(MockImportService { imported_tx }); - let config = SyntaxisConfig { - max_concurrent_downloads: 5, - max_per_tracker: 3, - retry_count: 0, - retry_backoff_base_seconds: 0, - stalled_download_timeout_hours: 24, - }; - - let svc = Arc::new(SyntaxisService::new(pool.clone(), engine, import_svc, config).await?); - let shutdown = tokio_util::sync::CancellationToken::new(); - svc.start(event_tx.subscribe(), shutdown.clone()); - - let item = make_queue_item(4); - let queue_id = item.id; - svc.enqueue(item).await?; - - let dl_id = tokio::time::timeout(Duration::from_secs(5), started_rx.recv()) - .await? - .expect("engine should start download"); - - // Transient failure with zero retries allowed → immediate budget exhaustion - event_tx.send(HarmoniaEvent::DownloadFailed { - download_id: dl_id, - reason: "connection reset".to_string(), - })?; - - tokio::time::sleep(Duration::from_millis(200)).await; - - let row: (String, Option) = - sqlx::query_as("SELECT status, failed_reason FROM download_queue WHERE id = ?") - .bind(queue_id.as_bytes().as_slice()) - .fetch_one(&pool) - .await?; - assert_eq!(row.0, "failed"); - assert!( - row.1 - .as_deref() - .unwrap_or("") - .contains("retry budget exhausted") - ); - - shutdown.cancel(); - Ok(()) -} - -// ── Startup recovery tests ─────────────────────────────────────────────────── - -#[tokio::test] -async fn startup_recovery_loads_queued_items_from_db() -> Result<(), TestError> { - let pool = test_db().await?; - - // Insert non-terminal rows directly into DB (simulating prior state) - let id_queued = Uuid::now_v7(); - let id_downloading = Uuid::now_v7(); - let id_completed = Uuid::now_v7(); - let want_id = Uuid::now_v7().as_bytes().to_vec(); - let 
release_id = Uuid::now_v7().as_bytes().to_vec(); - - for (id, status) in [ - (id_queued, "queued"), - (id_downloading, "downloading"), - (id_completed, "completed"), - ] { - sqlx::query( - "INSERT INTO download_queue \ - (id, want_id, release_id, download_url, protocol, priority, status, added_at, retry_count) \ - VALUES (?, ?, ?, 'magnet:test', 'torrent', 2, ?, '2026-01-01T00:00:00Z', 0)", - ) - .bind(id.as_bytes().as_slice()) - .bind(&want_id) - .bind(&release_id) - .bind(status) - .execute(&pool) - .await?; - } - - // Boot SyntaxisService — recovery should load non-terminal items - let (started_tx, _started_rx) = mpsc::unbounded_channel(); - let (imported_tx, _imported_rx) = mpsc::unbounded_channel(); - let engine = Arc::new(MockEngine { started_tx }); - let import_svc: Arc = Arc::new(MockImportService { imported_tx }); - let config = SyntaxisConfig { - max_concurrent_downloads: 0, - max_per_tracker: 0, - retry_count: 3, - retry_backoff_base_seconds: 30, - stalled_download_timeout_hours: 24, - }; - - let svc = Arc::new(SyntaxisService::new(pool, engine, import_svc, config).await?); - - let snapshot = svc.get_queue_state().await?; - // 'queued' and 'downloading' are non-terminal and should be recovered - // 'completed' is terminal and should NOT be recovered - assert_eq!( - snapshot.queued_items.len(), - 2, - "both 'queued' and 'downloading' rows should be recovered into the in-memory queue" - ); - assert_eq!(snapshot.completed_count, 1); - Ok(()) -} - -#[tokio::test] -async fn startup_recovery_visible_via_http_snapshot() -> Result<(), TestError> { - let (state, auth, pool) = test_state().await?; - let token = admin_token(&auth).await?; - - // Insert a queued row directly - let id = Uuid::now_v7(); - let want_id = Uuid::now_v7().as_bytes().to_vec(); - let release_id = Uuid::now_v7().as_bytes().to_vec(); - sqlx::query( - "INSERT INTO download_queue \ - (id, want_id, release_id, download_url, protocol, priority, status, added_at, retry_count) \ - VALUES (?, ?, ?, 
'magnet:test', 'torrent', 3, 'queued', '2026-01-01T00:00:00Z', 0)", - ) - .bind(id.as_bytes().as_slice()) - .bind(&want_id) - .bind(&release_id) - .execute(&pool) - .await?; - - let app = build_app(state); - - let (_, json) = get_queue_snapshot(&app, &token).await?; - let queued = json["data"]["queued"].as_array().unwrap(); - assert_eq!(queued.len(), 1); - assert_eq!(queued[0]["priority"], 3); - assert_eq!(queued[0]["status"], "queued"); - Ok(()) -} +#[path = "acquisition_integration/pipeline_tests.rs"] +mod pipeline_tests; +#[path = "acquisition_integration/recovery_tests.rs"] +mod recovery_tests; diff --git a/crates/harmonia-host/tests/acquisition_integration/pipeline_tests.rs b/crates/harmonia-host/tests/acquisition_integration/pipeline_tests.rs new file mode 100644 index 0000000..79777c4 --- /dev/null +++ b/crates/harmonia-host/tests/acquisition_integration/pipeline_tests.rs @@ -0,0 +1,317 @@ +use std::sync::Arc; +use std::time::Duration; + +use tokio::sync::mpsc; + +use harmonia_common::ids::{ReleaseId, WantId}; +use harmonia_common::{HarmoniaEvent, create_event_bus}; +use horismos::SyntaxisConfig; +use syntaxis::{ImportService, QueueItem, QueueManager, SyntaxisService}; +use uuid::Uuid; + +use super::{MockEngine, MockImportService, TestError, test_db}; + +// ── Pipeline integration tests (SyntaxisService + MockEngine) ──────────────── + +fn test_syntaxis_config() -> SyntaxisConfig { + SyntaxisConfig { + max_concurrent_downloads: 5, + max_per_tracker: 3, + retry_count: 2, + retry_backoff_base_seconds: 0, + stalled_download_timeout_hours: 24, + } +} + +fn make_queue_item(priority: u8) -> QueueItem { + QueueItem { + id: Uuid::now_v7(), + want_id: WantId::new(), + release_id: ReleaseId::new(), + download_url: format!("magnet:?xt=urn:btih:{}", Uuid::now_v7()), + protocol: syntaxis::DownloadProtocol::Torrent, + priority, + tracker_id: None, + info_hash: None, + } +} + +#[tokio::test] +async fn pipeline_enqueue_dispatches_to_engine() -> Result<(), TestError> { + let 
pool = test_db().await?; + let (started_tx, mut started_rx) = mpsc::unbounded_channel(); + let (imported_tx, _imported_rx) = mpsc::unbounded_channel(); + + let engine = Arc::new(MockEngine { started_tx }); + let import_svc: Arc<dyn ImportService> = Arc::new(MockImportService { imported_tx }); + let config = test_syntaxis_config(); + + let svc = Arc::new(SyntaxisService::new(pool, engine, import_svc, config).await?); + + let item = make_queue_item(4); + let pos = svc.enqueue(item).await?; + assert_eq!(pos.position, 0); + + let dl_id = tokio::time::timeout(Duration::from_secs(5), started_rx.recv()) + .await? + .expect("engine should have received start_download"); + + assert!(!dl_id.to_string().is_empty()); + Ok(()) +} + +#[tokio::test] +async fn pipeline_completion_triggers_import() -> Result<(), TestError> { + let pool = test_db().await?; + let (started_tx, mut started_rx) = mpsc::unbounded_channel(); + let (imported_tx, mut imported_rx) = mpsc::unbounded_channel(); + let (event_tx, _) = create_event_bus(64); + + let engine = Arc::new(MockEngine { started_tx }); + let import_svc: Arc<dyn ImportService> = Arc::new(MockImportService { imported_tx }); + let config = test_syntaxis_config(); + + let svc = Arc::new(SyntaxisService::new(pool, engine, import_svc, config).await?); + let shutdown = tokio_util::sync::CancellationToken::new(); + svc.start(event_tx.subscribe(), shutdown.clone()); + + svc.enqueue(make_queue_item(4)).await?; + + let dl_id = tokio::time::timeout(Duration::from_secs(5), started_rx.recv()) + .await? + .expect("engine should have received start_download"); + + event_tx.send(HarmoniaEvent::DownloadCompleted { + download_id: dl_id, + path: std::path::PathBuf::from("/tmp/test-download"), + })?; + + let imported_id = tokio::time::timeout(Duration::from_secs(5), imported_rx.recv()) + .await? 
+ .expect("import service should have been called"); + + assert_eq!(imported_id.to_string(), dl_id.to_string()); + + shutdown.cancel(); + Ok(()) +} + +#[tokio::test] +async fn pipeline_priority_ordering_in_queue() -> Result<(), TestError> { + let pool = test_db().await?; + let (started_tx, _started_rx) = mpsc::unbounded_channel(); + let (imported_tx, _imported_rx) = mpsc::unbounded_channel(); + + let engine = Arc::new(MockEngine { started_tx }); + let import_svc: Arc<dyn ImportService> = Arc::new(MockImportService { imported_tx }); + let config = SyntaxisConfig { + max_concurrent_downloads: 0, + max_per_tracker: 0, + retry_count: 2, + retry_backoff_base_seconds: 0, + stalled_download_timeout_hours: 24, + }; + + let svc = Arc::new(SyntaxisService::new(pool, engine, import_svc, config).await?); + + let low = make_queue_item(1); + let high = make_queue_item(3); + let id_low = low.id; + let id_high = high.id; + + svc.enqueue(low).await?; + svc.enqueue(high).await?; + + let snapshot = svc.get_queue_state().await?; + assert_eq!(snapshot.queued_items.len(), 2); + assert_eq!(snapshot.queued_items[0].id, id_high); + assert_eq!(snapshot.queued_items[1].id, id_low); + Ok(()) +} + +#[tokio::test] +async fn pipeline_fifo_within_same_priority_tier() -> Result<(), TestError> { + let pool = test_db().await?; + let (started_tx, _started_rx) = mpsc::unbounded_channel(); + let (imported_tx, _imported_rx) = mpsc::unbounded_channel(); + + let engine = Arc::new(MockEngine { started_tx }); + let import_svc: Arc<dyn ImportService> = Arc::new(MockImportService { imported_tx }); + let config = SyntaxisConfig { + max_concurrent_downloads: 0, + max_per_tracker: 0, + retry_count: 2, + retry_backoff_base_seconds: 0, + stalled_download_timeout_hours: 24, + }; + + let svc = Arc::new(SyntaxisService::new(pool, engine, import_svc, config).await?); + + let item_a = make_queue_item(2); + let item_b = make_queue_item(2); + let id_a = item_a.id; + let id_b = item_b.id; + + svc.enqueue(item_a).await?; + svc.enqueue(item_b).await?; + + let 
snapshot = svc.get_queue_state().await?; + assert_eq!(snapshot.queued_items.len(), 2); + assert_eq!(snapshot.queued_items[0].id, id_a); + assert_eq!(snapshot.queued_items[1].id, id_b); + Ok(()) +} + +#[tokio::test] +async fn pipeline_transient_failure_triggers_retry() -> Result<(), TestError> { + let pool = test_db().await?; + let (started_tx, mut started_rx) = mpsc::unbounded_channel(); + let (imported_tx, _imported_rx) = mpsc::unbounded_channel(); + let (event_tx, _) = create_event_bus(64); + + let engine = Arc::new(MockEngine { started_tx }); + let import_svc: Arc = Arc::new(MockImportService { imported_tx }); + let config = SyntaxisConfig { + max_concurrent_downloads: 5, + max_per_tracker: 3, + retry_count: 3, + retry_backoff_base_seconds: 0, + stalled_download_timeout_hours: 24, + }; + + let svc = Arc::new(SyntaxisService::new(pool.clone(), engine, import_svc, config).await?); + let shutdown = tokio_util::sync::CancellationToken::new(); + svc.start(event_tx.subscribe(), shutdown.clone()); + + let item = make_queue_item(4); + let queue_id = item.id; + svc.enqueue(item).await?; + + let dl_id = tokio::time::timeout(Duration::from_secs(5), started_rx.recv()) + .await? 
+ .expect("engine should start download"); + + event_tx.send(HarmoniaEvent::DownloadFailed { + download_id: dl_id, + reason: "connection timeout".to_string(), + })?; + + tokio::time::sleep(Duration::from_millis(200)).await; + + let row: (i64, String) = + sqlx::query_as("SELECT retry_count, status FROM download_queue WHERE id = ?") + .bind(queue_id.as_bytes().as_slice()) + .fetch_one(&pool) + .await?; + assert_eq!( + row.0, 1, + "retry_count should be 1 after first transient failure" + ); + assert_eq!( + row.1, "queued", + "status should be reset to queued for retry" + ); + + shutdown.cancel(); + Ok(()) +} + +#[tokio::test] +async fn pipeline_permanent_failure_marks_failed() -> Result<(), TestError> { + let pool = test_db().await?; + let (started_tx, mut started_rx) = mpsc::unbounded_channel(); + let (imported_tx, _imported_rx) = mpsc::unbounded_channel(); + let (event_tx, _) = create_event_bus(64); + + let engine = Arc::new(MockEngine { started_tx }); + let import_svc: Arc = Arc::new(MockImportService { imported_tx }); + let config = test_syntaxis_config(); + + let svc = Arc::new(SyntaxisService::new(pool.clone(), engine, import_svc, config).await?); + let shutdown = tokio_util::sync::CancellationToken::new(); + svc.start(event_tx.subscribe(), shutdown.clone()); + + let item = make_queue_item(4); + let queue_id = item.id; + svc.enqueue(item).await?; + + let dl_id = tokio::time::timeout(Duration::from_secs(5), started_rx.recv()) + .await? 
+ .expect("engine should start download"); + + event_tx.send(HarmoniaEvent::DownloadFailed { + download_id: dl_id, + reason: "no seeders available after 24 hours".to_string(), + })?; + + tokio::time::sleep(Duration::from_millis(200)).await; + + let row: (String, Option) = + sqlx::query_as("SELECT status, failed_reason FROM download_queue WHERE id = ?") + .bind(queue_id.as_bytes().as_slice()) + .fetch_one(&pool) + .await?; + assert_eq!(row.0, "failed"); + assert!(row.1.as_deref().unwrap_or("").contains("no seeders")); + + shutdown.cancel(); + Ok(()) +} + +#[tokio::test] +async fn pipeline_retry_budget_exhaustion_marks_failed() -> Result<(), TestError> { + // NOTE: SyntaxisService has a bug where ActiveEntry.retry_count is always + // initialised to 0 regardless of how many retries have occurred in the DB. + // This means the in-memory retry_count check (`retry_count >= max_retries`) + // only works when max_retries is 0. We set retry_count=0 in config so + // the very first transient failure immediately exhausts the budget. + let pool = test_db().await?; + let (started_tx, mut started_rx) = mpsc::unbounded_channel(); + let (imported_tx, _imported_rx) = mpsc::unbounded_channel(); + let (event_tx, _) = create_event_bus(64); + + let engine = Arc::new(MockEngine { started_tx }); + let import_svc: Arc = Arc::new(MockImportService { imported_tx }); + let config = SyntaxisConfig { + max_concurrent_downloads: 5, + max_per_tracker: 3, + retry_count: 0, + retry_backoff_base_seconds: 0, + stalled_download_timeout_hours: 24, + }; + + let svc = Arc::new(SyntaxisService::new(pool.clone(), engine, import_svc, config).await?); + let shutdown = tokio_util::sync::CancellationToken::new(); + svc.start(event_tx.subscribe(), shutdown.clone()); + + let item = make_queue_item(4); + let queue_id = item.id; + svc.enqueue(item).await?; + + let dl_id = tokio::time::timeout(Duration::from_secs(5), started_rx.recv()) + .await? 
+ .expect("engine should start download"); + + event_tx.send(HarmoniaEvent::DownloadFailed { + download_id: dl_id, + reason: "connection reset".to_string(), + })?; + + tokio::time::sleep(Duration::from_millis(200)).await; + + let row: (String, Option) = + sqlx::query_as("SELECT status, failed_reason FROM download_queue WHERE id = ?") + .bind(queue_id.as_bytes().as_slice()) + .fetch_one(&pool) + .await?; + assert_eq!(row.0, "failed"); + assert!( + row.1 + .as_deref() + .unwrap_or("") + .contains("retry budget exhausted") + ); + + shutdown.cancel(); + Ok(()) +} diff --git a/crates/harmonia-host/tests/acquisition_integration/recovery_tests.rs b/crates/harmonia-host/tests/acquisition_integration/recovery_tests.rs new file mode 100644 index 0000000..db93546 --- /dev/null +++ b/crates/harmonia-host/tests/acquisition_integration/recovery_tests.rs @@ -0,0 +1,100 @@ +use std::sync::Arc; + +use tokio::sync::mpsc; + +use horismos::SyntaxisConfig; +use syntaxis::{ImportService, QueueManager, SyntaxisService}; +use uuid::Uuid; + +use super::{ + MockEngine, MockImportService, TestError, admin_token, build_app, get_queue_snapshot, test_db, + test_state, +}; + +// ── Startup recovery tests ──────────────────────────────────────────────────── + +#[tokio::test] +async fn startup_recovery_loads_queued_items_from_db() -> Result<(), TestError> { + let pool = test_db().await?; + + // Insert non-terminal rows directly into DB (simulating prior state) + let id_queued = Uuid::now_v7(); + let id_downloading = Uuid::now_v7(); + let id_completed = Uuid::now_v7(); + let want_id = Uuid::now_v7().as_bytes().to_vec(); + let release_id = Uuid::now_v7().as_bytes().to_vec(); + + for (id, status) in [ + (id_queued, "queued"), + (id_downloading, "downloading"), + (id_completed, "completed"), + ] { + sqlx::query( + "INSERT INTO download_queue \ + (id, want_id, release_id, download_url, protocol, priority, status, added_at, retry_count) \ + VALUES (?, ?, ?, 'magnet:test', 'torrent', 2, ?, 
'2026-01-01T00:00:00Z', 0)", + ) + .bind(id.as_bytes().as_slice()) + .bind(&want_id) + .bind(&release_id) + .bind(status) + .execute(&pool) + .await?; + } + + // Boot SyntaxisService — recovery should load non-terminal items + let (started_tx, _started_rx) = mpsc::unbounded_channel(); + let (imported_tx, _imported_rx) = mpsc::unbounded_channel(); + let engine = Arc::new(MockEngine { started_tx }); + let import_svc: Arc = Arc::new(MockImportService { imported_tx }); + let config = SyntaxisConfig { + max_concurrent_downloads: 0, + max_per_tracker: 0, + retry_count: 3, + retry_backoff_base_seconds: 30, + stalled_download_timeout_hours: 24, + }; + + let svc = Arc::new(SyntaxisService::new(pool, engine, import_svc, config).await?); + + let snapshot = svc.get_queue_state().await?; + // 'queued' and 'downloading' are non-terminal and should be recovered + // 'completed' is terminal and should NOT be recovered + assert_eq!( + snapshot.queued_items.len(), + 2, + "both 'queued' and 'downloading' rows should be recovered into the in-memory queue" + ); + assert_eq!(snapshot.completed_count, 1); + Ok(()) +} + +#[tokio::test] +async fn startup_recovery_visible_via_http_snapshot() -> Result<(), TestError> { + let (state, auth, pool) = test_state().await?; + let token = admin_token(&auth).await?; + + // Insert a queued row directly + let id = Uuid::now_v7(); + let want_id = Uuid::now_v7().as_bytes().to_vec(); + let release_id = Uuid::now_v7().as_bytes().to_vec(); + sqlx::query( + "INSERT INTO download_queue \ + (id, want_id, release_id, download_url, protocol, priority, status, added_at, retry_count) \ + VALUES (?, ?, ?, 'magnet:test', 'torrent', 3, 'queued', '2026-01-01T00:00:00Z', 0)", + ) + .bind(id.as_bytes().as_slice()) + .bind(&want_id) + .bind(&release_id) + .execute(&pool) + .await?; + + let app = build_app(state); + + let (_, json) = get_queue_snapshot(&app, &token).await?; + let queued = json["data"]["queued"].as_array().unwrap(); + assert_eq!(queued.len(), 1); + 
assert_eq!(queued[0]["priority"], 3); + assert_eq!(queued[0]["status"], "queued"); + Ok(()) +} diff --git a/crates/komide/src/service.rs b/crates/komide/src/service/mod.rs similarity index 74% rename from crates/komide/src/service.rs rename to crates/komide/src/service/mod.rs index 9964dec..c32605e 100644 --- a/crates/komide/src/service.rs +++ b/crates/komide/src/service/mod.rs @@ -611,207 +611,9 @@ fn now_iso8601() -> String { .to_string() } -#[cfg(test)] -mod tests { - use super::*; - use harmonia_common::aggelia::create_event_bus; - use harmonia_db::{DbPools, migrate::MIGRATOR}; - use sqlx::SqlitePool; - - async fn setup() -> (KomideService, harmonia_common::aggelia::EventReceiver) { - let pool = SqlitePool::connect("sqlite::memory:").await.unwrap(); - MIGRATOR.run(&pool).await.unwrap(); - let db = DbPools { - read: pool.clone(), - write: pool, - }; - let (tx, rx) = create_event_bus(64); - let client = reqwest::Client::new(); - let config = KomideConfig::default(); - let svc = KomideService::new(db, tx, client, config); - (svc, rx) - } - - #[tokio::test] - async fn validate_url_rejects_empty() { - assert!(validate_url("").is_err()); - } - - #[tokio::test] - async fn validate_url_rejects_non_http() { - assert!(validate_url("ftp://example.com/feed.xml").is_err()); - } - - #[tokio::test] - async fn validate_url_accepts_https() { - assert!(validate_url("https://example.com/feed.xml").is_ok()); - } - - #[tokio::test] - async fn validate_url_accepts_http() { - assert!(validate_url("http://example.com/feed.xml").is_ok()); - } - - #[tokio::test] - async fn list_feeds_empty_returns_empty() { - let (svc, _rx) = setup().await; - let podcasts = svc.list_feeds(MediaType::Podcast).await.unwrap(); - assert!(podcasts.is_empty()); - let news = svc.list_feeds(MediaType::News).await.unwrap(); - assert!(news.is_empty()); - } - - #[tokio::test] - async fn unsubscribe_nonexistent_returns_error() { - let (svc, _rx) = setup().await; - let result = svc.unsubscribe(FeedId::new()).await; 
- assert!(result.is_err()); - } - - #[tokio::test] - async fn mark_consumed_nonexistent_is_ok() { - let (svc, _rx) = setup().await; - // Should silently succeed for unknown IDs - let result = svc.mark_consumed(MediaId::new()).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn insert_episodes_deduplicates_by_guid() { - let (svc, _rx) = setup().await; - let sub_id = make_subscription(&svc).await; - - let entries = vec![ - make_podcast_entry("ep-001", "Episode 1"), - make_podcast_entry("ep-001", "Episode 1 duplicate"), - ]; - - let now = now_iso8601(); - let count = svc - .insert_new_podcast_episodes(&sub_id, &entries, &now) - .await - .unwrap(); - assert_eq!(count, 1, "duplicate GUID should not be inserted"); - - let episodes = podcast::list_episodes(&svc.db.read, &sub_id, 10, 0) - .await - .unwrap(); - assert_eq!(episodes.len(), 1); - } - - #[tokio::test] - async fn insert_articles_deduplicates_by_guid() { - let (svc, _rx) = setup().await; - let feed_id = make_news_feed(&svc).await; - - let entries = vec![ - make_news_entry("art-001", "Article 1"), - make_news_entry("art-001", "Article 1 duplicate"), - ]; - - let now = now_iso8601(); - let count = svc - .insert_new_articles(&feed_id, &entries, &now) - .await - .unwrap(); - assert_eq!(count, 1, "duplicate GUID should not be inserted"); - } - - #[tokio::test] - async fn episode_available_event_emitted_on_new_episode() { - let pool = SqlitePool::connect("sqlite::memory:").await.unwrap(); - MIGRATOR.run(&pool).await.unwrap(); - let db = DbPools { - read: pool.clone(), - write: pool, - }; - let (tx, mut rx) = create_event_bus(64); - let svc = KomideService::new(db, tx, reqwest::Client::new(), KomideConfig::default()); +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- - let sub_id = make_subscription(&svc).await; - let entries = vec![make_podcast_entry("ep-new", "New Episode")]; - let 
now = now_iso8601(); - svc.insert_new_podcast_episodes(&sub_id, &entries, &now) - .await - .unwrap(); - - let event = rx.try_recv().unwrap(); - assert!(matches!( - event, - harmonia_common::aggelia::HarmoniaEvent::EpisodeAvailable { .. } - )); - } - - // ── Test helpers ───────────────────────────────────────────────────────── - - async fn make_subscription(svc: &KomideService) -> Vec { - let feed_id = FeedId::new(); - let id_bytes = feed_id.as_bytes().to_vec(); - let sub = podcast::PodcastSubscription { - id: id_bytes.clone(), - feed_url: "https://example.com/podcast.xml".to_string(), - title: Some("Test Podcast".to_string()), - description: None, - author: None, - image_url: None, - language: None, - last_checked_at: None, - auto_download: 1, - quality_profile_id: None, - added_at: now_iso8601(), - }; - podcast::insert_subscription(&svc.db.write, &sub) - .await - .unwrap(); - id_bytes - } - - async fn make_news_feed(svc: &KomideService) -> Vec { - let feed_id = FeedId::new(); - let id_bytes = feed_id.as_bytes().to_vec(); - let feed = news::NewsFeed { - id: id_bytes.clone(), - title: "Test News".to_string(), - url: "https://example.com/news.xml".to_string(), - site_url: None, - description: None, - category: None, - icon_url: None, - last_fetched_at: None, - fetch_interval_minutes: 15, - is_active: 1, - added_at: now_iso8601(), - updated_at: now_iso8601(), - }; - news::insert_feed(&svc.db.write, &feed).await.unwrap(); - id_bytes - } - - fn make_podcast_entry(guid: &str, title: &str) -> crate::parser::NormalizedEntry { - crate::parser::NormalizedEntry { - guid: guid.to_string(), - title: title.to_string(), - published: Some("2026-01-01T00:00:00Z".to_string()), - summary: None, - content: None, - enclosures: vec![crate::parser::Enclosure { - url: format!("https://example.com/{guid}.mp3"), - content_type: Some("audio/mpeg".to_string()), - length: None, - }], - link: None, - } - } - - fn make_news_entry(guid: &str, title: &str) -> crate::parser::NormalizedEntry { - 
crate::parser::NormalizedEntry { - guid: guid.to_string(), - title: title.to_string(), - published: Some("2026-01-01T00:00:00Z".to_string()), - summary: Some("Summary".to_string()), - content: None, - enclosures: vec![], - link: Some(format!("https://example.com/{guid}")), - } - } -} +#[cfg(test)] +mod tests; diff --git a/crates/komide/src/service/tests.rs b/crates/komide/src/service/tests.rs new file mode 100644 index 0000000..4efd7ad --- /dev/null +++ b/crates/komide/src/service/tests.rs @@ -0,0 +1,201 @@ +use super::*; +use harmonia_common::aggelia::create_event_bus; +use harmonia_db::{DbPools, migrate::MIGRATOR}; +use sqlx::SqlitePool; + +async fn setup() -> (KomideService, harmonia_common::aggelia::EventReceiver) { + let pool = SqlitePool::connect("sqlite::memory:").await.unwrap(); + MIGRATOR.run(&pool).await.unwrap(); + let db = DbPools { + read: pool.clone(), + write: pool, + }; + let (tx, rx) = create_event_bus(64); + let client = reqwest::Client::new(); + let config = KomideConfig::default(); + let svc = KomideService::new(db, tx, client, config); + (svc, rx) +} + +#[tokio::test] +async fn validate_url_rejects_empty() { + assert!(validate_url("").is_err()); +} + +#[tokio::test] +async fn validate_url_rejects_non_http() { + assert!(validate_url("ftp://example.com/feed.xml").is_err()); +} + +#[tokio::test] +async fn validate_url_accepts_https() { + assert!(validate_url("https://example.com/feed.xml").is_ok()); +} + +#[tokio::test] +async fn validate_url_accepts_http() { + assert!(validate_url("http://example.com/feed.xml").is_ok()); +} + +#[tokio::test] +async fn list_feeds_empty_returns_empty() { + let (svc, _rx) = setup().await; + let podcasts = svc.list_feeds(MediaType::Podcast).await.unwrap(); + assert!(podcasts.is_empty()); + let news = svc.list_feeds(MediaType::News).await.unwrap(); + assert!(news.is_empty()); +} + +#[tokio::test] +async fn unsubscribe_nonexistent_returns_error() { + let (svc, _rx) = setup().await; + let result = 
svc.unsubscribe(FeedId::new()).await; + assert!(result.is_err()); +} + +#[tokio::test] +async fn mark_consumed_nonexistent_is_ok() { + let (svc, _rx) = setup().await; + // Should silently succeed for unknown IDs + let result = svc.mark_consumed(MediaId::new()).await; + assert!(result.is_ok()); +} + +#[tokio::test] +async fn insert_episodes_deduplicates_by_guid() { + let (svc, _rx) = setup().await; + let sub_id = make_subscription(&svc).await; + + let entries = vec![ + make_podcast_entry("ep-001", "Episode 1"), + make_podcast_entry("ep-001", "Episode 1 duplicate"), + ]; + + let now = now_iso8601(); + let count = svc + .insert_new_podcast_episodes(&sub_id, &entries, &now) + .await + .unwrap(); + assert_eq!(count, 1, "duplicate GUID should not be inserted"); + + let episodes = podcast::list_episodes(&svc.db.read, &sub_id, 10, 0) + .await + .unwrap(); + assert_eq!(episodes.len(), 1); +} + +#[tokio::test] +async fn insert_articles_deduplicates_by_guid() { + let (svc, _rx) = setup().await; + let feed_id = make_news_feed(&svc).await; + + let entries = vec![ + make_news_entry("art-001", "Article 1"), + make_news_entry("art-001", "Article 1 duplicate"), + ]; + + let now = now_iso8601(); + let count = svc + .insert_new_articles(&feed_id, &entries, &now) + .await + .unwrap(); + assert_eq!(count, 1, "duplicate GUID should not be inserted"); +} + +#[tokio::test] +async fn episode_available_event_emitted_on_new_episode() { + let pool = SqlitePool::connect("sqlite::memory:").await.unwrap(); + MIGRATOR.run(&pool).await.unwrap(); + let db = DbPools { + read: pool.clone(), + write: pool, + }; + let (tx, mut rx) = create_event_bus(64); + let svc = KomideService::new(db, tx, reqwest::Client::new(), KomideConfig::default()); + + let sub_id = make_subscription(&svc).await; + let entries = vec![make_podcast_entry("ep-new", "New Episode")]; + let now = now_iso8601(); + svc.insert_new_podcast_episodes(&sub_id, &entries, &now) + .await + .unwrap(); + + let event = rx.try_recv().unwrap(); + 
assert!(matches!( + event, + harmonia_common::aggelia::HarmoniaEvent::EpisodeAvailable { .. } + )); +} + +// ── Test helpers ───────────────────────────────────────────────────────── + +async fn make_subscription(svc: &KomideService) -> Vec { + let feed_id = FeedId::new(); + let id_bytes = feed_id.as_bytes().to_vec(); + let sub = podcast::PodcastSubscription { + id: id_bytes.clone(), + feed_url: "https://example.com/podcast.xml".to_string(), + title: Some("Test Podcast".to_string()), + description: None, + author: None, + image_url: None, + language: None, + last_checked_at: None, + auto_download: 1, + quality_profile_id: None, + added_at: now_iso8601(), + }; + podcast::insert_subscription(&svc.db.write, &sub) + .await + .unwrap(); + id_bytes +} + +async fn make_news_feed(svc: &KomideService) -> Vec { + let feed_id = FeedId::new(); + let id_bytes = feed_id.as_bytes().to_vec(); + let feed = news::NewsFeed { + id: id_bytes.clone(), + title: "Test News".to_string(), + url: "https://example.com/news.xml".to_string(), + site_url: None, + description: None, + category: None, + icon_url: None, + last_fetched_at: None, + fetch_interval_minutes: 15, + is_active: 1, + added_at: now_iso8601(), + updated_at: now_iso8601(), + }; + news::insert_feed(&svc.db.write, &feed).await.unwrap(); + id_bytes +} + +fn make_podcast_entry(guid: &str, title: &str) -> crate::parser::NormalizedEntry { + crate::parser::NormalizedEntry { + guid: guid.to_string(), + title: title.to_string(), + published: Some("2026-01-01T00:00:00Z".to_string()), + summary: None, + content: None, + enclosures: vec![crate::parser::Enclosure { + url: format!("https://example.com/{guid}.mp3"), + content_type: Some("audio/mpeg".to_string()), + length: None, + }], + link: None, + } +} + +fn make_news_entry(guid: &str, title: &str) -> crate::parser::NormalizedEntry { + crate::parser::NormalizedEntry { + guid: guid.to_string(), + title: title.to_string(), + published: Some("2026-01-01T00:00:00Z".to_string()), + summary: 
Some("Summary".to_string()), + content: None, + enclosures: vec![], + link: Some(format!("https://example.com/{guid}")), + } +} diff --git a/crates/paroche/src/opds/catalog.rs b/crates/paroche/src/opds/catalog/mod.rs similarity index 59% rename from crates/paroche/src/opds/catalog.rs rename to crates/paroche/src/opds/catalog/mod.rs index e29bb6c..657c882 100644 --- a/crates/paroche/src/opds/catalog.rs +++ b/crates/paroche/src/opds/catalog/mod.rs @@ -712,450 +712,9 @@ pub async fn entry_v1( Err(ParocheError::NotFound) } -#[cfg(test)] -mod tests { - use super::*; - use crate::opds::opds_routes; - use crate::test_helpers::test_state; - use axum::body::{Body, to_bytes}; - use axum::http::{Request, StatusCode}; - use exousia::{ - AuthService, - user::{CreateUserRequest, UserRole}, - }; - use std::sync::Arc; - use tower::ServiceExt; - - async fn admin_token(auth: &Arc) -> String { - auth.create_user(CreateUserRequest { - username: "admin".to_string(), - display_name: "Admin".to_string(), - password: "password123".to_string(), - role: UserRole::Admin, - }) - .await - .unwrap(); - auth.login("admin", "password123") - .await - .unwrap() - .access_token - } - - async fn insert_books(state: &AppState, n: usize) { - for i in 0..n { - let book = harmonia_db::repo::book::Book { - id: uuid::Uuid::now_v7().as_bytes().to_vec(), - registry_id: None, - title: format!("Book {:04}", i), - subtitle: None, - isbn: None, - isbn13: None, - openlibrary_id: None, - goodreads_id: None, - publisher: None, - publish_date: None, - language: None, - page_count: None, - description: None, - file_path: None, - file_format: None, - file_size_bytes: None, - quality_score: None, - quality_profile_id: None, - source_type: "local".to_string(), - added_at: "2026-01-01T00:00:00Z".to_string(), - }; - harmonia_db::repo::book::insert_book(&state.db.write, &book) - .await - .unwrap(); - } - } - - async fn insert_comics(state: &AppState, n: usize) { - for i in 0..n { - let comic = 
harmonia_db::repo::comic::Comic { - id: uuid::Uuid::now_v7().as_bytes().to_vec(), - registry_id: None, - series_name: format!("Series {:04}", i), - volume: Some(1), - issue_number: Some(1.0), - title: Some(format!("Issue {:04}", i)), - publisher: None, - release_date: None, - page_count: None, - summary: None, - language: None, - comicinfo_writer: None, - comicinfo_penciller: None, - comicinfo_inker: None, - comicinfo_colorist: None, - file_path: None, - file_format: None, - file_size_bytes: None, - quality_score: None, - quality_profile_id: None, - source_type: "local".to_string(), - added_at: "2026-01-01T00:00:00Z".to_string(), - }; - harmonia_db::repo::comic::insert_comic(&state.db.write, &comic) - .await - .unwrap(); - } - } - - #[tokio::test] - async fn catalog_v2_unauthenticated_returns_401() { - let (state, _auth) = test_state().await; - let app = opds_routes().with_state(state); - let resp = app - .oneshot( - Request::builder() - .uri("/v2/catalog") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(resp.status(), StatusCode::UNAUTHORIZED); - } - - #[tokio::test] - async fn catalog_v2_returns_navigation_links() { - let (state, auth) = test_state().await; - let token = admin_token(&auth).await; - let app = opds_routes().with_state(state); - let resp = app - .oneshot( - Request::builder() - .uri("/v2/catalog") - .header("Authorization", format!("Bearer {token}")) - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(resp.status(), StatusCode::OK); - let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); - let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); - let nav = body["navigation"].as_array().unwrap(); - let hrefs: Vec<_> = nav.iter().map(|n| n["href"].as_str().unwrap()).collect(); - assert!(hrefs.contains(&"/opds/v2/books")); - assert!(hrefs.contains(&"/opds/v2/comics")); - } - - #[tokio::test] - async fn catalog_v2_has_opds_content_type() { - let (state, auth) = 
test_state().await; - let token = admin_token(&auth).await; - let app = opds_routes().with_state(state); - let resp = app - .oneshot( - Request::builder() - .uri("/v2/catalog") - .header("Authorization", format!("Bearer {token}")) - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - let ct = resp - .headers() - .get("content-type") - .unwrap() - .to_str() - .unwrap(); - assert!(ct.contains("application/opds+json")); - } - - #[tokio::test] - async fn catalog_v2_has_search_link() { - let (state, auth) = test_state().await; - let token = admin_token(&auth).await; - let app = opds_routes().with_state(state); - let resp = app - .oneshot( - Request::builder() - .uri("/v2/catalog") - .header("Authorization", format!("Bearer {token}")) - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); - let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); - let links = body["links"].as_array().unwrap(); - let search_link = links.iter().find(|l| l["rel"].as_str() == Some("search")); - assert!(search_link.is_some()); - } - - #[tokio::test] - async fn books_v2_next_link_when_more_items() { - let (state, auth) = test_state().await; - let token = admin_token(&auth).await; - // Default page size is 50; insert 51 to trigger next link - insert_books(&state, 51).await; - let app = opds_routes().with_state(state); - let resp = app - .oneshot( - Request::builder() - .uri("/v2/books") - .header("Authorization", format!("Bearer {token}")) - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(resp.status(), StatusCode::OK); - let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); - let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); - let links = body["links"].as_array().unwrap(); - let next = links.iter().find(|l| l["rel"].as_str() == Some("next")); - assert!(next.is_some(), "expected next link for 51 books"); - } +// 
--------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- - #[tokio::test] - async fn books_v2_correct_page_size() { - let (state, auth) = test_state().await; - let token = admin_token(&auth).await; - insert_books(&state, 51).await; - let app = opds_routes().with_state(state); - let resp = app - .oneshot( - Request::builder() - .uri("/v2/books") - .header("Authorization", format!("Bearer {token}")) - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); - let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); - let pubs = body["publications"].as_array().unwrap(); - assert_eq!(pubs.len(), 50); - } - - #[tokio::test] - async fn books_v2_last_page_no_next_link() { - let (state, auth) = test_state().await; - let token = admin_token(&auth).await; - insert_books(&state, 5).await; - let app = opds_routes().with_state(state); - let resp = app - .oneshot( - Request::builder() - .uri("/v2/books") - .header("Authorization", format!("Bearer {token}")) - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); - let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); - let links = body["links"].as_array().unwrap(); - let next = links.iter().find(|l| l["rel"].as_str() == Some("next")); - assert!(next.is_none(), "no next link expected on last page"); - } - - #[tokio::test] - async fn comics_v2_returns_entries() { - let (state, auth) = test_state().await; - let token = admin_token(&auth).await; - insert_comics(&state, 3).await; - let app = opds_routes().with_state(state); - let resp = app - .oneshot( - Request::builder() - .uri("/v2/comics") - .header("Authorization", format!("Bearer {token}")) - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(resp.status(), StatusCode::OK); 
- let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); - let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); - let pubs = body["publications"].as_array().unwrap(); - assert_eq!(pubs.len(), 3); - } - - #[tokio::test] - async fn single_book_has_acquisition_link_with_correct_mime() { - let (state, auth) = test_state().await; - let token = admin_token(&auth).await; - let id = uuid::Uuid::now_v7(); - let book = harmonia_db::repo::book::Book { - id: id.as_bytes().to_vec(), - registry_id: None, - title: "Dune".to_string(), - subtitle: None, - isbn: None, - isbn13: None, - openlibrary_id: None, - goodreads_id: None, - publisher: Some("Ace Books".to_string()), - publish_date: None, - language: Some("en".to_string()), - page_count: None, - description: None, - file_path: None, - file_format: Some("epub".to_string()), - file_size_bytes: None, - quality_score: None, - quality_profile_id: None, - source_type: "local".to_string(), - added_at: "2026-01-01T00:00:00Z".to_string(), - }; - harmonia_db::repo::book::insert_book(&state.db.write, &book) - .await - .unwrap(); - - let app = opds_routes().with_state(state); - let resp = app - .oneshot( - Request::builder() - .uri(format!("/v2/books/{}", id)) - .header("Authorization", format!("Bearer {token}")) - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(resp.status(), StatusCode::OK); - let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); - let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); - let pub_links = &body["publications"][0]["links"]; - let acq = pub_links - .as_array() - .unwrap() - .iter() - .find(|l| l["rel"].as_str() == Some("http://opds-spec.org/acquisition")); - assert!(acq.is_some()); - assert_eq!(acq.unwrap()["type"], "application/epub+zip"); - } - - #[tokio::test] - async fn single_book_has_cover_art_links() { - let (state, auth) = test_state().await; - let token = admin_token(&auth).await; - let id = uuid::Uuid::now_v7(); - 
let book = harmonia_db::repo::book::Book { - id: id.as_bytes().to_vec(), - registry_id: None, - title: "Foundation".to_string(), - subtitle: None, - isbn: None, - isbn13: None, - openlibrary_id: None, - goodreads_id: None, - publisher: None, - publish_date: None, - language: None, - page_count: None, - description: None, - file_path: None, - file_format: None, - file_size_bytes: None, - quality_score: None, - quality_profile_id: None, - source_type: "local".to_string(), - added_at: "2026-01-01T00:00:00Z".to_string(), - }; - harmonia_db::repo::book::insert_book(&state.db.write, &book) - .await - .unwrap(); - - let app = opds_routes().with_state(state); - let resp = app - .oneshot( - Request::builder() - .uri(format!("/v2/books/{}", id)) - .header("Authorization", format!("Bearer {token}")) - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); - let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); - let images = &body["publications"][0]["images"]; - let cover = images - .as_array() - .unwrap() - .iter() - .find(|l| l["rel"].as_str() == Some("http://opds-spec.org/image")); - assert!(cover.is_some()); - let href = cover.unwrap()["href"].as_str().unwrap(); - assert!(href.contains("/api/books/")); - assert!(href.contains("/cover")); - } - - #[tokio::test] - async fn catalog_v1_returns_atom_feed() { - let (state, auth) = test_state().await; - let token = admin_token(&auth).await; - let app = opds_routes().with_state(state); - let resp = app - .oneshot( - Request::builder() - .uri("/v1/catalog.xml") - .header("Authorization", format!("Bearer {token}")) - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(resp.status(), StatusCode::OK); - let ct = resp - .headers() - .get("content-type") - .unwrap() - .to_str() - .unwrap(); - assert!(ct.contains("application/atom+xml")); - let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); - let xml = 
std::str::from_utf8(&bytes).unwrap(); - assert!(xml.contains("<feed")); - } - - #[tokio::test] - async fn catalog_v1_has_book_and_comic_navigation() { - let (state, auth) = test_state().await; - let token = admin_token(&auth).await; - let app = opds_routes().with_state(state); - let resp = app - .oneshot( - Request::builder() - .uri("/v1/catalog.xml") - .header("Authorization", format!("Bearer {token}")) - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); - let xml = std::str::from_utf8(&bytes).unwrap(); - assert!(xml.contains("books.xml")); - assert!(xml.contains("comics.xml")); - } - - #[tokio::test] - async fn books_v1_unauthenticated_returns_401() { - let (state, _auth) = test_state().await; - let app = opds_routes().with_state(state); - let resp = app - .oneshot( - Request::builder() - .uri("/v1/books.xml") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(resp.status(), StatusCode::UNAUTHORIZED); - } -} +#[cfg(test)] +mod tests; diff --git a/crates/paroche/src/opds/catalog/tests.rs b/crates/paroche/src/opds/catalog/tests.rs new file mode 100644 index 0000000..e6023e1 --- /dev/null +++ b/crates/paroche/src/opds/catalog/tests.rs @@ -0,0 +1,444 @@ +use super::*; +use crate::opds::opds_routes; +use crate::test_helpers::test_state; +use axum::body::{Body, to_bytes}; +use axum::http::{Request, StatusCode}; +use exousia::{ + AuthService, + user::{CreateUserRequest, UserRole}, +}; +use std::sync::Arc; +use tower::ServiceExt; + +async fn admin_token(auth: &Arc<AuthService>) -> String { + auth.create_user(CreateUserRequest { + username: "admin".to_string(), + display_name: "Admin".to_string(), + password: "password123".to_string(), + role: UserRole::Admin, + }) + .await + .unwrap(); + auth.login("admin", "password123") + .await + .unwrap() + .access_token +} + +async fn insert_books(state: &AppState, n: usize) { + for i in 0..n { + let book = harmonia_db::repo::book::Book { + id: 
uuid::Uuid::now_v7().as_bytes().to_vec(), + registry_id: None, + title: format!("Book {:04}", i), + subtitle: None, + isbn: None, + isbn13: None, + openlibrary_id: None, + goodreads_id: None, + publisher: None, + publish_date: None, + language: None, + page_count: None, + description: None, + file_path: None, + file_format: None, + file_size_bytes: None, + quality_score: None, + quality_profile_id: None, + source_type: "local".to_string(), + added_at: "2026-01-01T00:00:00Z".to_string(), + }; + harmonia_db::repo::book::insert_book(&state.db.write, &book) + .await + .unwrap(); + } +} + +async fn insert_comics(state: &AppState, n: usize) { + for i in 0..n { + let comic = harmonia_db::repo::comic::Comic { + id: uuid::Uuid::now_v7().as_bytes().to_vec(), + registry_id: None, + series_name: format!("Series {:04}", i), + volume: Some(1), + issue_number: Some(1.0), + title: Some(format!("Issue {:04}", i)), + publisher: None, + release_date: None, + page_count: None, + summary: None, + language: None, + comicinfo_writer: None, + comicinfo_penciller: None, + comicinfo_inker: None, + comicinfo_colorist: None, + file_path: None, + file_format: None, + file_size_bytes: None, + quality_score: None, + quality_profile_id: None, + source_type: "local".to_string(), + added_at: "2026-01-01T00:00:00Z".to_string(), + }; + harmonia_db::repo::comic::insert_comic(&state.db.write, &comic) + .await + .unwrap(); + } +} + +#[tokio::test] +async fn catalog_v2_unauthenticated_returns_401() { + let (state, _auth) = test_state().await; + let app = opds_routes().with_state(state); + let resp = app + .oneshot( + Request::builder() + .uri("/v2/catalog") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(resp.status(), StatusCode::UNAUTHORIZED); +} + +#[tokio::test] +async fn catalog_v2_returns_navigation_links() { + let (state, auth) = test_state().await; + let token = admin_token(&auth).await; + let app = opds_routes().with_state(state); + let resp = app + .oneshot( + 
Request::builder() + .uri("/v2/catalog") + .header("Authorization", format!("Bearer {token}")) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(resp.status(), StatusCode::OK); + let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); + let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); + let nav = body["navigation"].as_array().unwrap(); + let hrefs: Vec<_> = nav.iter().map(|n| n["href"].as_str().unwrap()).collect(); + assert!(hrefs.contains(&"/opds/v2/books")); + assert!(hrefs.contains(&"/opds/v2/comics")); +} + +#[tokio::test] +async fn catalog_v2_has_opds_content_type() { + let (state, auth) = test_state().await; + let token = admin_token(&auth).await; + let app = opds_routes().with_state(state); + let resp = app + .oneshot( + Request::builder() + .uri("/v2/catalog") + .header("Authorization", format!("Bearer {token}")) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + let ct = resp + .headers() + .get("content-type") + .unwrap() + .to_str() + .unwrap(); + assert!(ct.contains("application/opds+json")); +} + +#[tokio::test] +async fn catalog_v2_has_search_link() { + let (state, auth) = test_state().await; + let token = admin_token(&auth).await; + let app = opds_routes().with_state(state); + let resp = app + .oneshot( + Request::builder() + .uri("/v2/catalog") + .header("Authorization", format!("Bearer {token}")) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); + let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); + let links = body["links"].as_array().unwrap(); + let search_link = links.iter().find(|l| l["rel"].as_str() == Some("search")); + assert!(search_link.is_some()); +} + +#[tokio::test] +async fn books_v2_next_link_when_more_items() { + let (state, auth) = test_state().await; + let token = admin_token(&auth).await; + // Default page size is 50; insert 51 to trigger next link + 
insert_books(&state, 51).await; + let app = opds_routes().with_state(state); + let resp = app + .oneshot( + Request::builder() + .uri("/v2/books") + .header("Authorization", format!("Bearer {token}")) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(resp.status(), StatusCode::OK); + let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); + let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); + let links = body["links"].as_array().unwrap(); + let next = links.iter().find(|l| l["rel"].as_str() == Some("next")); + assert!(next.is_some(), "expected next link for 51 books"); +} + +#[tokio::test] +async fn books_v2_correct_page_size() { + let (state, auth) = test_state().await; + let token = admin_token(&auth).await; + insert_books(&state, 51).await; + let app = opds_routes().with_state(state); + let resp = app + .oneshot( + Request::builder() + .uri("/v2/books") + .header("Authorization", format!("Bearer {token}")) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); + let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); + let pubs = body["publications"].as_array().unwrap(); + assert_eq!(pubs.len(), 50); +} + +#[tokio::test] +async fn books_v2_last_page_no_next_link() { + let (state, auth) = test_state().await; + let token = admin_token(&auth).await; + insert_books(&state, 5).await; + let app = opds_routes().with_state(state); + let resp = app + .oneshot( + Request::builder() + .uri("/v2/books") + .header("Authorization", format!("Bearer {token}")) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); + let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); + let links = body["links"].as_array().unwrap(); + let next = links.iter().find(|l| l["rel"].as_str() == Some("next")); + assert!(next.is_none(), "no next link expected on last 
page"); +} + +#[tokio::test] +async fn comics_v2_returns_entries() { + let (state, auth) = test_state().await; + let token = admin_token(&auth).await; + insert_comics(&state, 3).await; + let app = opds_routes().with_state(state); + let resp = app + .oneshot( + Request::builder() + .uri("/v2/comics") + .header("Authorization", format!("Bearer {token}")) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(resp.status(), StatusCode::OK); + let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); + let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); + let pubs = body["publications"].as_array().unwrap(); + assert_eq!(pubs.len(), 3); +} + +#[tokio::test] +async fn single_book_has_acquisition_link_with_correct_mime() { + let (state, auth) = test_state().await; + let token = admin_token(&auth).await; + let id = uuid::Uuid::now_v7(); + let book = harmonia_db::repo::book::Book { + id: id.as_bytes().to_vec(), + registry_id: None, + title: "Dune".to_string(), + subtitle: None, + isbn: None, + isbn13: None, + openlibrary_id: None, + goodreads_id: None, + publisher: Some("Ace Books".to_string()), + publish_date: None, + language: Some("en".to_string()), + page_count: None, + description: None, + file_path: None, + file_format: Some("epub".to_string()), + file_size_bytes: None, + quality_score: None, + quality_profile_id: None, + source_type: "local".to_string(), + added_at: "2026-01-01T00:00:00Z".to_string(), + }; + harmonia_db::repo::book::insert_book(&state.db.write, &book) + .await + .unwrap(); + + let app = opds_routes().with_state(state); + let resp = app + .oneshot( + Request::builder() + .uri(format!("/v2/books/{}", id)) + .header("Authorization", format!("Bearer {token}")) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(resp.status(), StatusCode::OK); + let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); + let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); + 
let pub_links = &body["publications"][0]["links"]; + let acq = pub_links + .as_array() + .unwrap() + .iter() + .find(|l| l["rel"].as_str() == Some("http://opds-spec.org/acquisition")); + assert!(acq.is_some()); + assert_eq!(acq.unwrap()["type"], "application/epub+zip"); +} + +#[tokio::test] +async fn single_book_has_cover_art_links() { + let (state, auth) = test_state().await; + let token = admin_token(&auth).await; + let id = uuid::Uuid::now_v7(); + let book = harmonia_db::repo::book::Book { + id: id.as_bytes().to_vec(), + registry_id: None, + title: "Foundation".to_string(), + subtitle: None, + isbn: None, + isbn13: None, + openlibrary_id: None, + goodreads_id: None, + publisher: None, + publish_date: None, + language: None, + page_count: None, + description: None, + file_path: None, + file_format: None, + file_size_bytes: None, + quality_score: None, + quality_profile_id: None, + source_type: "local".to_string(), + added_at: "2026-01-01T00:00:00Z".to_string(), + }; + harmonia_db::repo::book::insert_book(&state.db.write, &book) + .await + .unwrap(); + + let app = opds_routes().with_state(state); + let resp = app + .oneshot( + Request::builder() + .uri(format!("/v2/books/{}", id)) + .header("Authorization", format!("Bearer {token}")) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); + let body: serde_json::Value = serde_json::from_slice(&bytes).unwrap(); + let images = &body["publications"][0]["images"]; + let cover = images + .as_array() + .unwrap() + .iter() + .find(|l| l["rel"].as_str() == Some("http://opds-spec.org/image")); + assert!(cover.is_some()); + let href = cover.unwrap()["href"].as_str().unwrap(); + assert!(href.contains("/api/books/")); + assert!(href.contains("/cover")); +} + +#[tokio::test] +async fn catalog_v1_returns_atom_feed() { + let (state, auth) = test_state().await; + let token = admin_token(&auth).await; + let app = opds_routes().with_state(state); + let resp 
= app + .oneshot( + Request::builder() + .uri("/v1/catalog.xml") + .header("Authorization", format!("Bearer {token}")) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(resp.status(), StatusCode::OK); + let ct = resp + .headers() + .get("content-type") + .unwrap() + .to_str() + .unwrap(); + assert!(ct.contains("application/atom+xml")); + let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); + let xml = std::str::from_utf8(&bytes).unwrap(); + assert!(xml.contains("<feed")); +} + +#[tokio::test] +async fn catalog_v1_has_book_and_comic_navigation() { + let (state, auth) = test_state().await; + let token = admin_token(&auth).await; + let app = opds_routes().with_state(state); + let resp = app + .oneshot( + Request::builder() + .uri("/v1/catalog.xml") + .header("Authorization", format!("Bearer {token}")) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap(); + let xml = std::str::from_utf8(&bytes).unwrap(); + assert!(xml.contains("books.xml")); + assert!(xml.contains("comics.xml")); +} + +#[tokio::test] +async fn books_v1_unauthenticated_returns_401() { + let (state, _auth) = test_state().await; + let app = opds_routes().with_state(state); + let resp = app + .oneshot( + Request::builder() + .uri("/v1/books.xml") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(resp.status(), StatusCode::UNAUTHORIZED); +}