diff --git a/Cargo.lock b/Cargo.lock index c7b3ef0ed..f40db7bef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -655,6 +655,15 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bindgen" version = "0.69.4" @@ -1042,7 +1051,7 @@ checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "cli" -version = "0.20.1" +version = "0.3.0" dependencies = [ "anyhow", "clap", @@ -2145,7 +2154,7 @@ dependencies = [ [[package]] name = "iggy" -version = "0.4.3" +version = "0.5.0" dependencies = [ "aes-gcm", "anyhow", @@ -2172,7 +2181,6 @@ dependencies = [ "reqwest", "reqwest-middleware", "reqwest-retry", - "rmp-serde", "rustls 0.23.8", "serde", "serde_derive", @@ -2189,7 +2197,7 @@ dependencies = [ [[package]] name = "iggy_examples" -version = "0.0.3" +version = "0.0.4" dependencies = [ "anyhow", "bytes", @@ -2269,7 +2277,6 @@ dependencies = [ "regex", "serial_test", "server", - "sled", "tempfile", "tokio", "tracing-subscriber", @@ -3908,9 +3915,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.21" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -4045,7 +4052,7 @@ dependencies = [ [[package]] name = "server" -version = "0.2.24" +version = "0.3.0" dependencies = [ "anyhow", "async-stream", @@ -4054,6 +4061,7 @@ dependencies = [ "axum 0.7.5", "axum-server", "bcrypt", + "bincode", "blake3", "bytes", "clap", @@ -4066,6 +4074,7 @@ dependencies = [ "iggy", "jsonwebtoken", "keepcalm", + "log", "moka", "prometheus-client", "quinn", diff --git a/bench/src/benchmarks/benchmark.rs 
b/bench/src/benchmarks/benchmark.rs index 0a4be421f..7fe250f2f 100644 --- a/bench/src/benchmarks/benchmark.rs +++ b/bench/src/benchmarks/benchmark.rs @@ -14,6 +14,7 @@ use iggy::clients::client::{IggyClient, IggyClientBackgroundConfig}; use iggy::compression::compression_algorithm::CompressionAlgorithm; use iggy::error::IggyError; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use integration::test_server::{login_root, ClientFactory}; use std::{pin::Pin, sync::Arc}; use tracing::info; @@ -90,7 +91,7 @@ pub trait Benchmarkable { None, None, IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await?; } diff --git a/cli/Cargo.toml b/cli/Cargo.toml index c56571bb3..01c7199a0 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "cli" -version = "0.20.1" +version = "0.3.0" edition = "2021" authors = ["bartosz.ciesla@gmail.com"] repository = "https://github.com/iggy-rs/iggy" diff --git a/cli/src/args/topic.rs b/cli/src/args/topic.rs index 42252d1ed..fb8659350 100644 --- a/cli/src/args/topic.rs +++ b/cli/src/args/topic.rs @@ -2,8 +2,8 @@ use crate::args::common::ListMode; use clap::{Args, Subcommand}; use iggy::compression::compression_algorithm::CompressionAlgorithm; use iggy::identifier::Identifier; -use iggy::utils::byte_size::IggyByteSize; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; #[derive(Debug, Clone, Subcommand)] pub(crate) enum TopicAction { @@ -102,7 +102,7 @@ pub(crate) struct TopicCreateArgs { /// ("unlimited" or skipping parameter disables max topic size functionality in topic) /// Can't be lower than segment size in the config. 
#[arg(short, long, default_value = "unlimited", verbatim_doc_comment)] - pub(crate) max_topic_size: IggyByteSize, + pub(crate) max_topic_size: MaxTopicSize, /// Replication factor for the topic #[arg(short, long, default_value = "1")] pub(crate) replication_factor: u8, @@ -149,7 +149,7 @@ pub(crate) struct TopicUpdateArgs { /// ("unlimited" or skipping parameter causes removal of max topic size parameter in topic) /// Can't be lower than segment size in the config. #[arg(short, long, default_value = "unlimited", verbatim_doc_comment)] - pub(crate) max_topic_size: IggyByteSize, + pub(crate) max_topic_size: MaxTopicSize, #[arg(short, long, default_value = "1")] /// New replication factor for the topic pub(crate) replication_factor: u8, diff --git a/configs/server.json b/configs/server.json index 32ec2f735..39250d89b 100644 --- a/configs/server.json +++ b/configs/server.json @@ -34,7 +34,6 @@ "iggy.rs" ], "access_token_expiry": "1 h", - "refresh_token_expiry": "1 d", "clock_skew": "5 s", "not_before": "0 s", "encoding_secret": "top_secret$iggy.rs$_jwt_HS256_key#!", @@ -94,9 +93,6 @@ }, "system": { "path": "local_data", - "database": { - "path": "database" - }, "backup": { "path": "backup", "compatibility": { @@ -118,7 +114,7 @@ "size": "4 GB" }, "retention_policy": { - "message_expiry": "disabled", + "message_expiry": "none", "max_topic_size": "10 GB" }, "encryption": { diff --git a/configs/server.toml b/configs/server.toml index 37a81f307..fdce13340 100644 --- a/configs/server.toml +++ b/configs/server.toml @@ -66,9 +66,6 @@ valid_audiences = ["iggy.rs"] # Expiry time for access tokens. access_token_expiry = "1 h" -# Expiry time for refresh tokens. -refresh_token_expiry = "1 d" - # Tolerance for timing discrepancies during token validation. clock_skew = "5 s" @@ -233,11 +230,11 @@ path = "backup" # Subpath of the backup directory where converted segment data is stored after compatibility conversion. path = "compatibility" -# Database configuration. 
-[system.database] -# Path for storing database files. -# Specifies the directory where database files are stored, relative to `system.path`. -path = "database" +# Legacy database configuration - used only for the migration purposes. +#[system.database] +## Path for storing database files. +## Specifies the directory where database files are stored, relative to `system.path`. +#path = "database" # Runtime configuration. [system.runtime] @@ -275,10 +272,10 @@ size = "4 GB" # Data retention policy configuration. [system.retention_policy] # Configures the message time-based expiry setting. -# "disabled" means messages are kept indefinitely. +# "none" means messages are kept indefinitely. # A time value in human-readable format determines the lifespan of messages. # Example: `message_expiry = "2 days 4 hours 15 minutes"` means messages will expire after that duration. -message_expiry = "disabled" +message_expiry = "none" # Configures the topic size-based expiry setting. # "unlimited" or "0" means topics are kept indefinitely. 
diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 4cc9c9df4..a4b4b0316 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "iggy_examples" -version = "0.0.3" +version = "0.0.4" edition = "2021" [[example]] diff --git a/examples/src/getting-started/producer/main.rs b/examples/src/getting-started/producer/main.rs index c25f43f81..df3e96228 100644 --- a/examples/src/getting-started/producer/main.rs +++ b/examples/src/getting-started/producer/main.rs @@ -5,6 +5,7 @@ use iggy::compression::compression_algorithm::CompressionAlgorithm; use iggy::messages::send_messages::{Message, Partitioning}; use iggy::users::defaults::*; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use std::env; use std::error::Error; use std::str::FromStr; @@ -53,7 +54,7 @@ async fn init_system(client: &IggyClient) { None, Some(TOPIC_ID), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await { diff --git a/examples/src/shared/messages.rs b/examples/src/shared/messages.rs index ffda0d7a2..ae48829dc 100644 --- a/examples/src/shared/messages.rs +++ b/examples/src/shared/messages.rs @@ -1,3 +1,4 @@ +use iggy::utils::timestamp::IggyTimestamp; use serde::{Deserialize, Serialize}; use std::fmt::{self, Debug}; @@ -41,7 +42,7 @@ pub struct OrderCreated { pub price: f64, pub quantity: f64, pub side: String, - pub timestamp: u64, + pub timestamp: IggyTimestamp, } impl Debug for OrderCreated { @@ -52,7 +53,7 @@ impl Debug for OrderCreated { .field("price", &format!("{:.2}", self.price)) .field("quantity", &format!("{:.2}", self.quantity)) .field("side", &self.side) - .field("timestamp", &self.timestamp) + .field("timestamp", &self.timestamp.to_micros()) .finish() } } @@ -61,7 +62,7 @@ impl Debug for OrderCreated { pub struct OrderConfirmed { pub order_id: u64, pub price: f64, - pub timestamp: u64, + pub timestamp: IggyTimestamp, } impl Debug for OrderConfirmed { @@ -69,18 +70,28 @@ impl Debug for 
OrderConfirmed { f.debug_struct("OrderConfirmed") .field("order_id", &self.order_id) .field("price", &format!("{:.2}", self.price)) - .field("timestamp", &self.timestamp) + .field("timestamp", &self.timestamp.to_micros()) .finish() } } -#[derive(Debug, Deserialize, Serialize)] +#[derive(Deserialize, Serialize)] pub struct OrderRejected { pub order_id: u64, - pub timestamp: u64, + pub timestamp: IggyTimestamp, pub reason: String, } +impl Debug for OrderRejected { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OrderRejected") + .field("order_id", &self.order_id) + .field("timestamp", &self.timestamp.to_micros()) + .field("reason", &self.reason) + .finish() + } +} + impl SerializableMessage for OrderCreated { fn get_message_type(&self) -> &str { ORDER_CREATED_TYPE diff --git a/examples/src/shared/messages_generator.rs b/examples/src/shared/messages_generator.rs index 2e0686210..6d6935720 100644 --- a/examples/src/shared/messages_generator.rs +++ b/examples/src/shared/messages_generator.rs @@ -1,5 +1,5 @@ use crate::shared::messages::{OrderConfirmed, OrderCreated, OrderRejected, SerializableMessage}; -use crate::shared::utils; +use iggy::utils::timestamp::IggyTimestamp; use rand::rngs::ThreadRng; use rand::Rng; @@ -32,7 +32,7 @@ impl MessagesGenerator { self.order_id += 1; Box::new(OrderCreated { order_id: self.order_id, - timestamp: utils::timestamp(), + timestamp: IggyTimestamp::now(), currency_pair: CURRENCY_PAIRS[self.rng.gen_range(0..CURRENCY_PAIRS.len())].to_string(), price: self.rng.gen_range(10.0..=1000.0), quantity: self.rng.gen_range(0.1..=1.0), @@ -47,7 +47,7 @@ impl MessagesGenerator { fn generate_order_confirmed(&mut self) -> Box { Box::new(OrderConfirmed { order_id: self.order_id, - timestamp: utils::timestamp(), + timestamp: IggyTimestamp::now(), price: self.rng.gen_range(10.0..=1000.0), }) } @@ -55,7 +55,7 @@ impl MessagesGenerator { fn generate_order_rejected(&mut self) -> Box { Box::new(OrderRejected { order_id: 
self.order_id, - timestamp: utils::timestamp(), + timestamp: IggyTimestamp::now(), reason: match self.rng.gen_range(0..=1) { 0 => "cancelled_by_user", _ => "other", diff --git a/examples/src/shared/mod.rs b/examples/src/shared/mod.rs index 6695fd402..d752a2b4e 100644 --- a/examples/src/shared/mod.rs +++ b/examples/src/shared/mod.rs @@ -2,4 +2,3 @@ pub mod args; pub mod messages; pub mod messages_generator; pub mod system; -pub mod utils; diff --git a/examples/src/shared/system.rs b/examples/src/shared/system.rs index 32eee38ce..e29c379bf 100644 --- a/examples/src/shared/system.rs +++ b/examples/src/shared/system.rs @@ -8,6 +8,7 @@ use iggy::messages::poll_messages::PollingStrategy; use iggy::models::messages::PolledMessage; use iggy::users::defaults::*; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use tracing::info; type MessageHandler = dyn Fn(&PolledMessage) -> Result<(), Box>; @@ -74,7 +75,7 @@ pub async fn init_by_producer(args: &Args, client: &dyn Client) -> Result<(), Ig None, Some(args.topic_id), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await?; Ok(()) diff --git a/examples/src/shared/utils.rs b/examples/src/shared/utils.rs deleted file mode 100644 index d92b58c01..000000000 --- a/examples/src/shared/utils.rs +++ /dev/null @@ -1,8 +0,0 @@ -use std::time::{SystemTime, UNIX_EPOCH}; - -pub fn timestamp() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_micros() as u64 -} diff --git a/integration/Cargo.toml b/integration/Cargo.toml index 3e4112f6e..6c03928b3 100644 --- a/integration/Cargo.toml +++ b/integration/Cargo.toml @@ -17,7 +17,6 @@ predicates = "3.1.0" regex = "1.10.4" serial_test = "3.1.1" server = { path = "../server" } -sled = "0.34.7" tempfile = "3.10.1" tokio = { version = "1.38.0", features = ["full"] } tracing-subscriber = "0.3.18" diff --git a/integration/tests/cli/consumer_group/test_consumer_group_create_command.rs 
b/integration/tests/cli/consumer_group/test_consumer_group_create_command.rs index 60eae27b8..de8abcd64 100644 --- a/integration/tests/cli/consumer_group/test_consumer_group_create_command.rs +++ b/integration/tests/cli/consumer_group/test_consumer_group_create_command.rs @@ -6,6 +6,7 @@ use assert_cmd::assert::Assert; use async_trait::async_trait; use iggy::client::Client; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::diff; use serial_test::parallel; @@ -83,7 +84,7 @@ impl IggyCmdTestCase for TestConsumerGroupCreateCmd { None, Some(self.topic_id), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(topic.is_ok()); diff --git a/integration/tests/cli/consumer_group/test_consumer_group_delete_command.rs b/integration/tests/cli/consumer_group/test_consumer_group_delete_command.rs index b7c4cf2eb..cfcdb30f6 100644 --- a/integration/tests/cli/consumer_group/test_consumer_group_delete_command.rs +++ b/integration/tests/cli/consumer_group/test_consumer_group_delete_command.rs @@ -6,6 +6,7 @@ use assert_cmd::assert::Assert; use async_trait::async_trait; use iggy::client::Client; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::diff; use serial_test::parallel; @@ -84,7 +85,7 @@ impl IggyCmdTestCase for TestConsumerGroupDeleteCmd { None, Some(self.topic_id), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(topic.is_ok()); diff --git a/integration/tests/cli/consumer_group/test_consumer_group_get_command.rs b/integration/tests/cli/consumer_group/test_consumer_group_get_command.rs index 281b09603..2b8f25752 100644 --- a/integration/tests/cli/consumer_group/test_consumer_group_get_command.rs +++ b/integration/tests/cli/consumer_group/test_consumer_group_get_command.rs @@ -6,6 +6,7 @@ use assert_cmd::assert::Assert; use async_trait::async_trait; use iggy::client::Client; use iggy::utils::expiry::IggyExpiry; 
+use iggy::utils::topic_size::MaxTopicSize; use predicates::str::{contains, starts_with}; use serial_test::parallel; @@ -84,7 +85,7 @@ impl IggyCmdTestCase for TestConsumerGroupGetCmd { None, Some(self.topic_id), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(topic.is_ok()); diff --git a/integration/tests/cli/consumer_group/test_consumer_group_list_command.rs b/integration/tests/cli/consumer_group/test_consumer_group_list_command.rs index fab87572e..358d8265c 100644 --- a/integration/tests/cli/consumer_group/test_consumer_group_list_command.rs +++ b/integration/tests/cli/consumer_group/test_consumer_group_list_command.rs @@ -6,6 +6,7 @@ use assert_cmd::assert::Assert; use async_trait::async_trait; use iggy::client::Client; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::{contains, starts_with}; use serial_test::parallel; @@ -81,7 +82,7 @@ impl IggyCmdTestCase for TestConsumerGroupListCmd { None, Some(self.topic_id), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(topic.is_ok()); diff --git a/integration/tests/cli/consumer_offset/test_consumer_offset_get_command.rs b/integration/tests/cli/consumer_offset/test_consumer_offset_get_command.rs index 732c9940c..bec85ff10 100644 --- a/integration/tests/cli/consumer_offset/test_consumer_offset_get_command.rs +++ b/integration/tests/cli/consumer_offset/test_consumer_offset_get_command.rs @@ -9,6 +9,7 @@ use iggy::consumer::{Consumer, ConsumerKind}; use iggy::identifier::Identifier; use iggy::messages::send_messages::{Message, Partitioning}; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::{contains, starts_with}; use serial_test::parallel; use std::str::FromStr; @@ -97,7 +98,7 @@ impl IggyCmdTestCase for TestConsumerOffsetGetCmd { None, Some(self.topic_id), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; 
assert!(topic.is_ok()); diff --git a/integration/tests/cli/consumer_offset/test_consumer_offset_set_command.rs b/integration/tests/cli/consumer_offset/test_consumer_offset_set_command.rs index c14bbfb42..4aae4124d 100644 --- a/integration/tests/cli/consumer_offset/test_consumer_offset_set_command.rs +++ b/integration/tests/cli/consumer_offset/test_consumer_offset_set_command.rs @@ -9,6 +9,7 @@ use iggy::consumer::{Consumer, ConsumerKind}; use iggy::identifier::Identifier; use iggy::messages::send_messages::{Message, Partitioning}; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::diff; use serial_test::parallel; use std::str::FromStr; @@ -97,7 +98,7 @@ impl IggyCmdTestCase for TestConsumerOffsetSetCmd { None, Some(self.topic_id), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(topic.is_ok()); diff --git a/integration/tests/cli/message/test_message_poll_command.rs b/integration/tests/cli/message/test_message_poll_command.rs index 3387eb858..d82e90498 100644 --- a/integration/tests/cli/message/test_message_poll_command.rs +++ b/integration/tests/cli/message/test_message_poll_command.rs @@ -10,6 +10,7 @@ use iggy::messages::poll_messages::{PollingKind, PollingStrategy}; use iggy::messages::send_messages::{Message, Partitioning}; use iggy::models::header::{HeaderKey, HeaderValue}; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::{contains, starts_with}; use serial_test::parallel; use std::collections::HashMap; @@ -121,7 +122,7 @@ impl IggyCmdTestCase for TestMessagePollCmd { None, Some(self.topic_id), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(topic.is_ok()); diff --git a/integration/tests/cli/message/test_message_send_command.rs b/integration/tests/cli/message/test_message_send_command.rs index 695ecbab8..5e39c5eeb 100644 --- a/integration/tests/cli/message/test_message_send_command.rs +++ 
b/integration/tests/cli/message/test_message_send_command.rs @@ -9,6 +9,7 @@ use iggy::consumer::Consumer; use iggy::messages::poll_messages::PollingStrategy; use iggy::models::header::{HeaderKey, HeaderValue}; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::diff; use serial_test::parallel; use std::collections::HashMap; @@ -154,7 +155,7 @@ impl IggyCmdTestCase for TestMessageSendCmd { None, Some(self.topic_id), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(topic.is_ok()); diff --git a/integration/tests/cli/partition/test_partition_create_command.rs b/integration/tests/cli/partition/test_partition_create_command.rs index f7c602d55..34bf642c1 100644 --- a/integration/tests/cli/partition/test_partition_create_command.rs +++ b/integration/tests/cli/partition/test_partition_create_command.rs @@ -7,6 +7,7 @@ use async_trait::async_trait; use iggy::client::Client; use iggy::compression::compression_algorithm::CompressionAlgorithm; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::diff; use serial_test::parallel; @@ -82,7 +83,7 @@ impl IggyCmdTestCase for TestPartitionCreateCmd { None, Some(self.topic_id), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(topic.is_ok()); diff --git a/integration/tests/cli/partition/test_partition_delete_command.rs b/integration/tests/cli/partition/test_partition_delete_command.rs index 4747a3231..da3f30f0a 100644 --- a/integration/tests/cli/partition/test_partition_delete_command.rs +++ b/integration/tests/cli/partition/test_partition_delete_command.rs @@ -6,6 +6,7 @@ use assert_cmd::assert::Assert; use async_trait::async_trait; use iggy::client::Client; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::diff; use serial_test::parallel; @@ -78,7 +79,7 @@ impl IggyCmdTestCase for TestPartitionDeleteCmd { None, 
Some(self.topic_id), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(topic.is_ok()); diff --git a/integration/tests/cli/personal_access_token/test_pat_create_command.rs b/integration/tests/cli/personal_access_token/test_pat_create_command.rs index b176a274a..7f89cf158 100644 --- a/integration/tests/cli/personal_access_token/test_pat_create_command.rs +++ b/integration/tests/cli/personal_access_token/test_pat_create_command.rs @@ -67,7 +67,7 @@ impl IggyCmdTestCase for TestPatCreateCmd { assert!(token.is_some()); let token = token.unwrap(); if self.expiry.is_none() { - assert!(token.expiry.is_none()) + assert!(token.expiry_at.is_none()) } let delete = client.delete_personal_access_token(&self.name).await; diff --git a/integration/tests/cli/stream/test_stream_purge_command.rs b/integration/tests/cli/stream/test_stream_purge_command.rs index 9bad05616..37837cda2 100644 --- a/integration/tests/cli/stream/test_stream_purge_command.rs +++ b/integration/tests/cli/stream/test_stream_purge_command.rs @@ -7,6 +7,7 @@ use async_trait::async_trait; use iggy::client::Client; use iggy::messages::send_messages::{Message, Partitioning}; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::diff; use serial_test::parallel; use std::str::FromStr; @@ -55,7 +56,7 @@ impl IggyCmdTestCase for TestStreamPurgeCmd { None, Some(self.topic_id), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(topic.is_ok()); diff --git a/integration/tests/cli/system/test_stats_command.rs b/integration/tests/cli/system/test_stats_command.rs index a8194e1d2..fa8c60a2e 100644 --- a/integration/tests/cli/system/test_stats_command.rs +++ b/integration/tests/cli/system/test_stats_command.rs @@ -7,6 +7,7 @@ use iggy::cli::system::stats::GetStatsOutput; use iggy::client::Client; use iggy::identifier::Identifier; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use 
predicates::str::{contains, starts_with}; use serial_test::parallel; @@ -57,7 +58,7 @@ impl IggyCmdTestCase for TestStatsCmd { None, Some(1), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(topic.is_ok()); diff --git a/integration/tests/cli/topic/test_topic_create_command.rs b/integration/tests/cli/topic/test_topic_create_command.rs index 6e54bef1c..584eb310a 100644 --- a/integration/tests/cli/topic/test_topic_create_command.rs +++ b/integration/tests/cli/topic/test_topic_create_command.rs @@ -7,8 +7,8 @@ use async_trait::async_trait; use humantime::Duration as HumanDuration; use iggy::client::Client; use iggy::compression::compression_algorithm::CompressionAlgorithm; -use iggy::utils::byte_size::IggyByteSize; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::diff; use serial_test::parallel; use std::time::Duration; @@ -21,7 +21,7 @@ struct TestTopicCreateCmd { partitions_count: u32, compression_algorithm: CompressionAlgorithm, message_expiry: Option>, - max_topic_size: Option, + max_topic_size: MaxTopicSize, replication_factor: u8, using_identifier: TestStreamId, } @@ -36,7 +36,7 @@ impl TestTopicCreateCmd { partitions_count: u32, compression_algorithm: CompressionAlgorithm, message_expiry: Option>, - max_topic_size: Option, + max_topic_size: MaxTopicSize, replication_factor: u8, using_identifier: TestStreamId, ) -> Self { @@ -111,11 +111,7 @@ impl IggyCmdTestCase for TestTopicCreateCmd { }) .to_string(); - let max_topic_size = (match &self.max_topic_size { - Some(value) => value.as_human_string_with_zero_as_unlimited(), - None => IggyByteSize::default().as_human_string_with_zero_as_unlimited(), - }) - .to_string(); + let max_topic_size = self.max_topic_size.to_string(); let replication_factor = self.replication_factor; @@ -155,7 +151,7 @@ impl IggyCmdTestCase for TestTopicCreateCmd { .unwrap(); assert_eq!( topic_details.message_expiry, - Some(duration.as_secs() as u32) + 
IggyExpiry::ExpireDuration(duration.into()) ); } @@ -189,7 +185,7 @@ pub async fn should_be_successful() { 1, Default::default(), None, - None, + MaxTopicSize::ServerDefault, 1, TestStreamId::Numeric, )) @@ -203,7 +199,7 @@ pub async fn should_be_successful() { 5, Default::default(), None, - None, + MaxTopicSize::ServerDefault, 1, TestStreamId::Named, )) @@ -217,7 +213,7 @@ pub async fn should_be_successful() { 1, Default::default(), Some(vec![String::from("3days"), String::from("5s")]), - None, + MaxTopicSize::ServerDefault, 1, TestStreamId::Named, )) @@ -236,7 +232,7 @@ pub async fn should_be_successful() { String::from("1m"), String::from("1s"), ]), - None, + MaxTopicSize::ServerDefault, 1, TestStreamId::Numeric, )) diff --git a/integration/tests/cli/topic/test_topic_delete_command.rs b/integration/tests/cli/topic/test_topic_delete_command.rs index d2376cf3c..b550211ff 100644 --- a/integration/tests/cli/topic/test_topic_delete_command.rs +++ b/integration/tests/cli/topic/test_topic_delete_command.rs @@ -6,6 +6,7 @@ use assert_cmd::assert::Assert; use async_trait::async_trait; use iggy::client::Client; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::diff; use serial_test::parallel; @@ -69,7 +70,7 @@ impl IggyCmdTestCase for TestTopicDeleteCmd { None, Some(self.topic_id), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(topic.is_ok()); diff --git a/integration/tests/cli/topic/test_topic_get_command.rs b/integration/tests/cli/topic/test_topic_get_command.rs index e9db2ee47..9183591a2 100644 --- a/integration/tests/cli/topic/test_topic_get_command.rs +++ b/integration/tests/cli/topic/test_topic_get_command.rs @@ -6,6 +6,7 @@ use assert_cmd::assert::Assert; use async_trait::async_trait; use iggy::client::Client; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::{contains, starts_with}; use serial_test::parallel; @@ -69,7 +70,7 @@ 
impl IggyCmdTestCase for TestTopicGetCmd { None, Some(self.topic_id), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(topic.is_ok()); @@ -109,7 +110,10 @@ impl IggyCmdTestCase for TestTopicGetCmd { ))) .stdout(contains("Topic size | 0")) .stdout(contains("Message expiry | unlimited")) - .stdout(contains("Max topic size | unlimited")) + .stdout(contains(format!( + "Max topic size | {}", + MaxTopicSize::get_server_default() + ))) .stdout(contains("Topic message count | 0")) .stdout(contains("Partitions count | 1")); } diff --git a/integration/tests/cli/topic/test_topic_list_command.rs b/integration/tests/cli/topic/test_topic_list_command.rs index c7e59075d..9ab4bd93f 100644 --- a/integration/tests/cli/topic/test_topic_list_command.rs +++ b/integration/tests/cli/topic/test_topic_list_command.rs @@ -6,6 +6,7 @@ use assert_cmd::assert::Assert; use async_trait::async_trait; use iggy::client::Client; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::{contains, starts_with}; use serial_test::parallel; @@ -66,7 +67,7 @@ impl IggyCmdTestCase for TestTopicListCmd { None, Some(self.topic_id), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(topic.is_ok()); diff --git a/integration/tests/cli/topic/test_topic_purge_command.rs b/integration/tests/cli/topic/test_topic_purge_command.rs index f41ddb2ac..d4dbf3a3b 100644 --- a/integration/tests/cli/topic/test_topic_purge_command.rs +++ b/integration/tests/cli/topic/test_topic_purge_command.rs @@ -7,6 +7,7 @@ use async_trait::async_trait; use iggy::client::Client; use iggy::messages::send_messages::{Message, Partitioning}; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::diff; use serial_test::parallel; use std::str::FromStr; @@ -71,7 +72,7 @@ impl IggyCmdTestCase for TestTopicPurgeCmd { None, Some(self.topic_id), IggyExpiry::NeverExpire, - None, + 
MaxTopicSize::ServerDefault, ) .await; assert!(topic.is_ok()); diff --git a/integration/tests/cli/topic/test_topic_update_command.rs b/integration/tests/cli/topic/test_topic_update_command.rs index 0bee535c8..89c71eba1 100644 --- a/integration/tests/cli/topic/test_topic_update_command.rs +++ b/integration/tests/cli/topic/test_topic_update_command.rs @@ -7,8 +7,8 @@ use async_trait::async_trait; use humantime::Duration as HumanDuration; use iggy::client::Client; use iggy::compression::compression_algorithm::CompressionAlgorithm; -use iggy::utils::byte_size::IggyByteSize; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use predicates::str::diff; use serial_test::parallel; use std::time::Duration; @@ -20,12 +20,12 @@ struct TestTopicUpdateCmd { topic_name: String, compression_algorithm: CompressionAlgorithm, message_expiry: Option>, - max_topic_size: Option, + max_topic_size: MaxTopicSize, replication_factor: u8, topic_new_name: String, topic_new_compression_algorithm: CompressionAlgorithm, topic_new_message_expiry: Option>, - topic_new_max_size: Option, + topic_new_max_size: MaxTopicSize, topic_new_replication_factor: u8, using_stream_id: TestStreamId, using_topic_id: TestTopicId, @@ -40,12 +40,12 @@ impl TestTopicUpdateCmd { topic_name: String, compression_algorithm: CompressionAlgorithm, message_expiry: Option>, - max_topic_size: Option, + max_topic_size: MaxTopicSize, replication_factor: u8, topic_new_name: String, topic_new_compression_algorithm: CompressionAlgorithm, topic_new_message_expiry: Option>, - topic_new_max_size: Option, + topic_new_max_size: MaxTopicSize, topic_new_replication_factor: u8, using_stream_id: TestStreamId, using_topic_id: TestTopicId, @@ -83,8 +83,8 @@ impl TestTopicUpdateCmd { command.push(self.topic_new_name.clone()); command.push(self.topic_new_compression_algorithm.to_string()); - if let Some(max_size_bytes) = &self.topic_new_max_size { - command.push(format!("--max-size-bytes={}", max_size_bytes)); + 
if let MaxTopicSize::Custom(max_size) = &self.topic_new_max_size { + command.push(format!("--max-topic-bytes={}", max_size)); } if self.topic_new_replication_factor != 1 { @@ -111,12 +111,12 @@ impl IggyCmdTestCase for TestTopicUpdateCmd { assert!(stream.is_ok()); let message_expiry = match &self.message_expiry { - None => None, + None => IggyExpiry::NeverExpire, Some(message_expiry) => { let duration: Duration = *message_expiry.join(" ").parse::().unwrap(); - Some(duration.as_secs() as u32) + IggyExpiry::ExpireDuration(duration.into()) } }; @@ -128,7 +128,7 @@ impl IggyCmdTestCase for TestTopicUpdateCmd { self.compression_algorithm, Some(self.replication_factor), Some(self.topic_id), - message_expiry.into(), + message_expiry, self.max_topic_size, ) .await; @@ -162,11 +162,7 @@ impl IggyCmdTestCase for TestTopicUpdateCmd { }) .to_string(); - let max_topic_size = (match &self.topic_new_max_size { - Some(value) => value.as_human_string_with_zero_as_unlimited(), - None => IggyByteSize::default().as_human_string_with_zero_as_unlimited(), - }) - .to_string(); + let max_topic_size = self.max_topic_size.to_string(); let replication_factor = self.replication_factor; let new_topic_name = &self.topic_new_name; @@ -203,7 +199,7 @@ impl IggyCmdTestCase for TestTopicUpdateCmd { .unwrap(); assert_eq!( topic_details.message_expiry, - Some(duration.as_secs() as u32) + IggyExpiry::ExpireDuration(duration.into()) ); } @@ -235,12 +231,12 @@ pub async fn should_be_successful() { String::from("sync"), Default::default(), None, - None, + MaxTopicSize::ServerDefault, 1, String::from("new_name"), CompressionAlgorithm::Gzip, None, - None, + MaxTopicSize::ServerDefault, 1, TestStreamId::Numeric, TestTopicId::Numeric, @@ -254,12 +250,12 @@ pub async fn should_be_successful() { String::from("topic"), Default::default(), None, - None, + MaxTopicSize::ServerDefault, 1, String::from("testing"), CompressionAlgorithm::Gzip, None, - None, + MaxTopicSize::ServerDefault, 1, TestStreamId::Named, 
TestTopicId::Numeric, @@ -273,12 +269,12 @@ pub async fn should_be_successful() { String::from("development"), Default::default(), None, - None, + MaxTopicSize::ServerDefault, 1, String::from("development"), CompressionAlgorithm::Gzip, None, - None, + MaxTopicSize::ServerDefault, 1, TestStreamId::Numeric, TestTopicId::Named, @@ -292,7 +288,7 @@ pub async fn should_be_successful() { String::from("probe"), Default::default(), None, - None, + MaxTopicSize::ServerDefault, 1, String::from("development"), CompressionAlgorithm::Gzip, @@ -302,7 +298,7 @@ pub async fn should_be_successful() { String::from("1m"), String::from("1s"), ]), - None, + MaxTopicSize::ServerDefault, 1, TestStreamId::Numeric, TestTopicId::Numeric, @@ -316,12 +312,12 @@ pub async fn should_be_successful() { String::from("testing"), Default::default(), Some(vec![String::from("1s")]), - None, + MaxTopicSize::ServerDefault, 1, String::from("testing"), CompressionAlgorithm::Gzip, Some(vec![String::from("1m 6s")]), - None, + MaxTopicSize::ServerDefault, 1, TestStreamId::Numeric, TestTopicId::Numeric, @@ -339,12 +335,12 @@ pub async fn should_be_successful() { String::from("1m"), String::from("1h"), ]), - None, + MaxTopicSize::ServerDefault, 1, String::from("testing"), CompressionAlgorithm::Gzip, None, - None, + MaxTopicSize::ServerDefault, 1, TestStreamId::Numeric, TestTopicId::Named, diff --git a/integration/tests/config_provider/mod.rs b/integration/tests/config_provider/mod.rs index 64528f5bb..63d107e5e 100644 --- a/integration/tests/config_provider/mod.rs +++ b/integration/tests/config_provider/mod.rs @@ -35,7 +35,6 @@ async fn validate_server_config_json_from_repository() { #[serial] #[tokio::test] async fn validate_custom_env_provider() { - let expected_database_path = "awesome_database_path"; let expected_datagram_send_buffer_size = "1.00 KB"; let expected_quic_certificate_self_signed = false; let expected_http_enabled = false; @@ -43,7 +42,6 @@ async fn validate_custom_env_provider() { let 
expected_message_saver_enabled = false; let expected_message_expiry = "10s"; - env::set_var("IGGY_SYSTEM_DATABASE_PATH", expected_database_path); env::set_var( "IGGY_QUIC_DATAGRAM_SEND_BUFFER_SIZE", expected_datagram_send_buffer_size, @@ -67,7 +65,6 @@ async fn validate_custom_env_provider() { .await .expect("Failed to load default server.toml config"); - assert_eq!(config.system.database.path, expected_database_path); assert_eq!( config.quic.datagram_send_buffer_size.to_string(), expected_datagram_send_buffer_size @@ -84,7 +81,6 @@ async fn validate_custom_env_provider() { expected_message_expiry ); - env::remove_var("IGGY_SYSTEM_DATABASE_PATH"); env::remove_var("IGGY_QUIC_DATAGRAM_SEND_BUFFER_SIZE"); env::remove_var("IGGY_QUIC_CERTIFICATE_SELF_SIGNED"); env::remove_var("IGGY_HTTP_ENABLED"); diff --git a/integration/tests/examples/mod.rs b/integration/tests/examples/mod.rs index 3d8803691..7df2199d5 100644 --- a/integration/tests/examples/mod.rs +++ b/integration/tests/examples/mod.rs @@ -11,6 +11,7 @@ use iggy::tcp::client::TcpClient; use iggy::tcp::config::TcpClientConfig; use iggy::users::defaults::*; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use integration::test_server::{IpAddrKind, TestServer}; use regex::Regex; use std::sync::Arc; @@ -126,7 +127,7 @@ impl<'a> IggyExampleTest<'a> { None, None, IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await .unwrap(); diff --git a/integration/tests/mod.rs b/integration/tests/mod.rs index 04a63a4ea..43df859f5 100644 --- a/integration/tests/mod.rs +++ b/integration/tests/mod.rs @@ -4,4 +4,5 @@ mod config_provider; mod data_integrity; mod examples; mod server; +mod state; mod streaming; diff --git a/integration/tests/server/scenarios/consumer_group_join_scenario.rs b/integration/tests/server/scenarios/consumer_group_join_scenario.rs index 46c227f29..dd55a7a6e 100644 --- a/integration/tests/server/scenarios/consumer_group_join_scenario.rs +++ 
b/integration/tests/server/scenarios/consumer_group_join_scenario.rs @@ -10,6 +10,7 @@ use iggy::identifier::Identifier; use iggy::models::client_info::ClientInfoDetails; use iggy::models::consumer_group::ConsumerGroupDetails; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use integration::test_server::{ assert_clean_system, create_user, login_root, login_user, ClientFactory, }; @@ -39,7 +40,7 @@ pub async fn run(client_factory: &dyn ClientFactory) { None, Some(TOPIC_ID), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await .unwrap(); diff --git a/integration/tests/server/scenarios/consumer_group_with_multiple_clients_polling_messages_scenario.rs b/integration/tests/server/scenarios/consumer_group_with_multiple_clients_polling_messages_scenario.rs index 225a848eb..e0bc1580c 100644 --- a/integration/tests/server/scenarios/consumer_group_with_multiple_clients_polling_messages_scenario.rs +++ b/integration/tests/server/scenarios/consumer_group_with_multiple_clients_polling_messages_scenario.rs @@ -12,6 +12,7 @@ use iggy::messages::poll_messages::PollingStrategy; use iggy::messages::send_messages::{Message, Partitioning}; use iggy::models::consumer_group::ConsumerGroupDetails; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use integration::test_server::{ assert_clean_system, create_user, login_root, login_user, ClientFactory, }; @@ -55,7 +56,7 @@ async fn init_system( None, Some(TOPIC_ID), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await .unwrap(); diff --git a/integration/tests/server/scenarios/consumer_group_with_single_client_polling_messages_scenario.rs b/integration/tests/server/scenarios/consumer_group_with_single_client_polling_messages_scenario.rs index 2b32d7c8a..e6f087582 100644 --- a/integration/tests/server/scenarios/consumer_group_with_single_client_polling_messages_scenario.rs +++ 
b/integration/tests/server/scenarios/consumer_group_with_single_client_polling_messages_scenario.rs @@ -11,6 +11,7 @@ use iggy::identifier::Identifier; use iggy::messages::poll_messages::PollingStrategy; use iggy::messages::send_messages::{Message, Partitioning}; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use integration::test_server::{assert_clean_system, login_root, ClientFactory}; use std::str::{from_utf8, FromStr}; @@ -46,7 +47,7 @@ async fn init_system(client: &IggyClient) { None, Some(TOPIC_ID), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await .unwrap(); diff --git a/integration/tests/server/scenarios/create_message_payload.rs b/integration/tests/server/scenarios/create_message_payload.rs index b8274cb70..f592df125 100644 --- a/integration/tests/server/scenarios/create_message_payload.rs +++ b/integration/tests/server/scenarios/create_message_payload.rs @@ -6,6 +6,7 @@ use iggy::messages::poll_messages::PollingStrategy; use iggy::messages::send_messages::{Message, Partitioning}; use iggy::models::header::{HeaderKey, HeaderValue}; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use integration::test_server::{assert_clean_system, login_root, ClientFactory}; use std::collections::HashMap; use std::str::FromStr; @@ -118,7 +119,7 @@ async fn init_system(client: &IggyClient) { None, None, IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await .unwrap(); diff --git a/integration/tests/server/scenarios/message_headers_scenario.rs b/integration/tests/server/scenarios/message_headers_scenario.rs index fc75d57c9..3718394c6 100644 --- a/integration/tests/server/scenarios/message_headers_scenario.rs +++ b/integration/tests/server/scenarios/message_headers_scenario.rs @@ -12,6 +12,7 @@ use iggy::messages::poll_messages::PollingStrategy; use iggy::messages::send_messages::{Message, Partitioning}; use iggy::models::header::{HeaderKey, HeaderValue}; use 
iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use integration::test_server::{assert_clean_system, login_root, ClientFactory}; use std::collections::HashMap; use std::str::FromStr; @@ -109,7 +110,7 @@ async fn init_system(client: &IggyClient) { None, Some(TOPIC_ID), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await .unwrap(); diff --git a/integration/tests/server/scenarios/message_size_scenario.rs b/integration/tests/server/scenarios/message_size_scenario.rs index 5704a43db..2b76dad9c 100644 --- a/integration/tests/server/scenarios/message_size_scenario.rs +++ b/integration/tests/server/scenarios/message_size_scenario.rs @@ -8,6 +8,7 @@ use iggy::messages::poll_messages::PollingStrategy; use iggy::messages::send_messages::{Message, Partitioning}; use iggy::models::header::{HeaderKey, HeaderValue}; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use integration::test_server::{assert_clean_system, login_root, ClientFactory}; use std::collections::HashMap; use std::str::FromStr; @@ -128,7 +129,7 @@ async fn init_system(client: &IggyClient) { None, None, IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await .unwrap(); diff --git a/integration/tests/server/scenarios/stream_size_validation_scenario.rs b/integration/tests/server/scenarios/stream_size_validation_scenario.rs index eacec66cc..88ce6f82a 100644 --- a/integration/tests/server/scenarios/stream_size_validation_scenario.rs +++ b/integration/tests/server/scenarios/stream_size_validation_scenario.rs @@ -6,6 +6,7 @@ use iggy::compression::compression_algorithm::CompressionAlgorithm; use iggy::identifier::Identifier; use iggy::messages::send_messages::{Message, Partitioning}; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use integration::test_server::{assert_clean_system, login_root, ClientFactory}; use std::str::FromStr; @@ -127,7 +128,7 @@ async fn 
create_topic_assert_empty(client: &IggyClient, stream_name: &str, topic None, None, IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await .unwrap(); diff --git a/integration/tests/server/scenarios/system_scenario.rs b/integration/tests/server/scenarios/system_scenario.rs index 4a6d291aa..2b6e8c6a1 100644 --- a/integration/tests/server/scenarios/system_scenario.rs +++ b/integration/tests/server/scenarios/system_scenario.rs @@ -20,6 +20,7 @@ use iggy::models::messages::PolledMessage; use iggy::users::defaults::{DEFAULT_ROOT_PASSWORD, DEFAULT_ROOT_USERNAME}; use iggy::utils::byte_size::IggyByteSize; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use integration::test_server::{assert_clean_system, ClientFactory}; pub async fn run(client_factory: &dyn ClientFactory) { @@ -106,7 +107,7 @@ pub async fn run(client_factory: &dyn ClientFactory) { None, Some(TOPIC_ID), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await .unwrap(); @@ -124,8 +125,8 @@ pub async fn run(client_factory: &dyn ClientFactory) { assert_eq!(topic.compression_algorithm, CompressionAlgorithm::default()); assert_eq!(topic.size, 0); assert_eq!(topic.messages_count, 0); - assert_eq!(topic.message_expiry, None); - assert_eq!(topic.max_topic_size, None); + assert_eq!(topic.message_expiry, IggyExpiry::NeverExpire); + assert_eq!(topic.max_topic_size, MaxTopicSize::get_server_default()); assert_eq!(topic.replication_factor, 1); // 11. 
Get topic details by ID @@ -190,7 +191,7 @@ pub async fn run(client_factory: &dyn ClientFactory) { None, Some(TOPIC_ID), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(create_topic_result.is_err()); @@ -205,7 +206,7 @@ pub async fn run(client_factory: &dyn ClientFactory) { None, Some(TOPIC_ID + 1), IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await; assert!(create_topic_result.is_err()); @@ -539,7 +540,7 @@ pub async fn run(client_factory: &dyn ClientFactory) { let updated_topic_name = format!("{}-updated", TOPIC_NAME); let updated_message_expiry = 1000; let message_expiry_duration = updated_message_expiry.into(); - let updated_max_topic_size = IggyByteSize::from(0x1337); + let updated_max_topic_size = MaxTopicSize::Custom(IggyByteSize::from(0x1337)); let updated_replication_factor = 5; client @@ -550,7 +551,7 @@ pub async fn run(client_factory: &dyn ClientFactory) { CompressionAlgorithm::Gzip, Some(updated_replication_factor), IggyExpiry::ExpireDuration(message_expiry_duration), - Some(updated_max_topic_size), + updated_max_topic_size, ) .await .unwrap(); @@ -566,13 +567,13 @@ pub async fn run(client_factory: &dyn ClientFactory) { assert_eq!(updated_topic.name, updated_topic_name); assert_eq!( updated_topic.message_expiry, - Some(updated_message_expiry as u32) + IggyExpiry::ExpireDuration(message_expiry_duration) ); assert_eq!( updated_topic.compression_algorithm, CompressionAlgorithm::Gzip ); - assert_eq!(updated_topic.max_topic_size, Some(updated_max_topic_size)); + assert_eq!(updated_topic.max_topic_size, updated_max_topic_size); assert_eq!(updated_topic.replication_factor, updated_replication_factor); // 37. 
Purge the existing topic and ensure it has no messages @@ -688,7 +689,7 @@ pub async fn run(client_factory: &dyn ClientFactory) { None, None, IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await .unwrap(); @@ -728,7 +729,7 @@ pub async fn run(client_factory: &dyn ClientFactory) { fn assert_message(message: &PolledMessage, offset: u64) { let expected_payload = create_message_payload(offset); - assert!(message.timestamp > 0); + assert!(message.timestamp.to_micros() > 0); assert_eq!(message.offset, offset); assert_eq!(message.payload, expected_payload); } diff --git a/integration/tests/server/scenarios/user_scenario.rs b/integration/tests/server/scenarios/user_scenario.rs index 2a5a2c871..3682609e7 100644 --- a/integration/tests/server/scenarios/user_scenario.rs +++ b/integration/tests/server/scenarios/user_scenario.rs @@ -28,7 +28,7 @@ pub async fn run(client_factory: &dyn ClientFactory) { // 5. Get user details let user = users.first().unwrap(); assert_eq!(user.id, 1); - assert!(user.created_at > 0); + assert!(user.created_at.to_micros() > 0); assert_eq!(user.username, DEFAULT_ROOT_USERNAME); assert_eq!(user.status, UserStatus::Active); @@ -38,7 +38,7 @@ pub async fn run(client_factory: &dyn ClientFactory) { .unwrap(); assert_eq!(user.id, 1); - assert!(user.created_at > 0); + assert!(user.created_at.to_micros() > 0); assert_eq!(user.username, DEFAULT_ROOT_USERNAME); assert_eq!(user.status, UserStatus::Active); assert!(user.permissions.is_some()); @@ -122,7 +122,7 @@ pub async fn run(client_factory: &dyn ClientFactory) { let raw_pat1 = client .create_personal_access_token( pat_name1, - PersonalAccessTokenExpiry::ExpireDuration(3600.into()), + PersonalAccessTokenExpiry::ExpireDuration((1000000 * 3600).into()), ) .await .unwrap(); diff --git a/integration/tests/state/file.rs b/integration/tests/state/file.rs new file mode 100644 index 000000000..c9806122d --- /dev/null +++ b/integration/tests/state/file.rs @@ -0,0 +1,141 @@ +use 
crate::state::StateSetup; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::{CREATE_STREAM_CODE, CREATE_USER_CODE}; +use iggy::streams::create_stream::CreateStream; +use iggy::users::create_user::CreateUser; +use server::state::State; + +#[tokio::test] +async fn should_be_empty_given_initialized_state() { + let setup = StateSetup::init().await; + let state = setup.state(); + state.init().await.unwrap(); + let entries = state.load_entries().await.unwrap(); + assert!(entries.is_empty()); +} + +#[tokio::test] +async fn should_apply_single_entry() { + let setup = StateSetup::init().await; + let state = setup.state(); + state.init().await.unwrap(); + + let context = "test".as_bytes(); + let user_id = 1; + let code = CREATE_USER_CODE; + let command = CreateUser { + username: "test".to_string(), + password: "secret".to_string(), + status: Default::default(), + permissions: None, + }; + let payload = command.as_bytes(); + state + .apply(code, user_id, &payload, Some(context)) + .await + .unwrap(); + + let mut entries = state.load_entries().await.unwrap(); + assert_eq!(entries.len(), 1); + let entry = entries.remove(0); + assert_eq!(entry.index, 0); + assert_eq!(entry.term, 0); + assert_eq!(entry.version, setup.version()); + assert_eq!(entry.flags, 0); + assert!(entry.timestamp.to_micros() > 0); + assert_eq!(entry.user_id, user_id); + assert_eq!(entry.code, code); + assert_eq!(entry.payload, payload); + assert_eq!(entry.context, context); + + let loaded_command = CreateUser::from_bytes(entry.payload).unwrap(); + assert_eq!(command, loaded_command); +} + +#[tokio::test] +async fn should_apply_multiple_entries() { + let setup = StateSetup::init().await; + let state = setup.state(); + let entries = state.init().await.unwrap(); + + assert!(entries.is_empty()); + assert_eq!(state.current_index(), 0); + assert_eq!(state.entries_count(), 0); + assert_eq!(state.term(), 0); + + let context = "test".as_bytes(); + let first_user_id = 1; + let create_user_code = 
CREATE_USER_CODE; + let create_user = CreateUser { + username: "test".to_string(), + password: "secret".to_string(), + status: Default::default(), + permissions: None, + }; + let create_user_payload = create_user.as_bytes(); + state + .apply( + create_user_code, + first_user_id, + &create_user_payload, + Some(context), + ) + .await + .unwrap(); + + assert_eq!(state.current_index(), 0); + assert_eq!(state.entries_count(), 1); + + let create_user_context = "test".as_bytes(); + let second_user_id = 2; + let create_stream_code = CREATE_STREAM_CODE; + let create_stream = CreateStream { + stream_id: Some(1), + name: "test".to_string(), + }; + let create_stream_payload = create_stream.as_bytes(); + state + .apply( + create_stream_code, + second_user_id, + &create_stream_payload, + None, + ) + .await + .unwrap(); + + assert_eq!(state.current_index(), 1); + assert_eq!(state.entries_count(), 2); + + let mut entries = state.load_entries().await.unwrap(); + assert_eq!(entries.len(), 2); + + let create_user_entry = entries.remove(0); + assert_eq!(create_user_entry.index, 0); + assert_eq!(create_user_entry.term, 0); + assert_eq!(create_user_entry.version, setup.version()); + assert_eq!(create_user_entry.flags, 0); + assert!(create_user_entry.timestamp.to_micros() > 0); + assert_eq!(create_user_entry.user_id, 1); + assert_eq!(create_user_entry.code, create_user_code); + assert_eq!(create_user_entry.payload, create_user_payload); + assert_eq!(create_user_entry.context, create_user_context); + + let loaded_create_user = CreateUser::from_bytes(create_user_entry.payload).unwrap(); + assert_eq!(create_user, loaded_create_user); + + let create_stream_entry = entries.remove(0); + assert_eq!(create_stream_entry.index, 1); + assert_eq!(create_stream_entry.term, 0); + assert_eq!(create_stream_entry.version, setup.version()); + assert_eq!(create_stream_entry.flags, 0); + assert!(create_stream_entry.timestamp.to_micros() > 0); + assert!(create_stream_entry.timestamp.to_micros() > 
create_user_entry.timestamp.to_micros()); + assert_eq!(create_stream_entry.user_id, 2); + assert_eq!(create_stream_entry.code, create_stream_code); + assert_eq!(create_stream_entry.payload, create_stream_payload); + assert!(create_stream_entry.context.is_empty()); + + let loaded_create_stream = CreateStream::from_bytes(create_stream_entry.payload).unwrap(); + assert_eq!(create_stream, loaded_create_stream); +} diff --git a/integration/tests/state/mod.rs b/integration/tests/state/mod.rs new file mode 100644 index 000000000..d4b5743fb --- /dev/null +++ b/integration/tests/state/mod.rs @@ -0,0 +1,48 @@ +use server::state::file::FileState; +use server::streaming::persistence::persister::FilePersister; +use server::versioning::SemanticVersion; +use std::str::FromStr; +use std::sync::Arc; +use tokio::fs::create_dir; +use uuid::Uuid; + +mod file; +mod system; + +pub struct StateSetup { + directory_path: String, + state: FileState, + version: u32, +} + +impl StateSetup { + pub async fn init() -> StateSetup { + let directory_path = format!("state_{}", Uuid::new_v4().to_u128_le()); + let log_path = format!("{}/log", directory_path); + create_dir(&directory_path).await.unwrap(); + + let version = SemanticVersion::from_str("1.2.3").unwrap(); + let persister = FilePersister {}; + let state = FileState::new(&log_path, &version, Arc::new(persister)); + + Self { + directory_path, + state, + version: version.get_numeric_version().unwrap(), + } + } + + pub fn state(&self) -> &FileState { + &self.state + } + + pub fn version(&self) -> u32 { + self.version + } +} + +impl Drop for StateSetup { + fn drop(&mut self) { + std::fs::remove_dir_all(&self.directory_path).unwrap(); + } +} diff --git a/integration/tests/state/system.rs b/integration/tests/state/system.rs new file mode 100644 index 000000000..fc1bf7c9b --- /dev/null +++ b/integration/tests/state/system.rs @@ -0,0 +1,173 @@ +use crate::state::StateSetup; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::{ + 
CREATE_PARTITIONS_CODE, CREATE_PERSONAL_ACCESS_TOKEN_CODE, CREATE_STREAM_CODE, + CREATE_TOPIC_CODE, CREATE_USER_CODE, DELETE_STREAM_CODE, +}; +use iggy::partitions::create_partitions::CreatePartitions; +use iggy::personal_access_tokens::create_personal_access_token::CreatePersonalAccessToken; +use iggy::streams::create_stream::CreateStream; +use iggy::streams::delete_stream::DeleteStream; +use iggy::topics::create_topic::CreateTopic; +use iggy::users::create_user::CreateUser; +use iggy::utils::expiry::IggyExpiry; +use server::state::models::CreatePersonalAccessTokenWithHash; +use server::state::system::SystemState; +use server::state::State; + +#[tokio::test] +async fn should_be_initialized_based_on_state_entries() { + let setup = StateSetup::init().await; + let state = setup.state(); + state.init().await.unwrap(); + + let user_id = 1; + let create_user = CreateUser { + username: "user".to_string(), + password: "secret".to_string(), + status: Default::default(), + permissions: None, + }; + + let create_stream1 = CreateStream { + stream_id: Some(1), + name: "stream1".to_string(), + }; + + let create_topic1 = CreateTopic { + stream_id: create_stream1.stream_id.unwrap().try_into().unwrap(), + topic_id: Some(1), + partitions_count: 1, + compression_algorithm: Default::default(), + message_expiry: Default::default(), + max_topic_size: Default::default(), + name: "topic1".to_string(), + replication_factor: None, + }; + + let create_stream2 = CreateStream { + stream_id: Some(2), + name: "stream2".to_string(), + }; + + let create_topic2 = CreateTopic { + stream_id: create_stream2.stream_id.unwrap().try_into().unwrap(), + topic_id: Some(2), + partitions_count: 1, + compression_algorithm: Default::default(), + message_expiry: Default::default(), + max_topic_size: Default::default(), + name: "topic2".to_string(), + replication_factor: None, + }; + + let create_partitions = CreatePartitions { + stream_id: create_topic1.stream_id.clone(), + topic_id: 
create_topic1.topic_id.unwrap().try_into().unwrap(), + partitions_count: 2, + }; + + let delete_stream2 = DeleteStream { + stream_id: create_stream2.stream_id.unwrap().try_into().unwrap(), + }; + + let create_personal_access_token = CreatePersonalAccessTokenWithHash { + command: CreatePersonalAccessToken { + name: "test".to_string(), + expiry: IggyExpiry::NeverExpire, + }, + hash: "hash".to_string(), + }; + + state + .apply(CREATE_USER_CODE, user_id, &create_user.as_bytes(), None) + .await + .unwrap(); + state + .apply( + CREATE_STREAM_CODE, + user_id, + &create_stream1.as_bytes(), + None, + ) + .await + .unwrap(); + state + .apply(CREATE_TOPIC_CODE, user_id, &create_topic1.as_bytes(), None) + .await + .unwrap(); + state + .apply( + CREATE_STREAM_CODE, + user_id, + &create_stream2.as_bytes(), + None, + ) + .await + .unwrap(); + state + .apply(CREATE_TOPIC_CODE, user_id, &create_topic2.as_bytes(), None) + .await + .unwrap(); + state + .apply( + CREATE_PARTITIONS_CODE, + user_id, + &create_partitions.as_bytes(), + None, + ) + .await + .unwrap(); + state + .apply( + DELETE_STREAM_CODE, + user_id, + &delete_stream2.as_bytes(), + None, + ) + .await + .unwrap(); + state + .apply( + CREATE_PERSONAL_ACCESS_TOKEN_CODE, + user_id, + &create_personal_access_token.as_bytes(), + None, + ) + .await + .unwrap(); + + let entries = state.load_entries().await.unwrap(); + let mut system = SystemState::init(entries).await.unwrap(); + + assert_eq!(system.users.len(), 1); + let mut user = system.users.remove(&1).unwrap(); + assert_eq!(user.id, 1); + assert_eq!(user.username, create_user.username); + assert_eq!(user.password_hash, create_user.password); + assert_eq!(user.personal_access_tokens.len(), 1); + + let personal_access_token = user + .personal_access_tokens + .remove(&create_personal_access_token.command.name) + .unwrap(); + assert_eq!( + personal_access_token.token_hash, + create_personal_access_token.hash + ); + + assert_eq!(system.streams.len(), 1); + let mut stream = system 
+ .streams + .remove(&create_stream1.stream_id.unwrap()) + .unwrap(); + assert_eq!(stream.name, create_stream1.name); + assert_eq!(stream.topics.len(), 1); + + let topic = stream + .topics + .remove(&create_topic1.topic_id.unwrap()) + .unwrap(); + assert_eq!(topic.name, create_topic1.name); + assert_eq!(topic.partitions.len(), 3); +} diff --git a/integration/tests/streaming/common/test_setup.rs b/integration/tests/streaming/common/test_setup.rs index e0845704f..9722b4898 100644 --- a/integration/tests/streaming/common/test_setup.rs +++ b/integration/tests/streaming/common/test_setup.rs @@ -1,7 +1,6 @@ use server::configs::system::SystemConfig; use server::streaming::persistence::persister::FilePersister; use server::streaming::storage::SystemStorage; -use sled::Db; use std::sync::Arc; use tokio::fs; use uuid::Uuid; @@ -9,7 +8,6 @@ use uuid::Uuid; pub struct TestSetup { pub config: Arc, pub storage: Arc, - pub db: Arc, } impl TestSetup { @@ -23,13 +21,8 @@ impl TestSetup { let config = Arc::new(config); fs::create_dir(config.get_system_path()).await.unwrap(); let persister = FilePersister {}; - let db = Arc::new(sled::open(config.get_database_path()).unwrap()); - let storage = Arc::new(SystemStorage::new(db.clone(), Arc::new(persister))); - TestSetup { - config, - storage, - db, - } + let storage = Arc::new(SystemStorage::new(config.clone(), Arc::new(persister))); + TestSetup { config, storage } } pub async fn create_streams_directory(&self) { diff --git a/integration/tests/streaming/consumer_group.rs b/integration/tests/streaming/consumer_group.rs deleted file mode 100644 index cff190ce7..000000000 --- a/integration/tests/streaming/consumer_group.rs +++ /dev/null @@ -1,94 +0,0 @@ -use crate::streaming::common::test_setup::TestSetup; -use iggy::compression::compression_algorithm::CompressionAlgorithm; -use iggy::identifier::Identifier; -use server::streaming::topics::topic::Topic; -use std::sync::{ - atomic::{AtomicU32, AtomicU64}, - Arc, -}; - -#[tokio::test] 
-async fn should_persist_consumer_group_and_then_load_it_from_disk() { - let setup = TestSetup::init().await; - let storage = setup.storage.topic.as_ref(); - let mut topic = init_topic(&setup).await; - let group_id = 1; - let group_name = "test"; - topic - .create_consumer_group(Some(group_id), group_name) - .await - .unwrap(); - - let consumer_groups = storage.load_consumer_groups(&topic).await.unwrap(); - assert_eq!(consumer_groups.len(), 1); - let consumer_group = consumer_groups.first().unwrap(); - - let consumer_group_by_id = topic - .get_consumer_group(&Identifier::numeric(group_id).unwrap()) - .unwrap(); - let consumer_group_by_id = consumer_group_by_id.read().await; - assert_eq!(consumer_group_by_id.group_id, consumer_group.group_id); - assert_eq!(consumer_group_by_id.name, consumer_group.name); - - let consumer_group_by_name = topic - .get_consumer_group(&Identifier::named(group_name).unwrap()) - .unwrap(); - let consumer_group_by_name = consumer_group_by_name.read().await; - assert_eq!(consumer_group_by_name.group_id, consumer_group.group_id); - assert_eq!(consumer_group_by_name.name, consumer_group.name); -} - -#[tokio::test] -async fn should_delete_consumer_group_from_disk() { - let setup = TestSetup::init().await; - let storage = setup.storage.topic.as_ref(); - let mut topic = init_topic(&setup).await; - let group_id = 1; - let group_name = "test"; - topic - .create_consumer_group(Some(group_id), group_name) - .await - .unwrap(); - - let consumer_groups = storage.load_consumer_groups(&topic).await.unwrap(); - assert_eq!(consumer_groups.len(), 1); - let consumer_group = consumer_groups.first().unwrap(); - - let deleted_consumer_group = topic - .delete_consumer_group(&Identifier::numeric(group_id).unwrap()) - .await - .unwrap(); - let deleted_consumer_group = deleted_consumer_group.read().await; - assert_eq!(deleted_consumer_group.group_id, consumer_group.group_id); - assert_eq!(deleted_consumer_group.name, consumer_group.name); - let consumer_groups = 
storage.load_consumer_groups(&topic).await.unwrap(); - assert!(consumer_groups.is_empty()); -} - -async fn init_topic(setup: &TestSetup) -> Topic { - let stream_id = 1; - let size_of_parent_stream = Arc::new(AtomicU64::new(0)); - let messages_count_of_parent_stream = Arc::new(AtomicU64::new(0)); - let segments_count_of_parent_stream = Arc::new(AtomicU32::new(0)); - - setup.create_topics_directory(stream_id).await; - let name = "test"; - let topic = Topic::create( - stream_id, - 1, - name, - 1, - setup.config.clone(), - setup.storage.clone(), - size_of_parent_stream, - messages_count_of_parent_stream, - segments_count_of_parent_stream, - None, - CompressionAlgorithm::default(), - None, - 1, - ) - .unwrap(); - topic.persist().await.unwrap(); - topic -} diff --git a/integration/tests/streaming/consumer_offset.rs b/integration/tests/streaming/consumer_offset.rs index 6da3bf50c..0b985b8fe 100644 --- a/integration/tests/streaming/consumer_offset.rs +++ b/integration/tests/streaming/consumer_offset.rs @@ -1,46 +1,50 @@ use crate::streaming::common::test_setup::TestSetup; use iggy::consumer::ConsumerKind; +use server::configs::system::SystemConfig; use server::streaming::partitions::partition::ConsumerOffset; use server::streaming::storage::PartitionStorage; +use std::sync::Arc; +use tokio::fs; #[tokio::test] async fn should_persist_consumer_offsets_and_then_load_them_from_disk() { let setup = TestSetup::init().await; let storage = setup.storage.partition.as_ref(); - assert_persisted_offsets(storage, ConsumerKind::Consumer).await; - assert_persisted_offsets(storage, ConsumerKind::ConsumerGroup).await; + assert_persisted_offsets(&setup.config, storage, ConsumerKind::Consumer).await; + assert_persisted_offsets(&setup.config, storage, ConsumerKind::ConsumerGroup).await; } -async fn assert_persisted_offsets(storage: &dyn PartitionStorage, kind: ConsumerKind) { - let stream_id = 1; - let topic_id = 2; - let partition_id = 3; - let consumer_ids_count = 50; - let offsets_count = 
500; +async fn assert_persisted_offsets( + config: &Arc, + storage: &dyn PartitionStorage, + kind: ConsumerKind, +) { + let consumer_ids_count = 3; + let offsets_count = 5; + let path = match kind { + ConsumerKind::Consumer => "consumer_offsets", + ConsumerKind::ConsumerGroup => "consumer_group_offsets", + }; + let path = format!("{}/{}", config.get_system_path(), path); + fs::create_dir(&path).await.unwrap(); for consumer_id in 1..=consumer_ids_count { let expected_offsets_count = consumer_id; for offset in 0..=offsets_count { - let consumer_offset = - ConsumerOffset::new(kind, consumer_id, offset, stream_id, topic_id, partition_id); - assert_persisted_offset(storage, &consumer_offset, expected_offsets_count).await; + let consumer_offset = ConsumerOffset::new(kind, consumer_id, offset, &path); + assert_persisted_offset(&path, storage, &consumer_offset, expected_offsets_count).await; } } } async fn assert_persisted_offset( + path: &str, storage: &dyn PartitionStorage, consumer_offset: &ConsumerOffset, expected_offsets_count: u32, ) { - let parts = get_parts(&consumer_offset.key); storage.save_consumer_offset(consumer_offset).await.unwrap(); let consumer_offsets = storage - .load_consumer_offsets( - consumer_offset.kind, - parts.stream_id, - parts.topic_id, - parts.partition_id, - ) + .load_consumer_offsets(consumer_offset.kind, path) .await .unwrap(); let expected_offsets_count = expected_offsets_count as usize; @@ -48,19 +52,3 @@ async fn assert_persisted_offset( let loaded_consumer_offset = consumer_offsets.get(expected_offsets_count - 1).unwrap(); assert_eq!(loaded_consumer_offset, consumer_offset); } - -fn get_parts(key: &str) -> ConsumerOffsetParts { - let parts: Vec<&str> = key.split(':').collect(); - ConsumerOffsetParts { - stream_id: parts[1].parse().unwrap(), - topic_id: parts[2].parse().unwrap(), - partition_id: parts[3].parse().unwrap(), - } -} - -#[derive(Debug)] -struct ConsumerOffsetParts { - pub stream_id: u32, - pub topic_id: u32, - pub 
partition_id: u32, -} diff --git a/integration/tests/streaming/messages.rs b/integration/tests/streaming/messages.rs index 5d40a7623..54b2669a0 100644 --- a/integration/tests/streaming/messages.rs +++ b/integration/tests/streaming/messages.rs @@ -3,8 +3,10 @@ use bytes::Bytes; use iggy::bytes_serializable::BytesSerializable; use iggy::messages::send_messages::Message; use iggy::models::header::{HeaderKey, HeaderValue}; +use iggy::utils::expiry::IggyExpiry; use iggy::utils::timestamp::IggyTimestamp; use server::configs::system::{PartitionConfig, SystemConfig}; +use server::state::system::PartitionState; use server::streaming::batching::appendable_batch_info::AppendableBatchInfo; use server::streaming::partitions::partition::Partition; use std::collections::HashMap; @@ -35,12 +37,13 @@ async fn should_persist_messages_and_then_load_them_by_timestamp() { true, config.clone(), setup.storage.clone(), - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), + IggyTimestamp::now(), ); let mut messages = Vec::with_capacity(messages_count as usize); @@ -114,7 +117,7 @@ async fn should_persist_messages_and_then_load_them_by_timestamp() { .append_messages(appendable_batch_info, messages) .await .unwrap(); - let test_timestamp = IggyTimestamp::now().to_micros(); + let test_timestamp = IggyTimestamp::now(); partition .append_messages(appendable_batch_info_two, messages_two) .await @@ -131,7 +134,7 @@ async fn should_persist_messages_and_then_load_them_by_timestamp() { let appended_message = &appended_messages[index]; assert_eq!(loaded_message.id, appended_message.id); assert_eq!(loaded_message.payload, appended_message.payload); - assert!(loaded_message.timestamp >= test_timestamp); + assert!(loaded_message.timestamp.to_micros() >= test_timestamp.to_micros()); assert_eq!( loaded_message .headers @@ -164,12 +167,13 @@ async fn 
should_persist_messages_and_then_load_them_from_disk() { true, config.clone(), setup.storage.clone(), - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), + IggyTimestamp::now(), ); let mut messages = Vec::with_capacity(messages_count as usize); @@ -218,6 +222,7 @@ async fn should_persist_messages_and_then_load_them_from_disk() { .unwrap(); assert_eq!(partition.unsaved_messages_count, 0); + let now = IggyTimestamp::now(); let mut loaded_partition = Partition::create( stream_id, topic_id, @@ -225,14 +230,19 @@ async fn should_persist_messages_and_then_load_them_from_disk() { false, config.clone(), setup.storage.clone(), - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), + now, ); - loaded_partition.load().await.unwrap(); + let partition_state = PartitionState { + id: partition.partition_id, + created_at: now, + }; + loaded_partition.load(partition_state).await.unwrap(); let loaded_messages = loaded_partition .get_messages_by_offset(0, messages_count) .await diff --git a/integration/tests/streaming/mod.rs b/integration/tests/streaming/mod.rs index 64d6e5f1c..f6697b7ba 100644 --- a/integration/tests/streaming/mod.rs +++ b/integration/tests/streaming/mod.rs @@ -2,17 +2,14 @@ use bytes::Bytes; use iggy::messages::send_messages::Message; mod common; -mod consumer_group; mod consumer_offset; mod messages; mod partition; -mod personal_access_token; mod segment; mod stream; mod system; mod topic; mod topic_messages; -mod user; fn create_messages() -> Vec { vec![ diff --git a/integration/tests/streaming/partition.rs b/integration/tests/streaming/partition.rs index 2b528916a..1089dad61 100644 --- a/integration/tests/streaming/partition.rs +++ b/integration/tests/streaming/partition.rs @@ -1,5 +1,8 @@ use 
crate::streaming::common::test_setup::TestSetup; use crate::streaming::create_messages; +use iggy::utils::expiry::IggyExpiry; +use iggy::utils::timestamp::IggyTimestamp; +use server::state::system::PartitionState; use server::streaming::batching::appendable_batch_info::AppendableBatchInfo; use server::streaming::partitions::partition::Partition; use server::streaming::segments::segment::{INDEX_EXTENSION, LOG_EXTENSION, TIME_INDEX_EXTENSION}; @@ -23,17 +26,18 @@ async fn should_persist_partition_with_segment() { with_segment, setup.config.clone(), setup.storage.clone(), - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), + IggyTimestamp::now(), ); partition.persist().await.unwrap(); - assert_persisted_partition(&partition.path, with_segment).await; + assert_persisted_partition(&partition.partition_path, with_segment).await; } } @@ -53,16 +57,18 @@ async fn should_load_existing_partition_from_disk() { with_segment, setup.config.clone(), setup.storage.clone(), - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), + IggyTimestamp::now(), ); partition.persist().await.unwrap(); - assert_persisted_partition(&partition.path, with_segment).await; + assert_persisted_partition(&partition.partition_path, with_segment).await; + let now = IggyTimestamp::now(); let mut loaded_partition = Partition::create( stream_id, topic_id, @@ -70,18 +76,23 @@ async fn should_load_existing_partition_from_disk() { false, setup.config.clone(), setup.storage.clone(), - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), + now, ); - loaded_partition.load().await.unwrap(); + let partition_state = PartitionState { + id: 
partition.partition_id, + created_at: now, + }; + loaded_partition.load(partition_state).await.unwrap(); assert_eq!(loaded_partition.stream_id, partition.stream_id); assert_eq!(loaded_partition.partition_id, partition.partition_id); - assert_eq!(loaded_partition.path, partition.path); + assert_eq!(loaded_partition.partition_path, partition.partition_path); assert_eq!(loaded_partition.current_offset, partition.current_offset); assert_eq!( loaded_partition.unsaved_messages_count, @@ -119,19 +130,20 @@ async fn should_delete_existing_partition_from_disk() { with_segment, setup.config.clone(), setup.storage.clone(), - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), + IggyTimestamp::now(), ); partition.persist().await.unwrap(); - assert_persisted_partition(&partition.path, with_segment).await; + assert_persisted_partition(&partition.partition_path, with_segment).await; partition.delete().await.unwrap(); - assert!(fs::metadata(&partition.path).await.is_err()); + assert!(fs::metadata(&partition.partition_path).await.is_err()); } } @@ -151,15 +163,16 @@ async fn should_purge_existing_partition_on_disk() { with_segment, setup.config.clone(), setup.storage.clone(), - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), + IggyTimestamp::now(), ); partition.persist().await.unwrap(); - assert_persisted_partition(&partition.path, with_segment).await; + assert_persisted_partition(&partition.partition_path, with_segment).await; let messages = create_messages(); let messages_count = messages.len(); let appendable_batch_info = AppendableBatchInfo::new( diff --git a/integration/tests/streaming/personal_access_token.rs b/integration/tests/streaming/personal_access_token.rs deleted file mode 100644 index eb0ec24b0..000000000 --- 
a/integration/tests/streaming/personal_access_token.rs +++ /dev/null @@ -1,172 +0,0 @@ -use crate::streaming::common::test_setup::TestSetup; -use iggy::utils::timestamp::IggyTimestamp; -use server::streaming::personal_access_tokens::personal_access_token::PersonalAccessToken; - -#[tokio::test] -async fn many_personal_access_tokens_should_be_saved_and_loaded() { - let setup = TestSetup::init().await; - let now = IggyTimestamp::now().to_micros(); - let (pat1, raw_token1) = PersonalAccessToken::new(1, "test1", now, None); - let (pat2, raw_token2) = PersonalAccessToken::new(2, "test2", now, Some(1000)); - let (pat3, raw_token3) = PersonalAccessToken::new(3, "test3", now, Some(100_000)); - - setup - .storage - .personal_access_token - .save(&pat1) - .await - .expect("Failed to save personal access token"); - setup - .storage - .personal_access_token - .save(&pat2) - .await - .expect("Failed to save personal access token"); - setup - .storage - .personal_access_token - .save(&pat3) - .await - .expect("Failed to save personal access token"); - - let personal_access_tokens = setup - .storage - .personal_access_token - .load_all() - .await - .expect("Failed to load personal access tokens"); - assert_eq!(personal_access_tokens.len(), 3); - - let user1_personal_access_tokens = setup - .storage - .personal_access_token - .load_for_user(pat1.user_id) - .await - .expect("Failed to load personal access tokens"); - assert_eq!(user1_personal_access_tokens.len(), 1); - assert_eq!(user1_personal_access_tokens[0], pat1); - let user2_personal_access_tokens = setup - .storage - .personal_access_token - .load_for_user(pat2.user_id) - .await - .expect("Failed to load personal access tokens"); - assert_eq!(user2_personal_access_tokens.len(), 1); - assert_eq!(user2_personal_access_tokens[0], pat2); - let user3_personal_access_tokens = setup - .storage - .personal_access_token - .load_for_user(pat3.user_id) - .await - .expect("Failed to load personal access tokens"); - 
assert_eq!(user3_personal_access_tokens.len(), 1); - assert_eq!(user3_personal_access_tokens[0], pat3); - - let loaded_pat1 = setup - .storage - .personal_access_token - .load_by_token(&pat1.token) - .await - .expect("Failed to load personal access token"); - let loaded_pat2 = setup - .storage - .personal_access_token - .load_by_token(&pat2.token) - .await - .expect("Failed to load personal access token"); - let loaded_pat3 = setup - .storage - .personal_access_token - .load_by_token(&pat3.token) - .await - .expect("Failed to load personal access token"); - - assert_pat(&pat1, &loaded_pat1); - assert_pat(&pat2, &loaded_pat2); - assert_pat(&pat3, &loaded_pat3); - - assert_ne!(loaded_pat1.token, raw_token1); - assert_ne!(loaded_pat2.token, raw_token2); - assert_ne!(loaded_pat3.token, raw_token3); -} - -fn assert_pat(personal_access_token: &PersonalAccessToken, loaded_pat: &PersonalAccessToken) { - assert_eq!(loaded_pat.user_id, personal_access_token.user_id); - assert_eq!(loaded_pat.name, personal_access_token.name); - assert_eq!(loaded_pat.token, personal_access_token.token); - assert_eq!(loaded_pat.expiry, personal_access_token.expiry); -} - -#[tokio::test] -async fn personal_access_token_should_be_deleted() { - let setup = TestSetup::init().await; - let user_id = 1; - let now = IggyTimestamp::now().to_micros(); - let (personal_access_token, _) = PersonalAccessToken::new(user_id, "test", now, None); - setup - .storage - .personal_access_token - .save(&personal_access_token) - .await - .expect("Failed to save personal access token"); - - let personal_access_tokens = setup - .storage - .personal_access_token - .load_all() - .await - .expect("Failed to load personal access tokens"); - assert_eq!(personal_access_tokens.len(), 1); - let loaded_pat = setup - .storage - .personal_access_token - .load_by_token(&personal_access_token.token) - .await - .expect("Failed to load personal access token"); - assert_pat(&personal_access_token, &loaded_pat); - - let 
loaded_pat_by_name = setup - .storage - .personal_access_token - .load_by_name(user_id, &personal_access_token.name) - .await - .expect("Failed to load personal access token"); - assert_pat(&personal_access_token, &loaded_pat_by_name); - - setup - .storage - .personal_access_token - .delete_for_user(personal_access_token.user_id, &personal_access_token.name) - .await - .expect("Failed to delete personal access token"); - - let loaded_pat = setup - .storage - .personal_access_token - .load_by_token(&personal_access_token.token) - .await; - assert!(loaded_pat.is_err()); - - let loaded_pat_by_name = setup - .storage - .personal_access_token - .load_by_name(user_id, &personal_access_token.name) - .await; - assert!(loaded_pat_by_name.is_err()); - - let personal_access_tokens = setup - .storage - .personal_access_token - .load_all() - .await - .expect("Failed to load personal access tokens"); - assert!(personal_access_tokens.is_empty()); - - let user_personal_access_tokens = setup - .storage - .personal_access_token - .load_for_user(user_id) - .await - .expect("Failed to load personal access tokens"); - assert!(user_personal_access_tokens.is_empty()); -} diff --git a/integration/tests/streaming/segment.rs b/integration/tests/streaming/segment.rs index 05cb8c2ea..1f2124091 100644 --- a/integration/tests/streaming/segment.rs +++ b/integration/tests/streaming/segment.rs @@ -2,6 +2,7 @@ use crate::streaming::common::test_setup::TestSetup; use bytes::{Bytes, BytesMut}; use iggy::bytes_serializable::BytesSerializable; use iggy::models::messages::{MessageState, PolledMessage}; +use iggy::utils::expiry::IggyExpiry; use iggy::utils::{checksum, timestamp::IggyTimestamp}; use server::streaming::batching::message_batch::RetainedMessageBatch; use server::streaming::models::messages::RetainedMessage; @@ -26,7 +27,7 @@ async fn should_persist_segment() { start_offset, setup.config.clone(), setup.storage.clone(), - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), 
Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), @@ -64,7 +65,7 @@ async fn should_load_existing_segment_from_disk() { start_offset, setup.config.clone(), setup.storage.clone(), - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), @@ -91,7 +92,7 @@ async fn should_load_existing_segment_from_disk() { start_offset, setup.config.clone(), setup.storage.clone(), - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), @@ -129,7 +130,7 @@ async fn should_persist_and_load_segment_with_messages() { start_offset, setup.config.clone(), setup.storage.clone(), - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), @@ -151,10 +152,10 @@ async fn should_persist_and_load_segment_with_messages() { .await; let messages_count = 10; let mut base_offset = 0; - let mut last_timestamp = 0; + let mut last_timestamp = IggyTimestamp::zero(); let mut batch_buffer = BytesMut::new(); for i in 0..messages_count { - let message = create_message(i, "test", IggyTimestamp::now().to_micros()); + let message = create_message(i, "test", IggyTimestamp::now()); if i == 0 { base_offset = message.offset; } @@ -191,7 +192,7 @@ async fn should_persist_and_load_segment_with_messages() { start_offset, setup.config.clone(), setup.storage.clone(), - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), @@ -214,7 +215,8 @@ async fn given_all_expired_messages_segment_should_be_expired() { let topic_id = 2; let partition_id = 3; let start_offset = 0; - let message_expiry = 10; + let message_expiry_ms = 1000; + let message_expiry = IggyExpiry::ExpireDuration(message_expiry_ms.into()); let mut segment = segment::Segment::create( stream_id, topic_id, @@ -222,7 +224,7 @@ async fn given_all_expired_messages_segment_should_be_expired() { 
start_offset, setup.config.clone(), setup.storage.clone(), - Some(message_expiry), + message_expiry, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), @@ -243,15 +245,14 @@ async fn given_all_expired_messages_segment_should_be_expired() { ) .await; let messages_count = 10; - let now = IggyTimestamp::now().to_micros(); - let message_expiry = message_expiry as u64; - let mut expired_timestamp = now - (1000000 * 2 * message_expiry); + let now = IggyTimestamp::now(); + let mut expired_timestamp = (now.to_micros() - 2 * message_expiry_ms).into(); let mut base_offset = 0; - let mut last_timestamp = 0; + let mut last_timestamp = IggyTimestamp::zero(); let mut batch_buffer = BytesMut::new(); for i in 0..messages_count { let message = create_message(i, "test", expired_timestamp); - expired_timestamp += 1; + expired_timestamp = (expired_timestamp.to_micros() + 1).into(); if i == 0 { base_offset = message.offset; } @@ -293,7 +294,8 @@ async fn given_at_least_one_not_expired_message_segment_should_not_be_expired() let topic_id = 2; let partition_id = 3; let start_offset = 0; - let message_expiry = 10; + let message_expiry_ms = 1000; + let message_expiry = IggyExpiry::ExpireDuration(message_expiry_ms.into()); let mut segment = segment::Segment::create( stream_id, topic_id, @@ -301,7 +303,7 @@ async fn given_at_least_one_not_expired_message_segment_should_not_be_expired() start_offset, setup.config.clone(), setup.storage.clone(), - Some(message_expiry), + message_expiry, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), @@ -321,10 +323,9 @@ async fn given_at_least_one_not_expired_message_segment_should_not_be_expired() start_offset, ) .await; - let now = IggyTimestamp::now().to_micros(); - let message_expiry = message_expiry as u64; - let expired_timestamp = now - (1000000 * 2 * message_expiry); - let not_expired_timestamp = now - (1000000 * message_expiry) + 1; + let now = IggyTimestamp::now(); + let 
expired_timestamp = (now.to_micros() - 2 * message_expiry_ms).into(); + let not_expired_timestamp = (now.to_micros() - message_expiry_ms + 1).into(); let expired_message = create_message(0, "test", expired_timestamp); let not_expired_message = create_message(1, "test", not_expired_timestamp); @@ -386,7 +387,7 @@ async fn assert_persisted_segment(partition_path: &str, start_offset: u64) { assert!(fs::metadata(&time_index_path).await.is_ok()); } -fn create_message(offset: u64, payload: &str, timestamp: u64) -> PolledMessage { +fn create_message(offset: u64, payload: &str, timestamp: IggyTimestamp) -> PolledMessage { let payload = Bytes::from(payload.to_string()); let checksum = checksum::calculate(payload.as_ref()); PolledMessage::create( diff --git a/integration/tests/streaming/stream.rs b/integration/tests/streaming/stream.rs index bb891ce2c..8e2742991 100644 --- a/integration/tests/streaming/stream.rs +++ b/integration/tests/streaming/stream.rs @@ -3,8 +3,13 @@ use crate::streaming::create_messages; use iggy::identifier::Identifier; use iggy::messages::poll_messages::PollingStrategy; use iggy::messages::send_messages::Partitioning; +use iggy::utils::expiry::IggyExpiry; +use iggy::utils::timestamp::IggyTimestamp; +use iggy::utils::topic_size::MaxTopicSize; +use server::state::system::StreamState; use server::streaming::polling_consumer::PollingConsumer; use server::streaming::streams::stream::Stream; +use std::collections::HashMap; use tokio::fs; #[tokio::test] @@ -43,9 +48,20 @@ async fn should_load_existing_stream_from_disk() { stream.persist().await.unwrap(); assert_persisted_stream(&stream.path, &setup.config.topic.path).await; - let mut loaded_stream = - Stream::empty(stream_id, setup.config.clone(), setup.storage.clone()); - loaded_stream.load().await.unwrap(); + let mut loaded_stream = Stream::empty( + stream_id, + &name, + setup.config.clone(), + setup.storage.clone(), + ); + let state = StreamState { + id: stream_id, + name: name.clone(), + created_at: 
IggyTimestamp::now(), + topics: HashMap::new(), + current_topic_id: 0, + }; + loaded_stream.load(state).await.unwrap(); assert_eq!(loaded_stream.stream_id, stream.stream_id); assert_eq!(loaded_stream.name, stream.name); @@ -94,7 +110,15 @@ async fn should_purge_existing_stream_on_disk() { let topic_id = 1; stream - .create_topic(Some(topic_id), "test", 1, None, Default::default(), None, 1) + .create_topic( + Some(topic_id), + "test", + 1, + IggyExpiry::NeverExpire, + Default::default(), + MaxTopicSize::ServerDefault, + 1, + ) .await .unwrap(); diff --git a/integration/tests/streaming/system.rs b/integration/tests/streaming/system.rs index 0e8e6d969..dc2f6a187 100644 --- a/integration/tests/streaming/system.rs +++ b/integration/tests/streaming/system.rs @@ -9,11 +9,7 @@ use tokio::fs; #[tokio::test] async fn should_initialize_system_and_base_directories() { let setup = TestSetup::init().await; - let mut system = System::new( - setup.config.clone(), - Some(setup.db.clone()), - PersonalAccessTokenConfig::default(), - ); + let mut system = System::new(setup.config.clone(), PersonalAccessTokenConfig::default()); system.init().await.unwrap(); @@ -27,17 +23,12 @@ async fn should_initialize_system_and_base_directories() { assert_eq!(names.len(), 3); assert!(names.contains(&setup.config.stream.path)); - assert!(names.contains(&setup.config.database.path)); } #[tokio::test] async fn should_create_and_persist_stream() { let setup = TestSetup::init().await; - let mut system = System::new( - setup.config.clone(), - Some(setup.db.clone()), - PersonalAccessTokenConfig::default(), - ); + let mut system = System::new(setup.config.clone(), PersonalAccessTokenConfig::default()); let stream_id = 1; let stream_name = "test"; let session = Session::new(1, 1, SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 1234)); @@ -54,11 +45,7 @@ async fn should_create_and_persist_stream() { #[tokio::test] async fn should_create_and_persist_stream_with_automatically_generated_id() { let setup = 
TestSetup::init().await; - let mut system = System::new( - setup.config.clone(), - Some(setup.db.clone()), - PersonalAccessTokenConfig::default(), - ); + let mut system = System::new(setup.config.clone(), PersonalAccessTokenConfig::default()); let stream_id = 1; let stream_name = "test"; let session = Session::new(1, 1, SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 1234)); @@ -75,11 +62,7 @@ async fn should_create_and_persist_stream_with_automatically_generated_id() { #[tokio::test] async fn should_delete_persisted_stream() { let setup = TestSetup::init().await; - let mut system = System::new( - setup.config.clone(), - Some(setup.db.clone()), - PersonalAccessTokenConfig::default(), - ); + let mut system = System::new(setup.config.clone(), PersonalAccessTokenConfig::default()); let stream_id = 1; let stream_name = "test"; let session = Session::new(1, 1, SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 1234)); diff --git a/integration/tests/streaming/topic.rs b/integration/tests/streaming/topic.rs index 965ccd719..2d24a7636 100644 --- a/integration/tests/streaming/topic.rs +++ b/integration/tests/streaming/topic.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; use std::default::Default; use std::sync::atomic::{AtomicU32, AtomicU64}; use std::sync::Arc; @@ -7,6 +8,10 @@ use crate::streaming::create_messages; use iggy::compression::compression_algorithm::CompressionAlgorithm; use iggy::messages::poll_messages::PollingStrategy; use iggy::messages::send_messages::Partitioning; +use iggy::utils::expiry::IggyExpiry; +use iggy::utils::timestamp::IggyTimestamp; +use iggy::utils::topic_size::MaxTopicSize; +use server::state::system::{PartitionState, TopicState}; use server::streaming::polling_consumer::PollingConsumer; use server::streaming::topics::topic::Topic; use tokio::fs; @@ -30,9 +35,9 @@ async fn should_persist_topics_with_partitions_directories_and_info_file() { Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), - None, + 
IggyExpiry::NeverExpire, CompressionAlgorithm::default(), - None, + MaxTopicSize::ServerDefault, 1, ) .unwrap(); @@ -67,9 +72,9 @@ async fn should_load_existing_topic_from_disk() { Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), - None, + IggyExpiry::NeverExpire, CompressionAlgorithm::default(), - None, + MaxTopicSize::ServerDefault, 1, ) .unwrap(); @@ -81,16 +86,36 @@ async fn should_load_existing_topic_from_disk() { ) .await; + let created_at = IggyTimestamp::now(); let mut loaded_topic = Topic::empty( stream_id, topic_id, + &name, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), setup.config.clone(), setup.storage.clone(), ); - loaded_topic.load().await.unwrap(); + let topic_state = TopicState { + id: topic_id, + name, + partitions: if partitions_count == 0 { + HashMap::new() + } else { + (1..=partitions_count) + .map(|id| (id, PartitionState { id, created_at })) + .collect() + }, + consumer_groups: Default::default(), + compression_algorithm: Default::default(), + message_expiry: IggyExpiry::NeverExpire, + max_topic_size: MaxTopicSize::ServerDefault, + replication_factor: Some(1), + created_at: Default::default(), + current_consumer_group_id: 0, + }; + loaded_topic.load(topic_state).await.unwrap(); assert_eq!(loaded_topic.stream_id, topic.stream_id); assert_eq!(loaded_topic.topic_id, topic.topic_id); @@ -123,9 +148,9 @@ async fn should_delete_existing_topic_from_disk() { Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), - None, + IggyExpiry::NeverExpire, CompressionAlgorithm::default(), - None, + MaxTopicSize::ServerDefault, 1, ) .unwrap(); @@ -162,9 +187,9 @@ async fn should_purge_existing_topic_on_disk() { Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), - None, + IggyExpiry::NeverExpire, CompressionAlgorithm::default(), - None, + MaxTopicSize::ServerDefault, 1, ) .unwrap(); diff --git 
a/integration/tests/streaming/topic_messages.rs b/integration/tests/streaming/topic_messages.rs index f7fe2bac1..678626c1d 100644 --- a/integration/tests/streaming/topic_messages.rs +++ b/integration/tests/streaming/topic_messages.rs @@ -4,6 +4,8 @@ use iggy::locking::IggySharedMutFn; use iggy::messages::poll_messages::PollingStrategy; use iggy::messages::send_messages::{Message, Partitioning}; use iggy::utils::byte_size::IggyByteSize; +use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use server::configs::resource_quota::MemoryResourceQuota; use server::configs::system::{CacheConfig, SystemConfig}; use server::streaming::polling_consumer::PollingConsumer; @@ -230,9 +232,9 @@ async fn init_topic(setup: &TestSetup, partitions_count: u32) -> Topic { Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), - None, + IggyExpiry::NeverExpire, Default::default(), - None, + MaxTopicSize::ServerDefault, 1, ) .unwrap(); diff --git a/integration/tests/streaming/user.rs b/integration/tests/streaming/user.rs deleted file mode 100644 index 611f530e1..000000000 --- a/integration/tests/streaming/user.rs +++ /dev/null @@ -1,206 +0,0 @@ -use crate::streaming::common::test_setup::TestSetup; -use iggy::models::permissions::{ - GlobalPermissions, Permissions, StreamPermissions, TopicPermissions, -}; -use iggy::models::user_status::UserStatus; -use iggy::utils::timestamp::IggyTimestamp; -use server::streaming::users::user::User; -use std::collections::HashMap; - -#[tokio::test] -async fn single_user_should_be_saved_and_loaded() { - let setup = TestSetup::init().await; - let user_id = 10; - let user = create_user(user_id); - setup.storage.user.save(&user).await.unwrap(); - - let mut loaded_user = User { - id: user_id, - ..Default::default() - }; - setup.storage.user.load(&mut loaded_user).await.unwrap(); - - assert_user(&user, &loaded_user); -} - -#[tokio::test] -async fn many_users_should_be_saved_and_loaded() { - let setup = 
TestSetup::init().await; - let user1 = create_user(1); - let user2 = create_user(2); - let user3 = create_user(3); - - setup.storage.user.save(&user1).await.unwrap(); - setup.storage.user.save(&user2).await.unwrap(); - setup.storage.user.save(&user3).await.unwrap(); - - let users = setup.storage.user.load_all().await.unwrap(); - assert_eq!(users.len(), 3); - let loaded_user1 = users.first().unwrap(); - let loaded_user2 = users.get(1).unwrap(); - let loaded_user3 = users.get(2).unwrap(); - assert_user(&user1, loaded_user1); - assert_user(&user2, loaded_user2); - assert_user(&user3, loaded_user3); -} - -#[tokio::test] -async fn user_should_be_deleted() { - let setup = TestSetup::init().await; - let user1 = create_user(1); - let user2 = create_user(2); - setup.storage.user.save(&user1).await.unwrap(); - setup.storage.user.save(&user2).await.unwrap(); - - let users = setup.storage.user.load_all().await.unwrap(); - assert_eq!(users.len(), 2); - - setup.storage.user.delete(&user1).await.unwrap(); - let users = setup.storage.user.load_all().await.unwrap(); - assert_eq!(users.len(), 1); - let loaded_user = users.first().unwrap(); - assert_user(&user2, loaded_user); -} - -fn assert_user(user: &User, loaded_user: &User) { - assert_eq!(loaded_user.id, user.id); - assert_eq!(loaded_user.username, user.username); - assert_eq!(loaded_user.password, user.password); - assert_eq!(loaded_user.created_at, user.created_at); - assert_eq!(loaded_user.status, user.status); - if user.permissions.is_none() { - assert!(loaded_user.permissions.is_none()); - return; - } - - let user_permissions = user.permissions.as_ref().unwrap(); - let loaded_user_permissions = loaded_user.permissions.as_ref().unwrap(); - - assert_eq!( - loaded_user_permissions.global.manage_servers, - user_permissions.global.manage_servers - ); - assert_eq!( - loaded_user_permissions.global.read_servers, - user_permissions.global.read_servers - ); - assert_eq!( - loaded_user_permissions.global.manage_users, - 
user_permissions.global.manage_users - ); - assert_eq!( - loaded_user_permissions.global.read_users, - user_permissions.global.read_users - ); - assert_eq!( - loaded_user_permissions.global.manage_streams, - user_permissions.global.manage_streams - ); - assert_eq!( - loaded_user_permissions.global.read_streams, - user_permissions.global.read_streams - ); - assert_eq!( - loaded_user_permissions.global.manage_topics, - user_permissions.global.manage_topics - ); - assert_eq!( - loaded_user_permissions.global.read_topics, - user_permissions.global.read_topics - ); - assert_eq!( - loaded_user_permissions.global.poll_messages, - user_permissions.global.poll_messages - ); - assert_eq!( - loaded_user_permissions.global.send_messages, - user_permissions.global.send_messages - ); - - if user_permissions.streams.is_none() { - assert!(loaded_user_permissions.streams.is_none()); - return; - } - - let streams = user_permissions.streams.as_ref().unwrap(); - let loaded_streams = loaded_user_permissions.streams.as_ref().unwrap(); - - assert_eq!(loaded_streams.len(), streams.len()); - for (stream_id, stream) in streams { - let loaded_stream = loaded_streams.get(stream_id).unwrap(); - assert_eq!(loaded_stream.manage_stream, stream.manage_stream); - assert_eq!(loaded_stream.read_stream, stream.read_stream); - assert_eq!(loaded_stream.manage_topics, stream.manage_topics); - assert_eq!(loaded_stream.read_topics, stream.read_topics); - assert_eq!(loaded_stream.poll_messages, stream.poll_messages); - assert_eq!(loaded_stream.send_messages, stream.send_messages); - - if stream.topics.is_none() { - assert!(loaded_stream.topics.is_none()); - continue; - } - - let topics = stream.topics.as_ref().unwrap(); - let loaded_topics = loaded_stream.topics.as_ref().unwrap(); - assert_eq!(loaded_topics.len(), topics.len()); - for (topic_id, topic) in topics { - let loaded_topic = loaded_topics.get(topic_id).unwrap(); - assert_eq!(loaded_topic.manage_topic, topic.manage_topic); - 
assert_eq!(loaded_topic.read_topic, topic.read_topic); - assert_eq!(loaded_topic.poll_messages, topic.poll_messages); - assert_eq!(loaded_topic.send_messages, topic.send_messages); - } - } -} - -fn create_user(id: u32) -> User { - User { - id, - username: format!("user{}", id), - password: "secret".to_string(), - created_at: IggyTimestamp::now().to_micros(), - status: UserStatus::Active, - permissions: Some(Permissions { - global: GlobalPermissions { - manage_servers: false, - read_servers: false, - manage_users: false, - read_users: false, - manage_streams: false, - manage_topics: false, - read_streams: true, - poll_messages: false, - send_messages: false, - read_topics: true, - }, - streams: Some({ - let mut map = HashMap::new(); - map.insert( - 1, - StreamPermissions { - manage_stream: false, - read_stream: false, - manage_topics: false, - read_topics: true, - poll_messages: true, - send_messages: true, - topics: Some({ - let mut map = HashMap::new(); - map.insert( - 1, - TopicPermissions { - manage_topic: false, - read_topic: true, - poll_messages: true, - send_messages: true, - }, - ); - map - }), - }, - ); - map - }), - }), - } -} diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index a7c87dbc1..f7e6a256e 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "iggy" -version = "0.4.3" +version = "0.5.0" description = "Iggy is the persistent message streaming platform written in Rust, supporting QUIC, TCP and HTTP transport protocols, capable of processing millions of messages per second." 
edition = "2021" license = "MIT" @@ -54,7 +54,6 @@ uuid = { version = "1.8.0", features = ["v4", "fast-rng"] } [build-dependencies] convert_case = "0.6.0" -rmp-serde = "1.3.0" serde = { version = "1.0.203", features = ["derive", "rc"] } serde_derive = "1.0.194" diff --git a/sdk/src/binary/mapper.rs b/sdk/src/binary/mapper.rs index fa53cec30..5c390ab49 100644 --- a/sdk/src/binary/mapper.rs +++ b/sdk/src/binary/mapper.rs @@ -15,6 +15,8 @@ use crate::models::topic::{Topic, TopicDetails}; use crate::models::user_info::{UserInfo, UserInfoDetails}; use crate::models::user_status::UserStatus; use crate::utils::byte_size::IggyByteSize; +use crate::utils::expiry::IggyExpiry; +use crate::utils::topic_size::MaxTopicSize; use bytes::Bytes; use std::collections::HashMap; use std::str::from_utf8; @@ -169,7 +171,7 @@ pub fn map_identity_info(payload: Bytes) -> Result { let user_id = u32::from_le_bytes(payload[..4].try_into()?); Ok(IdentityInfo { user_id, - tokens: None, + access_token: None, }) } @@ -247,6 +249,7 @@ pub fn map_polled_messages(payload: Bytes) -> Result let offset = u64::from_le_bytes(payload[position..position + 8].try_into()?); let state = MessageState::from_code(payload[position + 8])?; let timestamp = u64::from_le_bytes(payload[position + 9..position + 17].try_into()?); + let timestamp = timestamp.into(); let id = u128::from_le_bytes(payload[position + 17..position + 33].try_into()?); let checksum = u32::from_le_bytes(payload[position + 33..position + 37].try_into()?); let headers_length = u32::from_le_bytes(payload[position + 37..position + 41].try_into()?); @@ -402,27 +405,25 @@ pub fn map_topic(payload: Bytes) -> Result { fn map_to_topic(payload: Bytes, position: usize) -> Result<(Topic, usize), IggyError> { let id = u32::from_le_bytes(payload[position..position + 4].try_into()?); let created_at = u64::from_le_bytes(payload[position + 4..position + 12].try_into()?); + let created_at = created_at.into(); let partitions_count = 
u32::from_le_bytes(payload[position + 12..position + 16].try_into()?); - let message_expiry = match u32::from_le_bytes(payload[position + 16..position + 20].try_into()?) + let message_expiry = match u64::from_le_bytes(payload[position + 16..position + 24].try_into()?) { - 0 => None, - message_expiry => Some(message_expiry), + 0 => IggyExpiry::NeverExpire, + message_expiry => message_expiry.into(), }; - let compression_algorithm = CompressionAlgorithm::from_code(payload[position + 20])?; - let max_topic_size = match u64::from_le_bytes(payload[position + 21..position + 29].try_into()?) - { - 0 => None, - max_topic_size => Some(IggyByteSize::from(max_topic_size)), - }; - let replication_factor = payload[position + 29]; + let compression_algorithm = CompressionAlgorithm::from_code(payload[position + 24])?; + let max_topic_size = u64::from_le_bytes(payload[position + 25..position + 33].try_into()?); + let max_topic_size: MaxTopicSize = max_topic_size.into(); + let replication_factor = payload[position + 33]; let size_bytes = IggyByteSize::from(u64::from_le_bytes( - payload[position + 30..position + 38].try_into()?, + payload[position + 34..position + 42].try_into()?, )); - let messages_count = u64::from_le_bytes(payload[position + 38..position + 46].try_into()?); - let name_length = payload[position + 46]; + let messages_count = u64::from_le_bytes(payload[position + 42..position + 50].try_into()?); + let name_length = payload[position + 50]; let name = - from_utf8(&payload[position + 47..position + 47 + name_length as usize])?.to_string(); - let read_bytes = 4 + 8 + 4 + 4 + 8 + 8 + 8 + 1 + 1 + 1 + name_length as usize; + from_utf8(&payload[position + 51..position + 51 + name_length as usize])?.to_string(); + let read_bytes = 4 + 8 + 4 + 8 + 8 + 8 + 8 + 1 + 1 + 1 + name_length as usize; Ok(( Topic { id, @@ -443,6 +444,7 @@ fn map_to_topic(payload: Bytes, position: usize) -> Result<(Topic, usize), IggyE fn map_to_partition(payload: Bytes, position: usize) -> 
Result<(Partition, usize), IggyError> { let id = u32::from_le_bytes(payload[position..position + 4].try_into()?); let created_at = u64::from_le_bytes(payload[position + 4..position + 12].try_into()?); + let created_at = created_at.into(); let segments_count = u32::from_le_bytes(payload[position + 12..position + 16].try_into()?); let current_offset = u64::from_le_bytes(payload[position + 16..position + 24].try_into()?); let size_bytes = u64::from_le_bytes(payload[position + 24..position + 32].try_into()?).into(); @@ -588,6 +590,7 @@ fn map_to_client_info( fn map_to_user_info(payload: Bytes, position: usize) -> Result<(UserInfo, usize), IggyError> { let id = u32::from_le_bytes(payload[position..position + 4].try_into()?); let created_at = u64::from_le_bytes(payload[position + 4..position + 12].try_into()?); + let created_at = created_at.into(); let status = payload[position + 12]; let status = UserStatus::from_code(status)?; let username_length = payload[position + 13]; @@ -613,12 +616,11 @@ fn map_to_pat_info( let name_length = payload[position]; let name = from_utf8(&payload[position + 1..position + 1 + name_length as usize])?.to_string(); let position = position + 1 + name_length as usize; - let expiry = u64::from_le_bytes(payload[position..position + 8].try_into()?); - let expiry = match expiry { + let expiry_at = u64::from_le_bytes(payload[position..position + 8].try_into()?); + let expiry_at = match expiry_at { 0 => None, - _ => Some(expiry), + value => Some(value.into()), }; let read_bytes = 1 + name_length as usize + 8; - - Ok((PersonalAccessTokenInfo { name, expiry }, read_bytes)) + Ok((PersonalAccessTokenInfo { name, expiry_at }, read_bytes)) } diff --git a/sdk/src/binary/personal_access_tokens.rs b/sdk/src/binary/personal_access_tokens.rs index 6fa71bfd5..38a6b0fd1 100644 --- a/sdk/src/binary/personal_access_tokens.rs +++ b/sdk/src/binary/personal_access_tokens.rs @@ -36,7 +36,7 @@ impl PersonalAccessTokenClient for B { CREATE_PERSONAL_ACCESS_TOKEN_CODE, 
CreatePersonalAccessToken { name: name.to_string(), - expiry: expiry.into(), + expiry, } .as_bytes(), ) diff --git a/sdk/src/binary/topics.rs b/sdk/src/binary/topics.rs index d0c61c38f..6c0366f77 100644 --- a/sdk/src/binary/topics.rs +++ b/sdk/src/binary/topics.rs @@ -16,8 +16,8 @@ use crate::topics::get_topic::GetTopic; use crate::topics::get_topics::GetTopics; use crate::topics::purge_topic::PurgeTopic; use crate::topics::update_topic::UpdateTopic; -use crate::utils::byte_size::IggyByteSize; use crate::utils::expiry::IggyExpiry; +use crate::utils::topic_size::MaxTopicSize; #[async_trait::async_trait] impl TopicClient for B { @@ -63,7 +63,7 @@ impl TopicClient for B { replication_factor: Option, topic_id: Option, message_expiry: IggyExpiry, - max_topic_size: Option, + max_topic_size: MaxTopicSize, ) -> Result<(), IggyError> { fail_if_not_authenticated(self).await?; self.send_with_response( @@ -75,7 +75,7 @@ impl TopicClient for B { compression_algorithm, replication_factor, topic_id, - message_expiry: message_expiry.into(), + message_expiry, max_topic_size, } .as_bytes(), @@ -92,7 +92,7 @@ impl TopicClient for B { compression_algorithm: CompressionAlgorithm, replication_factor: Option, message_expiry: IggyExpiry, - max_topic_size: Option, + max_topic_size: MaxTopicSize, ) -> Result<(), IggyError> { fail_if_not_authenticated(self).await?; self.send_with_response( @@ -103,7 +103,7 @@ impl TopicClient for B { name: name.to_string(), compression_algorithm, replication_factor, - message_expiry: message_expiry.into(), + message_expiry, max_topic_size, } .as_bytes(), diff --git a/sdk/src/binary/users.rs b/sdk/src/binary/users.rs index ba4a790b0..e241041bc 100644 --- a/sdk/src/binary/users.rs +++ b/sdk/src/binary/users.rs @@ -143,6 +143,8 @@ impl UserClient for B { LoginUser { username: username.to_string(), password: password.to_string(), + version: Some("0.5.0".to_string()), + context: Some("".to_string()), } .as_bytes(), ) diff --git 
a/sdk/src/cli/message/poll_messages.rs b/sdk/src/cli/message/poll_messages.rs index cc8bee87f..ae305d68f 100644 --- a/sdk/src/cli/message/poll_messages.rs +++ b/sdk/src/cli/message/poll_messages.rs @@ -5,7 +5,7 @@ use crate::identifier::Identifier; use crate::messages::poll_messages::{PollMessages, PollingStrategy}; use crate::models::header::{HeaderKey, HeaderKind}; use crate::models::messages::PolledMessages; -use crate::utils::{byte_size::IggyByteSize, duration::IggyDuration, timestamp::IggyTimestamp}; +use crate::utils::{byte_size::IggyByteSize, duration::IggyDuration}; use anyhow::Context; use async_trait::async_trait; use comfy_table::{Cell, CellAlignment, Row, Table}; @@ -103,7 +103,7 @@ impl PollMessagesCmd { .map(|message| { let mut row = vec![ format!("{}", message.offset), - IggyTimestamp::from(message.timestamp).to_local_string("%Y-%m-%d %H:%M:%S%.6f"), + message.timestamp.to_local_string("%Y-%m-%d %H:%M:%S%.6f"), format!("{}", message.id), format!("{}", message.payload.len()), String::from_utf8_lossy(&message.payload).to_string(), diff --git a/sdk/src/cli/personal_access_tokens/create_personal_access_token.rs b/sdk/src/cli/personal_access_tokens/create_personal_access_token.rs index 8a56a6deb..8418724d2 100644 --- a/sdk/src/cli/personal_access_tokens/create_personal_access_token.rs +++ b/sdk/src/cli/personal_access_tokens/create_personal_access_token.rs @@ -27,8 +27,8 @@ impl CreatePersonalAccessTokenCmd { create_token: CreatePersonalAccessToken { name, expiry: match &pat_expiry { - None => None, - Some(value) => value.into(), + None => PersonalAccessTokenExpiry::NeverExpire, + Some(value) => *value, }, }, token_expiry: pat_expiry, @@ -54,7 +54,7 @@ impl CliCommand for CreatePersonalAccessTokenCmd { async fn execute_cmd(&mut self, client: &dyn Client) -> anyhow::Result<(), anyhow::Error> { let token = client - .create_personal_access_token(&self.create_token.name, self.create_token.expiry.into()) + .create_personal_access_token(&self.create_token.name, 
self.create_token.expiry) .await .with_context(|| { format!( diff --git a/sdk/src/cli/personal_access_tokens/get_personal_access_tokens.rs b/sdk/src/cli/personal_access_tokens/get_personal_access_tokens.rs index 9a6438af6..33e9a0d8f 100644 --- a/sdk/src/cli/personal_access_tokens/get_personal_access_tokens.rs +++ b/sdk/src/cli/personal_access_tokens/get_personal_access_tokens.rs @@ -1,7 +1,6 @@ use crate::cli_command::{CliCommand, PRINT_TARGET}; use crate::client::Client; use crate::personal_access_tokens::get_personal_access_tokens::GetPersonalAccessTokens; -use crate::utils::timestamp::IggyTimestamp; use anyhow::Context; use async_trait::async_trait; use comfy_table::Table; @@ -51,11 +50,9 @@ impl CliCommand for GetPersonalAccessTokensCmd { tokens.iter().for_each(|token| { table.add_row(vec![ format!("{}", token.name.clone()), - match token.expiry { - Some(value) => { - IggyTimestamp::from(value).to_local_string("%Y-%m-%d %H:%M:%S") - } + match token.expiry_at { None => String::from("unlimited"), + Some(value) => value.to_local_string("%Y-%m-%d %H:%M:%S"), }, ]); }); @@ -67,9 +64,9 @@ impl CliCommand for GetPersonalAccessTokensCmd { event!(target: PRINT_TARGET, Level::INFO, "{}|{}", token.name, - match token.expiry { - Some(value) => IggyTimestamp::from(value).to_local_string("%Y-%m-%d %H:%M:%S"), + match token.expiry_at { None => String::from("unlimited"), + Some(value) => value.to_local_string("%Y-%m-%d %H:%M:%S"), }, ); }); diff --git a/sdk/src/cli/system/login.rs b/sdk/src/cli/system/login.rs index 6872dbb8f..7b7ebb93b 100644 --- a/sdk/src/cli/system/login.rs +++ b/sdk/src/cli/system/login.rs @@ -6,7 +6,7 @@ use anyhow::Context; use async_trait::async_trait; use tracing::{event, Level}; -const DEFAULT_LOGIN_SESSION_TIMEOUT: u32 = 15 * 60; +const DEFAULT_LOGIN_SESSION_TIMEOUT: u64 = 1000 * 15 * 60; pub struct LoginCmd { server_session: ServerSession, @@ -64,7 +64,7 @@ impl CliCommand for LoginCmd { &self.server_session.get_token_name(), match 
&self.login_session_expiry { None => Some(DEFAULT_LOGIN_SESSION_TIMEOUT).into(), - Some(value) => value.clone(), + Some(value) => *value, }, ) .await diff --git a/sdk/src/cli/topics/create_topic.rs b/sdk/src/cli/topics/create_topic.rs index bd039257f..c78600268 100644 --- a/sdk/src/cli/topics/create_topic.rs +++ b/sdk/src/cli/topics/create_topic.rs @@ -3,8 +3,8 @@ use crate::client::Client; use crate::compression::compression_algorithm::CompressionAlgorithm; use crate::identifier::Identifier; use crate::topics::create_topic::CreateTopic; -use crate::utils::byte_size::IggyByteSize; use crate::utils::expiry::IggyExpiry; +use crate::utils::topic_size::MaxTopicSize; use anyhow::Context; use async_trait::async_trait; use core::fmt; @@ -13,7 +13,7 @@ use tracing::{event, Level}; pub struct CreateTopicCmd { create_topic: CreateTopic, message_expiry: IggyExpiry, - max_topic_size: IggyByteSize, + max_topic_size: MaxTopicSize, replication_factor: u8, } @@ -26,7 +26,7 @@ impl CreateTopicCmd { compression_algorithm: CompressionAlgorithm, name: String, message_expiry: IggyExpiry, - max_topic_size: IggyByteSize, + max_topic_size: MaxTopicSize, replication_factor: u8, ) -> Self { Self { @@ -36,8 +36,8 @@ impl CreateTopicCmd { partitions_count, compression_algorithm, name, - message_expiry: message_expiry.clone().into(), - max_topic_size: Some(max_topic_size), + message_expiry, + max_topic_size, replication_factor: Some(replication_factor), }, message_expiry, @@ -62,7 +62,7 @@ impl CliCommand for CreateTopicCmd { async fn execute_cmd(&mut self, client: &dyn Client) -> anyhow::Result<(), anyhow::Error> { client - .create_topic(&self.create_topic.stream_id, &self.create_topic.name, self.create_topic.partitions_count, self.create_topic.compression_algorithm, self.create_topic.replication_factor, self.create_topic.topic_id, self.create_topic.message_expiry.into(), self.create_topic.max_topic_size) + .create_topic(&self.create_topic.stream_id, &self.create_topic.name, 
self.create_topic.partitions_count, self.create_topic.compression_algorithm, self.create_topic.replication_factor, self.create_topic.topic_id, self.create_topic.message_expiry, self.create_topic.max_topic_size) .await .with_context(|| { format!( @@ -78,7 +78,7 @@ impl CliCommand for CreateTopicCmd { self.create_topic.partitions_count, self.create_topic.compression_algorithm, self.message_expiry, - self.max_topic_size.as_human_string_with_zero_as_unlimited(), + self.max_topic_size, self.replication_factor, self.create_topic.stream_id, ); @@ -93,7 +93,7 @@ impl fmt::Display for CreateTopicCmd { let topic_name = &self.create_topic.name; let compression_algorithm = &self.create_topic.compression_algorithm; let message_expiry = &self.message_expiry; - let max_topic_size = &self.max_topic_size.as_human_string_with_zero_as_unlimited(); + let max_topic_size = &self.max_topic_size; let replication_factor = self.replication_factor; let stream_id = &self.create_topic.stream_id; diff --git a/sdk/src/cli/topics/get_topic.rs b/sdk/src/cli/topics/get_topic.rs index 4eb4050eb..88af80038 100644 --- a/sdk/src/cli/topics/get_topic.rs +++ b/sdk/src/cli/topics/get_topic.rs @@ -2,7 +2,7 @@ use crate::cli_command::{CliCommand, PRINT_TARGET}; use crate::client::Client; use crate::identifier::Identifier; use crate::topics::get_topic::GetTopic; -use crate::utils::timestamp::IggyTimestamp; +use crate::utils::expiry::IggyExpiry; use anyhow::Context; use async_trait::async_trait; use comfy_table::Table; @@ -49,9 +49,7 @@ impl CliCommand for GetTopicCmd { table.add_row(vec!["Topic id", format!("{}", topic.id).as_str()]); table.add_row(vec![ "Created", - IggyTimestamp::from(topic.created_at) - .to_utc_string("%Y-%m-%d %H:%M:%S") - .as_str(), + topic.created_at.to_utc_string("%Y-%m-%d %H:%M:%S").as_str(), ]); table.add_row(vec!["Topic name", topic.name.as_str()]); table.add_row(vec!["Topic size", format!("{}", topic.size).as_str()]); @@ -62,18 +60,14 @@ impl CliCommand for GetTopicCmd { 
table.add_row(vec![ "Message expiry", match topic.message_expiry { - Some(value) => format!("{}", value), - None => String::from("unlimited"), + IggyExpiry::NeverExpire => String::from("unlimited"), + IggyExpiry::ExpireDuration(value) => format!("{}", value), } .as_str(), ]); table.add_row(vec![ "Max topic size", - match topic.max_topic_size { - Some(value) => format!("{}", value), - None => String::from("unlimited"), - } - .as_str(), + format!("{}", topic.max_topic_size).as_str(), ]); table.add_row(vec![ "Topic message count", diff --git a/sdk/src/cli/topics/get_topics.rs b/sdk/src/cli/topics/get_topics.rs index b1dfc5732..a88d45d03 100644 --- a/sdk/src/cli/topics/get_topics.rs +++ b/sdk/src/cli/topics/get_topics.rs @@ -2,7 +2,7 @@ use crate::cli_command::{CliCommand, PRINT_TARGET}; use crate::client::Client; use crate::identifier::Identifier; use crate::topics::get_topics::GetTopics; -use crate::utils::timestamp::IggyTimestamp; +use crate::utils::expiry::IggyExpiry; use anyhow::Context; use async_trait::async_trait; use comfy_table::Table; @@ -78,17 +78,14 @@ impl CliCommand for GetTopicsCmd { topics.iter().for_each(|topic| { table.add_row(vec![ format!("{}", topic.id), - IggyTimestamp::from(topic.created_at).to_utc_string("%Y-%m-%d %H:%M:%S"), + topic.created_at.to_utc_string("%Y-%m-%d %H:%M:%S"), topic.name.clone(), format!("{}", topic.size), - match topic.max_topic_size { - Some(value) => format!("{}", value), - None => String::from("unlimited"), - }, + format!("{}", topic.max_topic_size), topic.compression_algorithm.to_string(), match topic.message_expiry { - Some(value) => format!("{}", value), - None => String::from("unlimited"), + IggyExpiry::NeverExpire => String::from("unlimited"), + IggyExpiry::ExpireDuration(value) => format!("{}", value), }, format!("{}", topic.messages_count), format!("{}", topic.partitions_count), @@ -100,23 +97,20 @@ impl CliCommand for GetTopicsCmd { GetTopicsOutput::List => { topics.iter().for_each(|topic| { event!(target: 
PRINT_TARGET, Level::INFO, - "{}|{}|{}|{}|{}|{}|{}|{}|{}", - topic.id, - IggyTimestamp::from(topic.created_at).to_utc_string("%Y-%m-%d %H:%M:%S"), - topic.name, - topic.size, - match topic.max_topic_size { - Some(value) => format!("{}", value), - None => String::from("unlimited"), - }, - topic.compression_algorithm.to_string(), - match topic.message_expiry { - Some(value) => format!("{}", value), - None => String::from("unlimited"), - }, - topic.messages_count, - topic.partitions_count - ); + "{}|{}|{}|{}|{}|{}|{}|{}|{}", + topic.id, + topic.created_at.to_utc_string("%Y-%m-%d %H:%M:%S"), + topic.name, + topic.size, + topic.max_topic_size, + topic.compression_algorithm.to_string(), + match topic.message_expiry { + IggyExpiry::NeverExpire => String::from("unlimited"), + IggyExpiry::ExpireDuration(value) => format!("{}", value), + }, + topic.messages_count, + topic.partitions_count + ); }); } } diff --git a/sdk/src/cli/topics/update_topic.rs b/sdk/src/cli/topics/update_topic.rs index c510877f7..9a42ca5e1 100644 --- a/sdk/src/cli/topics/update_topic.rs +++ b/sdk/src/cli/topics/update_topic.rs @@ -3,8 +3,8 @@ use crate::client::Client; use crate::compression::compression_algorithm::CompressionAlgorithm; use crate::identifier::Identifier; use crate::topics::update_topic::UpdateTopic; -use crate::utils::byte_size::IggyByteSize; use crate::utils::expiry::IggyExpiry; +use crate::utils::topic_size::MaxTopicSize; use anyhow::Context; use async_trait::async_trait; use core::fmt; @@ -13,7 +13,7 @@ use tracing::{event, Level}; pub struct UpdateTopicCmd { update_topic: UpdateTopic, message_expiry: IggyExpiry, - max_topic_size: IggyByteSize, + max_topic_size: MaxTopicSize, replication_factor: u8, } @@ -24,7 +24,7 @@ impl UpdateTopicCmd { compression_algorithm: CompressionAlgorithm, name: String, message_expiry: IggyExpiry, - max_topic_size: IggyByteSize, + max_topic_size: MaxTopicSize, replication_factor: u8, ) -> Self { Self { @@ -33,8 +33,8 @@ impl UpdateTopicCmd { topic_id, 
name, compression_algorithm, - message_expiry: message_expiry.clone().into(), - max_topic_size: Some(max_topic_size), + message_expiry, + max_topic_size, replication_factor: Some(replication_factor), }, message_expiry, @@ -52,7 +52,7 @@ impl CliCommand for UpdateTopicCmd { async fn execute_cmd(&mut self, client: &dyn Client) -> anyhow::Result<(), anyhow::Error> { client - .update_topic(&self.update_topic.stream_id, &self.update_topic.topic_id, &self.update_topic.name, self.update_topic.compression_algorithm, self.replication_factor.into(), self.message_expiry.clone(), Some(self.max_topic_size)) + .update_topic(&self.update_topic.stream_id, &self.update_topic.topic_id, &self.update_topic.name, self.update_topic.compression_algorithm, self.replication_factor.into(), self.message_expiry, self.max_topic_size) .await .with_context(|| { format!( @@ -83,7 +83,7 @@ impl fmt::Display for UpdateTopicCmd { let topic_name = &self.update_topic.name; let compression_algorithm = &self.update_topic.compression_algorithm; let message_expiry = &self.message_expiry; - let max_topic_size = &self.max_topic_size.as_human_string_with_zero_as_unlimited(); + let max_topic_size = &self.max_topic_size; let replication_factor = self.replication_factor; let stream_id = &self.update_topic.stream_id; diff --git a/sdk/src/cli/users/get_user.rs b/sdk/src/cli/users/get_user.rs index 690891125..89c779f81 100644 --- a/sdk/src/cli/users/get_user.rs +++ b/sdk/src/cli/users/get_user.rs @@ -3,7 +3,6 @@ use crate::client::Client; use crate::identifier::Identifier; use crate::models::permissions::{GlobalPermissions, StreamPermissions, TopicPermissions}; use crate::users::get_user::GetUser; -use crate::utils::timestamp::IggyTimestamp; use anyhow::Context; use async_trait::async_trait; use comfy_table::presets::ASCII_NO_BORDERS; @@ -148,7 +147,7 @@ impl CliCommand for GetUserCmd { table.add_row(vec!["User ID", format!("{}", user.id).as_str()]); table.add_row(vec![ "Created", - 
IggyTimestamp::from(user.created_at) + user.created_at .to_local_string("%Y-%m-%d %H:%M:%S") .as_str(), ]); diff --git a/sdk/src/cli/users/get_users.rs b/sdk/src/cli/users/get_users.rs index 75bad5070..727f0b846 100644 --- a/sdk/src/cli/users/get_users.rs +++ b/sdk/src/cli/users/get_users.rs @@ -1,7 +1,6 @@ use crate::cli_command::{CliCommand, PRINT_TARGET}; use crate::client::Client; use crate::users::get_users::GetUsers; -use crate::utils::timestamp::IggyTimestamp; use anyhow::Context; use async_trait::async_trait; use comfy_table::Table; @@ -65,7 +64,7 @@ impl CliCommand for GetUsersCmd { users.iter().for_each(|user| { table.add_row(vec![ format!("{}", user.id), - IggyTimestamp::from(user.created_at).to_local_string("%Y-%m-%d %H:%M:%S"), + user.created_at.to_local_string("%Y-%m-%d %H:%M:%S"), user.status.clone().to_string(), user.username.clone(), ]); @@ -78,7 +77,7 @@ impl CliCommand for GetUsersCmd { event!(target: PRINT_TARGET, Level::INFO, "{}|{}|{}|{}", user.id, - IggyTimestamp::from(user.created_at).to_local_string("%Y-%m-%d %H:%M:%S"), + user.created_at.to_local_string("%Y-%m-%d %H:%M:%S"), user.status.clone().to_string(), user.username.clone(), ); diff --git a/sdk/src/client.rs b/sdk/src/client.rs index fc612e13a..ed1f03966 100644 --- a/sdk/src/client.rs +++ b/sdk/src/client.rs @@ -16,9 +16,9 @@ use crate::models::stream::{Stream, StreamDetails}; use crate::models::topic::{Topic, TopicDetails}; use crate::models::user_info::{UserInfo, UserInfoDetails}; use crate::models::user_status::UserStatus; -use crate::utils::byte_size::IggyByteSize; use crate::utils::expiry::IggyExpiry; use crate::utils::personal_access_token_expiry::PersonalAccessTokenExpiry; +use crate::utils::topic_size::MaxTopicSize; use async_trait::async_trait; use std::fmt::Debug; @@ -206,7 +206,7 @@ pub trait TopicClient { replication_factor: Option, topic_id: Option, message_expiry: IggyExpiry, - max_topic_size: Option, + max_topic_size: MaxTopicSize, ) -> Result<(), IggyError>; /// Update 
a topic by unique ID or name. /// @@ -219,7 +219,7 @@ pub trait TopicClient { compression_algorithm: CompressionAlgorithm, replication_factor: Option, message_expiry: IggyExpiry, - max_topic_size: Option, + max_topic_size: MaxTopicSize, ) -> Result<(), IggyError>; /// Delete a topic by unique ID or name. /// diff --git a/sdk/src/clients/client.rs b/sdk/src/clients/client.rs index fde4f48a4..ebe8eb156 100644 --- a/sdk/src/clients/client.rs +++ b/sdk/src/clients/client.rs @@ -39,9 +39,9 @@ use crate::compression::compression_algorithm::CompressionAlgorithm; use crate::messages::poll_messages::{PollingKind, PollingStrategy}; use crate::models::permissions::Permissions; use crate::models::user_status::UserStatus; -use crate::utils::byte_size::IggyByteSize; use crate::utils::expiry::IggyExpiry; use crate::utils::personal_access_token_expiry::PersonalAccessTokenExpiry; +use crate::utils::topic_size::MaxTopicSize; // The default interval between sending the messages as batches in the background. 
pub const DEFAULT_SEND_MESSAGES_INTERVAL_MS: u64 = 100; @@ -684,7 +684,7 @@ impl TopicClient for IggyClient { replication_factor: Option, topic_id: Option, message_expiry: IggyExpiry, - max_topic_size: Option, + max_topic_size: MaxTopicSize, ) -> Result<(), IggyError> { self.client .read() @@ -710,7 +710,7 @@ impl TopicClient for IggyClient { compression_algorithm: CompressionAlgorithm, replication_factor: Option, message_expiry: IggyExpiry, - max_topic_size: Option, + max_topic_size: MaxTopicSize, ) -> Result<(), IggyError> { self.client .read() diff --git a/sdk/src/command.rs b/sdk/src/command.rs index 96a621f00..6a4c1f512 100644 --- a/sdk/src/command.rs +++ b/sdk/src/command.rs @@ -429,6 +429,54 @@ impl Display for Command { } } +pub fn get_name_from_code(code: u32) -> Result<&'static str, IggyError> { + match code { + PING_CODE => Ok(PING), + GET_STATS_CODE => Ok(GET_STATS), + GET_ME_CODE => Ok(GET_ME), + GET_CLIENT_CODE => Ok(GET_CLIENT), + GET_CLIENTS_CODE => Ok(GET_CLIENTS), + GET_USER_CODE => Ok(GET_USER), + GET_USERS_CODE => Ok(GET_USERS), + CREATE_USER_CODE => Ok(CREATE_USER), + DELETE_USER_CODE => Ok(DELETE_USER), + UPDATE_USER_CODE => Ok(UPDATE_USER), + UPDATE_PERMISSIONS_CODE => Ok(UPDATE_PERMISSIONS), + CHANGE_PASSWORD_CODE => Ok(CHANGE_PASSWORD), + LOGIN_USER_CODE => Ok(LOGIN_USER), + LOGOUT_USER_CODE => Ok(LOGOUT_USER), + GET_PERSONAL_ACCESS_TOKENS_CODE => Ok(GET_PERSONAL_ACCESS_TOKENS), + CREATE_PERSONAL_ACCESS_TOKEN_CODE => Ok(CREATE_PERSONAL_ACCESS_TOKEN), + DELETE_PERSONAL_ACCESS_TOKEN_CODE => Ok(DELETE_PERSONAL_ACCESS_TOKEN), + LOGIN_WITH_PERSONAL_ACCESS_TOKEN_CODE => Ok(LOGIN_WITH_PERSONAL_ACCESS_TOKEN), + SEND_MESSAGES_CODE => Ok(SEND_MESSAGES), + POLL_MESSAGES_CODE => Ok(POLL_MESSAGES), + STORE_CONSUMER_OFFSET_CODE => Ok(STORE_CONSUMER_OFFSET), + GET_CONSUMER_OFFSET_CODE => Ok(GET_CONSUMER_OFFSET), + GET_STREAM_CODE => Ok(GET_STREAM), + GET_STREAMS_CODE => Ok(GET_STREAMS), + CREATE_STREAM_CODE => Ok(CREATE_STREAM), + DELETE_STREAM_CODE => 
Ok(DELETE_STREAM), + UPDATE_STREAM_CODE => Ok(UPDATE_STREAM), + PURGE_STREAM_CODE => Ok(PURGE_STREAM), + GET_TOPIC_CODE => Ok(GET_TOPIC), + GET_TOPICS_CODE => Ok(GET_TOPICS), + CREATE_TOPIC_CODE => Ok(CREATE_TOPIC), + DELETE_TOPIC_CODE => Ok(DELETE_TOPIC), + UPDATE_TOPIC_CODE => Ok(UPDATE_TOPIC), + PURGE_TOPIC_CODE => Ok(PURGE_TOPIC), + CREATE_PARTITIONS_CODE => Ok(CREATE_PARTITIONS), + DELETE_PARTITIONS_CODE => Ok(DELETE_PARTITIONS), + GET_CONSUMER_GROUP_CODE => Ok(GET_CONSUMER_GROUP), + GET_CONSUMER_GROUPS_CODE => Ok(GET_CONSUMER_GROUPS), + CREATE_CONSUMER_GROUP_CODE => Ok(CREATE_CONSUMER_GROUP), + DELETE_CONSUMER_GROUP_CODE => Ok(DELETE_CONSUMER_GROUP), + JOIN_CONSUMER_GROUP_CODE => Ok(JOIN_CONSUMER_GROUP), + LEAVE_CONSUMER_GROUP_CODE => Ok(LEAVE_CONSUMER_GROUP), + _ => Err(IggyError::InvalidCommand), + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/sdk/src/error.rs b/sdk/src/error.rs index fffd7b2db..d53d0e6db 100644 --- a/sdk/src/error.rs +++ b/sdk/src/error.rs @@ -22,12 +22,20 @@ pub enum IggyError { FeatureUnavailable = 5, #[error("Invalid identifier")] InvalidIdentifier = 6, + #[error("Invalid version: {0}")] + InvalidVersion(String) = 7, #[error("Cannot create base directory, Path: {0}")] CannotCreateBaseDirectory(String) = 10, #[error("Cannot create runtime directory, Path: {0}")] CannotCreateRuntimeDirectory(String) = 11, #[error("Cannot remove runtime directory, Path: {0}")] CannotRemoveRuntimeDirectory(String) = 12, + #[error("Cannot create state directory, Path: {0}")] + CannotCreateStateDirectory(String) = 13, + #[error("State file not found")] + StateFileNotFound = 14, + #[error("Cannot open database, Path: {0}")] + CannotOpenDatabase(String) = 19, #[error("Resource with key: {0} was not found.")] ResourceNotFound(String) = 20, #[error("Cannot load resource. 
Reason: {0:#}")] @@ -90,12 +98,10 @@ pub enum IggyError { JwtMissing = 75, #[error("Cannot generate JWT")] CannotGenerateJwt = 76, - #[error("Refresh token is missing")] - RefreshTokenMissing = 77, - #[error("Invalid refresh token")] - InvalidRefreshToken = 78, - #[error("Refresh token expired")] - RefreshTokenExpired = 79, + #[error("Access token is missing")] + AccessTokenMissing = 77, + #[error("Invalid access token")] + InvalidAccessToken = 78, #[error("Client with ID: {0} was not found.")] ClientNotFound(u32) = 100, #[error("Invalid client ID")] @@ -174,6 +180,12 @@ pub enum IggyError { InvalidStreamId = 1014, #[error("Cannot read streams")] CannotReadStreams = 1015, + #[error("Missing streams")] + MissingStreams = 1016, + #[error("Missing topics for stream with ID: {0}")] + MissingTopics(u32) = 1017, + #[error("Missing partitions for topic with ID: {0} for stream with ID: {1}.")] + MissingPartitions(u32, u32) = 1018, #[error("Cannot create topics directory for stream with ID: {0}, Path: {1}")] CannotCreateTopicsDirectory(u32, String) = 2000, #[error( @@ -238,6 +250,12 @@ pub enum IggyError { PartitionNotFound(u32, u32, u32) = 3007, #[error("Topic with ID: {0} for stream with ID: {1} has no partitions.")] NoPartitions(u32, u32) = 3008, + #[error("Failed to delete consumer offsets directory for path: {0}")] + CannotDeleteConsumerOffsetsDirectory(String) = 3010, + #[error("Failed to delete consumer offset file for path: {0}")] + CannotDeleteConsumerOffsetFile(String) = 3011, + #[error("Failed to read consumers offsets from path: {0}")] + CannotReadConsumerOffsets(String) = 3020, #[error("Segment not found")] SegmentNotFound = 4000, #[error("Segment with start offset: {0} and partition with ID: {1} is closed")] @@ -300,8 +318,6 @@ pub enum IggyError { CommandLengthError(String) = 4029, #[error("Invalid offset: {0}")] InvalidOffset(u64) = 4100, - #[error("Failed to read consumers offsets for partition with ID: {0}")] - CannotReadConsumerOffsets(u32) = 4101, 
#[error("Consumer group with ID: {0} for topic with ID: {1} was not found.")] ConsumerGroupIdNotFound(u32, u32) = 5000, #[error("Consumer group with ID: {0} for topic with ID: {1} already exists.")] diff --git a/sdk/src/http/client.rs b/sdk/src/http/client.rs index 19d00cc63..3b6347d39 100644 --- a/sdk/src/http/client.rs +++ b/sdk/src/http/client.rs @@ -29,7 +29,6 @@ pub struct HttpClient { pub api_url: Url, client: ClientWithMiddleware, access_token: IggySharedMut, - refresh_token: IggySharedMut, } #[async_trait] @@ -171,35 +170,26 @@ impl HttpTransport for HttpClient { !token.is_empty() } - /// Refresh the access token using the provided refresh token. - async fn refresh_access_token(&self, refresh_token: &str) -> Result<(), IggyError> { - if refresh_token.is_empty() { - return Err(IggyError::RefreshTokenMissing); + /// Refresh the access token using the current access token. + async fn refresh_access_token(&self) -> Result<(), IggyError> { + let token = self.access_token.read().await; + if token.is_empty() { + return Err(IggyError::AccessTokenMissing); } let command = RefreshToken { - refresh_token: refresh_token.to_string(), + token: token.to_owned(), }; let response = self.post("/users/refresh-token", &command).await?; let identity_info: IdentityInfo = response.json().await?; - if identity_info.tokens.is_none() { + if identity_info.access_token.is_none() { return Err(IggyError::JwtMissing); } - self.set_tokens_from_identity(&identity_info).await?; + self.set_token_from_identity(&identity_info).await?; Ok(()) } - /// Set the refresh token. - async fn set_refresh_token(&self, token: Option) { - let mut current_token = self.refresh_token.write().await; - if let Some(token) = token { - *current_token = token; - } else { - *current_token = "".to_string(); - } - } - /// Set the access token. 
async fn set_access_token(&self, token: Option) { let mut current_token = self.access_token.write().await; @@ -210,29 +200,17 @@ impl HttpTransport for HttpClient { } } - /// Set the access token and refresh token from the provided identity. - async fn set_tokens_from_identity(&self, identity: &IdentityInfo) -> Result<(), IggyError> { - if identity.tokens.is_none() { - return Err(IggyError::JwtMissing); - } - - let tokens = identity.tokens.as_ref().unwrap(); - if tokens.access_token.token.is_empty() { + /// Set the access token from the provided identity. + async fn set_token_from_identity(&self, identity: &IdentityInfo) -> Result<(), IggyError> { + if identity.access_token.is_none() { return Err(IggyError::JwtMissing); } - self.set_access_token(Some(tokens.access_token.token.clone())) - .await; - self.set_refresh_token(Some(tokens.refresh_token.token.clone())) + let access_token = identity.access_token.as_ref().unwrap(); + self.set_access_token(Some(access_token.token.clone())) .await; Ok(()) } - - /// Refresh the access token using the provided refresh token. - async fn refresh_access_token_using_current_refresh_token(&self) -> Result<(), IggyError> { - let refresh_token = self.refresh_token.read().await; - self.refresh_access_token(&refresh_token).await - } } impl HttpClient { @@ -260,7 +238,6 @@ impl HttpClient { api_url, client, access_token: IggySharedMut::new("".to_string()), - refresh_token: IggySharedMut::new("".to_string()), }) } @@ -295,5 +272,5 @@ impl HttpClient { #[derive(Debug, Serialize)] struct RefreshToken { - refresh_token: String, + token: String, } diff --git a/sdk/src/http/mod.rs b/sdk/src/http/mod.rs index 91dd9919f..ac0fdbd08 100644 --- a/sdk/src/http/mod.rs +++ b/sdk/src/http/mod.rs @@ -60,17 +60,11 @@ pub trait HttpTransport { async fn is_authenticated(&self) -> bool; /// Refresh the access token using the provided refresh token. 
- async fn refresh_access_token(&self, refresh_token: &str) -> Result<(), IggyError>; - - /// Set the refresh token. - async fn set_refresh_token(&self, token: Option); + async fn refresh_access_token(&self) -> Result<(), IggyError>; /// Set the access token. async fn set_access_token(&self, token: Option); /// Set the access token and refresh token from the provided identity. - async fn set_tokens_from_identity(&self, identity: &IdentityInfo) -> Result<(), IggyError>; - - /// Refresh the access token using the provided refresh token. - async fn refresh_access_token_using_current_refresh_token(&self) -> Result<(), IggyError>; + async fn set_token_from_identity(&self, identity: &IdentityInfo) -> Result<(), IggyError>; } diff --git a/sdk/src/http/personal_access_tokens.rs b/sdk/src/http/personal_access_tokens.rs index 7bbf02865..787c1ba08 100644 --- a/sdk/src/http/personal_access_tokens.rs +++ b/sdk/src/http/personal_access_tokens.rs @@ -29,7 +29,7 @@ impl PersonalAccessTokenClient for HttpClient { PATH, &CreatePersonalAccessToken { name: name.to_string(), - expiry: expiry.into(), + expiry, }, ) .await?; @@ -55,7 +55,7 @@ impl PersonalAccessTokenClient for HttpClient { ) .await?; let identity_info: IdentityInfo = response.json().await?; - self.set_tokens_from_identity(&identity_info).await?; + self.set_token_from_identity(&identity_info).await?; Ok(identity_info) } } diff --git a/sdk/src/http/topics.rs b/sdk/src/http/topics.rs index 20cf4008b..1de12f517 100644 --- a/sdk/src/http/topics.rs +++ b/sdk/src/http/topics.rs @@ -7,8 +7,8 @@ use crate::identifier::Identifier; use crate::models::topic::{Topic, TopicDetails}; use crate::topics::create_topic::CreateTopic; use crate::topics::update_topic::UpdateTopic; -use crate::utils::byte_size::IggyByteSize; use crate::utils::expiry::IggyExpiry; +use crate::utils::topic_size::MaxTopicSize; use async_trait::async_trait; #[async_trait] @@ -43,7 +43,7 @@ impl TopicClient for HttpClient { replication_factor: Option, topic_id: 
Option, message_expiry: IggyExpiry, - max_topic_size: Option, + max_topic_size: MaxTopicSize, ) -> Result<(), IggyError> { self.post( &get_path(&stream_id.as_cow_str()), @@ -54,7 +54,7 @@ impl TopicClient for HttpClient { compression_algorithm, replication_factor, topic_id, - message_expiry: message_expiry.into(), + message_expiry, max_topic_size, }, ) @@ -70,7 +70,7 @@ impl TopicClient for HttpClient { compression_algorithm: CompressionAlgorithm, replication_factor: Option, message_expiry: IggyExpiry, - max_topic_size: Option, + max_topic_size: MaxTopicSize, ) -> Result<(), IggyError> { self.put( &get_details_path(&stream_id.as_cow_str(), &topic_id.as_cow_str()), @@ -80,7 +80,7 @@ impl TopicClient for HttpClient { name: name.to_string(), compression_algorithm, replication_factor, - message_expiry: message_expiry.into(), + message_expiry, max_topic_size, }, ) diff --git a/sdk/src/http/users.rs b/sdk/src/http/users.rs index 033b208ae..2bec95525 100644 --- a/sdk/src/http/users.rs +++ b/sdk/src/http/users.rs @@ -115,18 +115,19 @@ impl UserClient for HttpClient { &LoginUser { username: username.to_string(), password: password.to_string(), + version: Some("0.5.0".to_string()), + context: Some("".to_string()), }, ) .await?; let identity_info = response.json().await?; - self.set_tokens_from_identity(&identity_info).await?; + self.set_token_from_identity(&identity_info).await?; Ok(identity_info) } async fn logout_user(&self) -> Result<(), IggyError> { self.delete(&format!("{PATH}/logout")).await?; self.set_access_token(None).await; - self.set_refresh_token(None).await; Ok(()) } } diff --git a/sdk/src/identifier.rs b/sdk/src/identifier.rs index ec4c00650..61343e9aa 100644 --- a/sdk/src/identifier.rs +++ b/sdk/src/identifier.rs @@ -7,6 +7,7 @@ use serde_with::base64::Base64; use serde_with::serde_as; use std::borrow::Cow; use std::fmt::Display; +use std::hash::{Hash, Hasher}; use std::str::FromStr; /// `Identifier` represents the unique identifier of the resources such as 
stream, topic, partition, user etc. @@ -15,7 +16,7 @@ use std::str::FromStr; /// - `length`: the length of the identifier payload. /// - `value`: the binary value of the identifier payload. #[serde_as] -#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, Eq)] pub struct Identifier { /// The kind of the identifier. pub kind: IdKind, @@ -28,7 +29,7 @@ pub struct Identifier { } /// `IdKind` represents the kind of the identifier. -#[derive(Debug, Serialize, Deserialize, PartialEq, Default, Copy, Clone)] +#[derive(Debug, Serialize, Deserialize, PartialEq, Default, Copy, Clone, Eq)] #[serde(rename_all = "snake_case")] pub enum IdKind { /// The identifier is numeric. @@ -288,6 +289,19 @@ impl Display for IdKind { } } +impl Hash for Identifier { + fn hash(&self, state: &mut H) { + match self.kind { + IdKind::Numeric => { + self.get_u32_value().unwrap().hash(state); + } + IdKind::String => { + self.get_cow_str_value().unwrap().hash(state); + } + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/sdk/src/messages/poll_messages.rs b/sdk/src/messages/poll_messages.rs index f030e2c5f..1c2246289 100644 --- a/sdk/src/messages/poll_messages.rs +++ b/sdk/src/messages/poll_messages.rs @@ -3,6 +3,7 @@ use crate::command::CommandPayload; use crate::consumer::{Consumer, ConsumerKind}; use crate::error::IggyError; use crate::identifier::Identifier; +use crate::utils::timestamp::IggyTimestamp; use crate::validatable::Validatable; use bytes::{BufMut, Bytes, BytesMut}; use serde::{Deserialize, Serialize}; @@ -142,10 +143,10 @@ impl PollingStrategy { } /// Poll messages from the specified timestamp. 
- pub fn timestamp(value: u64) -> Self { + pub fn timestamp(value: IggyTimestamp) -> Self { Self { kind: PollingKind::Timestamp, - value, + value: value.into(), } } diff --git a/sdk/src/models/identity_info.rs b/sdk/src/models/identity_info.rs index 7d0d46955..5efd5b542 100644 --- a/sdk/src/models/identity_info.rs +++ b/sdk/src/models/identity_info.rs @@ -4,28 +4,16 @@ use serde::{Deserialize, Serialize}; /// `IdentityInfo` represents the information about an identity. /// It consists of the following fields: /// - `user_id`: the unique identifier (numeric) of the user. -/// - `tokens`: the optional tokens, used only by HTTP transport. +/// - `access_token`: the optional access token, used only by HTTP transport. #[derive(Debug, Serialize, Deserialize)] pub struct IdentityInfo { /// The unique identifier (numeric) of the user. pub user_id: UserId, /// The optional tokens, used only by HTTP transport. - pub tokens: Option, + pub access_token: Option, } -/// `IdentityTokens` represents the information about the tokens, currently used only by HTTP transport. -/// It consists of the following fields: -/// - `access_token`: the access token used for the authentication. -/// - `refresh_token`: the refresh token used to refresh the access token. -#[derive(Debug, Serialize, Deserialize)] -pub struct IdentityTokens { - /// The access token used for the authentication. - pub access_token: TokenInfo, - /// The refresh token used to refresh the access token. - pub refresh_token: TokenInfo, -} - -/// `TokenInfo` represents the details of the particular token. +/// `TokenInfo` represents the details of the access token. /// It consists of the following fields: /// - `token`: the value of token. /// - `expiry`: the expiry of token. 
diff --git a/sdk/src/models/messages.rs b/sdk/src/models/messages.rs index abd1ce082..2f361562f 100644 --- a/sdk/src/models/messages.rs +++ b/sdk/src/models/messages.rs @@ -2,6 +2,7 @@ use crate::bytes_serializable::BytesSerializable; use crate::error::IggyError; use crate::models::header; use crate::models::header::{HeaderKey, HeaderValue}; +use crate::utils::timestamp::IggyTimestamp; use bytes::{BufMut, Bytes, BytesMut}; use serde::{Deserialize, Serialize}; use serde_with::base64::Base64; @@ -45,7 +46,7 @@ pub struct PolledMessage { /// The state of the message. pub state: MessageState, /// The timestamp of the message. - pub timestamp: u64, + pub timestamp: IggyTimestamp, /// The identifier of the message. pub id: u128, /// The checksum of the message, can be used to verify the integrity of the message. @@ -125,7 +126,7 @@ impl PolledMessage { /// Creates a new message from the `Message` struct being part of `SendMessages` command. /// Creates a new message without a specified offset. pub fn empty( - timestamp: u64, + timestamp: IggyTimestamp, state: MessageState, id: u128, payload: Bytes, @@ -139,7 +140,7 @@ impl PolledMessage { pub fn create( offset: u64, state: MessageState, - timestamp: u64, + timestamp: IggyTimestamp, id: u128, payload: Bytes, checksum: u32, @@ -168,7 +169,7 @@ impl PolledMessage { pub fn extend(&self, bytes: &mut BytesMut) { bytes.put_u64_le(self.offset); bytes.put_u8(self.state.as_code()); - bytes.put_u64_le(self.timestamp); + bytes.put_u64_le(self.timestamp.into()); bytes.put_u128_le(self.id); bytes.put_u32_le(self.checksum); if let Some(headers) = &self.headers { diff --git a/sdk/src/models/partition.rs b/sdk/src/models/partition.rs index a593820c6..61580018e 100644 --- a/sdk/src/models/partition.rs +++ b/sdk/src/models/partition.rs @@ -1,4 +1,5 @@ use crate::utils::byte_size::IggyByteSize; +use crate::utils::timestamp::IggyTimestamp; use serde::{Deserialize, Serialize}; /// `Partition` represents the information about a partition. 
@@ -14,7 +15,7 @@ pub struct Partition { /// Unique identifier of the partition. pub id: u32, /// The timestamp of the partition creation. - pub created_at: u64, + pub created_at: IggyTimestamp, /// The number of segments in the partition. pub segments_count: u32, /// The current offset of the partition. diff --git a/sdk/src/models/personal_access_token.rs b/sdk/src/models/personal_access_token.rs index 2ef1cc2ec..93aa157e7 100644 --- a/sdk/src/models/personal_access_token.rs +++ b/sdk/src/models/personal_access_token.rs @@ -1,3 +1,4 @@ +use crate::utils::timestamp::IggyTimestamp; use serde::{Deserialize, Serialize}; /// `RawPersonalAccessToken` represents the raw personal access token - the secured token which is returned only once during the creation. @@ -18,5 +19,5 @@ pub struct PersonalAccessTokenInfo { /// The unique name of the token. pub name: String, /// The optional expiry of the token. - pub expiry: Option, + pub expiry_at: Option, } diff --git a/sdk/src/models/topic.rs b/sdk/src/models/topic.rs index 763716ab7..8aea63453 100644 --- a/sdk/src/models/topic.rs +++ b/sdk/src/models/topic.rs @@ -1,5 +1,9 @@ use crate::compression::compression_algorithm::CompressionAlgorithm; -use crate::{models::partition::Partition, utils::byte_size::IggyByteSize}; +use crate::models::partition::Partition; +use crate::utils::byte_size::IggyByteSize; +use crate::utils::expiry::IggyExpiry; +use crate::utils::timestamp::IggyTimestamp; +use crate::utils::topic_size::MaxTopicSize; use serde::{Deserialize, Serialize}; /// `Topic` represents the medium level of logical separation of data as it's a part of the stream. @@ -18,18 +22,18 @@ pub struct Topic { /// The unique identifier (numeric) of the topic. pub id: u32, /// The timestamp when the topic was created. - pub created_at: u64, + pub created_at: IggyTimestamp, /// The unique name of the topic. pub name: String, /// The total size of the topic in bytes. 
pub size: IggyByteSize, - /// The optional expiry of the messages in the topic in seconds. - pub message_expiry: Option, + /// The expiry of the messages in the topic. + pub message_expiry: IggyExpiry, /// Compression algorithm for the topic. pub compression_algorithm: CompressionAlgorithm, /// The optional maximum size of the topic. /// Can't be lower than segment size in the config. - pub max_topic_size: Option, + pub max_topic_size: MaxTopicSize, /// Replication factor for the topic. pub replication_factor: u8, /// The total number of messages in the topic. @@ -55,18 +59,18 @@ pub struct TopicDetails { /// The unique identifier (numeric) of the topic. pub id: u32, /// The timestamp when the topic was created. - pub created_at: u64, + pub created_at: IggyTimestamp, /// The unique name of the topic. pub name: String, /// The total size of the topic. pub size: IggyByteSize, - /// The optional expiry of the messages in the topic. - pub message_expiry: Option, + /// The expiry of the messages in the topic. + pub message_expiry: IggyExpiry, /// Compression algorithm for the topic. pub compression_algorithm: CompressionAlgorithm, /// The optional maximum size of the topic. /// Can't be lower than segment size in the config. - pub max_topic_size: Option, + pub max_topic_size: MaxTopicSize, /// Replication factor for the topic. pub replication_factor: u8, /// The total number of messages in the topic. diff --git a/sdk/src/models/user_info.rs b/sdk/src/models/user_info.rs index c71c95565..be555e6f8 100644 --- a/sdk/src/models/user_info.rs +++ b/sdk/src/models/user_info.rs @@ -1,5 +1,6 @@ use crate::models::permissions::Permissions; use crate::models::user_status::UserStatus; +use crate::utils::timestamp::IggyTimestamp; use serde::{Deserialize, Serialize}; use std::sync::atomic::AtomicU32; @@ -20,7 +21,7 @@ pub struct UserInfo { /// The unique identifier (numeric) of the user. pub id: UserId, /// The timestamp when the user was created. 
- pub created_at: u64, + pub created_at: IggyTimestamp, /// The status of the user. pub status: UserStatus, /// The username of the user. @@ -39,7 +40,7 @@ pub struct UserInfoDetails { /// The unique identifier (numeric) of the user. pub id: UserId, /// The timestamp when the user was created. - pub created_at: u64, + pub created_at: IggyTimestamp, /// The status of the user. pub status: UserStatus, /// The username of the user. diff --git a/sdk/src/personal_access_tokens/create_personal_access_token.rs b/sdk/src/personal_access_tokens/create_personal_access_token.rs index 88eb6de65..7755f8106 100644 --- a/sdk/src/personal_access_tokens/create_personal_access_token.rs +++ b/sdk/src/personal_access_tokens/create_personal_access_token.rs @@ -2,6 +2,7 @@ use crate::bytes_serializable::BytesSerializable; use crate::command::CommandPayload; use crate::error::IggyError; use crate::users::defaults::*; +use crate::utils::expiry::IggyExpiry; use crate::utils::text; use crate::validatable::Validatable; use bytes::{BufMut, Bytes, BytesMut}; @@ -18,7 +19,7 @@ pub struct CreatePersonalAccessToken { /// Unique name of the token, must be between 3 and 30 characters long. pub name: String, /// Expiry in seconds (optional), if provided, must be between 1 and 4294967295. Otherwise, the token will never expire. 
- pub expiry: Option, + pub expiry: IggyExpiry, } impl CommandPayload for CreatePersonalAccessToken {} @@ -27,7 +28,7 @@ impl Default for CreatePersonalAccessToken { fn default() -> Self { CreatePersonalAccessToken { name: "token".to_string(), - expiry: None, + expiry: IggyExpiry::NeverExpire, } } } @@ -55,12 +56,12 @@ impl BytesSerializable for CreatePersonalAccessToken { #[allow(clippy::cast_possible_truncation)] bytes.put_u8(self.name.len() as u8); bytes.put_slice(self.name.as_bytes()); - bytes.put_u32_le(self.expiry.unwrap_or(0)); + bytes.put_u64_le(self.expiry.into()); bytes.freeze() } fn from_bytes(bytes: Bytes) -> Result { - if bytes.len() < 8 { + if bytes.len() < 12 { return Err(IggyError::InvalidCommand); } @@ -71,11 +72,8 @@ impl BytesSerializable for CreatePersonalAccessToken { } let position = 1 + name_length as usize; - let expiry = u32::from_le_bytes(bytes[position..position + 4].try_into()?); - let expiry = match expiry { - 0 => None, - _ => Some(expiry), - }; + let expiry = u64::from_le_bytes(bytes[position..position + 8].try_into()?); + let expiry: IggyExpiry = expiry.into(); let command = CreatePersonalAccessToken { name, expiry }; command.validate()?; @@ -85,7 +83,7 @@ impl BytesSerializable for CreatePersonalAccessToken { impl Display for CreatePersonalAccessToken { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "{}|{}", self.name, self.expiry.unwrap_or(0)) + write!(f, "{}|{}", self.name, self.expiry) } } @@ -97,22 +95,18 @@ mod tests { fn should_be_serialized_as_bytes() { let command = CreatePersonalAccessToken { name: "test".to_string(), - expiry: Some(100), + expiry: IggyExpiry::NeverExpire, }; let bytes = command.as_bytes(); let name_length = bytes[0]; let name = from_utf8(&bytes[1..1 + name_length as usize]).unwrap(); - let expiry = u32::from_le_bytes( - bytes[1 + name_length as usize..5 + name_length as usize] + let expiry = u64::from_le_bytes( + bytes[1 + name_length as usize..9 + name_length as usize] .try_into() 
.unwrap(), ); - let expiry = match expiry { - 0 => None, - _ => Some(expiry), - }; - + let expiry: IggyExpiry = expiry.into(); assert!(!bytes.is_empty()); assert_eq!(name, command.name); assert_eq!(expiry, command.expiry); @@ -121,18 +115,18 @@ mod tests { #[test] fn should_be_deserialized_from_bytes() { let name = "test"; - let expiry = 100; + let expiry = IggyExpiry::NeverExpire; let mut bytes = BytesMut::new(); #[allow(clippy::cast_possible_truncation)] bytes.put_u8(name.len() as u8); bytes.put_slice(name.as_bytes()); - bytes.put_u32_le(expiry); + bytes.put_u64_le(expiry.into()); let command = CreatePersonalAccessToken::from_bytes(bytes.freeze()); assert!(command.is_ok()); let command = command.unwrap(); assert_eq!(command.name, name); - assert_eq!(command.expiry, Some(expiry)); + assert_eq!(command.expiry, expiry); } } diff --git a/sdk/src/topics/create_topic.rs b/sdk/src/topics/create_topic.rs index 7ebc6ae6b..1777b388e 100644 --- a/sdk/src/topics/create_topic.rs +++ b/sdk/src/topics/create_topic.rs @@ -4,8 +4,9 @@ use crate::compression::compression_algorithm::CompressionAlgorithm; use crate::error::IggyError; use crate::identifier::Identifier; use crate::topics::{MAX_NAME_LENGTH, MAX_PARTITIONS_COUNT}; -use crate::utils::byte_size::IggyByteSize; +use crate::utils::expiry::IggyExpiry; use crate::utils::text; +use crate::utils::topic_size::MaxTopicSize; use crate::validatable::Validatable; use bytes::{BufMut, Bytes, BytesMut}; use serde::{Deserialize, Serialize}; @@ -33,10 +34,10 @@ pub struct CreateTopic { pub partitions_count: u32, /// Compression algorithm for the topic. pub compression_algorithm: CompressionAlgorithm, - /// Optional message expiry in seconds, if `None` then messages will never expire. - pub message_expiry: Option, - /// The optional maximum size of the topic. - pub max_topic_size: Option, + /// Optional message expiry. + pub message_expiry: IggyExpiry, + /// The maximum size of the topic. 
+ pub max_topic_size: MaxTopicSize, /// Replication factor for the topic. pub replication_factor: Option, /// Unique topic name, max length is 255 characters. @@ -52,8 +53,8 @@ impl Default for CreateTopic { topic_id: Some(1), partitions_count: 1, compression_algorithm: CompressionAlgorithm::None, - message_expiry: None, - max_topic_size: None, + message_expiry: IggyExpiry::NeverExpire, + max_topic_size: MaxTopicSize::ServerDefault, replication_factor: None, name: "topic".to_string(), } @@ -98,14 +99,8 @@ impl BytesSerializable for CreateTopic { bytes.put_u32_le(self.topic_id.unwrap_or(0)); bytes.put_u32_le(self.partitions_count); bytes.put_u8(self.compression_algorithm.as_code()); - match self.message_expiry { - Some(message_expiry) => bytes.put_u32_le(message_expiry), - None => bytes.put_u32_le(0), - } - match self.max_topic_size { - Some(max_topic_size) => bytes.put_u64_le(max_topic_size.as_bytes_u64()), - None => bytes.put_u64_le(0), - } + bytes.put_u64_le(self.message_expiry.into()); + bytes.put_u64_le(self.max_topic_size.into()); match self.replication_factor { Some(replication_factor) => bytes.put_u8(replication_factor), None => bytes.put_u8(0), @@ -127,23 +122,17 @@ impl BytesSerializable for CreateTopic { let topic_id = if topic_id == 0 { None } else { Some(topic_id) }; let partitions_count = u32::from_le_bytes(bytes[position + 4..position + 8].try_into()?); let compression_algorithm = CompressionAlgorithm::from_code(bytes[position + 8])?; - let message_expiry = - match u32::from_le_bytes(bytes[position + 9..position + 13].try_into()?) { - 0 => None, - size => Some(size), - }; - let max_topic_size = - match u64::from_le_bytes(bytes[position + 13..position + 21].try_into()?) 
{ - 0 => None, - size => Some(IggyByteSize::from(size)), - }; - let replication_factor = match bytes[position + 21] { + let message_expiry = u64::from_le_bytes(bytes[position + 9..position + 17].try_into()?); + let message_expiry: IggyExpiry = message_expiry.into(); + let max_topic_size = u64::from_le_bytes(bytes[position + 17..position + 25].try_into()?); + let max_topic_size: MaxTopicSize = max_topic_size.into(); + let replication_factor = match bytes[position + 25] { 0 => None, factor => Some(factor), }; - let name_length = bytes[position + 22]; + let name_length = bytes[position + 26]; let name = - from_utf8(&bytes[position + 23..(position + 23 + name_length as usize)])?.to_string(); + from_utf8(&bytes[position + 27..(position + 27 + name_length as usize)])?.to_string(); if name.len() != name_length as usize { return Err(IggyError::InvalidCommand); } @@ -164,18 +153,14 @@ impl BytesSerializable for CreateTopic { impl Display for CreateTopic { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let max_topic_size = match self.max_topic_size { - Some(max_topic_size) => max_topic_size.to_string(), - None => "unlimited".to_string(), - }; write!( f, "{}|{}|{}|{}|{}|{}|{}", self.stream_id, self.topic_id.unwrap_or(0), self.partitions_count, - self.message_expiry.unwrap_or(0), - max_topic_size, + self.message_expiry, + self.max_topic_size, self.replication_factor.unwrap_or(0), self.name ) @@ -193,9 +178,9 @@ mod tests { stream_id: Identifier::numeric(1).unwrap(), topic_id: Some(2), partitions_count: 3, - message_expiry: Some(10), + message_expiry: IggyExpiry::NeverExpire, compression_algorithm: CompressionAlgorithm::None, - max_topic_size: Some(IggyByteSize::from(100)), + max_topic_size: MaxTopicSize::ServerDefault, replication_factor: Some(1), name: "test".to_string(), }; @@ -208,18 +193,14 @@ mod tests { u32::from_le_bytes(bytes[position + 4..position + 8].try_into().unwrap()); let compression_algorithm = 
CompressionAlgorithm::from_code(bytes[position + 8]).unwrap(); let message_expiry = - match u32::from_le_bytes(bytes[position + 9..position + 13].try_into().unwrap()) { - 0 => None, - secs => Some(secs), - }; + u64::from_le_bytes(bytes[position + 9..position + 17].try_into().unwrap()); + let message_expiry: IggyExpiry = message_expiry.into(); let max_topic_size = - match u64::from_le_bytes(bytes[position + 13..position + 21].try_into().unwrap()) { - 0 => None, - size => Some(IggyByteSize::from(size)), - }; - let replication_factor = bytes[position + 21]; - let name_length = bytes[position + 22]; - let name = from_utf8(&bytes[position + 23..(position + 23 + name_length as usize)]) + u64::from_le_bytes(bytes[position + 17..position + 25].try_into().unwrap()); + let max_topic_size: MaxTopicSize = max_topic_size.into(); + let replication_factor = bytes[position + 25]; + let name_length = bytes[position + 26]; + let name = from_utf8(&bytes[position + 27..(position + 27 + name_length as usize)]) .unwrap() .to_string(); @@ -242,8 +223,8 @@ mod tests { let partitions_count = 3u32; let compression_algorithm = CompressionAlgorithm::None; let name = "test".to_string(); - let message_expiry = 10; - let max_topic_size = IggyByteSize::from(100); + let message_expiry = IggyExpiry::NeverExpire; + let max_topic_size = MaxTopicSize::ServerDefault; let replication_factor = 1; let stream_id_bytes = stream_id.as_bytes(); let mut bytes = BytesMut::with_capacity(14 + stream_id_bytes.len() + name.len()); @@ -251,7 +232,7 @@ mod tests { bytes.put_u32_le(topic_id); bytes.put_u32_le(partitions_count); bytes.put_u8(compression_algorithm.as_code()); - bytes.put_u32_le(message_expiry); + bytes.put_u64_le(message_expiry.into()); bytes.put_u64_le(max_topic_size.as_bytes_u64()); bytes.put_u8(replication_factor); #[allow(clippy::cast_possible_truncation)] @@ -267,8 +248,8 @@ mod tests { assert_eq!(command.name, name); assert_eq!(command.partitions_count, partitions_count); 
assert_eq!(command.compression_algorithm, compression_algorithm); - assert_eq!(command.message_expiry, Some(message_expiry)); - assert_eq!(command.max_topic_size, Some(max_topic_size)); + assert_eq!(command.message_expiry, message_expiry); + assert_eq!(command.max_topic_size, max_topic_size); assert_eq!(command.replication_factor.unwrap(), replication_factor); assert_eq!(command.partitions_count, partitions_count); } diff --git a/sdk/src/topics/update_topic.rs b/sdk/src/topics/update_topic.rs index c53420c39..3cdab7419 100644 --- a/sdk/src/topics/update_topic.rs +++ b/sdk/src/topics/update_topic.rs @@ -4,8 +4,9 @@ use crate::compression::compression_algorithm::CompressionAlgorithm; use crate::error::IggyError; use crate::identifier::Identifier; use crate::topics::MAX_NAME_LENGTH; -use crate::utils::byte_size::IggyByteSize; +use crate::utils::expiry::IggyExpiry; use crate::utils::text; +use crate::utils::topic_size::MaxTopicSize; use crate::validatable::Validatable; use bytes::{BufMut, Bytes, BytesMut}; use serde::{Deserialize, Serialize}; @@ -32,10 +33,10 @@ pub struct UpdateTopic { /// Compression algorithm for the topic. pub compression_algorithm: CompressionAlgorithm, /// Optional message expiry in seconds, if `None` then messages will never expire. - pub message_expiry: Option, + pub message_expiry: IggyExpiry, /// Optional max topic size, if `None` then topic size is unlimited. /// Can't be lower than segment size in the config. - pub max_topic_size: Option, + pub max_topic_size: MaxTopicSize, /// Replication factor for the topic. pub replication_factor: Option, /// Unique topic name, max length is 255 characters. 
@@ -50,8 +51,8 @@ impl Default for UpdateTopic { stream_id: Identifier::default(), topic_id: Identifier::default(), compression_algorithm: Default::default(), - message_expiry: None, - max_topic_size: None, + message_expiry: IggyExpiry::NeverExpire, + max_topic_size: MaxTopicSize::ServerDefault, replication_factor: None, name: "topic".to_string(), } @@ -88,14 +89,8 @@ impl BytesSerializable for UpdateTopic { bytes.put_slice(&stream_id_bytes.clone()); bytes.put_slice(&topic_id_bytes.clone()); bytes.put_u8(self.compression_algorithm.as_code()); - match self.message_expiry { - Some(message_expiry) => bytes.put_u32_le(message_expiry), - None => bytes.put_u32_le(0), - } - match self.max_topic_size { - Some(max_topic_size) => bytes.put_u64_le(max_topic_size.as_bytes_u64()), - None => bytes.put_u64_le(0), - } + bytes.put_u64_le(self.message_expiry.into()); + bytes.put_u64_le(self.max_topic_size.into()); match self.replication_factor { Some(replication_factor) => bytes.put_u8(replication_factor), None => bytes.put_u8(0), @@ -107,7 +102,7 @@ impl BytesSerializable for UpdateTopic { } fn from_bytes(bytes: Bytes) -> Result { - if bytes.len() < 12 { + if bytes.len() < 16 { return Err(IggyError::InvalidCommand); } let mut position = 0; @@ -117,23 +112,17 @@ impl BytesSerializable for UpdateTopic { position += topic_id.get_size_bytes() as usize; let compression_algorithm = CompressionAlgorithm::from_code(bytes[position])?; position += 1; - let message_expiry = u32::from_le_bytes(bytes[position..position + 4].try_into()?); - let message_expiry = match message_expiry { - 0 => None, - _ => Some(message_expiry), - }; - let max_topic_size = - match u64::from_le_bytes(bytes[position + 4..position + 12].try_into()?) 
{ - 0 => None, - size => Some(IggyByteSize::from(size)), - }; - let replication_factor = match bytes[position + 12] { + let message_expiry = u64::from_le_bytes(bytes[position..position + 8].try_into()?); + let message_expiry: IggyExpiry = message_expiry.into(); + let max_topic_size = u64::from_le_bytes(bytes[position + 8..position + 16].try_into()?); + let max_topic_size: MaxTopicSize = max_topic_size.into(); + let replication_factor = match bytes[position + 16] { 0 => None, factor => Some(factor), }; - let name_length = bytes[position + 13]; + let name_length = bytes[position + 17]; let name = - from_utf8(&bytes[position + 14..(position + 14 + name_length as usize)])?.to_string(); + from_utf8(&bytes[position + 18..(position + 18 + name_length as usize)])?.to_string(); if name.len() != name_length as usize { return Err(IggyError::InvalidCommand); } @@ -153,17 +142,13 @@ impl BytesSerializable for UpdateTopic { impl Display for UpdateTopic { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let max_topic_size = match self.max_topic_size { - Some(max_topic_size) => max_topic_size.to_string(), - None => String::from("unlimited"), - }; write!( f, "{}|{}|{}|{}|{}|{}", self.stream_id, self.topic_id, - self.message_expiry.unwrap_or(0), - max_topic_size, + self.message_expiry, + self.max_topic_size, self.replication_factor.unwrap_or(0), self.name, ) @@ -173,6 +158,7 @@ impl Display for UpdateTopic { #[cfg(test)] mod tests { use super::*; + use crate::utils::byte_size::IggyByteSize; use bytes::BufMut; #[test] @@ -181,8 +167,8 @@ mod tests { stream_id: Identifier::numeric(1).unwrap(), topic_id: Identifier::numeric(2).unwrap(), compression_algorithm: CompressionAlgorithm::None, - message_expiry: Some(10), - max_topic_size: Some(IggyByteSize::from(100)), + message_expiry: IggyExpiry::NeverExpire, + max_topic_size: MaxTopicSize::ServerDefault, replication_factor: Some(1), name: "test".to_string(), }; @@ -195,19 +181,14 @@ mod tests { position += 
topic_id.get_size_bytes() as usize; let compression_algorithm = CompressionAlgorithm::from_code(bytes[position]).unwrap(); position += 1; - let message_expiry = u32::from_le_bytes(bytes[position..position + 4].try_into().unwrap()); - let message_expiry = match message_expiry { - 0 => None, - _ => Some(message_expiry), - }; + let message_expiry = u64::from_le_bytes(bytes[position..position + 8].try_into().unwrap()); + let message_expiry: IggyExpiry = message_expiry.into(); let max_topic_size = - match u64::from_le_bytes(bytes[position + 4..position + 12].try_into().unwrap()) { - 0 => None, - size => Some(IggyByteSize::from(size)), - }; - let replication_factor = bytes[position + 12]; - let name_length = bytes[position + 13]; - let name = from_utf8(&bytes[position + 14..position + 14 + name_length as usize]) + u64::from_le_bytes(bytes[position + 8..position + 16].try_into().unwrap()); + let max_topic_size: MaxTopicSize = max_topic_size.into(); + let replication_factor = bytes[position + 16]; + let name_length = bytes[position + 17]; + let name = from_utf8(&bytes[position + 18..position + 18 + name_length as usize]) .unwrap() .to_string(); @@ -228,8 +209,8 @@ mod tests { let topic_id = Identifier::numeric(2).unwrap(); let compression_algorithm = CompressionAlgorithm::None; let name = "test".to_string(); - let message_expiry = 10; - let max_topic_size = IggyByteSize::from(100); + let message_expiry = IggyExpiry::NeverExpire; + let max_topic_size = MaxTopicSize::Custom(IggyByteSize::from(100)); let replication_factor = 1; let stream_id_bytes = stream_id.as_bytes(); @@ -239,7 +220,7 @@ mod tests { bytes.put_slice(&stream_id_bytes); bytes.put_slice(&topic_id_bytes); bytes.put_u8(compression_algorithm.as_code()); - bytes.put_u32_le(message_expiry); + bytes.put_u64_le(message_expiry.into()); bytes.put_u64_le(max_topic_size.as_bytes_u64()); bytes.put_u8(replication_factor); @@ -253,7 +234,7 @@ mod tests { let command = command.unwrap(); assert_eq!(command.stream_id, 
stream_id); assert_eq!(command.topic_id, topic_id); - assert_eq!(command.message_expiry, Some(message_expiry)); + assert_eq!(command.message_expiry, message_expiry); assert_eq!(command.stream_id, stream_id); assert_eq!(command.topic_id, topic_id); } diff --git a/sdk/src/users/login_user.rs b/sdk/src/users/login_user.rs index 23e161f01..b87543314 100644 --- a/sdk/src/users/login_user.rs +++ b/sdk/src/users/login_user.rs @@ -19,6 +19,10 @@ pub struct LoginUser { pub username: String, /// Password, must be between 3 and 100 characters long. pub password: String, + // Version metadata added by SDK. + pub version: Option, + // Context metadata added by SDK. + pub context: Option, } impl CommandPayload for LoginUser {} @@ -28,6 +32,8 @@ impl Default for LoginUser { LoginUser { username: "user".to_string(), password: "secret".to_string(), + version: None, + context: None, } } } @@ -65,6 +71,24 @@ impl BytesSerializable for LoginUser { #[allow(clippy::cast_possible_truncation)] bytes.put_u8(self.password.len() as u8); bytes.put_slice(self.password.as_bytes()); + match &self.version { + Some(version) => { + bytes.put_u32_le(version.len() as u32); + bytes.put_slice(version.as_bytes()); + } + None => { + bytes.put_u32_le(0); + } + } + match &self.context { + Some(context) => { + bytes.put_u32_le(context.len() as u32); + bytes.put_slice(context.as_bytes()); + } + None => { + bytes.put_u32_le(0); + } + } bytes.freeze() } @@ -89,7 +113,35 @@ impl BytesSerializable for LoginUser { return Err(IggyError::InvalidCommand); } - let command = LoginUser { username, password }; + let position = 2 + username_length as usize + password_length as usize; + let version_length = u32::from_le_bytes(bytes[position..position + 4].try_into()?); + let version = match version_length { + 0 => None, + _ => { + let version = + from_utf8(&bytes[position + 4..position + 4 + version_length as usize])? 
+ .to_string(); + Some(version) + } + }; + let position = position + 4 + version_length as usize; + let context_length = u32::from_le_bytes(bytes[position..position + 4].try_into()?); + let context = match context_length { + 0 => None, + _ => { + let context = + from_utf8(&bytes[position + 4..position + 4 + context_length as usize])? + .to_string(); + Some(context) + } + }; + + let command = LoginUser { + username, + password, + version, + context, + }; command.validate()?; Ok(command) } @@ -110,6 +162,8 @@ mod tests { let command = LoginUser { username: "user".to_string(), password: "secret".to_string(), + version: Some("1.0.0".to_string()), + context: Some("test".to_string()), }; let bytes = command.as_bytes(); @@ -121,16 +175,34 @@ mod tests { ..2 + username_length as usize + password_length as usize], ) .unwrap(); + let position = 2 + username_length as usize + password_length as usize; + let version_length = u32::from_le_bytes(bytes[position..position + 4].try_into().unwrap()); + let version = Some( + from_utf8(&bytes[position + 4..position + 4 + version_length as usize]) + .unwrap() + .to_string(), + ); + let position = position + 4 + version_length as usize; + let context_length = u32::from_le_bytes(bytes[position..position + 4].try_into().unwrap()); + let context = Some( + from_utf8(&bytes[position + 4..position + 4 + context_length as usize]) + .unwrap() + .to_string(), + ); assert!(!bytes.is_empty()); assert_eq!(username, command.username); assert_eq!(password, command.password); + assert_eq!(version, command.version); + assert_eq!(context, command.context); } #[test] fn should_be_deserialized_from_bytes() { let username = "user"; let password = "secret"; + let version = "1.0.0".to_string(); + let context = "test".to_string(); let mut bytes = BytesMut::new(); #[allow(clippy::cast_possible_truncation)] bytes.put_u8(username.len() as u8); @@ -138,11 +210,17 @@ mod tests { #[allow(clippy::cast_possible_truncation)] bytes.put_u8(password.len() as u8); 
bytes.put_slice(password.as_bytes()); + bytes.put_u32_le(version.len() as u32); + bytes.put_slice(version.as_bytes()); + bytes.put_u32_le(context.len() as u32); + bytes.put_slice(context.as_bytes()); let command = LoginUser::from_bytes(bytes.freeze()); assert!(command.is_ok()); let command = command.unwrap(); assert_eq!(command.username, username); assert_eq!(command.password, password); + assert_eq!(command.version, Some(version)); + assert_eq!(command.context, Some(context)); } } diff --git a/sdk/src/utils/duration.rs b/sdk/src/utils/duration.rs index dea35ebcc..9fd73e6cc 100644 --- a/sdk/src/utils/duration.rs +++ b/sdk/src/utils/duration.rs @@ -73,7 +73,7 @@ impl From> for IggyDuration { fn from(byte_size: Option) -> Self { match byte_size { Some(value) => IggyDuration { - duration: Duration::from_secs(value), + duration: Duration::from_micros(value), }, None => IggyDuration { duration: Duration::new(0, 0), @@ -85,7 +85,7 @@ impl From> for IggyDuration { impl From for IggyDuration { fn from(value: u64) -> Self { IggyDuration { - duration: Duration::from_secs(value), + duration: Duration::from_micros(value), } } } @@ -139,7 +139,7 @@ impl Serialize for IggyDuration { where S: Serializer, { - serializer.serialize_u32(self.as_secs()) + serializer.serialize_u64(self.as_micros()) } } @@ -165,7 +165,7 @@ impl<'de> Visitor<'de> for IggyDurationVisitor { where E: serde::de::Error, { - Ok(IggyDuration::new(Duration::from_secs(value))) + Ok(IggyDuration::new(Duration::from_micros(value))) } } diff --git a/sdk/src/utils/expiry.rs b/sdk/src/utils/expiry.rs index f262d8c4f..b5babd459 100644 --- a/sdk/src/utils/expiry.rs +++ b/sdk/src/utils/expiry.rs @@ -1,6 +1,9 @@ use crate::utils::duration::IggyDuration; use humantime::format_duration; use humantime::Duration as HumanDuration; +use serde::de::Visitor; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use std::fmt; use std::fmt::Display; use std::iter::Sum; use std::ops::Add; @@ -8,11 +11,12 @@ use 
std::str::FromStr; use std::time::Duration; /// Helper enum for various time-based expiry related functionalities -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Copy, Default, Clone, Eq, PartialEq)] pub enum IggyExpiry { /// Set expiry time to given value ExpireDuration(IggyDuration), /// Never expire + #[default] NeverExpire, } @@ -22,10 +26,10 @@ impl IggyExpiry { } } -impl From<&IggyExpiry> for Option { +impl From<&IggyExpiry> for Option { fn from(value: &IggyExpiry) -> Self { match value { - IggyExpiry::ExpireDuration(value) => Some(value.as_secs()), + IggyExpiry::ExpireDuration(value) => Some(value.as_micros()), IggyExpiry::NeverExpire => None, } } @@ -87,15 +91,24 @@ impl FromStr for IggyExpiry { } } -impl From for Option { +impl From for Option { fn from(val: IggyExpiry) -> Self { match val { - IggyExpiry::ExpireDuration(value) => Some(value.as_secs()), + IggyExpiry::ExpireDuration(value) => Some(value.as_micros()), IggyExpiry::NeverExpire => None, } } } +impl From for u64 { + fn from(val: IggyExpiry) -> Self { + match val { + IggyExpiry::ExpireDuration(value) => value.as_micros(), + IggyExpiry::NeverExpire => 0, + } + } +} + impl From> for IggyExpiry { fn from(values: Vec) -> Self { let mut result = IggyExpiry::NeverExpire; @@ -106,15 +119,63 @@ impl From> for IggyExpiry { } } -impl From> for IggyExpiry { - fn from(value: Option) -> Self { +impl From for IggyExpiry { + fn from(value: u64) -> Self { + match value { + 0 => IggyExpiry::NeverExpire, + value => IggyExpiry::ExpireDuration(IggyDuration::from(value)), + } + } +} + +impl From> for IggyExpiry { + fn from(value: Option) -> Self { match value { - Some(value) => IggyExpiry::ExpireDuration(IggyDuration::from(value as u64)), + Some(value) => IggyExpiry::ExpireDuration(IggyDuration::from(value)), None => IggyExpiry::NeverExpire, } } } +impl Serialize for IggyExpiry { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let expiry = match self { + 
IggyExpiry::ExpireDuration(value) => value.as_micros(), + IggyExpiry::NeverExpire => 0, + }; + serializer.serialize_u64(expiry) + } +} + +impl<'de> Deserialize<'de> for IggyExpiry { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_u64(IggyExpiryVisitor) + } +} + +struct IggyExpiryVisitor; + +impl<'de> Visitor<'de> for IggyExpiryVisitor { + type Value = IggyExpiry; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a microsecond expiry as a u64") + } + + fn visit_u64(self, value: u64) -> Result + where + E: de::Error, + { + Ok(IggyExpiry::from(value)) + } +} + #[cfg(test)] mod tests { use super::*; @@ -127,19 +188,25 @@ mod tests { ); assert_eq!( IggyExpiry::from_str("15days").unwrap(), - IggyExpiry::ExpireDuration(IggyDuration::from(60 * 60 * 24 * 15)) + IggyExpiry::ExpireDuration(IggyDuration::from(1000000 * 60 * 60 * 24 * 15)) ); assert_eq!( IggyExpiry::from_str("2min").unwrap(), - IggyExpiry::ExpireDuration(IggyDuration::from(60 * 2)) + IggyExpiry::ExpireDuration(IggyDuration::from(1000000 * 60 * 2)) + ); + assert_eq!( + IggyExpiry::from_str("1ms").unwrap(), + IggyExpiry::ExpireDuration(IggyDuration::from(1000)) ); assert_eq!( IggyExpiry::from_str("1s").unwrap(), - IggyExpiry::ExpireDuration(IggyDuration::from(1)) + IggyExpiry::ExpireDuration(IggyDuration::from(1000000)) ); assert_eq!( IggyExpiry::from_str("15days 2min 2s").unwrap(), - IggyExpiry::ExpireDuration(IggyDuration::from(60 * 60 * 24 * 15 + 60 * 2 + 2)) + IggyExpiry::ExpireDuration(IggyDuration::from( + 1000000 * (60 * 60 * 24 * 15 + 60 * 2 + 2) + )) ); } @@ -198,7 +265,7 @@ mod tests { fn should_check_display_expiry() { assert_eq!(IggyExpiry::NeverExpire.to_string(), "none"); assert_eq!( - IggyExpiry::ExpireDuration(IggyDuration::from(333333)).to_string(), + IggyExpiry::ExpireDuration(IggyDuration::from(333333000000)).to_string(), "3days 20h 35m 33s" ); } @@ -206,7 +273,7 @@ mod tests { #[test] fn 
should_calculate_none_from_never_expiry() { let expiry = IggyExpiry::NeverExpire; - let result: Option = From::from(&expiry); + let result: Option = From::from(&expiry); assert_eq!(result, None); } @@ -214,8 +281,8 @@ mod tests { fn should_calculate_some_seconds_from_message_expire() { let duration = IggyDuration::new(Duration::new(42, 0)); let expiry = IggyExpiry::ExpireDuration(duration); - let result: Option = From::from(&expiry); - assert_eq!(result, Some(42)); + let result: Option = From::from(&expiry); + assert_eq!(result, Some(42000000)); } #[test] diff --git a/sdk/src/utils/mod.rs b/sdk/src/utils/mod.rs index c2ec18e66..74ea2ff3e 100644 --- a/sdk/src/utils/mod.rs +++ b/sdk/src/utils/mod.rs @@ -6,3 +6,4 @@ pub mod expiry; pub mod personal_access_token_expiry; pub mod text; pub mod timestamp; +pub mod topic_size; diff --git a/sdk/src/utils/timestamp.rs b/sdk/src/utils/timestamp.rs index cc92272ac..df3043473 100644 --- a/sdk/src/utils/timestamp.rs +++ b/sdk/src/utils/timestamp.rs @@ -22,7 +22,7 @@ use std::{ /// assert_eq!(timestamp.to_utc_string("%Y-%m-%d %H:%M:%S"), "2023-09-17 16:34:06"); /// assert_eq!(timestamp.to_micros(), 1694968446131680); /// ``` -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct IggyTimestamp(SystemTime); pub const UTC_TIME_FORMAT: &str = "%Y-%m-%d %H:%M:%S"; @@ -32,6 +32,10 @@ impl IggyTimestamp { IggyTimestamp::default() } + pub fn zero() -> Self { + IggyTimestamp(UNIX_EPOCH) + } + pub fn to_secs(&self) -> u64 { self.0.duration_since(UNIX_EPOCH).unwrap().as_secs() } diff --git a/sdk/src/utils/topic_size.rs b/sdk/src/utils/topic_size.rs new file mode 100644 index 000000000..04ee7209d --- /dev/null +++ b/sdk/src/utils/topic_size.rs @@ -0,0 +1,158 @@ +use core::fmt; +use serde::{ + de::{self, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, +}; +use std::str::FromStr; + +use super::byte_size::IggyByteSize; + +const DEFAULT_SIZE_BYTES: u64 = 10 * 1000 * 1000 * 1000; + +#[derive(Debug, 
Default, Clone, Copy, PartialEq)] +pub enum MaxTopicSize { + #[default] + ServerDefault, + Custom(IggyByteSize), +} + +impl MaxTopicSize { + pub fn new(value: Option) -> Self { + match value { + Some(value) => match value.as_bytes_u64() { + 0 => MaxTopicSize::ServerDefault, + _ => MaxTopicSize::Custom(value), + }, + None => MaxTopicSize::ServerDefault, + } + } + + pub fn as_bytes_u64(&self) -> u64 { + match self { + MaxTopicSize::ServerDefault => 0, + MaxTopicSize::Custom(iggy_byte_size) => iggy_byte_size.as_bytes_u64(), + } + } + + pub fn get_server_default() -> Self { + MaxTopicSize::Custom(IggyByteSize::from(DEFAULT_SIZE_BYTES)) + } +} + +impl From for MaxTopicSize { + fn from(value: IggyByteSize) -> Self { + match value.as_bytes_u64() { + 0 => MaxTopicSize::ServerDefault, + _ => MaxTopicSize::Custom(value), + } + } +} + +impl From for MaxTopicSize { + fn from(value: u64) -> Self { + match value { + 0 => MaxTopicSize::ServerDefault, + _ => MaxTopicSize::Custom(IggyByteSize::from(value)), + } + } +} + +impl From for u64 { + fn from(value: MaxTopicSize) -> u64 { + match value { + MaxTopicSize::ServerDefault => 0, + MaxTopicSize::Custom(iggy_byte_size) => iggy_byte_size.as_bytes_u64(), + } + } +} + +impl From> for MaxTopicSize { + fn from(value: Option) -> Self { + match value { + Some(value) => match value.as_bytes_u64() { + 0 => MaxTopicSize::ServerDefault, + _ => MaxTopicSize::Custom(value), + }, + None => MaxTopicSize::ServerDefault, + } + } +} + +impl FromStr for MaxTopicSize { + type Err = String; + + fn from_str(s: &str) -> Result { + let result = match s { + "0" | "server_default" | "none" | "None" => MaxTopicSize::ServerDefault, + value => { + let size = value.parse::().map_err(|e| format!("{e}"))?; + match size.as_bytes_u64() { + 0 => MaxTopicSize::ServerDefault, + _ => MaxTopicSize::Custom(size), + } + } + }; + + Ok(result) + } +} + +impl Serialize for MaxTopicSize { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let value 
= match *self { + MaxTopicSize::ServerDefault => 0, + MaxTopicSize::Custom(ref iggy_byte_size) => iggy_byte_size.as_bytes_u64(), + }; + serializer.serialize_u64(value) + } +} + +struct MaxTopicSizeVisitor; + +impl<'de> Visitor<'de> for MaxTopicSizeVisitor { + type Value = MaxTopicSize; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a u64 bytes representing a MaxTopicSize") + } + + fn visit_u64(self, value: u64) -> Result + where + E: de::Error, + { + let result = match value { + 0 => MaxTopicSize::ServerDefault, + _ => MaxTopicSize::Custom(IggyByteSize::from(value)), + }; + Ok(result) + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + MaxTopicSize::from_str(value) + .map_err(|e| de::Error::custom(format!("Invalid MaxTopicSize: {}", e))) + } +} + +impl<'de> Deserialize<'de> for MaxTopicSize { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_u64(MaxTopicSizeVisitor) + } +} + +impl fmt::Display for MaxTopicSize { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + MaxTopicSize::Custom(value) => write!(f, "{}", value), + MaxTopicSize::ServerDefault => write!(f, "{}", IggyByteSize::from(DEFAULT_SIZE_BYTES)), + } + } +} diff --git a/server/Cargo.toml b/server/Cargo.toml index 8383a138b..701ea2394 100644 --- a/server/Cargo.toml +++ b/server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "server" -version = "0.2.24" +version = "0.3.0" edition = "2021" build = "src/build.rs" @@ -17,6 +17,7 @@ atone = "0.3.7" axum = "0.7.5" axum-server = { version = "0.6.0", features = ["tls-rustls"] } bcrypt = "0.15.1" +bincode = "1.3.3" blake3 = "1.5.1" bytes = "1.6.0" clap = { version = "4.5.4", features = ["derive"] } @@ -29,6 +30,7 @@ futures = "0.3.30" iggy = { path = "../sdk" } jsonwebtoken = "9.3.0" keepcalm = "0.3.5" +log = "0.4.20" moka = { version = "0.12.5", features = ["future"] } prometheus-client = "0.22.2" quinn = { 
version = "0.11.1" } diff --git a/server/server.http b/server/server.http index 2ae9728b0..368422dd1 100644 --- a/server/server.http +++ b/server/server.http @@ -14,7 +14,6 @@ @user1_username = user1 @user1_password = secret @access_token = secret -@refresh_token = secret @root_id = 1 @user1_id = 2 @pat_name = dev_token @@ -56,7 +55,7 @@ POST {{url}}/users/refresh-token Content-Type: application/json { - "refresh_token": "{{refresh_token}}" + "token": "{{access_token}}" } ### diff --git a/server/src/binary/handlers/consumer_groups/create_consumer_group_handler.rs b/server/src/binary/handlers/consumer_groups/create_consumer_group_handler.rs index 55d5f5c21..14832cff3 100644 --- a/server/src/binary/handlers/consumer_groups/create_consumer_group_handler.rs +++ b/server/src/binary/handlers/consumer_groups/create_consumer_group_handler.rs @@ -3,6 +3,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::CREATE_CONSUMER_GROUP_CODE; use iggy::consumer_groups::create_consumer_group::CreateConsumerGroup; use iggy::error::IggyError; use tracing::debug; @@ -14,18 +16,34 @@ pub async fn handle( system: &SharedSystem, ) -> Result<(), IggyError> { debug!("session: {session}, command: {command}"); - let mut system = system.write(); - let consumer_group = system - .create_consumer_group( - session, - &command.stream_id, - &command.topic_id, - command.group_id, - &command.name, - ) - .await?; - let consumer_group = consumer_group.read().await; - let consumer_group = mapper::map_consumer_group(&consumer_group).await; - sender.send_ok_response(&consumer_group).await?; + let consumer_group_bytes; + { + let mut system = system.write(); + let consumer_group = system + .create_consumer_group( + session, + &command.stream_id, + &command.topic_id, + command.group_id, + &command.name, + ) + .await?; + let consumer_group = 
consumer_group.read().await; + consumer_group_bytes = mapper::map_consumer_group(&consumer_group).await; + } + { + let system = system.read(); + system + .state + .apply( + CREATE_CONSUMER_GROUP_CODE, + session.get_user_id(), + &command.as_bytes(), + None, + ) + .await?; + } + + sender.send_ok_response(&consumer_group_bytes).await?; Ok(()) } diff --git a/server/src/binary/handlers/consumer_groups/delete_consumer_group_handler.rs b/server/src/binary/handlers/consumer_groups/delete_consumer_group_handler.rs index 627aafb3d..752a28083 100644 --- a/server/src/binary/handlers/consumer_groups/delete_consumer_group_handler.rs +++ b/server/src/binary/handlers/consumer_groups/delete_consumer_group_handler.rs @@ -2,6 +2,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::DELETE_CONSUMER_GROUP_CODE; use iggy::consumer_groups::delete_consumer_group::DeleteConsumerGroup; use iggy::error::IggyError; use tracing::debug; @@ -22,6 +24,15 @@ pub async fn handle( &command.group_id, ) .await?; + system + .state + .apply( + DELETE_CONSUMER_GROUP_CODE, + session.get_user_id(), + &command.as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/consumer_offsets/store_consumer_offset_handler.rs b/server/src/binary/handlers/consumer_offsets/store_consumer_offset_handler.rs index 7bb881c66..9bbf22a32 100644 --- a/server/src/binary/handlers/consumer_offsets/store_consumer_offset_handler.rs +++ b/server/src/binary/handlers/consumer_offsets/store_consumer_offset_handler.rs @@ -17,6 +17,7 @@ pub async fn handle( let system = system.read(); let consumer = PollingConsumer::from_consumer(&command.consumer, session.client_id, command.partition_id); + system .store_consumer_offset( session, diff --git 
a/server/src/binary/handlers/partitions/create_partitions_handler.rs b/server/src/binary/handlers/partitions/create_partitions_handler.rs index f7890529f..331e6e143 100644 --- a/server/src/binary/handlers/partitions/create_partitions_handler.rs +++ b/server/src/binary/handlers/partitions/create_partitions_handler.rs @@ -2,6 +2,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::CREATE_PARTITIONS_CODE; use iggy::error::IggyError; use iggy::partitions::create_partitions::CreatePartitions; use tracing::debug; @@ -22,6 +24,15 @@ pub async fn handle( command.partitions_count, ) .await?; + system + .state + .apply( + CREATE_PARTITIONS_CODE, + session.get_user_id(), + &command.as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/partitions/delete_partitions_handler.rs b/server/src/binary/handlers/partitions/delete_partitions_handler.rs index dfa53b3c2..0f927844b 100644 --- a/server/src/binary/handlers/partitions/delete_partitions_handler.rs +++ b/server/src/binary/handlers/partitions/delete_partitions_handler.rs @@ -2,6 +2,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::DELETE_PARTITIONS_CODE; use iggy::error::IggyError; use iggy::partitions::delete_partitions::DeletePartitions; use tracing::debug; @@ -22,6 +24,15 @@ pub async fn handle( command.partitions_count, ) .await?; + system + .state + .apply( + DELETE_PARTITIONS_CODE, + session.get_user_id(), + &command.as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/personal_access_tokens/create_personal_access_token_handler.rs 
b/server/src/binary/handlers/personal_access_tokens/create_personal_access_token_handler.rs index e22b692d8..76880efaf 100644 --- a/server/src/binary/handlers/personal_access_tokens/create_personal_access_token_handler.rs +++ b/server/src/binary/handlers/personal_access_tokens/create_personal_access_token_handler.rs @@ -1,8 +1,12 @@ use crate::binary::mapper; use crate::binary::sender::Sender; +use crate::state::models::CreatePersonalAccessTokenWithHash; +use crate::streaming::personal_access_tokens::personal_access_token::PersonalAccessToken; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::CREATE_PERSONAL_ACCESS_TOKEN_CODE; use iggy::error::IggyError; use iggy::personal_access_tokens::create_personal_access_token::CreatePersonalAccessToken; use tracing::debug; @@ -14,11 +18,29 @@ pub async fn handle( system: &SharedSystem, ) -> Result<(), IggyError> { debug!("session: {session}, command: {command}"); - let system = system.read(); + let mut system = system.write(); let token = system .create_personal_access_token(session, &command.name, command.expiry) .await?; let bytes = mapper::map_raw_pat(&token); + let token_hash = PersonalAccessToken::hash_token(&token); + + system + .state + .apply( + CREATE_PERSONAL_ACCESS_TOKEN_CODE, + session.get_user_id(), + &CreatePersonalAccessTokenWithHash { + command: CreatePersonalAccessToken { + name: command.name.to_owned(), + expiry: command.expiry, + }, + hash: token_hash, + } + .as_bytes(), + None, + ) + .await?; sender.send_ok_response(&bytes).await?; Ok(()) } diff --git a/server/src/binary/handlers/personal_access_tokens/delete_personal_access_token_handler.rs b/server/src/binary/handlers/personal_access_tokens/delete_personal_access_token_handler.rs index a5aa70836..9b190eaf7 100644 --- a/server/src/binary/handlers/personal_access_tokens/delete_personal_access_token_handler.rs +++ 
b/server/src/binary/handlers/personal_access_tokens/delete_personal_access_token_handler.rs @@ -2,6 +2,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::DELETE_PERSONAL_ACCESS_TOKEN_CODE; use iggy::error::IggyError; use iggy::personal_access_tokens::delete_personal_access_token::DeletePersonalAccessToken; use tracing::debug; @@ -13,10 +15,19 @@ pub async fn handle( system: &SharedSystem, ) -> Result<(), IggyError> { debug!("session: {session}, command: {command}"); - let system = system.read(); + let mut system = system.write(); system .delete_personal_access_token(session, &command.name) .await?; + system + .state + .apply( + DELETE_PERSONAL_ACCESS_TOKEN_CODE, + session.get_user_id(), + &command.as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/streams/create_stream_handler.rs b/server/src/binary/handlers/streams/create_stream_handler.rs index 4db848386..d9764bb33 100644 --- a/server/src/binary/handlers/streams/create_stream_handler.rs +++ b/server/src/binary/handlers/streams/create_stream_handler.rs @@ -2,6 +2,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::CREATE_STREAM_CODE; use iggy::error::IggyError; use iggy::streams::create_stream::CreateStream; use tracing::debug; @@ -17,6 +19,15 @@ pub async fn handle( system .create_stream(session, command.stream_id, &command.name) .await?; + system + .state + .apply( + CREATE_STREAM_CODE, + session.get_user_id(), + &command.as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/streams/delete_stream_handler.rs 
b/server/src/binary/handlers/streams/delete_stream_handler.rs index b3bfa1432..fdb7ee83c 100644 --- a/server/src/binary/handlers/streams/delete_stream_handler.rs +++ b/server/src/binary/handlers/streams/delete_stream_handler.rs @@ -2,6 +2,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::DELETE_STREAM_CODE; use iggy::error::IggyError; use iggy::streams::delete_stream::DeleteStream; use tracing::debug; @@ -15,6 +17,15 @@ pub async fn handle( debug!("session: {session}, command: {command}"); let mut system = system.write(); system.delete_stream(session, &command.stream_id).await?; + system + .state + .apply( + DELETE_STREAM_CODE, + session.get_user_id(), + &command.as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/streams/purge_stream_handler.rs b/server/src/binary/handlers/streams/purge_stream_handler.rs index 037608e9b..6a46ed298 100644 --- a/server/src/binary/handlers/streams/purge_stream_handler.rs +++ b/server/src/binary/handlers/streams/purge_stream_handler.rs @@ -2,6 +2,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::PURGE_STREAM_CODE; use iggy::error::IggyError; use iggy::streams::purge_stream::PurgeStream; use tracing::debug; @@ -15,6 +17,15 @@ pub async fn handle( debug!("session: {session}, command: {command}"); let system = system.read(); system.purge_stream(session, &command.stream_id).await?; + system + .state + .apply( + PURGE_STREAM_CODE, + session.get_user_id(), + &command.as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/streams/update_stream_handler.rs 
b/server/src/binary/handlers/streams/update_stream_handler.rs index 189fccbec..4a7b7c309 100644 --- a/server/src/binary/handlers/streams/update_stream_handler.rs +++ b/server/src/binary/handlers/streams/update_stream_handler.rs @@ -2,6 +2,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::UPDATE_STREAM_CODE; use iggy::error::IggyError; use iggy::streams::update_stream::UpdateStream; use tracing::debug; @@ -17,6 +19,15 @@ pub async fn handle( system .update_stream(session, &command.stream_id, &command.name) .await?; + system + .state + .apply( + UPDATE_STREAM_CODE, + session.get_user_id(), + &command.as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/topics/create_topic_handler.rs b/server/src/binary/handlers/topics/create_topic_handler.rs index 04860a0f6..62d4430d7 100644 --- a/server/src/binary/handlers/topics/create_topic_handler.rs +++ b/server/src/binary/handlers/topics/create_topic_handler.rs @@ -2,6 +2,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::CREATE_TOPIC_CODE; use iggy::error::IggyError; use iggy::topics::create_topic::CreateTopic; use tracing::debug; @@ -27,6 +29,15 @@ pub async fn handle( command.replication_factor, ) .await?; + system + .state + .apply( + CREATE_TOPIC_CODE, + session.get_user_id(), + &command.as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/topics/delete_topic_handler.rs b/server/src/binary/handlers/topics/delete_topic_handler.rs index f1b0685c3..9b174b532 100644 --- a/server/src/binary/handlers/topics/delete_topic_handler.rs +++ 
b/server/src/binary/handlers/topics/delete_topic_handler.rs @@ -2,6 +2,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::DELETE_TOPIC_CODE; use iggy::error::IggyError; use iggy::topics::delete_topic::DeleteTopic; use tracing::debug; @@ -17,6 +19,15 @@ pub async fn handle( system .delete_topic(session, &command.stream_id, &command.topic_id) .await?; + system + .state + .apply( + DELETE_TOPIC_CODE, + session.get_user_id(), + &command.as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/topics/purge_topic_handler.rs b/server/src/binary/handlers/topics/purge_topic_handler.rs index 5bc8819de..fd793b2a4 100644 --- a/server/src/binary/handlers/topics/purge_topic_handler.rs +++ b/server/src/binary/handlers/topics/purge_topic_handler.rs @@ -2,6 +2,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::PURGE_TOPIC_CODE; use iggy::error::IggyError; use iggy::topics::purge_topic::PurgeTopic; use tracing::debug; @@ -17,6 +19,15 @@ pub async fn handle( system .purge_topic(session, &command.stream_id, &command.topic_id) .await?; + system + .state + .apply( + PURGE_TOPIC_CODE, + session.get_user_id(), + &command.as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/topics/update_topic_handler.rs b/server/src/binary/handlers/topics/update_topic_handler.rs index 1d216225a..1fccca522 100644 --- a/server/src/binary/handlers/topics/update_topic_handler.rs +++ b/server/src/binary/handlers/topics/update_topic_handler.rs @@ -2,6 +2,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use 
crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::UPDATE_TOPIC_CODE; use iggy::error::IggyError; use iggy::topics::update_topic::UpdateTopic; use tracing::debug; @@ -26,6 +28,15 @@ pub async fn handle( command.replication_factor, ) .await?; + system + .state + .apply( + UPDATE_TOPIC_CODE, + session.get_user_id(), + &command.as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/users/change_password_handler.rs b/server/src/binary/handlers/users/change_password_handler.rs index da25da211..57f0f7be6 100644 --- a/server/src/binary/handlers/users/change_password_handler.rs +++ b/server/src/binary/handlers/users/change_password_handler.rs @@ -1,7 +1,10 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; +use crate::streaming::utils::crypto; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::CHANGE_PASSWORD_CODE; use iggy::error::IggyError; use iggy::users::change_password::ChangePassword; use tracing::debug; @@ -13,7 +16,7 @@ pub async fn handle( system: &SharedSystem, ) -> Result<(), IggyError> { debug!("session: {session}, command: {command}"); - let system = system.read(); + let mut system = system.write(); system .change_password( session, @@ -22,6 +25,22 @@ pub async fn handle( &command.new_password, ) .await?; + + // For the security of the system, we hash the password before storing it in metadata. 
+ system + .state + .apply( + CHANGE_PASSWORD_CODE, + session.get_user_id(), + &ChangePassword { + user_id: command.user_id.to_owned(), + current_password: "".into(), + new_password: crypto::hash_password(&command.new_password), + } + .as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/users/create_user_handler.rs b/server/src/binary/handlers/users/create_user_handler.rs index 66bb1958f..1cd199abc 100644 --- a/server/src/binary/handlers/users/create_user_handler.rs +++ b/server/src/binary/handlers/users/create_user_handler.rs @@ -1,7 +1,10 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; +use crate::streaming::utils::crypto; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::CREATE_USER_CODE; use iggy::error::IggyError; use iggy::users::create_user::CreateUser; use tracing::debug; @@ -23,6 +26,23 @@ pub async fn handle( command.permissions.clone(), ) .await?; + + // For the security of the system, we hash the password before storing it in metadata. 
+ system + .state + .apply( + CREATE_USER_CODE, + session.get_user_id(), + &CreateUser { + username: command.username.to_owned(), + password: crypto::hash_password(&command.password), + status: command.status, + permissions: command.permissions.clone(), + } + .as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/users/delete_user_handler.rs b/server/src/binary/handlers/users/delete_user_handler.rs index 195e5d4a4..694dcf9e4 100644 --- a/server/src/binary/handlers/users/delete_user_handler.rs +++ b/server/src/binary/handlers/users/delete_user_handler.rs @@ -2,6 +2,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::DELETE_USER_CODE; use iggy::error::IggyError; use iggy::users::delete_user::DeleteUser; use tracing::debug; @@ -15,6 +17,15 @@ pub async fn handle( debug!("session: {session}, command: {command}"); let mut system = system.write(); system.delete_user(session, &command.user_id).await?; + system + .state + .apply( + DELETE_USER_CODE, + session.get_user_id(), + &command.as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/users/get_user_handler.rs b/server/src/binary/handlers/users/get_user_handler.rs index 0da19477e..f58cceb7d 100644 --- a/server/src/binary/handlers/users/get_user_handler.rs +++ b/server/src/binary/handlers/users/get_user_handler.rs @@ -14,8 +14,8 @@ pub async fn handle( ) -> Result<(), IggyError> { debug!("session: {session}, command: {command}"); let system = system.read(); - let user = system.find_user(session, &command.user_id).await?; - let bytes = mapper::map_user(&user); + let user = system.find_user(session, &command.user_id)?; + let bytes = mapper::map_user(user); sender.send_ok_response(&bytes).await?; Ok(()) } diff --git 
a/server/src/binary/handlers/users/update_permissions_handler.rs b/server/src/binary/handlers/users/update_permissions_handler.rs index 7b646d202..940ca099a 100644 --- a/server/src/binary/handlers/users/update_permissions_handler.rs +++ b/server/src/binary/handlers/users/update_permissions_handler.rs @@ -2,6 +2,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::UPDATE_PERMISSIONS_CODE; use iggy::error::IggyError; use iggy::users::update_permissions::UpdatePermissions; use tracing::debug; @@ -17,6 +19,15 @@ pub async fn handle( system .update_permissions(session, &command.user_id, command.permissions.clone()) .await?; + system + .state + .apply( + UPDATE_PERMISSIONS_CODE, + session.get_user_id(), + &command.as_bytes(), + None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/handlers/users/update_user_handler.rs b/server/src/binary/handlers/users/update_user_handler.rs index 76db8358d..3203432ca 100644 --- a/server/src/binary/handlers/users/update_user_handler.rs +++ b/server/src/binary/handlers/users/update_user_handler.rs @@ -2,6 +2,8 @@ use crate::binary::sender::Sender; use crate::streaming::session::Session; use crate::streaming::systems::system::SharedSystem; use anyhow::Result; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::UPDATE_USER_CODE; use iggy::error::IggyError; use iggy::users::update_user::UpdateUser; use tracing::debug; @@ -13,7 +15,7 @@ pub async fn handle( system: &SharedSystem, ) -> Result<(), IggyError> { debug!("session: {session}, command: {command}"); - let system = system.read(); + let mut system = system.write(); system .update_user( session, @@ -22,6 +24,15 @@ pub async fn handle( command.status, ) .await?; + system + .state + .apply( + UPDATE_USER_CODE, + session.get_user_id(), + &command.as_bytes(), + 
None, + ) + .await?; sender.send_empty_ok_response().await?; Ok(()) } diff --git a/server/src/binary/mapper.rs b/server/src/binary/mapper.rs index b668f219d..84cac9250 100644 --- a/server/src/binary/mapper.rs +++ b/server/src/binary/mapper.rs @@ -88,7 +88,7 @@ pub fn map_user(user: &User) -> Bytes { bytes.freeze() } -pub fn map_users(users: &[User]) -> Bytes { +pub fn map_users(users: &[&User]) -> Bytes { let mut bytes = BytesMut::new(); for user in users { extend_user(user, &mut bytes); @@ -109,7 +109,7 @@ pub fn map_raw_pat(token: &str) -> Bytes { bytes.freeze() } -pub fn map_personal_access_tokens(personal_access_tokens: &[PersonalAccessToken]) -> Bytes { +pub fn map_personal_access_tokens(personal_access_tokens: &[&PersonalAccessToken]) -> Bytes { let mut bytes = BytesMut::new(); for personal_access_token in personal_access_tokens { extend_pat(personal_access_token, &mut bytes); @@ -208,17 +208,11 @@ async fn extend_stream(stream: &Stream, bytes: &mut BytesMut) { async fn extend_topic(topic: &Topic, bytes: &mut BytesMut) { bytes.put_u32_le(topic.topic_id); - bytes.put_u64_le(topic.created_at); + bytes.put_u64_le(topic.created_at.into()); bytes.put_u32_le(topic.get_partitions().len() as u32); - match topic.message_expiry { - Some(message_expiry) => bytes.put_u32_le(message_expiry), - None => bytes.put_u32_le(0), - }; + bytes.put_u64_le(topic.message_expiry.into()); bytes.put_u8(topic.compression_algorithm.as_code()); - match topic.max_topic_size { - Some(max_topic_size) => bytes.put_u64_le(max_topic_size.as_bytes_u64()), - None => bytes.put_u64_le(0), - }; + bytes.put_u64_le(topic.max_topic_size.into()); bytes.put_u8(topic.replication_factor); bytes.put_u64_le(topic.get_size().as_bytes_u64()); bytes.put_u64_le(topic.get_messages_count()); @@ -228,7 +222,7 @@ async fn extend_topic(topic: &Topic, bytes: &mut BytesMut) { fn extend_partition(partition: &Partition, bytes: &mut BytesMut) { bytes.put_u32_le(partition.partition_id); - 
bytes.put_u64_le(partition.created_at); + bytes.put_u64_le(partition.created_at.into()); bytes.put_u32_le(partition.get_segments().len() as u32); bytes.put_u64_le(partition.current_offset); bytes.put_u64_le(partition.get_size_bytes()); @@ -259,7 +253,7 @@ fn extend_client(client: &Client, bytes: &mut BytesMut) { fn extend_user(user: &User, bytes: &mut BytesMut) { bytes.put_u32_le(user.id); - bytes.put_u64_le(user.created_at); + bytes.put_u64_le(user.created_at.into()); bytes.put_u8(user.status.as_code()); bytes.put_u8(user.username.len() as u8); bytes.put_slice(user.username.as_bytes()); @@ -268,5 +262,12 @@ fn extend_user(user: &User, bytes: &mut BytesMut) { fn extend_pat(personal_access_token: &PersonalAccessToken, bytes: &mut BytesMut) { bytes.put_u8(personal_access_token.name.len() as u8); bytes.put_slice(personal_access_token.name.as_bytes()); - bytes.put_u64_le(personal_access_token.expiry.unwrap_or(0)); + match &personal_access_token.expiry_at { + Some(expiry_at) => { + bytes.put_u64_le(expiry_at.to_micros()); + } + None => { + bytes.put_u64_le(0); + } + } } diff --git a/server/src/channels/commands/clean_messages.rs b/server/src/channels/commands/clean_messages.rs index 1892d9a87..c3eb45e4a 100644 --- a/server/src/channels/commands/clean_messages.rs +++ b/server/src/channels/commands/clean_messages.rs @@ -64,7 +64,7 @@ impl MessagesCleaner { #[async_trait] impl ServerCommand for CleanMessagesExecutor { async fn execute(&mut self, system: &SharedSystem, _command: CleanMessagesCommand) { - let now = IggyTimestamp::now().to_micros(); + let now = IggyTimestamp::now(); let system = system.read(); let streams = system.get_streams(); for stream in streams { @@ -119,7 +119,7 @@ impl ServerCommand for CleanMessagesExecutor { async fn delete_expired_segments( topic: &Topic, - now: u64, + now: IggyTimestamp, ) -> Result, IggyError> { let expired_segments = topic .get_expired_segments_start_offsets_per_partition(now) diff --git 
a/server/src/channels/commands/clean_personal_access_tokens.rs b/server/src/channels/commands/clean_personal_access_tokens.rs index 0e992c9eb..cb18f3cd1 100644 --- a/server/src/channels/commands/clean_personal_access_tokens.rs +++ b/server/src/channels/commands/clean_personal_access_tokens.rs @@ -65,56 +65,31 @@ impl PersonalAccessTokenCleaner { #[async_trait] impl ServerCommand for CleanPersonalAccessTokensExecutor { async fn execute(&mut self, system: &SharedSystem, _command: CleanPersonalAccessTokensCommand) { - let system = system.read(); - let tokens = system.storage.personal_access_token.load_all().await; - if tokens.is_err() { - error!("Failed to load personal access tokens: {:?}", tokens); - return; - } - - let tokens = tokens.unwrap(); - if tokens.is_empty() { - debug!("No personal access tokens to delete."); - return; - } - - let now = IggyTimestamp::now().to_micros(); - let expired_tokens = tokens - .into_iter() - .filter(|token| token.is_expired(now)) - .collect::>(); - - if expired_tokens.is_empty() { - debug!("No expired personal access tokens to delete."); - return; - } - - let expired_tokens_count = expired_tokens.len(); + // TODO: System write lock, investigate if it's necessary. + let mut system = system.write(); + let now = IggyTimestamp::now(); let mut deleted_tokens_count = 0; - debug!("Found {expired_tokens_count} expired personal access tokens."); - for token in expired_tokens { - let result = system - .storage - .personal_access_token - .delete_for_user(token.user_id, &token.name) - .await; - if result.is_err() { - error!( - "Failed to delete personal access token: {} for user with ID: {}. 
Error: {:?}", - token.name, - token.user_id, - result.err().unwrap() + for (_, user) in system.users.iter_mut() { + let expired_tokens = user + .personal_access_tokens + .values() + .filter(|token| token.is_expired(now)) + .map(|token| token.token.clone()) + .collect::>(); + + for token in expired_tokens { + debug!( + "Personal access token: {token} for user with ID: {} is expired.", + user.id + ); + deleted_tokens_count += 1; + user.personal_access_tokens.remove(&token); + debug!( + "Deleted personal access token: {token} for user with ID: {}.", + user.id ); - continue; } - - deleted_tokens_count += 1; - debug!( - "Deleted personal access token: {} for user with ID: {}.", - token.name, token.user_id - ); } - info!("Deleted {deleted_tokens_count} expired personal access tokens."); } diff --git a/server/src/compat/binary_schema.rs b/server/src/compat/message_conversion/binary_schema.rs similarity index 100% rename from server/src/compat/binary_schema.rs rename to server/src/compat/message_conversion/binary_schema.rs diff --git a/server/src/compat/chunks_error.rs b/server/src/compat/message_conversion/chunks_error.rs similarity index 100% rename from server/src/compat/chunks_error.rs rename to server/src/compat/message_conversion/chunks_error.rs diff --git a/server/src/compat/conversion_writer.rs b/server/src/compat/message_conversion/conversion_writer.rs similarity index 100% rename from server/src/compat/conversion_writer.rs rename to server/src/compat/message_conversion/conversion_writer.rs diff --git a/server/src/compat/message_converter.rs b/server/src/compat/message_conversion/message_converter.rs similarity index 86% rename from server/src/compat/message_converter.rs rename to server/src/compat/message_conversion/message_converter.rs index 7a195b0a1..630823b1d 100644 --- a/server/src/compat/message_converter.rs +++ b/server/src/compat/message_conversion/message_converter.rs @@ -1,11 +1,12 @@ -use crate::compat::samplers::message_sampler::MessageSampler; -use 
crate::compat::samplers::retained_batch_sampler::RetainedMessageBatchSampler; -use crate::compat::schema_sampler::BinarySchemaSampler; +use crate::compat::message_conversion::samplers::message_sampler::MessageSampler; +use crate::compat::message_conversion::samplers::retained_batch_sampler::RetainedMessageBatchSampler; +use crate::compat::message_conversion::schema_sampler::BinarySchemaSampler; use crate::streaming::sizeable::Sizeable; use bytes::{BufMut, BytesMut}; use iggy::error::IggyError; use crate::streaming::segments::storage::{INDEX_SIZE, TIME_INDEX_SIZE}; +use iggy::utils::timestamp::IggyTimestamp; use tokio::io::{AsyncWrite, AsyncWriteExt}; pub trait Extendable { @@ -22,7 +23,7 @@ pub trait MessageFormatConverterPersister { ) -> Result<(), IggyError>; async fn persist_time_index( &self, - timestamp: u64, + timestamp: IggyTimestamp, relative_offset: u32, writer: &mut W, ) -> Result<(), IggyError>; @@ -57,13 +58,13 @@ where async fn persist_time_index( &self, - timestamp: u64, + timestamp: IggyTimestamp, relative_offset: u32, writer: &mut W, ) -> Result<(), IggyError> { let mut time_index_bytes = BytesMut::with_capacity(TIME_INDEX_SIZE as usize); time_index_bytes.put_u32_le(relative_offset); - time_index_bytes.put_u64_le(timestamp); + time_index_bytes.put_u64_le(timestamp.into()); writer.write_all(&time_index_bytes).await?; Ok(()) diff --git a/server/src/compat/message_stream.rs b/server/src/compat/message_conversion/message_stream.rs similarity index 100% rename from server/src/compat/message_stream.rs rename to server/src/compat/message_conversion/message_stream.rs diff --git a/server/src/compat/message_conversion/mod.rs b/server/src/compat/message_conversion/mod.rs new file mode 100644 index 000000000..715891011 --- /dev/null +++ b/server/src/compat/message_conversion/mod.rs @@ -0,0 +1,9 @@ +pub(crate) mod binary_schema; +pub(crate) mod chunks_error; +pub(crate) mod conversion_writer; +pub(crate) mod message_converter; +pub(crate) mod message_stream; 
+pub(crate) mod samplers; +pub(crate) mod schema_sampler; +pub(crate) mod snapshots; +pub(crate) mod streams; diff --git a/server/src/compat/samplers/message_sampler.rs b/server/src/compat/message_conversion/samplers/message_sampler.rs similarity index 87% rename from server/src/compat/samplers/message_sampler.rs rename to server/src/compat/message_conversion/samplers/message_sampler.rs index c7739e215..4e6e98afe 100644 --- a/server/src/compat/samplers/message_sampler.rs +++ b/server/src/compat/message_conversion/samplers/message_sampler.rs @@ -1,6 +1,6 @@ -use crate::compat::binary_schema::BinarySchema; -use crate::compat::schema_sampler::BinarySchemaSampler; -use crate::compat::snapshots::message_snapshot::MessageSnapshot; +use crate::compat::message_conversion::binary_schema::BinarySchema; +use crate::compat::message_conversion::schema_sampler::BinarySchemaSampler; +use crate::compat::message_conversion::snapshots::message_snapshot::MessageSnapshot; use crate::server_error::ServerError; use crate::streaming::utils::file; use async_trait::async_trait; diff --git a/server/src/compat/samplers/mod.rs b/server/src/compat/message_conversion/samplers/mod.rs similarity index 100% rename from server/src/compat/samplers/mod.rs rename to server/src/compat/message_conversion/samplers/mod.rs diff --git a/server/src/compat/samplers/retained_batch_sampler.rs b/server/src/compat/message_conversion/samplers/retained_batch_sampler.rs similarity index 89% rename from server/src/compat/samplers/retained_batch_sampler.rs rename to server/src/compat/message_conversion/samplers/retained_batch_sampler.rs index 2fbfee469..a05fb191c 100644 --- a/server/src/compat/samplers/retained_batch_sampler.rs +++ b/server/src/compat/message_conversion/samplers/retained_batch_sampler.rs @@ -1,6 +1,6 @@ -use crate::compat::binary_schema::BinarySchema; -use crate::compat::schema_sampler::BinarySchemaSampler; -use crate::compat::snapshots::retained_batch_snapshot::RetainedMessageBatchSnapshot; +use 
crate::compat::message_conversion::binary_schema::BinarySchema; +use crate::compat::message_conversion::schema_sampler::BinarySchemaSampler; +use crate::compat::message_conversion::snapshots::retained_batch_snapshot::RetainedMessageBatchSnapshot; use crate::server_error::ServerError; use crate::streaming::utils::file; use async_trait::async_trait; diff --git a/server/src/compat/schema_sampler.rs b/server/src/compat/message_conversion/schema_sampler.rs similarity index 74% rename from server/src/compat/schema_sampler.rs rename to server/src/compat/message_conversion/schema_sampler.rs index b35dda06e..6c1f86f14 100644 --- a/server/src/compat/schema_sampler.rs +++ b/server/src/compat/message_conversion/schema_sampler.rs @@ -1,4 +1,4 @@ -use crate::compat::binary_schema::BinarySchema; +use crate::compat::message_conversion::binary_schema::BinarySchema; use crate::server_error::ServerError; use async_trait::async_trait; diff --git a/server/src/compat/snapshots/message_snapshot.rs b/server/src/compat/message_conversion/snapshots/message_snapshot.rs similarity index 95% rename from server/src/compat/snapshots/message_snapshot.rs rename to server/src/compat/message_conversion/snapshots/message_snapshot.rs index 806615774..6c46dead6 100644 --- a/server/src/compat/snapshots/message_snapshot.rs +++ b/server/src/compat/message_conversion/snapshots/message_snapshot.rs @@ -1,17 +1,18 @@ -use crate::compat::message_converter::Extendable; +use crate::compat::message_conversion::message_converter::Extendable; use crate::server_error::ServerError; use crate::streaming::sizeable::Sizeable; use bytes::{BufMut, Bytes, BytesMut}; use iggy::bytes_serializable::BytesSerializable; use iggy::models::header::{self, HeaderKey, HeaderValue}; use iggy::models::messages::MessageState; +use iggy::utils::timestamp::IggyTimestamp; use std::collections::HashMap; #[derive(Debug)] pub struct MessageSnapshot { pub offset: u64, pub state: MessageState, - pub timestamp: u64, + pub timestamp: 
IggyTimestamp, pub id: u128, pub payload: Bytes, pub checksum: u32, @@ -22,7 +23,7 @@ impl MessageSnapshot { pub fn new( offset: u64, state: MessageState, - timestamp: u64, + timestamp: IggyTimestamp, id: u128, payload: Bytes, checksum: u32, @@ -54,7 +55,7 @@ impl Extendable for MessageSnapshot { bytes.put_u32_le(length); bytes.put_u64_le(offset); bytes.put_u8(message_state.as_code()); - bytes.put_u64_le(timestamp); + bytes.put_u64_le(timestamp.into()); bytes.put_u128_le(id); bytes.put_u32_le(checksum); if let Some(headers) = headers { @@ -165,7 +166,7 @@ impl TryFrom for MessageSnapshot { Ok(MessageSnapshot { offset, state, - timestamp, + timestamp: timestamp.into(), id, payload, checksum, diff --git a/server/src/compat/snapshots/mod.rs b/server/src/compat/message_conversion/snapshots/mod.rs similarity index 100% rename from server/src/compat/snapshots/mod.rs rename to server/src/compat/message_conversion/snapshots/mod.rs diff --git a/server/src/compat/snapshots/retained_batch_snapshot.rs b/server/src/compat/message_conversion/snapshots/retained_batch_snapshot.rs similarity index 93% rename from server/src/compat/snapshots/retained_batch_snapshot.rs rename to server/src/compat/message_conversion/snapshots/retained_batch_snapshot.rs index 71f59ee7a..15f7c0029 100644 --- a/server/src/compat/snapshots/retained_batch_snapshot.rs +++ b/server/src/compat/message_conversion/snapshots/retained_batch_snapshot.rs @@ -1,14 +1,15 @@ use super::message_snapshot::MessageSnapshot; -use crate::compat::message_converter::Extendable; +use crate::compat::message_conversion::message_converter::Extendable; use crate::server_error::ServerError; use crate::streaming::sizeable::Sizeable; use bytes::{BufMut, Bytes, BytesMut}; use iggy::error::IggyError; +use iggy::utils::timestamp::IggyTimestamp; pub struct RetainedMessageBatchSnapshot { pub base_offset: u64, pub last_offset_delta: u32, - pub max_timestamp: u64, + pub max_timestamp: IggyTimestamp, pub length: u32, pub bytes: Bytes, } @@ 
-17,7 +18,7 @@ impl RetainedMessageBatchSnapshot { pub fn new( base_offset: u64, last_offset_delta: u32, - max_timestamp: u64, + max_timestamp: IggyTimestamp, length: u32, bytes: Bytes, ) -> RetainedMessageBatchSnapshot { @@ -71,7 +72,7 @@ impl Extendable for RetainedMessageBatchSnapshot { bytes.put_u64_le(self.base_offset); bytes.put_u32_le(self.length); bytes.put_u32_le(self.last_offset_delta); - bytes.put_u64_le(self.max_timestamp); + bytes.put_u64_le(self.max_timestamp.into()); bytes.put_slice(&self.bytes); } } @@ -132,7 +133,7 @@ impl TryFrom for RetainedMessageBatchSnapshot { Ok(RetainedMessageBatchSnapshot { base_offset, last_offset_delta, - max_timestamp, + max_timestamp: max_timestamp.into(), length, bytes, }) diff --git a/server/src/compat/streams/mod.rs b/server/src/compat/message_conversion/streams/mod.rs similarity index 100% rename from server/src/compat/streams/mod.rs rename to server/src/compat/message_conversion/streams/mod.rs diff --git a/server/src/compat/streams/retained_batch.rs b/server/src/compat/message_conversion/streams/retained_batch.rs similarity index 100% rename from server/src/compat/streams/retained_batch.rs rename to server/src/compat/message_conversion/streams/retained_batch.rs diff --git a/server/src/compat/streams/retained_message.rs b/server/src/compat/message_conversion/streams/retained_message.rs similarity index 92% rename from server/src/compat/streams/retained_message.rs rename to server/src/compat/message_conversion/streams/retained_message.rs index 37213f759..25ee8c2d2 100644 --- a/server/src/compat/streams/retained_message.rs +++ b/server/src/compat/message_conversion/streams/retained_message.rs @@ -1,5 +1,5 @@ -use crate::compat::message_stream::MessageStream; -use crate::compat::snapshots::message_snapshot::MessageSnapshot; +use crate::compat::message_conversion::message_stream::MessageStream; +use crate::compat::message_conversion::snapshots::message_snapshot::MessageSnapshot; use async_stream::try_stream; use 
bytes::{BufMut, BytesMut}; @@ -74,7 +74,7 @@ impl MessageStream for RetainedMessageStream { self.read_bytes += 4 + payload_len as u64; let message = - MessageSnapshot::new(offset, state, timestamp, id, payload.freeze(), checksum, headers); + MessageSnapshot::new(offset, state, timestamp.into(), id, payload.freeze(), checksum, headers); yield message; } } diff --git a/server/src/compat/mod.rs b/server/src/compat/mod.rs index 715891011..9211d2ce9 100644 --- a/server/src/compat/mod.rs +++ b/server/src/compat/mod.rs @@ -1,9 +1,2 @@ -pub(crate) mod binary_schema; -pub(crate) mod chunks_error; -pub(crate) mod conversion_writer; -pub(crate) mod message_converter; -pub(crate) mod message_stream; -pub(crate) mod samplers; -pub(crate) mod schema_sampler; -pub(crate) mod snapshots; -pub(crate) mod streams; +pub mod message_conversion; +pub mod storage_conversion; diff --git a/server/src/compat/storage_conversion/converter.rs b/server/src/compat/storage_conversion/converter.rs new file mode 100644 index 000000000..04a7d9d73 --- /dev/null +++ b/server/src/compat/storage_conversion/converter.rs @@ -0,0 +1,225 @@ +use crate::state::models::CreatePersonalAccessTokenWithHash; +use crate::state::State; +use crate::streaming::personal_access_tokens::personal_access_token::PersonalAccessToken; +use crate::streaming::storage::SystemStorage; +use crate::streaming::streams::stream::Stream; +use crate::streaming::users::user::User; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::{ + CREATE_CONSUMER_GROUP_CODE, CREATE_PERSONAL_ACCESS_TOKEN_CODE, CREATE_STREAM_CODE, + CREATE_TOPIC_CODE, CREATE_USER_CODE, +}; +use iggy::consumer_groups::create_consumer_group::CreateConsumerGroup; +use iggy::error::IggyError; +use iggy::locking::IggySharedMutFn; +use iggy::personal_access_tokens::create_personal_access_token::CreatePersonalAccessToken; +use iggy::streams::create_stream::CreateStream; +use iggy::topics::create_topic::CreateTopic; +use iggy::users::create_user::CreateUser; 
+use iggy::utils::expiry::IggyExpiry; +use iggy::utils::timestamp::IggyTimestamp; +use std::path::Path; +use std::sync::Arc; +use tokio::fs::create_dir; +use tracing::{error, info}; + +pub async fn convert( + state: Arc, + storage: Arc, + mut streams: Vec, + mut users: Vec, + personal_access_tokens: Vec, +) -> Result<(), IggyError> { + info!("Converting storage to new format"); + state.init().await?; + streams.sort_by(|a, b| a.stream_id.cmp(&b.stream_id)); + users.sort_by(|a, b| a.id.cmp(&b.id)); + info!("Converting {} users", users.len()); + for user in users { + state + .apply( + CREATE_USER_CODE, + 0, + &CreateUser { + username: user.username, + password: user.password, + status: user.status, + permissions: user.permissions.clone(), + } + .as_bytes(), + None, + ) + .await?; + } + + info!( + "Converting {} personal access tokens", + personal_access_tokens.len() + ); + for personal_access_token in personal_access_tokens { + let now = IggyTimestamp::now(); + let mut expiry = IggyExpiry::NeverExpire; + if let Some(expiry_at) = personal_access_token.expiry_at { + if expiry_at.to_micros() <= now.to_micros() { + continue; + } + expiry = IggyExpiry::ExpireDuration((expiry_at.to_micros() - now.to_micros()).into()); + } + + state + .apply( + CREATE_PERSONAL_ACCESS_TOKEN_CODE, + personal_access_token.user_id, + &CreatePersonalAccessTokenWithHash { + command: CreatePersonalAccessToken { + name: personal_access_token.name, + expiry, + }, + hash: personal_access_token.token, + } + .as_bytes(), + None, + ) + .await?; + } + + info!("Converting {} streams", streams.len()); + for stream in streams { + state + .apply( + CREATE_STREAM_CODE, + 0, + &CreateStream { + stream_id: Some(stream.stream_id), + name: stream.name, + } + .as_bytes(), + None, + ) + .await?; + + info!( + "Converting {} topics for stream with ID: {}", + stream.topics.len(), + stream.stream_id + ); + for topic in stream.topics.into_values() { + state + .apply( + CREATE_TOPIC_CODE, + 0, + &CreateTopic { + 
stream_id: topic.stream_id.try_into()?, + topic_id: Some(topic.topic_id), + partitions_count: topic.partitions.len() as u32, + compression_algorithm: topic.compression_algorithm, + message_expiry: topic.message_expiry, + max_topic_size: topic.max_topic_size, + replication_factor: if topic.replication_factor > 0 { + Some(topic.replication_factor) + } else { + None + }, + name: topic.name, + } + .as_bytes(), + None, + ) + .await?; + + info!( + "Converting {} consumer groups for topic with ID: {}", + topic.consumer_groups.len(), + topic.topic_id, + ); + for group in topic.consumer_groups.into_values() { + let group = group.read().await; + state + .apply( + CREATE_CONSUMER_GROUP_CODE, + 0, + &CreateConsumerGroup { + stream_id: stream.stream_id.try_into()?, + topic_id: topic.topic_id.try_into()?, + group_id: Some(group.group_id), + name: group.name.to_owned(), + } + .as_bytes(), + None, + ) + .await?; + } + + info!( + "Converting {} partitions for topic with ID: {}", + topic.partitions.len(), + topic.topic_id, + ); + for partition in topic.partitions.into_values() { + let partition = partition.read().await; + + if !Path::new(&partition.offsets_path).exists() + && create_dir(&partition.offsets_path).await.is_err() + { + error!( + "Failed to create offsets directory for partition with ID: {} for stream with ID: {} and topic with ID: {}.", + partition.partition_id, partition.stream_id, partition.topic_id + ); + return Err(IggyError::CannotCreatePartition( + partition.partition_id, + partition.stream_id, + partition.topic_id, + )); + } + + info!("Creating consumer offsets directory for partition with ID: {} for stream with ID: {} and topic with ID: {}, path: {}", + partition.partition_id, partition.stream_id, partition.topic_id, partition.consumer_offsets_path); + if !Path::new(&partition.consumer_offsets_path).exists() + && create_dir(&partition.consumer_offsets_path).await.is_err() + { + error!( + "Failed to create consumer offsets directory for partition with ID: {} for 
stream with ID: {} and topic with ID: {}.", + partition.partition_id, partition.stream_id, partition.topic_id + ); + return Err(IggyError::CannotCreatePartition( + partition.partition_id, + partition.stream_id, + partition.topic_id, + )); + } + + info!("Creating consumer group offsets directory for partition with ID: {} for stream with ID: {} and topic with ID: {}, path: {}", + partition.partition_id, partition.stream_id, partition.topic_id, partition.consumer_group_offsets_path); + if !Path::new(&partition.consumer_group_offsets_path).exists() + && create_dir(&partition.consumer_group_offsets_path) + .await + .is_err() + { + error!( + "Failed to create consumer group offsets directory for partition with ID: {} for stream with ID: {} and topic with ID: {}.", + partition.partition_id, partition.stream_id, partition.topic_id + ); + return Err(IggyError::CannotCreatePartition( + partition.partition_id, + partition.stream_id, + partition.topic_id, + )); + } + + info!("Converting {} consumer offsets for partition with ID: {} for stream with ID: {} and topic with ID: {}", + partition.consumer_offsets.len(), partition.partition_id, partition.stream_id, partition.topic_id); + for offset in partition.consumer_offsets.iter() { + storage.partition.save_consumer_offset(&offset).await?; + } + + info!("Converting {} consumer group offsets for partition with ID: {} for stream with ID: {} and topic with ID: {}", + partition.consumer_group_offsets.len(), partition.partition_id, partition.stream_id, partition.topic_id); + for offset in partition.consumer_group_offsets.iter() { + storage.partition.save_consumer_offset(&offset).await?; + } + } + } + } + info!("Conversion completed"); + Ok(()) +} diff --git a/server/src/compat/storage_conversion/mod.rs b/server/src/compat/storage_conversion/mod.rs new file mode 100644 index 000000000..e1fe3a3a3 --- /dev/null +++ b/server/src/compat/storage_conversion/mod.rs @@ -0,0 +1,290 @@ +mod converter; +mod persistency; + +use 
crate::compat::storage_conversion::persistency::{personal_access_tokens, streams, users}; +use crate::configs::system::SystemConfig; +use crate::state::system::{PartitionState, StreamState, TopicState}; +use crate::state::State; +use crate::streaming::batching::message_batch::RetainedMessageBatch; +use crate::streaming::partitions::partition::{ConsumerOffset, Partition}; +use crate::streaming::persistence::persister::Persister; +use crate::streaming::segments::index::{Index, IndexRange}; +use crate::streaming::segments::segment::Segment; +use crate::streaming::segments::time_index::TimeIndex; +use crate::streaming::storage::{ + PartitionStorage, SegmentStorage, StreamStorage, SystemInfoStorage, SystemStorage, TopicStorage, +}; +use crate::streaming::streams::stream::Stream; +use crate::streaming::systems::info::SystemInfo; +use crate::streaming::topics::topic::Topic; +use async_trait::async_trait; +use iggy::consumer::ConsumerKind; +use iggy::error::IggyError; +use iggy::utils::timestamp::IggyTimestamp; +use std::path::Path; +use std::sync::Arc; +use tokio::fs::{read_dir, rename}; +use tracing::{error, info}; + +pub async fn init( + config: Arc, + metadata: Arc, + storage: Arc, +) -> Result<(), IggyError> { + if Path::new(&config.get_state_log_path()).exists() { + info!("State log already exists, skipping storage migration"); + return Ok(()); + } + + let path = config.get_database_path(); + if path.is_none() { + info!("No database path configured, skipping storage migration"); + return Ok(()); + } + + let database_path = path.unwrap(); + if !Path::new(&database_path).exists() { + error!("Database directory: {database_path} does not exist - cannot migrate storage."); + return Err(IggyError::CannotOpenDatabase(database_path)); + } + + let db_file = format!("{database_path}/db"); + if !Path::new(&db_file).exists() { + error!("Database file at path: {db_file} does not exist - cannot migrate storage."); + return Err(IggyError::CannotOpenDatabase(db_file)); + } + + 
info!("Starting storage migration, database path: {database_path}"); + let db = sled::open(&database_path); + if db.is_err() { + panic!("Cannot open database at: {database_path}"); + } + let db = db.unwrap(); + let mut streams = Vec::new(); + let dir_entries = read_dir(&config.get_streams_path()).await; + if let Err(error) = dir_entries { + error!("Cannot read streams directory: {}", error); + return Err(IggyError::CannotReadStreams); + } + + let noop_storage = SystemStorage { + info: Arc::new(NoopSystemInfoStorage {}), + stream: Arc::new(NoopStreamStorage {}), + topic: Arc::new(NoopTopicStorage {}), + partition: Arc::new(NoopPartitionStorage {}), + segment: Arc::new(NoopSegmentStorage {}), + persister: Arc::new(NoopPersister {}), + }; + let noop_storage = Arc::new(noop_storage); + let mut dir_entries = dir_entries.unwrap(); + while let Some(dir_entry) = dir_entries.next_entry().await.unwrap_or(None) { + let name = dir_entry.file_name().into_string().unwrap(); + let stream_id = name.parse::(); + if stream_id.is_err() { + error!("Invalid stream ID file with name: '{}'.", name); + continue; + } + + let stream_id = stream_id.unwrap(); + let mut stream = Stream::empty(stream_id, "stream", config.clone(), noop_storage.clone()); + streams::load(&config, &db, &mut stream).await?; + streams.push(stream); + } + + let users = users::load_all(&db).await?; + let personal_access_tokens = personal_access_tokens::load_all(&db).await?; + converter::convert(metadata, storage, streams, users, personal_access_tokens).await?; + let old_database_path = format!("{database_path}_old"); + rename(&database_path, &old_database_path).await?; + info!("Storage migration has completed, new state log was created and old database was moved to: {old_database_path} (now it can be safely deleted)."); + Ok(()) +} + +struct NoopPersister {} +struct NoopSystemInfoStorage {} +struct NoopStreamStorage {} +struct NoopTopicStorage {} +struct NoopPartitionStorage {} +struct NoopSegmentStorage {} + 
+#[async_trait] +impl Persister for NoopPersister { + async fn append(&self, _path: &str, _bytes: &[u8]) -> Result<(), IggyError> { + Ok(()) + } + + async fn overwrite(&self, _path: &str, _bytes: &[u8]) -> Result<(), IggyError> { + Ok(()) + } + + async fn delete(&self, _path: &str) -> Result<(), IggyError> { + Ok(()) + } +} + +#[async_trait] +impl SystemInfoStorage for NoopSystemInfoStorage { + async fn load(&self) -> Result { + Ok(SystemInfo::default()) + } + + async fn save(&self, _system_info: &SystemInfo) -> Result<(), IggyError> { + Ok(()) + } +} + +#[async_trait] +impl StreamStorage for NoopStreamStorage { + async fn load(&self, _stream: &mut Stream, _state: StreamState) -> Result<(), IggyError> { + Ok(()) + } + + async fn save(&self, _stream: &Stream) -> Result<(), IggyError> { + Ok(()) + } + + async fn delete(&self, _stream: &Stream) -> Result<(), IggyError> { + Ok(()) + } +} + +#[async_trait] +impl TopicStorage for NoopTopicStorage { + async fn load(&self, _topic: &mut Topic, _state: TopicState) -> Result<(), IggyError> { + Ok(()) + } + + async fn save(&self, _topic: &Topic) -> Result<(), IggyError> { + Ok(()) + } + + async fn delete(&self, _topic: &Topic) -> Result<(), IggyError> { + Ok(()) + } +} + +#[async_trait] +impl PartitionStorage for NoopPartitionStorage { + async fn load( + &self, + _partition: &mut Partition, + _state: PartitionState, + ) -> Result<(), IggyError> { + Ok(()) + } + + async fn save(&self, _partition: &Partition) -> Result<(), IggyError> { + Ok(()) + } + + async fn delete(&self, _partition: &Partition) -> Result<(), IggyError> { + Ok(()) + } + + async fn save_consumer_offset(&self, _offset: &ConsumerOffset) -> Result<(), IggyError> { + Ok(()) + } + + async fn load_consumer_offsets( + &self, + _kind: ConsumerKind, + _path: &str, + ) -> Result, IggyError> { + Ok(vec![]) + } + + async fn delete_consumer_offsets(&self, _path: &str) -> Result<(), IggyError> { + Ok(()) + } + + async fn delete_consumer_offset(&self, _path: &str) -> 
Result<(), IggyError> { + Ok(()) + } +} + +#[async_trait] +impl SegmentStorage for NoopSegmentStorage { + async fn load(&self, _segment: &mut Segment) -> Result<(), IggyError> { + Ok(()) + } + + async fn save(&self, _segment: &Segment) -> Result<(), IggyError> { + Ok(()) + } + + async fn delete(&self, _segment: &Segment) -> Result<(), IggyError> { + Ok(()) + } + + async fn load_message_batches( + &self, + _segment: &Segment, + _index_range: &IndexRange, + ) -> Result, IggyError> { + Ok(vec![]) + } + + async fn load_newest_batches_by_size( + &self, + _segment: &Segment, + _size: u64, + ) -> Result, IggyError> { + Ok(vec![]) + } + + async fn save_batches( + &self, + _segment: &Segment, + _batches: &[Arc], + ) -> Result { + Ok(0) + } + + async fn load_message_ids(&self, _segment: &Segment) -> Result, IggyError> { + Ok(vec![]) + } + + async fn load_checksums(&self, _segment: &Segment) -> Result<(), IggyError> { + Ok(()) + } + + async fn load_all_indexes(&self, _segment: &Segment) -> Result, IggyError> { + Ok(vec![]) + } + + async fn load_index_range( + &self, + _segment: &Segment, + _index_start_offset: u64, + _index_end_offset: u64, + ) -> Result, IggyError> { + Ok(None) + } + + async fn save_index(&self, _segment: &Segment) -> Result<(), IggyError> { + Ok(()) + } + + async fn try_load_time_index_for_timestamp( + &self, + _segment: &Segment, + _timestamp: IggyTimestamp, + ) -> Result, IggyError> { + Ok(None) + } + + async fn load_all_time_indexes(&self, _segment: &Segment) -> Result, IggyError> { + Ok(vec![]) + } + + async fn load_last_time_index( + &self, + _segment: &Segment, + ) -> Result, IggyError> { + Ok(None) + } + + async fn save_time_index(&self, _segment: &Segment) -> Result<(), IggyError> { + Ok(()) + } +} diff --git a/server/src/compat/storage_conversion/persistency/mod.rs b/server/src/compat/storage_conversion/persistency/mod.rs new file mode 100644 index 000000000..e7b273914 --- /dev/null +++ b/server/src/compat/storage_conversion/persistency/mod.rs @@ 
-0,0 +1,5 @@ +pub mod partitions; +pub mod personal_access_tokens; +pub mod streams; +pub mod topics; +pub mod users; diff --git a/server/src/compat/storage_conversion/persistency/partitions.rs b/server/src/compat/storage_conversion/persistency/partitions.rs new file mode 100644 index 000000000..03a818478 --- /dev/null +++ b/server/src/compat/storage_conversion/persistency/partitions.rs @@ -0,0 +1,293 @@ +use crate::configs::system::SystemConfig; +use crate::streaming::partitions::partition::{ConsumerOffset, Partition}; +use crate::streaming::segments::segment::{Segment, LOG_EXTENSION}; +use anyhow::Context; +use iggy::consumer::ConsumerKind; +use iggy::error::IggyError; +use iggy::utils::timestamp::IggyTimestamp; +use serde::{Deserialize, Serialize}; +use sled::Db; +use std::sync::atomic::Ordering; +use tokio::fs; +use tracing::{info, warn}; + +pub async fn load_consumer_offsets( + db: &Db, + config: &SystemConfig, + kind: ConsumerKind, + stream_id: u32, + topic_id: u32, + partition_id: u32, +) -> Result, IggyError> { + let mut consumer_offsets = Vec::new(); + let key_prefix = format!( + "{}:", + get_key_prefix(kind, stream_id, topic_id, partition_id) + ); + for data in db.scan_prefix(&key_prefix) { + let consumer_offset = match data.with_context(|| { + format!( + "Failed to load consumer offset, when searching by key: {}", + key_prefix + ) + }) { + Ok((key, value)) => { + let key = String::from_utf8(key.to_vec()).unwrap(); + let offset = u64::from_be_bytes(value.as_ref().try_into().unwrap()); + let consumer_id = key.split(':').last().unwrap().parse::().unwrap(); + ConsumerOffsetCompat { + key, + kind, + consumer_id, + offset, + } + } + Err(err) => { + return Err(IggyError::CannotLoadResource(err)); + } + }; + consumer_offsets.push(consumer_offset); + } + + consumer_offsets.sort_by(|a, b| a.consumer_id.cmp(&b.consumer_id)); + let consumer_offsets = consumer_offsets + .into_iter() + .map(|consumer_offset| { + let path = match kind { + ConsumerKind::Consumer => { + 
config.get_consumer_offsets_path(stream_id, topic_id, partition_id) + } + ConsumerKind::ConsumerGroup => { + config.get_consumer_group_offsets_path(stream_id, topic_id, partition_id) + } + }; + let path = format!("{path}/{}", consumer_offset.consumer_id); + ConsumerOffset { + kind: consumer_offset.kind, + consumer_id: consumer_offset.consumer_id, + offset: consumer_offset.offset, + path, + } + }) + .collect::>(); + + Ok(consumer_offsets) +} + +pub async fn load( + config: &SystemConfig, + db: &Db, + partition: &mut Partition, +) -> Result<(), IggyError> { + info!( + "Loading partition with ID: {} for stream with ID: {} and topic with ID: {}, for path: {} from disk...", + partition.partition_id, partition.stream_id, partition.topic_id, partition.partition_path + ); + let dir_entries = fs::read_dir(&partition.partition_path).await; + if let Err(err) = fs::read_dir(&partition.partition_path) + .await + .with_context(|| format!("Failed to read partition with ID: {} for stream with ID: {} and topic with ID: {} and path: {}", partition.partition_id, partition.stream_id, partition.topic_id, partition.partition_path)) + { + return Err(IggyError::CannotReadPartitions(err)); + } + + let key = get_partition_key( + partition.stream_id, + partition.topic_id, + partition.partition_id, + ); + let partition_data = match db + .get(&key) + .with_context(|| format!("Failed to load partition with key: {}", key)) + { + Ok(partition_data) => { + if let Some(partition_data) = partition_data { + let partition_data = rmp_serde::from_slice::(&partition_data) + .with_context(|| format!("Failed to deserialize partition with key: {}", key)); + if let Err(err) = partition_data { + return Err(IggyError::CannotDeserializeResource(err)); + } else { + partition_data.unwrap() + } + } else { + return Err(IggyError::ResourceNotFound(key)); + } + } + Err(err) => { + return Err(IggyError::CannotLoadResource(err)); + } + }; + + partition.created_at = partition_data.created_at; + + let 
consumer_offsets_for_consumer = load_consumer_offsets( + db, + config, + ConsumerKind::Consumer, + partition.stream_id, + partition.topic_id, + partition.partition_id, + ) + .await?; + + let consumer_offsets_for_group = load_consumer_offsets( + db, + config, + ConsumerKind::ConsumerGroup, + partition.stream_id, + partition.topic_id, + partition.partition_id, + ) + .await?; + + for consumer_offset in consumer_offsets_for_consumer { + partition + .consumer_offsets + .insert(consumer_offset.consumer_id, consumer_offset); + } + + for consumer_offset in consumer_offsets_for_group { + partition + .consumer_group_offsets + .insert(consumer_offset.consumer_id, consumer_offset); + } + + let mut dir_entries = dir_entries.unwrap(); + while let Some(dir_entry) = dir_entries.next_entry().await.unwrap_or(None) { + let metadata = dir_entry.metadata().await.unwrap(); + if metadata.is_dir() { + continue; + } + + let path = dir_entry.path(); + let extension = path.extension(); + if extension.is_none() || extension.unwrap() != LOG_EXTENSION { + continue; + } + + let log_file_name = dir_entry + .file_name() + .into_string() + .unwrap() + .replace(&format!(".{}", LOG_EXTENSION), ""); + + let start_offset = log_file_name.parse::().unwrap(); + let mut segment = Segment::create( + partition.stream_id, + partition.topic_id, + partition.partition_id, + start_offset, + partition.config.clone(), + partition.storage.clone(), + partition.message_expiry, + partition.size_of_parent_stream.clone(), + partition.size_of_parent_topic.clone(), + partition.size_bytes.clone(), + partition.messages_count_of_parent_stream.clone(), + partition.messages_count_of_parent_topic.clone(), + partition.messages_count.clone(), + ); + + segment.load().await?; + if !segment.is_closed { + segment.unsaved_batches = Some(Vec::new()) + } + + // If the first segment has at least a single message, we should increment the offset. 
+ if !partition.should_increment_offset { + partition.should_increment_offset = segment.size_bytes > 0; + } + + if partition.config.partition.validate_checksum { + info!("Validating messages checksum for partition with ID: {} and segment with start offset: {}...", partition.partition_id, segment.start_offset); + segment.storage.segment.load_checksums(&segment).await?; + info!("Validated messages checksum for partition with ID: {} and segment with start offset: {}.", partition.partition_id, segment.start_offset); + } + + // Load the unique message IDs for the partition if the deduplication feature is enabled. + let mut unique_message_ids_count = 0; + if let Some(message_deduplicator) = &partition.message_deduplicator { + info!("Loading unique message IDs for partition with ID: {} and segment with start offset: {}...", partition.partition_id, segment.start_offset); + let message_ids = segment.storage.segment.load_message_ids(&segment).await?; + for message_id in message_ids { + if message_deduplicator.try_insert(&message_id).await { + unique_message_ids_count += 1; + } else { + warn!("Duplicated message ID: {} for partition with ID: {} and segment with start offset: {}.", message_id, partition.partition_id, segment.start_offset); + } + } + info!("Loaded: {} unique message IDs for partition with ID: {} and segment with start offset: {}...", unique_message_ids_count, partition.partition_id, segment.start_offset); + } + + partition + .segments_count_of_parent_stream + .fetch_add(1, Ordering::SeqCst); + partition.segments.push(segment); + } + + partition + .segments + .sort_by(|a, b| a.start_offset.cmp(&b.start_offset)); + + let end_offsets = partition + .segments + .iter() + .skip(1) + .map(|segment| segment.start_offset - 1) + .collect::>(); + + let segments_count = partition.segments.len(); + for (end_offset_index, segment) in partition.get_segments_mut().iter_mut().enumerate() { + if end_offset_index == segments_count - 1 { + break; + } + + segment.end_offset = 
end_offsets[end_offset_index]; + } + + if !partition.segments.is_empty() { + let last_segment = partition.segments.last_mut().unwrap(); + if last_segment.is_closed { + last_segment.end_offset = last_segment.current_offset; + } + + partition.current_offset = last_segment.current_offset; + } + + partition.load_consumer_offsets().await?; + info!( + "Loaded partition with ID: {} for stream with ID: {} and topic with ID: {}, current offset: {}.", + partition.partition_id, partition.stream_id, partition.topic_id, partition.current_offset + ); + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize)] +struct PartitionData { + created_at: IggyTimestamp, +} + +#[derive(Debug, PartialEq, Clone)] +struct ConsumerOffsetCompat { + pub key: String, + pub kind: ConsumerKind, + pub consumer_id: u32, + pub offset: u64, +} + +fn get_partition_key(stream_id: u32, topic_id: u32, partition_id: u32) -> String { + format!( + "streams:{}:topics:{}:partitions:{}", + stream_id, topic_id, partition_id + ) +} + +pub fn get_key_prefix( + kind: ConsumerKind, + stream_id: u32, + topic_id: u32, + partition_id: u32, +) -> String { + format!("{kind}_offsets:{stream_id}:{topic_id}:{partition_id}") +} diff --git a/server/src/compat/storage_conversion/persistency/personal_access_tokens.rs b/server/src/compat/storage_conversion/persistency/personal_access_tokens.rs new file mode 100644 index 000000000..abfcd46e7 --- /dev/null +++ b/server/src/compat/storage_conversion/persistency/personal_access_tokens.rs @@ -0,0 +1,56 @@ +use crate::streaming::personal_access_tokens::personal_access_token::PersonalAccessToken; +use anyhow::Context; +use iggy::error::IggyError; +use iggy::models::user_info::UserId; +use serde::{Deserialize, Serialize}; +use sled::Db; + +const KEY_PREFIX: &str = "personal_access_token"; + +pub async fn load_all(db: &Db) -> Result, IggyError> { + let mut personal_access_tokens = Vec::new(); + for data in db.scan_prefix(format!("{}:token:", KEY_PREFIX)) { + let personal_access_token_data 
= match data.with_context(|| { + format!( + "Failed to load personal access token, when searching by key: {}", + KEY_PREFIX + ) + }) { + Ok((_, value)) => match rmp_serde::from_slice::(&value) + .with_context(|| { + format!( + "Failed to deserialize personal access token, when searching by key: {}", + KEY_PREFIX + ) + }) { + Ok(personal_access_token) => personal_access_token, + Err(err) => { + return Err(IggyError::CannotDeserializeResource(err)); + } + }, + Err(err) => { + return Err(IggyError::CannotLoadResource(err)); + } + }; + + let personal_access_token = PersonalAccessToken::raw( + personal_access_token_data.user_id, + &personal_access_token_data.name, + &personal_access_token_data.token, + personal_access_token_data + .expiry + .map(|expiry| expiry.into()), + ); + personal_access_tokens.push(personal_access_token); + } + + Ok(personal_access_tokens) +} + +#[derive(Debug, Serialize, Deserialize)] +struct PersonalAccessTokenData { + pub user_id: UserId, + pub name: String, + pub token: String, + pub expiry: Option, +} diff --git a/server/src/compat/storage_conversion/persistency/streams.rs b/server/src/compat/storage_conversion/persistency/streams.rs new file mode 100644 index 000000000..6907f8bb4 --- /dev/null +++ b/server/src/compat/storage_conversion/persistency/streams.rs @@ -0,0 +1,120 @@ +use crate::compat::storage_conversion::persistency::topics; +use crate::configs::system::SystemConfig; +use crate::streaming::streams::stream::Stream; +use crate::streaming::topics::topic::Topic; +use anyhow::Context; +use iggy::error::IggyError; +use iggy::utils::timestamp::IggyTimestamp; +use serde::{Deserialize, Serialize}; +use sled::Db; +use std::path::Path; +use tokio::fs; +use tracing::{error, info}; + +#[derive(Debug, Serialize, Deserialize)] +struct StreamData { + name: String, + created_at: IggyTimestamp, +} + +pub async fn load(config: &SystemConfig, db: &Db, stream: &mut Stream) -> Result<(), IggyError> { + info!("Loading stream with ID: {} from disk...", 
stream.stream_id); + if !Path::new(&stream.path).exists() { + return Err(IggyError::StreamIdNotFound(stream.stream_id)); + } + + let key = get_key(stream.stream_id); + let stream_data = match db.get(&key).with_context(|| { + format!( + "Failed to load stream with ID: {}, key: {}", + stream.stream_id, key + ) + }) { + Ok(stream_data) => { + if let Some(stream_data) = stream_data { + let stream_data = + rmp_serde::from_slice::(&stream_data).with_context(|| { + format!( + "Failed to deserialize stream with ID: {}, key: {}", + stream.stream_id, key + ) + }); + match stream_data { + Ok(stream_data) => stream_data, + Err(err) => { + return Err(IggyError::CannotDeserializeResource(err)); + } + } + } else { + return Err(IggyError::ResourceNotFound(key)); + } + } + Err(err) => { + return Err(IggyError::CannotLoadResource(err)); + } + }; + + stream.name = stream_data.name; + stream.created_at = stream_data.created_at; + let mut topics = Vec::new(); + let dir_entries = fs::read_dir(&stream.topics_path).await; + if dir_entries.is_err() { + return Err(IggyError::CannotReadTopics(stream.stream_id)); + } + + let mut dir_entries = dir_entries.unwrap(); + while let Some(dir_entry) = dir_entries.next_entry().await.unwrap_or(None) { + let name = dir_entry.file_name().into_string().unwrap(); + let topic_id = name.parse::(); + if topic_id.is_err() { + error!("Invalid topic ID file with name: '{}'.", name); + continue; + } + + let topic_id = topic_id.unwrap(); + let mut topic = Topic::empty( + stream.stream_id, + topic_id, + "topic", + stream.size_bytes.clone(), + stream.messages_count.clone(), + stream.segments_count.clone(), + stream.config.clone(), + stream.storage.clone(), + ); + topics::load(config, db, &mut topic).await?; + topics.push(topic); + } + + for topic in topics.into_iter() { + if stream.topics.contains_key(&topic.topic_id) { + error!( + "Topic with ID: '{}' already exists for stream with ID: {}.", + &topic.topic_id, &stream.stream_id + ); + continue; + } + + if 
stream.topics_ids.contains_key(&topic.name) { + error!( + "Topic with name: '{}' already exists for stream with ID: {}.", + &topic.name, &stream.stream_id + ); + continue; + } + + stream.topics_ids.insert(topic.name.clone(), topic.topic_id); + stream.topics.insert(topic.topic_id, topic); + } + + info!( + "Loaded stream: '{}' with ID: {} from disk.", + &stream.name, &stream.stream_id + ); + + Ok(()) +} + +fn get_key(stream_id: u32) -> String { + format!("streams:{}", stream_id) +} diff --git a/server/src/compat/storage_conversion/persistency/topics.rs b/server/src/compat/storage_conversion/persistency/topics.rs new file mode 100644 index 000000000..3937eab15 --- /dev/null +++ b/server/src/compat/storage_conversion/persistency/topics.rs @@ -0,0 +1,179 @@ +use crate::compat::storage_conversion::persistency::partitions; +use crate::configs::system::SystemConfig; +use crate::streaming::partitions::partition::Partition; +use crate::streaming::topics::consumer_group::ConsumerGroup; +use crate::streaming::topics::topic::Topic; +use anyhow::Context; +use iggy::compression::compression_algorithm::CompressionAlgorithm; +use iggy::error::IggyError; +use iggy::locking::IggySharedMut; +use iggy::locking::IggySharedMutFn; +use iggy::utils::byte_size::IggyByteSize; +use iggy::utils::duration::IggyDuration; +use iggy::utils::expiry::IggyExpiry; +use iggy::utils::timestamp::IggyTimestamp; +use serde::{Deserialize, Serialize}; +use sled::Db; +use std::path::Path; +use tokio::fs; +use tokio::sync::RwLock; +use tracing::{error, info}; + +#[derive(Debug, Serialize, Deserialize)] +struct ConsumerGroupData { + id: u32, + name: String, +} + +pub async fn load(config: &SystemConfig, db: &Db, topic: &mut Topic) -> Result<(), IggyError> { + info!("Loading topic {} from disk...", topic); + if !Path::new(&topic.path).exists() { + return Err(IggyError::TopicIdNotFound(topic.topic_id, topic.stream_id)); + } + + let key = get_topic_key(topic.stream_id, topic.topic_id); + let topic_data = match db 
+ .get(&key) + .with_context(|| format!("Failed to load topic with key: {}", key)) + { + Ok(data) => { + if let Some(topic_data) = data { + let topic_data = rmp_serde::from_slice::(&topic_data) + .with_context(|| format!("Failed to deserialize topic with key: {}", key)); + if let Err(err) = topic_data { + return Err(IggyError::CannotDeserializeResource(err)); + } else { + topic_data.unwrap() + } + } else { + return Err(IggyError::ResourceNotFound(key)); + } + } + Err(err) => { + return Err(IggyError::CannotLoadResource(err)); + } + }; + + topic.name = topic_data.name; + topic.created_at = topic_data.created_at; + topic.message_expiry = match topic_data.message_expiry { + Some(expiry) => IggyExpiry::ExpireDuration(IggyDuration::from(expiry as u64 * 1000000)), + None => IggyExpiry::NeverExpire, + }; + topic.compression_algorithm = topic_data.compression_algorithm; + topic.max_topic_size = topic_data.max_topic_size.into(); + topic.replication_factor = topic_data.replication_factor; + + let dir_entries = fs::read_dir(&topic.partitions_path).await + .with_context(|| format!("Failed to read partition with ID: {} for stream with ID: {} for topic with ID: {} and path: {}", + topic.topic_id, topic.stream_id, topic.topic_id, &topic.partitions_path)); + if let Err(err) = dir_entries { + return Err(IggyError::CannotReadPartitions(err)); + } + + let mut dir_entries = dir_entries.unwrap(); + while let Some(dir_entry) = dir_entries.next_entry().await.unwrap_or(None) { + let metadata = dir_entry.metadata().await; + if metadata.is_err() || metadata.unwrap().is_file() { + continue; + } + + let name = dir_entry.file_name().into_string().unwrap(); + let partition_id = name.parse::(); + if partition_id.is_err() { + error!("Invalid partition ID file with name: '{}'.", name); + continue; + } + + let partition_id = partition_id.unwrap(); + let mut partition = Partition::create( + topic.stream_id, + topic.topic_id, + partition_id, + false, + topic.config.clone(), + topic.storage.clone(), + 
topic.message_expiry, + topic.messages_count_of_parent_stream.clone(), + topic.messages_count.clone(), + topic.size_of_parent_stream.clone(), + topic.size_bytes.clone(), + topic.segments_count_of_parent_stream.clone(), + IggyTimestamp::zero(), + ); + partitions::load(config, db, &mut partition).await?; + topic + .partitions + .insert(partition.partition_id, IggySharedMut::new(partition)); + } + + let consumer_groups = load_consumer_groups(db, topic).await?; + topic.consumer_groups = consumer_groups + .into_iter() + .map(|group| (group.group_id, RwLock::new(group))) + .collect(); + info!("Loaded topic {topic}"); + Ok(()) +} + +pub async fn load_consumer_groups(db: &Db, topic: &Topic) -> Result, IggyError> { + info!("Loading consumer groups for topic {} from disk...", topic); + let key_prefix = get_consumer_groups_key_prefix(topic.stream_id, topic.topic_id); + let mut consumer_groups = Vec::new(); + for data in db.scan_prefix(format!("{}:", key_prefix)) { + let consumer_group = match data.with_context(|| { + format!( + "Failed to load consumer group when searching for key: {}", + key_prefix + ) + }) { + Ok((_, value)) => { + match rmp_serde::from_slice::(&value).with_context(|| { + format!( + "Failed to deserialize consumer group with key: {}", + key_prefix + ) + }) { + Ok(user) => user, + Err(err) => { + return Err(IggyError::CannotDeserializeResource(err)); + } + } + } + Err(err) => { + return Err(IggyError::CannotLoadResource(err)); + } + }; + let consumer_group = ConsumerGroup::new( + topic.topic_id, + consumer_group.id, + &consumer_group.name, + topic.get_partitions_count(), + ); + consumer_groups.push(consumer_group); + } + info!( + "Loaded {} consumer groups for topic {}", + consumer_groups.len(), + topic + ); + Ok(consumer_groups) +} + +#[derive(Debug, Serialize, Deserialize)] +struct TopicData { + name: String, + created_at: IggyTimestamp, + message_expiry: Option, + compression_algorithm: CompressionAlgorithm, + max_topic_size: Option, + replication_factor: 
u8, +} + +fn get_topic_key(stream_id: u32, topic_id: u32) -> String { + format!("streams:{}:topics:{}", stream_id, topic_id) +} + +fn get_consumer_groups_key_prefix(stream_id: u32, topic_id: u32) -> String { + format!("streams:{stream_id}:topics:{topic_id}:consumer_groups") +} diff --git a/server/src/compat/storage_conversion/persistency/users.rs b/server/src/compat/storage_conversion/persistency/users.rs new file mode 100644 index 000000000..a561e12e0 --- /dev/null +++ b/server/src/compat/storage_conversion/persistency/users.rs @@ -0,0 +1,57 @@ +use crate::streaming::users::user::User; +use anyhow::Context; +use iggy::error::IggyError; +use iggy::models::permissions::Permissions; +use iggy::models::user_info::UserId; +use iggy::models::user_status::UserStatus; +use iggy::utils::timestamp::IggyTimestamp; +use serde::{Deserialize, Serialize}; +use sled::Db; + +const KEY_PREFIX: &str = "users"; + +pub async fn load_all(db: &Db) -> Result, IggyError> { + let mut users = Vec::new(); + for data in db.scan_prefix(format!("{}:", KEY_PREFIX)) { + let user_data = match data.with_context(|| { + format!( + "Failed to load user, when searching for key: {}", + KEY_PREFIX + ) + }) { + Ok((_, value)) => match rmp_serde::from_slice::(&value).with_context(|| { + format!( + "Failed to deserialize user, when searching for key: {}", + KEY_PREFIX + ) + }) { + Ok(user) => user, + Err(err) => { + return Err(IggyError::CannotDeserializeResource(err)); + } + }, + Err(err) => { + return Err(IggyError::CannotLoadResource(err)); + } + }; + let mut user = User::empty(user_data.id); + user.status = user_data.status; + user.username = user_data.username; + user.password = user_data.password; + user.created_at = user_data.created_at; + user.permissions = user_data.permissions; + users.push(user); + } + + Ok(users) +} + +#[derive(Debug, Serialize, Deserialize)] +struct UserData { + pub id: UserId, + pub status: UserStatus, + pub username: String, + pub password: String, + pub created_at: 
IggyTimestamp, + pub permissions: Option, +} diff --git a/server/src/configs/defaults.rs b/server/src/configs/defaults.rs index 64a931c16..f64cba4a6 100644 --- a/server/src/configs/defaults.rs +++ b/server/src/configs/defaults.rs @@ -7,9 +7,9 @@ use crate::configs::server::{ PersonalAccessTokenConfig, ServerConfig, }; use crate::configs::system::{ - BackupConfig, CacheConfig, CompatibilityConfig, CompressionConfig, DatabaseConfig, - EncryptionConfig, LoggingConfig, MessageDeduplicationConfig, PartitionConfig, - RetentionPolicyConfig, RuntimeConfig, SegmentConfig, StreamConfig, SystemConfig, TopicConfig, + BackupConfig, CacheConfig, CompatibilityConfig, CompressionConfig, EncryptionConfig, + LoggingConfig, MessageDeduplicationConfig, PartitionConfig, RetentionPolicyConfig, + RuntimeConfig, SegmentConfig, StreamConfig, SystemConfig, TopicConfig, }; use crate::configs::tcp::{TcpConfig, TcpTlsConfig}; use std::sync::Arc; @@ -157,7 +157,6 @@ impl Default for HttpJwtConfig { .map(|s| s.parse().unwrap()) .collect(), access_token_expiry: SERVER_CONFIG.http.jwt.access_token_expiry.parse().unwrap(), - refresh_token_expiry: SERVER_CONFIG.http.jwt.refresh_token_expiry.parse().unwrap(), clock_skew: SERVER_CONFIG.http.jwt.clock_skew.parse().unwrap(), not_before: SERVER_CONFIG.http.jwt.not_before.parse().unwrap(), encoding_secret: SERVER_CONFIG.http.jwt.encoding_secret.parse().unwrap(), @@ -233,7 +232,7 @@ impl Default for SystemConfig { SystemConfig { path: SERVER_CONFIG.system.path.parse().unwrap(), backup: BackupConfig::default(), - database: DatabaseConfig::default(), + database: None, runtime: RuntimeConfig::default(), logging: LoggingConfig::default(), cache: CacheConfig::default(), @@ -272,14 +271,6 @@ impl Default for CompatibilityConfig { } } -impl Default for DatabaseConfig { - fn default() -> DatabaseConfig { - DatabaseConfig { - path: SERVER_CONFIG.system.database.path.parse().unwrap(), - } - } -} - impl Default for RuntimeConfig { fn default() -> RuntimeConfig { 
RuntimeConfig { diff --git a/server/src/configs/displays.rs b/server/src/configs/displays.rs index 3628c1c76..59837aff8 100644 --- a/server/src/configs/displays.rs +++ b/server/src/configs/displays.rs @@ -5,9 +5,8 @@ use crate::configs::{ resource_quota::MemoryResourceQuota, server::{MessageCleanerConfig, MessageSaverConfig, ServerConfig}, system::{ - CacheConfig, CompressionConfig, DatabaseConfig, EncryptionConfig, LoggingConfig, - PartitionConfig, RetentionPolicyConfig, SegmentConfig, StreamConfig, SystemConfig, - TopicConfig, + CacheConfig, CompressionConfig, EncryptionConfig, LoggingConfig, PartitionConfig, + RetentionPolicyConfig, SegmentConfig, StreamConfig, SystemConfig, TopicConfig, }, tcp::{TcpConfig, TcpTlsConfig}, }; @@ -141,12 +140,6 @@ impl Display for MessageSaverConfig { } } -impl Display for DatabaseConfig { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "{{ path: {} }}", self.path) - } -} - impl Display for CacheConfig { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "{{ enabled: {}, size: {} }}", self.enabled, self.size) @@ -158,8 +151,7 @@ impl Display for RetentionPolicyConfig { write!( f, "{{ message_expiry {}, max_topic_size: {} }}", - self.message_expiry.as_secs(), - self.max_topic_size.as_human_string_with_zero_as_unlimited() + self.message_expiry, self.max_topic_size ) } } @@ -252,9 +244,8 @@ impl Display for SystemConfig { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!( f, - "{{ path: {}, database: {}, logging: {}, cache: {}, stream: {}, topic: {}, partition: {}, segment: {}, encryption: {} }}", + "{{ path: {}, logging: {}, cache: {}, stream: {}, topic: {}, partition: {}, segment: {}, encryption: {} }}", self.path, - self.database, self.logging, self.cache, self.stream, diff --git a/server/src/configs/http.rs b/server/src/configs/http.rs index 606639629..c2acfb81b 100644 --- a/server/src/configs/http.rs +++ b/server/src/configs/http.rs @@ -1,6 +1,7 @@ use 
iggy::error::IggyError; use iggy::utils::byte_size::IggyByteSize; use iggy::utils::duration::IggyDuration; +use iggy::utils::expiry::IggyExpiry; use jsonwebtoken::{Algorithm, DecodingKey, EncodingKey}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; @@ -37,9 +38,7 @@ pub struct HttpJwtConfig { pub valid_issuers: Vec, pub valid_audiences: Vec, #[serde_as(as = "DisplayFromStr")] - pub access_token_expiry: IggyDuration, - #[serde_as(as = "DisplayFromStr")] - pub refresh_token_expiry: IggyDuration, + pub access_token_expiry: IggyExpiry, #[serde_as(as = "DisplayFromStr")] pub clock_skew: IggyDuration, #[serde_as(as = "DisplayFromStr")] diff --git a/server/src/configs/system.rs b/server/src/configs/system.rs index 1c2e66104..60bb8ce9e 100644 --- a/server/src/configs/system.rs +++ b/server/src/configs/system.rs @@ -1,5 +1,7 @@ use crate::configs::resource_quota::MemoryResourceQuota; use iggy::utils::byte_size::IggyByteSize; +use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use iggy::{ compression::compression_algorithm::CompressionAlgorithm, utils::duration::IggyDuration, }; @@ -11,7 +13,7 @@ use serde_with::DisplayFromStr; pub struct SystemConfig { pub path: String, pub backup: BackupConfig, - pub database: DatabaseConfig, + pub database: Option, pub runtime: RuntimeConfig, pub logging: LoggingConfig, pub cache: CacheConfig, @@ -26,18 +28,18 @@ pub struct SystemConfig { } #[derive(Debug, Deserialize, Serialize)] -pub struct DatabaseConfig { +pub struct BackupConfig { pub path: String, + pub compatibility: CompatibilityConfig, } #[derive(Debug, Deserialize, Serialize)] -pub struct BackupConfig { +pub struct CompatibilityConfig { pub path: String, - pub compatibility: CompatibilityConfig, } #[derive(Debug, Deserialize, Serialize)] -pub struct CompatibilityConfig { +pub struct DatabaseConfig { pub path: String, } @@ -74,8 +76,9 @@ pub struct CacheConfig { #[derive(Debug, Deserialize, Serialize, Copy, Clone)] pub struct 
RetentionPolicyConfig { #[serde_as(as = "DisplayFromStr")] - pub message_expiry: IggyDuration, - pub max_topic_size: IggyByteSize, + pub message_expiry: IggyExpiry, + #[serde_as(as = "DisplayFromStr")] + pub max_topic_size: MaxTopicSize, } #[derive(Debug, Deserialize, Serialize)] @@ -123,6 +126,27 @@ impl SystemConfig { self.path.to_string() } + pub fn get_database_path(&self) -> Option { + self.database + .as_ref() + .map(|database| format!("{}/{}", self.get_system_path(), database.path)) + } + + pub fn get_state_path(&self) -> String { + format!("{}/state", self.get_system_path()) + } + + pub fn get_state_log_path(&self) -> String { + format!("{}/log", self.get_state_path()) + } + + pub fn get_state_info_path(&self) -> String { + format!("{}/info", self.get_state_path()) + } + pub fn get_state_tokens_path(&self) -> String { + format!("{}/tokens", self.get_state_path()) + } + pub fn get_backup_path(&self) -> String { format!("{}/{}", self.get_system_path(), self.backup.path) } @@ -135,10 +159,6 @@ impl SystemConfig { ) } - pub fn get_database_path(&self) -> String { - format!("{}/{}", self.get_system_path(), self.database.path) - } - pub fn get_runtime_path(&self) -> String { format!("{}/{}", self.get_system_path(), self.runtime.path) } @@ -175,6 +195,37 @@ impl SystemConfig { ) } + pub fn get_offsets_path(&self, stream_id: u32, topic_id: u32, partition_id: u32) -> String { + format!( + "{}/offsets", + self.get_partition_path(stream_id, topic_id, partition_id) + ) + } + + pub fn get_consumer_offsets_path( + &self, + stream_id: u32, + topic_id: u32, + partition_id: u32, + ) -> String { + format!( + "{}/consumers", + self.get_offsets_path(stream_id, topic_id, partition_id) + ) + } + + pub fn get_consumer_group_offsets_path( + &self, + stream_id: u32, + topic_id: u32, + partition_id: u32, + ) -> String { + format!( + "{}/groups", + self.get_offsets_path(stream_id, topic_id, partition_id) + ) + } + pub fn get_segment_path( &self, stream_id: u32, diff --git 
a/server/src/configs/validators.rs b/server/src/configs/validators.rs index e79643ce7..331008181 100644 --- a/server/src/configs/validators.rs +++ b/server/src/configs/validators.rs @@ -8,6 +8,7 @@ use crate::server_error::ServerError; use crate::streaming::segments::segment; use iggy::compression::compression_algorithm::CompressionAlgorithm; use iggy::utils::byte_size::IggyByteSize; +use iggy::utils::topic_size::MaxTopicSize; use iggy::validatable::Validatable; use sysinfo::System; use tracing::{error, info, warn}; @@ -20,6 +21,16 @@ impl Validatable for ServerConfig { self.system.compression.validate()?; self.personal_access_token.validate()?; + let topic_size = match self.system.retention_policy.max_topic_size { + MaxTopicSize::Custom(size) => size.as_bytes_u64(), + MaxTopicSize::ServerDefault => MaxTopicSize::get_server_default().as_bytes_u64(), + }; + + if topic_size < self.system.segment.size.as_bytes_u64() { + error!("Max topic size cannot be lower than segment size. Max topic size: {}, segment size: {}.",topic_size, self.system.segment.size); + return Err(ServerError::InvalidConfiguration); + } + Ok(()) } } @@ -78,11 +89,6 @@ impl Validatable for CacheConfig { impl Validatable for RetentionPolicyConfig { fn validate(&self) -> Result<(), ServerError> { - // TODO(hubcio): Change this message once topic size based retention policy is fully developed. 
- if self.max_topic_size.as_bytes_u64() > 0 { - warn!("Retention policy max_topic_size is not implemented yet!"); - } - Ok(()) } } diff --git a/server/src/http/consumer_groups.rs b/server/src/http/consumer_groups.rs index fcb5dda7c..7b2d4373f 100644 --- a/server/src/http/consumer_groups.rs +++ b/server/src/http/consumer_groups.rs @@ -7,7 +7,10 @@ use axum::extract::{Path, State}; use axum::http::StatusCode; use axum::routing::get; use axum::{Extension, Json, Router}; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::{CREATE_CONSUMER_GROUP_CODE, DELETE_CONSUMER_GROUP_CODE}; use iggy::consumer_groups::create_consumer_group::CreateConsumerGroup; +use iggy::consumer_groups::delete_consumer_group::DeleteConsumerGroup; use iggy::identifier::Identifier; use iggy::models::consumer_group::{ConsumerGroup, ConsumerGroupDetails}; use iggy::validatable::Validatable; @@ -72,19 +75,34 @@ async fn create_consumer_group( command.stream_id = Identifier::from_str_value(&stream_id)?; command.topic_id = Identifier::from_str_value(&topic_id)?; command.validate()?; - let mut system = state.system.write(); - let consumer_group = system - .create_consumer_group( - &Session::stateless(identity.user_id, identity.ip_address), - &command.stream_id, - &command.topic_id, - command.group_id, - &command.name, - ) - .await?; - let consumer_group = consumer_group.read().await; - let consumer_group = mapper::map_consumer_group(&consumer_group).await; - Ok((StatusCode::CREATED, Json(consumer_group))) + let consumer_group_details; + { + let mut system = state.system.write(); + let consumer_group = system + .create_consumer_group( + &Session::stateless(identity.user_id, identity.ip_address), + &command.stream_id, + &command.topic_id, + command.group_id, + &command.name, + ) + .await?; + let consumer_group = consumer_group.read().await; + consumer_group_details = mapper::map_consumer_group(&consumer_group).await; + } + { + let system = state.system.read(); + system + .state + .apply( 
+ CREATE_CONSUMER_GROUP_CODE, + identity.user_id, + &command.as_bytes(), + None, + ) + .await?; + } + Ok((StatusCode::CREATED, Json(consumer_group_details))) } async fn delete_consumer_group( @@ -104,5 +122,20 @@ async fn delete_consumer_group( &group_id, ) .await?; + system + .state + .apply( + DELETE_CONSUMER_GROUP_CODE, + identity.user_id, + &DeleteConsumerGroup { + stream_id, + topic_id, + group_id, + } + .as_bytes(), + None, + ) + .await?; + Ok(StatusCode::NO_CONTENT) } diff --git a/server/src/http/http_server.rs b/server/src/http/http_server.rs index e03dd3b6e..acfd78662 100644 --- a/server/src/http/http_server.rs +++ b/server/src/http/http_server.rs @@ -102,17 +102,15 @@ pub async fn start(config: HttpConfig, system: SharedSystem) -> SocketAddr { } async fn build_app_state(config: &HttpConfig, system: SharedSystem) -> Arc { - let db; + let tokens_path; + let persister; { - let system_read = system.read(); - db = system_read - .db - .as_ref() - .expect("Database not initialized") - .clone(); + let system = system.read(); + tokens_path = system.config.get_state_tokens_path(); + persister = system.storage.persister.clone(); } - let jwt_manager = JwtManager::from_config(&config.jwt, db); + let jwt_manager = JwtManager::from_config(persister, &tokens_path, &config.jwt); if let Err(error) = jwt_manager { panic!("Failed to initialize JWT manager: {}", error); } diff --git a/server/src/http/jwt/cleaner.rs b/server/src/http/jwt/cleaner.rs index 07319dbc0..2062ef1aa 100644 --- a/server/src/http/jwt/cleaner.rs +++ b/server/src/http/jwt/cleaner.rs @@ -18,13 +18,6 @@ pub fn start_expired_tokens_cleaner(app_state: Arc) { .unwrap_or_else(|err| { error!("Failed to delete expired revoked access tokens. Error: {err}",); }); - app_state - .jwt_manager - .delete_expired_refresh_tokens(now) - .await - .unwrap_or_else(|err: iggy::error::IggyError| { - error!("Failed to delete expired refresh tokens. 
Error: {}", err); - }); } }); } diff --git a/server/src/http/jwt/json_web_token.rs b/server/src/http/jwt/json_web_token.rs index 0804ad10c..40d4b24dd 100644 --- a/server/src/http/jwt/json_web_token.rs +++ b/server/src/http/jwt/json_web_token.rs @@ -28,10 +28,8 @@ pub struct RevokedAccessToken { } #[derive(Debug)] -pub struct GeneratedTokens { +pub struct GeneratedToken { pub user_id: UserId, pub access_token: String, pub access_token_expiry: u64, - pub refresh_token: String, - pub refresh_token_expiry: u64, } diff --git a/server/src/http/jwt/jwt_manager.rs b/server/src/http/jwt/jwt_manager.rs index b1e6f9e9e..e0545165b 100644 --- a/server/src/http/jwt/jwt_manager.rs +++ b/server/src/http/jwt/jwt_manager.rs @@ -1,15 +1,15 @@ use crate::configs::http::HttpJwtConfig; -use crate::http::jwt::json_web_token::{GeneratedTokens, JwtClaims, RevokedAccessToken}; -use crate::http::jwt::refresh_token::RefreshToken; +use crate::http::jwt::json_web_token::{GeneratedToken, JwtClaims, RevokedAccessToken}; use crate::http::jwt::storage::TokenStorage; +use crate::streaming::persistence::persister::Persister; use iggy::error::IggyError; use iggy::locking::IggySharedMut; use iggy::locking::IggySharedMutFn; use iggy::models::user_info::UserId; use iggy::utils::duration::IggyDuration; +use iggy::utils::expiry::IggyExpiry; use iggy::utils::timestamp::IggyTimestamp; use jsonwebtoken::{encode, Algorithm, DecodingKey, EncodingKey, Header, TokenData, Validation}; -use sled::Db; use std::collections::HashMap; use std::sync::Arc; use tracing::{debug, error, info}; @@ -17,8 +17,7 @@ use tracing::{debug, error, info}; pub struct IssuerOptions { pub issuer: String, pub audience: String, - pub access_token_expiry: IggyDuration, - pub refresh_token_expiry: IggyDuration, + pub access_token_expiry: IggyExpiry, pub not_before: IggyDuration, pub key: EncodingKey, pub algorithm: Algorithm, @@ -41,9 +40,10 @@ pub struct JwtManager { impl JwtManager { pub fn new( + persister: Arc, + path: &str, issuer: 
IssuerOptions, validator: ValidatorOptions, - db: Arc, ) -> Result { let validation = JwtManager::create_validation( issuer.algorithm, @@ -56,18 +56,21 @@ impl JwtManager { validations: vec![(issuer.algorithm, validation)].into_iter().collect(), issuer, validator, - tokens_storage: TokenStorage::new(db), + tokens_storage: TokenStorage::new(persister, path), revoked_tokens: IggySharedMut::new(HashMap::new()), }) } - pub fn from_config(config: &HttpJwtConfig, db: Arc) -> Result { + pub fn from_config( + persister: Arc, + path: &str, + config: &HttpJwtConfig, + ) -> Result { let algorithm = config.get_algorithm()?; let issuer = IssuerOptions { issuer: config.issuer.clone(), audience: config.audience.clone(), access_token_expiry: config.access_token_expiry, - refresh_token_expiry: config.refresh_token_expiry, not_before: config.not_before, key: config.get_encoding_key()?, algorithm, @@ -78,7 +81,7 @@ impl JwtManager { clock_skew: config.clock_skew, key: config.get_decoding_key()?, }; - JwtManager::new(issuer, validator, db) + JwtManager::new(persister, path, issuer, validator) } fn create_validation( @@ -95,7 +98,7 @@ impl JwtManager { } pub async fn load_revoked_tokens(&self) -> Result<(), IggyError> { - let revoked_tokens = self.tokens_storage.load_all_revoked_access_tokens()?; + let revoked_tokens = self.tokens_storage.load_all_revoked_access_tokens().await?; let mut tokens = self.revoked_tokens.write().await; for token in revoked_tokens { tokens.insert(token.id, token.expiry); @@ -107,7 +110,7 @@ impl JwtManager { let mut tokens_to_delete = Vec::new(); let revoked_tokens = self.revoked_tokens.read().await; for (id, expiry) in revoked_tokens.iter() { - if expiry < &now { + if expiry <= &now { tokens_to_delete.push(id.to_string()); } } @@ -125,50 +128,26 @@ impl JwtManager { "Deleting {} expired revoked access tokens...", tokens_to_delete.len() ); + self.tokens_storage + .delete_revoked_access_tokens(&tokens_to_delete) + .await?; let mut revoked_tokens = 
self.revoked_tokens.write().await; for id in tokens_to_delete { revoked_tokens.remove(&id); - self.tokens_storage.delete_revoked_access_token(&id)?; - debug!("Deleted expired revoked access token with ID: {id}") + info!("Deleted expired revoked access token with ID: {id}") } - Ok(()) } - pub async fn delete_expired_refresh_tokens(&self, now: u64) -> Result<(), IggyError> { - let mut tokens_to_delete = Vec::new(); - let refresh_tokens = self.tokens_storage.load_all_refresh_tokens()?; - for token in refresh_tokens { - if token.is_expired(now) { - tokens_to_delete.push(token.token_hash); - } - } - - debug!( - "Found {} expired refresh tokens to delete.", - tokens_to_delete.len() - ); - if tokens_to_delete.is_empty() { - return Ok(()); - } - - debug!( - "Deleting {} expired refresh tokens...", - tokens_to_delete.len() - ); - for token_hash in tokens_to_delete { - self.tokens_storage.delete_refresh_token(&token_hash)?; - debug!("Deleted expired refresh token with hash: {token_hash}") - } - - Ok(()) - } - - pub fn generate(&self, user_id: UserId) -> Result { + pub fn generate(&self, user_id: UserId) -> Result { let header = Header::new(self.issuer.algorithm); let now = IggyTimestamp::now().to_secs(); let iat = now; - let exp = iat + self.issuer.access_token_expiry.as_secs() as u64; + let exp = iat + + (match self.issuer.access_token_expiry { + IggyExpiry::NeverExpire => 1_000_000_000, + IggyExpiry::ExpireDuration(duration) => duration.as_secs(), + }) as u64; let nbf = iat + self.issuer.not_before.as_secs() as u64; let claims = JwtClaims { jti: uuid::Uuid::new_v4().to_string(), @@ -186,41 +165,37 @@ impl JwtManager { return Err(IggyError::CannotGenerateJwt); } - let (refresh_token, raw_refresh_token) = RefreshToken::new( - user_id, - now, - self.issuer.refresh_token_expiry.as_secs() as u64, - ); - self.tokens_storage.save_refresh_token(&refresh_token)?; - - Ok(GeneratedTokens { + Ok(GeneratedToken { user_id, access_token: access_token.unwrap(), - refresh_token: 
raw_refresh_token, access_token_expiry: exp, - refresh_token_expiry: refresh_token.expiry, }) } - pub fn refresh_token(&self, refresh_token: &str) -> Result { - let now = IggyTimestamp::now().to_secs(); - if refresh_token.is_empty() { - return Err(IggyError::InvalidRefreshToken); + pub async fn refresh_token(&self, token: &str) -> Result { + if token.is_empty() { + return Err(IggyError::InvalidAccessToken); } - let token_hash = RefreshToken::hash_token(refresh_token); - let refresh_token = self.tokens_storage.load_refresh_token(&token_hash); - if refresh_token.is_err() { - return Err(IggyError::InvalidRefreshToken); + let token_header = + jsonwebtoken::decode_header(token).map_err(|_| IggyError::InvalidAccessToken)?; + let jwt_claims = self.decode(token, token_header.alg)?; + let id = jwt_claims.claims.jti; + let expiry = jwt_claims.claims.exp; + if self + .revoked_tokens + .write() + .await + .insert(id.clone(), expiry) + .is_some() + { + return Err(IggyError::InvalidAccessToken); } - let refresh_token = refresh_token.unwrap(); - self.tokens_storage.delete_refresh_token(&token_hash)?; - if refresh_token.expiry < now { - return Err(IggyError::RefreshTokenExpired); - } - - self.generate(refresh_token.user_id) + self.tokens_storage + .save_revoked_access_token(&RevokedAccessToken { id, expiry }) + .await?; + self.generate(jwt_claims.claims.sub) } pub fn decode( @@ -262,7 +237,8 @@ impl JwtManager { .save_revoked_access_token(&RevokedAccessToken { id: token_id.to_string(), expiry, - })?; + }) + .await?; info!("Revoked access token with ID: {token_id}"); Ok(()) } diff --git a/server/src/http/jwt/mod.rs b/server/src/http/jwt/mod.rs index 3bb564cd7..ec93d650a 100644 --- a/server/src/http/jwt/mod.rs +++ b/server/src/http/jwt/mod.rs @@ -2,5 +2,4 @@ pub mod cleaner; pub mod json_web_token; pub mod jwt_manager; pub mod middleware; -pub mod refresh_token; pub mod storage; diff --git a/server/src/http/jwt/refresh_token.rs b/server/src/http/jwt/refresh_token.rs deleted file 
mode 100644 index 05614ba54..000000000 --- a/server/src/http/jwt/refresh_token.rs +++ /dev/null @@ -1,73 +0,0 @@ -use crate::streaming::utils::hash; -use iggy::models::user_info::UserId; -use iggy::utils::text::as_base64; -use ring::rand::SecureRandom; -use serde::{Deserialize, Serialize}; - -const REFRESH_TOKEN_SIZE: usize = 50; - -#[derive(Debug, Serialize, Deserialize)] -pub struct RefreshToken { - #[serde(skip)] - pub token_hash: String, - pub user_id: u32, - pub expiry: u64, -} - -impl RefreshToken { - pub fn new(user_id: UserId, now: u64, expiry: u64) -> (Self, String) { - let mut buffer: [u8; REFRESH_TOKEN_SIZE] = [0; REFRESH_TOKEN_SIZE]; - let system_random = ring::rand::SystemRandom::new(); - system_random.fill(&mut buffer).unwrap(); - let token = as_base64(&buffer); - let hash = Self::hash_token(&token); - let expiry = now + expiry; - ( - Self { - token_hash: hash, - user_id, - expiry, - }, - token, - ) - } - - pub fn is_expired(&self, now: u64) -> bool { - now > self.expiry - } - - pub fn hash_token(token: &str) -> String { - hash::calculate_256(token.as_bytes()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use iggy::utils::timestamp::IggyTimestamp; - - #[test] - fn refresh_token_should_be_created_with_random_secure_value_and_hashed_successfully() { - let user_id = 1; - let now = IggyTimestamp::now().to_secs(); - let expiry = 10; - let (refresh_token, raw_token) = RefreshToken::new(user_id, now, expiry); - assert_eq!(refresh_token.user_id, user_id); - assert_eq!(refresh_token.expiry, now + expiry); - assert!(!raw_token.is_empty()); - assert_ne!(refresh_token.token_hash, raw_token); - assert_eq!( - refresh_token.token_hash, - RefreshToken::hash_token(&raw_token) - ); - } - - #[test] - fn refresh_access_token_should_be_expired_given_passed_expiry() { - let user_id = 1; - let now = IggyTimestamp::now().to_secs(); - let expiry = 1; - let (refresh_token, _) = RefreshToken::new(user_id, now, expiry); - assert!(refresh_token.is_expired(now + expiry + 
1)); - } -} diff --git a/server/src/http/jwt/storage.rs b/server/src/http/jwt/storage.rs index 6c6535f20..f368f1559 100644 --- a/server/src/http/jwt/storage.rs +++ b/server/src/http/jwt/storage.rs @@ -1,195 +1,92 @@ use crate::http::jwt::json_web_token::RevokedAccessToken; -use crate::http::jwt::refresh_token::RefreshToken; +use crate::streaming::persistence::persister::Persister; +use crate::streaming::utils::file; use anyhow::Context; +use bytes::{BufMut, BytesMut}; use iggy::error::IggyError; -use sled::Db; -use std::str::from_utf8; +use std::collections::HashMap; use std::sync::Arc; -use tracing::{error, info}; - -const REVOKED_ACCESS_TOKENS_KEY_PREFIX: &str = "revoked_access_token"; -const REFRESH_TOKENS_KEY_PREFIX: &str = "refresh_token"; +use tokio::io::AsyncReadExt; +use tracing::info; #[derive(Debug)] pub struct TokenStorage { - db: Arc, + persister: Arc, + path: String, } impl TokenStorage { - pub fn new(db: Arc) -> Self { - Self { db } - } - - pub fn load_refresh_token(&self, token_hash: &str) -> Result { - let key = Self::get_refresh_token_key(token_hash); - let token_data = self - .db - .get(&key) - .with_context(|| format!("Failed to load refresh token, key: {}", key)); - if let Err(err) = token_data { - return Err(IggyError::CannotLoadResource(err)); - } - - let token_data = token_data.unwrap(); - if token_data.is_none() { - return Err(IggyError::ResourceNotFound(key)); - } - - let token_data = token_data.unwrap(); - let token_data = rmp_serde::from_slice::(&token_data) - .with_context(|| format!("Failed to deserialize refresh token, key: {}", key)); - if let Err(err) = token_data { - return Err(IggyError::CannotDeserializeResource(err)); + pub fn new(persister: Arc, path: &str) -> Self { + Self { + persister, + path: path.to_owned(), } - - let mut token_data = token_data.unwrap(); - token_data.token_hash = token_hash.to_string(); - Ok(token_data) } - pub fn load_all_refresh_tokens(&self) -> Result, IggyError> { - let key = 
format!("{REFRESH_TOKENS_KEY_PREFIX}:"); - let refresh_tokens: Result, IggyError> = self - .db - .scan_prefix(&key) - .map(|data| { - let (hash, value) = data - .with_context(|| { - format!( - "Failed to load refresh token, when searching by key: {}", - key - ) - }) - .map_err(IggyError::CannotLoadResource)?; - - let mut token = rmp_serde::from_slice::(&value) - .with_context(|| { - format!( - "Failed to deserialize refresh token, when searching by key: {}", - key - ) - }) - .map_err(IggyError::CannotDeserializeResource)?; - - token.token_hash = from_utf8(&hash) - .with_context(|| "Failed to convert hash to UTF-8 string") - .map_err(IggyError::CannotDeserializeResource)? - .to_string(); - Ok(token) - }) - .collect(); - - let refresh_tokens = refresh_tokens?; - if !refresh_tokens.is_empty() { - info!("Loaded {} refresh tokens", refresh_tokens.len()); + pub async fn load_all_revoked_access_tokens( + &self, + ) -> Result, IggyError> { + let file = file::open(&self.path).await; + if file.is_err() { + info!("No revoked access tokens found to load."); + return Ok(vec![]); } - Ok(refresh_tokens) - } - pub fn load_all_revoked_access_tokens(&self) -> Result, IggyError> { - let key = format!("{REVOKED_ACCESS_TOKENS_KEY_PREFIX}:"); - let revoked_tokens: Result, IggyError> = self - .db - .scan_prefix(&key) - .map(|data| { - let (_, value) = data - .with_context(|| { - format!( - "Failed to load invoked refresh token, when searching by key: {}", - key - ) - }) - .map_err(IggyError::CannotLoadResource)?; + info!("Loading revoked access tokens from: {}", self.path); + let mut file = file.unwrap(); + let file_size = file.metadata().await?.len() as usize; + let mut buffer = BytesMut::with_capacity(file_size); + buffer.put_bytes(0, file_size); + file.read_exact(&mut buffer).await?; - let token = rmp_serde::from_slice::(&value) - .with_context(|| { - format!( - "Failed to deserialize revoked access token, when searching by key: {}", - key - ) - }) - 
.map_err(IggyError::CannotDeserializeResource)?; - Ok(token) - }) - .collect(); + let tokens: HashMap = bincode::deserialize(&buffer) + .with_context(|| "Failed to deserialize revoked access tokens") + .map_err(IggyError::CannotDeserializeResource)?; - let revoked_tokens = revoked_tokens?; - info!("Loaded {} revoked access tokens", revoked_tokens.len()); - Ok(revoked_tokens) - } + let tokens = tokens + .into_iter() + .map(|(id, expiry)| RevokedAccessToken { id, expiry }) + .collect::>(); - pub fn save_revoked_access_token(&self, token: &RevokedAccessToken) -> Result<(), IggyError> { - let key = Self::get_revoked_token_key(&token.id); - match rmp_serde::to_vec(&token) - .with_context(|| format!("Failed to serialize revoked access token, key: {}", key)) - { - Ok(data) => { - if let Err(err) = self - .db - .insert(&key, data) - .with_context(|| "Failed to save revoked access token") - { - return Err(IggyError::CannotSaveResource(err)); - } - } - Err(err) => { - return Err(IggyError::CannotSerializeResource(err)); - } - } - Ok(()) + info!("Loaded {} revoked access tokens", tokens.len()); + Ok(tokens) } - pub fn save_refresh_token(&self, token: &RefreshToken) -> Result<(), IggyError> { - let key = Self::get_refresh_token_key(&token.token_hash); - match rmp_serde::to_vec(&token) - .with_context(|| format!("Failed to serialize refresh token, key: {}", key)) - { - Ok(data) => { - if let Err(err) = self - .db - .insert(&key, data) - .with_context(|| format!("Failed to save refresh token, key: {}", key)) - { - return Err(IggyError::CannotSaveResource(err)); - } - } - Err(err) => { - return Err(IggyError::CannotSerializeResource(err)); - } - } + pub async fn save_revoked_access_token( + &self, + token: &RevokedAccessToken, + ) -> Result<(), IggyError> { + let tokens = self.load_all_revoked_access_tokens().await?; + let mut map = tokens + .into_iter() + .map(|token| (token.id, token.expiry)) + .collect::>(); + map.insert(token.id.to_owned(), token.expiry); + let bytes = 
bincode::serialize(&map) + .with_context(|| "Failed to serialize revoked access tokens") + .map_err(IggyError::CannotSerializeResource)?; + self.persister.overwrite(&self.path, &bytes).await?; Ok(()) } - pub fn delete_revoked_access_token(&self, id: &str) -> Result<(), IggyError> { - let key = Self::get_revoked_token_key(id); - if let Err(err) = self - .db - .remove(&key) - .with_context(|| format!("Failed to delete revoked access token, key: {}", key)) - { - return Err(IggyError::CannotDeleteResource(err)); + pub async fn delete_revoked_access_tokens(&self, id: &[String]) -> Result<(), IggyError> { + let tokens = self.load_all_revoked_access_tokens().await?; + if tokens.is_empty() { + return Ok(()); } - Ok(()) - } - pub fn delete_refresh_token(&self, token_hash: &str) -> Result<(), IggyError> { - let key = Self::get_refresh_token_key(token_hash); - if let Err(err) = self - .db - .remove(&key) - .with_context(|| format!("Failed to delete refresh token, key: {}", key)) - { - error!("Cannot delete refresh token. 
Error: {err}"); - return Err(IggyError::CannotDeleteResource(err)); + let mut map = tokens + .into_iter() + .map(|token| (token.id, token.expiry)) + .collect::>(); + for id in id { + map.remove(id); } - Ok(()) - } - fn get_revoked_token_key(id: &str) -> String { - format!("{REVOKED_ACCESS_TOKENS_KEY_PREFIX}:{id}") - } - - fn get_refresh_token_key(token_hash: &str) -> String { - format!("{REFRESH_TOKENS_KEY_PREFIX}:{token_hash}") + let bytes = bincode::serialize(&map) + .with_context(|| "Failed to serialize revoked access tokens") + .map_err(IggyError::CannotSerializeResource)?; + self.persister.overwrite(&self.path, &bytes).await?; + Ok(()) } } diff --git a/server/src/http/mapper.rs b/server/src/http/mapper.rs index b09f690ab..e14a6d39f 100644 --- a/server/src/http/mapper.rs +++ b/server/src/http/mapper.rs @@ -1,4 +1,4 @@ -use crate::http::jwt::json_web_token::GeneratedTokens; +use crate::http::jwt::json_web_token::GeneratedToken; use crate::streaming::clients::client_manager::Client; use crate::streaming::personal_access_tokens::personal_access_token::PersonalAccessToken; use crate::streaming::streams::stream::Stream; @@ -9,7 +9,7 @@ use iggy::locking::IggySharedMut; use iggy::locking::IggySharedMutFn; use iggy::models::client_info::ConsumerGroupInfo; use iggy::models::consumer_group::{ConsumerGroupDetails, ConsumerGroupMember}; -use iggy::models::identity_info::{IdentityInfo, IdentityTokens, TokenInfo}; +use iggy::models::identity_info::{IdentityInfo, TokenInfo}; use iggy::models::personal_access_token::PersonalAccessTokenInfo; use iggy::models::stream::StreamDetails; use iggy::models::topic::TopicDetails; @@ -111,7 +111,7 @@ pub fn map_user(user: &User) -> UserInfoDetails { } } -pub fn map_users(users: &[User]) -> Vec { +pub fn map_users(users: &[&User]) -> Vec { let mut users_data = Vec::with_capacity(users.len()); for user in users { let user = UserInfo { @@ -127,13 +127,13 @@ pub fn map_users(users: &[User]) -> Vec { } pub fn map_personal_access_tokens( - 
personal_access_tokens: &[PersonalAccessToken], + personal_access_tokens: &[&PersonalAccessToken], ) -> Vec { let mut personal_access_tokens_data = Vec::with_capacity(personal_access_tokens.len()); for personal_access_token in personal_access_tokens { let personal_access_token = PersonalAccessTokenInfo { name: personal_access_token.name.clone(), - expiry: personal_access_token.expiry, + expiry_at: personal_access_token.expiry_at, }; personal_access_tokens_data.push(personal_access_token); } @@ -220,20 +220,12 @@ pub async fn map_consumer_group(consumer_group: &ConsumerGroup) -> ConsumerGroup consumer_group_details } -pub fn map_generated_tokens_to_identity_info(tokens: GeneratedTokens) -> IdentityInfo { +pub fn map_generated_access_token_to_identity_info(token: GeneratedToken) -> IdentityInfo { IdentityInfo { - user_id: tokens.user_id, - tokens: Some({ - IdentityTokens { - access_token: TokenInfo { - token: tokens.access_token, - expiry: tokens.access_token_expiry, - }, - refresh_token: TokenInfo { - token: tokens.refresh_token, - expiry: tokens.refresh_token_expiry, - }, - } + user_id: token.user_id, + access_token: Some(TokenInfo { + token: token.access_token, + expiry: token.access_token_expiry, }), } } diff --git a/server/src/http/partitions.rs b/server/src/http/partitions.rs index e50649ade..f4a83ceb8 100644 --- a/server/src/http/partitions.rs +++ b/server/src/http/partitions.rs @@ -6,6 +6,8 @@ use axum::extract::{Path, Query, State}; use axum::http::StatusCode; use axum::routing::post; use axum::{Extension, Json, Router}; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::{CREATE_PARTITIONS_CODE, DELETE_PARTITIONS_CODE}; use iggy::identifier::Identifier; use iggy::partitions::create_partitions::CreatePartitions; use iggy::partitions::delete_partitions::DeletePartitions; @@ -39,6 +41,15 @@ async fn create_partitions( command.partitions_count, ) .await?; + system + .state + .apply( + CREATE_PARTITIONS_CODE, + identity.user_id, + 
&command.as_bytes(), + None, + ) + .await?; Ok(StatusCode::CREATED) } @@ -55,10 +66,24 @@ async fn delete_partitions( system .delete_partitions( &Session::stateless(identity.user_id, identity.ip_address), - &query.stream_id, - &query.topic_id, + &query.stream_id.clone(), + &query.topic_id.clone(), query.partitions_count, ) .await?; + system + .state + .apply( + DELETE_PARTITIONS_CODE, + identity.user_id, + &DeletePartitions { + stream_id: query.stream_id.clone(), + topic_id: query.topic_id.clone(), + partitions_count: query.partitions_count, + } + .as_bytes(), + None, + ) + .await?; Ok(StatusCode::NO_CONTENT) } diff --git a/server/src/http/personal_access_tokens.rs b/server/src/http/personal_access_tokens.rs index 674fe512e..820967aca 100644 --- a/server/src/http/personal_access_tokens.rs +++ b/server/src/http/personal_access_tokens.rs @@ -1,16 +1,21 @@ use crate::http::error::CustomError; use crate::http::jwt::json_web_token::Identity; use crate::http::mapper; -use crate::http::mapper::map_generated_tokens_to_identity_info; +use crate::http::mapper::map_generated_access_token_to_identity_info; use crate::http::shared::AppState; +use crate::state::models::CreatePersonalAccessTokenWithHash; +use crate::streaming::personal_access_tokens::personal_access_token::PersonalAccessToken; use crate::streaming::session::Session; use axum::extract::{Path, State}; use axum::http::StatusCode; use axum::routing::{delete, get, post}; use axum::{Extension, Json, Router}; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::{CREATE_PERSONAL_ACCESS_TOKEN_CODE, DELETE_PERSONAL_ACCESS_TOKEN_CODE}; use iggy::models::identity_info::IdentityInfo; use iggy::models::personal_access_token::{PersonalAccessTokenInfo, RawPersonalAccessToken}; use iggy::personal_access_tokens::create_personal_access_token::CreatePersonalAccessToken; +use iggy::personal_access_tokens::delete_personal_access_token::DeletePersonalAccessToken; use 
iggy::personal_access_tokens::login_with_personal_access_token::LoginWithPersonalAccessToken; use iggy::validatable::Validatable; use std::sync::Arc; @@ -50,7 +55,7 @@ async fn create_personal_access_token( Json(command): Json, ) -> Result, CustomError> { command.validate()?; - let system = state.system.read(); + let mut system = state.system.write(); let token = system .create_personal_access_token( &Session::stateless(identity.user_id, identity.ip_address), @@ -58,6 +63,24 @@ async fn create_personal_access_token( command.expiry, ) .await?; + + let token_hash = PersonalAccessToken::hash_token(&token); + system + .state + .apply( + CREATE_PERSONAL_ACCESS_TOKEN_CODE, + identity.user_id, + &CreatePersonalAccessTokenWithHash { + command: CreatePersonalAccessToken { + name: command.name.to_owned(), + expiry: command.expiry, + }, + hash: token_hash, + } + .as_bytes(), + None, + ) + .await?; Ok(Json(RawPersonalAccessToken { token })) } @@ -66,13 +89,22 @@ async fn delete_personal_access_token( Extension(identity): Extension, Path(name): Path, ) -> Result { - let system = state.system.read(); + let mut system = state.system.write(); system .delete_personal_access_token( &Session::stateless(identity.user_id, identity.ip_address), &name, ) .await?; + system + .state + .apply( + DELETE_PERSONAL_ACCESS_TOKEN_CODE, + identity.user_id, + &DeletePersonalAccessToken { name }.as_bytes(), + None, + ) + .await?; Ok(StatusCode::NO_CONTENT) } @@ -86,5 +118,5 @@ async fn login_with_personal_access_token( .login_with_personal_access_token(&command.token, None) .await?; let tokens = state.jwt_manager.generate(user.id)?; - Ok(Json(map_generated_tokens_to_identity_info(tokens))) + Ok(Json(map_generated_access_token_to_identity_info(tokens))) } diff --git a/server/src/http/streams.rs b/server/src/http/streams.rs index dee34c465..eaaefb07a 100644 --- a/server/src/http/streams.rs +++ b/server/src/http/streams.rs @@ -7,9 +7,15 @@ use axum::extract::{Path, State}; use axum::http::StatusCode; 
use axum::routing::{delete, get}; use axum::{Extension, Json, Router}; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::{ + CREATE_STREAM_CODE, DELETE_STREAM_CODE, PURGE_STREAM_CODE, UPDATE_STREAM_CODE, +}; use iggy::identifier::Identifier; use iggy::models::stream::{Stream, StreamDetails}; use iggy::streams::create_stream::CreateStream; +use iggy::streams::delete_stream::DeleteStream; +use iggy::streams::purge_stream::PurgeStream; use iggy::streams::update_stream::UpdateStream; use iggy::validatable::Validatable; use std::sync::Arc; @@ -65,6 +71,15 @@ async fn create_stream( &command.name, ) .await?; + system + .state + .apply( + CREATE_STREAM_CODE, + identity.user_id, + &command.as_bytes(), + None, + ) + .await?; Ok(StatusCode::CREATED) } @@ -84,6 +99,15 @@ async fn update_stream( &command.name, ) .await?; + system + .state + .apply( + UPDATE_STREAM_CODE, + identity.user_id, + &command.as_bytes(), + None, + ) + .await?; Ok(StatusCode::NO_CONTENT) } @@ -100,6 +124,15 @@ async fn delete_stream( &stream_id, ) .await?; + system + .state + .apply( + DELETE_STREAM_CODE, + identity.user_id, + &DeleteStream { stream_id }.as_bytes(), + None, + ) + .await?; Ok(StatusCode::NO_CONTENT) } @@ -116,5 +149,14 @@ async fn purge_stream( &stream_id, ) .await?; + system + .state + .apply( + PURGE_STREAM_CODE, + identity.user_id, + &PurgeStream { stream_id }.as_bytes(), + None, + ) + .await?; Ok(StatusCode::NO_CONTENT) } diff --git a/server/src/http/topics.rs b/server/src/http/topics.rs index 0e935d480..c80ac4ceb 100644 --- a/server/src/http/topics.rs +++ b/server/src/http/topics.rs @@ -7,9 +7,13 @@ use axum::extract::{Path, State}; use axum::http::StatusCode; use axum::routing::{delete, get}; use axum::{Extension, Json, Router}; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::{CREATE_TOPIC_CODE, DELETE_TOPIC_CODE, PURGE_TOPIC_CODE, UPDATE_TOPIC_CODE}; use iggy::identifier::Identifier; use iggy::models::topic::{Topic, TopicDetails}; use 
iggy::topics::create_topic::CreateTopic; +use iggy::topics::delete_topic::DeleteTopic; +use iggy::topics::purge_topic::PurgeTopic; use iggy::topics::update_topic::UpdateTopic; use iggy::validatable::Validatable; use std::sync::Arc; @@ -85,6 +89,15 @@ async fn create_topic( command.replication_factor, ) .await?; + system + .state + .apply( + CREATE_TOPIC_CODE, + identity.user_id, + &command.as_bytes(), + None, + ) + .await?; Ok(StatusCode::CREATED) } @@ -110,6 +123,15 @@ async fn update_topic( command.replication_factor, ) .await?; + system + .state + .apply( + UPDATE_TOPIC_CODE, + identity.user_id, + &command.as_bytes(), + None, + ) + .await?; Ok(StatusCode::NO_CONTENT) } @@ -128,6 +150,19 @@ async fn delete_topic( &topic_id, ) .await?; + system + .state + .apply( + DELETE_TOPIC_CODE, + identity.user_id, + &DeleteTopic { + stream_id, + topic_id, + } + .as_bytes(), + None, + ) + .await?; Ok(StatusCode::NO_CONTENT) } @@ -146,5 +181,18 @@ async fn purge_topic( &topic_id, ) .await?; + system + .state + .apply( + PURGE_TOPIC_CODE, + identity.user_id, + &PurgeTopic { + stream_id, + topic_id, + } + .as_bytes(), + None, + ) + .await?; Ok(StatusCode::NO_CONTENT) } diff --git a/server/src/http/users.rs b/server/src/http/users.rs index 473fc2f74..32017c802 100644 --- a/server/src/http/users.rs +++ b/server/src/http/users.rs @@ -1,18 +1,25 @@ use crate::http::error::CustomError; use crate::http::jwt::json_web_token::Identity; use crate::http::mapper; -use crate::http::mapper::map_generated_tokens_to_identity_info; +use crate::http::mapper::map_generated_access_token_to_identity_info; use crate::http::shared::AppState; use crate::streaming::session::Session; +use crate::streaming::utils::crypto; use axum::extract::{Path, State}; use axum::http::StatusCode; use axum::routing::{delete, get, post, put}; use axum::{Extension, Json, Router}; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::{ + CHANGE_PASSWORD_CODE, CREATE_USER_CODE, DELETE_USER_CODE, 
UPDATE_PERMISSIONS_CODE, + UPDATE_USER_CODE, +}; use iggy::identifier::Identifier; use iggy::models::identity_info::IdentityInfo; use iggy::models::user_info::{UserInfo, UserInfoDetails}; use iggy::users::change_password::ChangePassword; use iggy::users::create_user::CreateUser; +use iggy::users::delete_user::DeleteUser; use iggy::users::login_user::LoginUser; use iggy::users::update_permissions::UpdatePermissions; use iggy::users::update_user::UpdateUser; @@ -42,13 +49,11 @@ async fn get_user( ) -> Result, CustomError> { let user_id = Identifier::from_str_value(&user_id)?; let system = state.system.read(); - let user = system - .find_user( - &Session::stateless(identity.user_id, identity.ip_address), - &user_id, - ) - .await?; - let user = mapper::map_user(&user); + let user = system.find_user( + &Session::stateless(identity.user_id, identity.ip_address), + &user_id, + )?; + let user = mapper::map_user(user); Ok(Json(user)) } @@ -80,6 +85,23 @@ async fn create_user( command.permissions.clone(), ) .await?; + + // For the security of the system, we hash the password before storing it in metadata. 
+ system + .state + .apply( + CREATE_USER_CODE, + identity.user_id, + &CreateUser { + username: command.username.to_owned(), + password: crypto::hash_password(&command.password), + status: command.status, + permissions: command.permissions.clone(), + } + .as_bytes(), + None, + ) + .await?; Ok(StatusCode::NO_CONTENT) } @@ -91,7 +113,8 @@ async fn update_user( ) -> Result { command.user_id = Identifier::from_str_value(&user_id)?; command.validate()?; - let system = state.system.read(); + let bytes = command.as_bytes(); + let mut system = state.system.write(); system .update_user( &Session::stateless(identity.user_id, identity.ip_address), @@ -100,6 +123,10 @@ async fn update_user( command.status, ) .await?; + system + .state + .apply(UPDATE_USER_CODE, identity.user_id, &bytes, None) + .await?; Ok(StatusCode::NO_CONTENT) } @@ -111,6 +138,7 @@ async fn update_permissions( ) -> Result { command.user_id = Identifier::from_str_value(&user_id)?; command.validate()?; + let bytes = command.as_bytes(); let mut system = state.system.write(); system .update_permissions( @@ -119,6 +147,10 @@ async fn update_permissions( command.permissions, ) .await?; + system + .state + .apply(UPDATE_PERMISSIONS_CODE, identity.user_id, &bytes, None) + .await?; Ok(StatusCode::NO_CONTENT) } @@ -130,7 +162,7 @@ async fn change_password( ) -> Result { command.user_id = Identifier::from_str_value(&user_id)?; command.validate()?; - let system = state.system.read(); + let mut system = state.system.write(); system .change_password( &Session::stateless(identity.user_id, identity.ip_address), @@ -139,6 +171,21 @@ async fn change_password( &command.new_password, ) .await?; + // For the security of the system, we hash the password before storing it in metadata. 
+ system + .state + .apply( + CHANGE_PASSWORD_CODE, + identity.user_id, + &ChangePassword { + user_id: command.user_id.to_owned(), + current_password: "".into(), + new_password: crypto::hash_password(&command.new_password), + } + .as_bytes(), + None, + ) + .await?; Ok(StatusCode::NO_CONTENT) } @@ -155,6 +202,15 @@ async fn delete_user( &user_id, ) .await?; + system + .state + .apply( + DELETE_USER_CODE, + identity.user_id, + &DeleteUser { user_id }.as_bytes(), + None, + ) + .await?; Ok(StatusCode::NO_CONTENT) } @@ -168,7 +224,7 @@ async fn login_user( .login_user(&command.username, &command.password, None) .await?; let tokens = state.jwt_manager.generate(user.id)?; - Ok(Json(map_generated_tokens_to_identity_info(tokens))) + Ok(Json(map_generated_access_token_to_identity_info(tokens))) } async fn logout_user( @@ -190,11 +246,11 @@ async fn refresh_token( State(state): State>, Json(command): Json, ) -> Result, CustomError> { - let tokens = state.jwt_manager.refresh_token(&command.refresh_token)?; - Ok(Json(map_generated_tokens_to_identity_info(tokens))) + let token = state.jwt_manager.refresh_token(&command.token).await?; + Ok(Json(map_generated_access_token_to_identity_info(token))) } #[derive(Debug, Deserialize)] struct RefreshToken { - refresh_token: String, + token: String, } diff --git a/server/src/lib.rs b/server/src/lib.rs index b4be2d8f5..9af0acfcb 100644 --- a/server/src/lib.rs +++ b/server/src/lib.rs @@ -17,5 +17,7 @@ pub mod http; pub mod log; pub mod quic; pub mod server_error; +pub mod state; pub mod streaming; pub mod tcp; +pub mod versioning; diff --git a/server/src/main.rs b/server/src/main.rs index 30b54b5e0..2dacb689f 100644 --- a/server/src/main.rs +++ b/server/src/main.rs @@ -42,7 +42,6 @@ async fn main() -> Result<(), ServerError> { let system = SharedSystem::new(System::new( config.system.clone(), - None, config.personal_access_token, )); diff --git a/server/src/state/file.rs b/server/src/state/file.rs new file mode 100644 index 
000000000..f23c7cd0e --- /dev/null +++ b/server/src/state/file.rs @@ -0,0 +1,180 @@ +use crate::state::{State, StateEntry}; +use crate::streaming::persistence::persister::Persister; +use crate::streaming::utils::file; +use crate::versioning::SemanticVersion; +use async_trait::async_trait; +use bytes::{BufMut, Bytes, BytesMut}; +use iggy::bytes_serializable::BytesSerializable; +use iggy::error::IggyError; +use iggy::utils::byte_size::IggyByteSize; +use iggy::utils::timestamp::IggyTimestamp; +use log::debug; +use std::fmt::Debug; +use std::path::Path; +use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; +use std::sync::Arc; +use tokio::io::{AsyncReadExt, BufReader}; +use tracing::info; + +const BUF_READER_CAPACITY_BYTES: usize = 512 * 1000; + +#[derive(Debug)] +pub struct FileState { + current_index: AtomicU64, + entries_count: AtomicU64, + current_leader: AtomicU32, + term: AtomicU64, + version: u32, + path: String, + persister: Arc, +} + +impl FileState { + pub fn new(path: &str, version: &SemanticVersion, persister: Arc) -> Self { + Self { + current_index: AtomicU64::new(0), + entries_count: AtomicU64::new(0), + current_leader: AtomicU32::new(0), + term: AtomicU64::new(0), + path: path.into(), + persister, + version: version.get_numeric_version().expect("Invalid version"), + } + } + + pub fn current_index(&self) -> u64 { + self.current_index.load(Ordering::SeqCst) + } + + pub fn entries_count(&self) -> u64 { + self.entries_count.load(Ordering::SeqCst) + } + + pub fn term(&self) -> u64 { + self.term.load(Ordering::SeqCst) + } +} + +#[async_trait] +impl State for FileState { + async fn init(&self) -> Result, IggyError> { + if !Path::new(&self.path).exists() { + info!("State file does not exist, creating a new one"); + self.persister.overwrite(&self.path, &[]).await?; + } + + let entries = self.load_entries().await?; + let entries_count = entries.len() as u64; + self.entries_count.store(entries_count, Ordering::SeqCst); + if entries_count == 0 { + 
self.current_index.store(0, Ordering::SeqCst); + } else { + self.current_index + .store(entries_count - 1, Ordering::SeqCst); + } + + return Ok(entries); + } + + async fn load_entries(&self) -> Result, IggyError> { + if !Path::new(&self.path).exists() { + return Err(IggyError::StateFileNotFound); + } + + let file = file::open(&self.path).await?; + let file_size = file.metadata().await?.len(); + if file_size == 0 { + info!("State file is empty"); + return Ok(Vec::new()); + } + + info!( + "Loading state, file size: {}", + IggyByteSize::from(file_size).as_human_string() + ); + let mut entries = Vec::new(); + let mut total_size: u64 = 0; + let mut reader = BufReader::with_capacity(BUF_READER_CAPACITY_BYTES, file); + loop { + let index = reader.read_u64_le().await?; + let term = reader.read_u64_le().await?; + let leader_id = reader.read_u32_le().await?; + let version = reader.read_u32_le().await?; + let flags = reader.read_u64_le().await?; + let timestamp = IggyTimestamp::from(reader.read_u64_le().await?); + let user_id = reader.read_u32_le().await?; + let code = reader.read_u32_le().await?; + let payload_length = reader.read_u32_le().await? as usize; + let mut payload = BytesMut::with_capacity(payload_length); + payload.put_bytes(0, payload_length); + reader.read_exact(&mut payload).await?; + let context_length = reader.read_u32_le().await? 
as usize; + let mut context = BytesMut::with_capacity(context_length); + context.put_bytes(0, context_length); + reader.read_exact(&mut context).await?; + let entry = StateEntry { + index, + term, + leader_id, + version, + flags, + timestamp, + user_id, + code, + payload: payload.freeze(), + context: context.freeze(), + }; + debug!("Read state entry: {entry}"); + entries.push(entry); + total_size += 8 + + 8 + + 4 + + 4 + + 8 + + 8 + + 4 + + 4 + + 4 + + payload_length as u64 + + 4 + + context_length as u64; + if total_size == file_size { + break; + } + } + + info!("Loaded {} state entries", entries.len()); + Ok(entries) + } + + async fn apply( + &self, + code: u32, + user_id: u32, + payload: &[u8], + context: Option<&[u8]>, + ) -> Result<(), IggyError> { + debug!("Applying state entry with code: {code}, user ID: {user_id}"); + let entry = StateEntry { + index: if self.entries_count.load(Ordering::SeqCst) == 0 { + 0 + } else { + self.current_index.fetch_add(1, Ordering::SeqCst) + 1 + }, + term: self.term.load(Ordering::SeqCst), + leader_id: self.current_leader.load(Ordering::SeqCst), + version: self.version, + flags: 0, + timestamp: IggyTimestamp::now(), + user_id, + code, + payload: Bytes::copy_from_slice(payload), + context: context.map_or(Bytes::new(), Bytes::copy_from_slice), + }; + + self.entries_count.fetch_add(1, Ordering::SeqCst); + self.persister.append(&self.path, &entry.as_bytes()).await?; + debug!("Applied state entry with code: {code}, user ID: {user_id}, {entry}",); + Ok(()) + } +} diff --git a/server/src/state/mod.rs b/server/src/state/mod.rs new file mode 100644 index 000000000..7c2915156 --- /dev/null +++ b/server/src/state/mod.rs @@ -0,0 +1,122 @@ +use async_trait::async_trait; +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command; +use iggy::error::IggyError; +use iggy::utils::byte_size::IggyByteSize; +use iggy::utils::timestamp::IggyTimestamp; +use std::fmt::{Debug, Display, Formatter}; + 
+pub mod file; +pub mod models; +pub mod system; + +#[async_trait] +pub trait State: Send + Sync + Debug { + async fn init(&self) -> Result, IggyError>; + async fn load_entries(&self) -> Result, IggyError>; + async fn apply( + &self, + code: u32, + user_id: u32, + payload: &[u8], + context: Option<&[u8]>, + ) -> Result<(), IggyError>; +} + +/// State entry in the log +/// - `index` - Index (operation number) of the entry in the log +/// - `term` - Election term (view number) for replication +/// - `leader_id` - Leader ID for replication +/// - `version` - Server version based on semver as number e.g. 1.234.567 -> 1234567 +/// - `flags` - Reserved for future use +/// - `timestamp` - Timestamp when the command was issued +/// - `user_id` - User ID of the user who issued the command +/// - `code` - Command code +/// - `payload` - Payload of the command +/// - `context` - Optional context e.g. used to enrich the payload with additional data +#[derive(Debug)] +pub struct StateEntry { + pub index: u64, + pub term: u64, + pub leader_id: u32, + pub version: u32, + pub flags: u64, + pub timestamp: IggyTimestamp, + pub user_id: u32, + pub code: u32, + pub payload: Bytes, + pub context: Bytes, +} + +impl Display for StateEntry { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "StateEntry {{ index: {}, term: {}, leader ID: {}, version: {}, flags: {}, timestamp: {}, user ID: {}, code: {}, name: {}, size: {} }}", + self.index, + self.term, + self.leader_id, + self.version, + self.flags, + self.timestamp, + self.user_id, + self.code, + command::get_name_from_code(self.code).unwrap_or("invalid_command"), + IggyByteSize::from(self.payload.len() as u64).as_human_string() + ) + } +} + +impl BytesSerializable for StateEntry { + fn as_bytes(&self) -> Bytes { + let mut bytes = BytesMut::with_capacity( + 8 + 8 + 4 + 4 + 8 + 8 + 4 + 4 + 4 + self.payload.len() + 4 + self.context.len(), + ); + bytes.put_u64_le(self.index); + bytes.put_u64_le(self.term); + 
bytes.put_u32_le(self.leader_id); + bytes.put_u32_le(self.version); + bytes.put_u64_le(self.flags); + bytes.put_u64_le(self.timestamp.to_micros()); + bytes.put_u32_le(self.user_id); + bytes.put_u32_le(self.code); + bytes.put_u32_le(self.payload.len() as u32); + bytes.put_slice(&self.payload); + bytes.put_u32_le(self.context.len() as u32); + bytes.put_slice(&self.context); + bytes.freeze() + } + + fn from_bytes(bytes: Bytes) -> Result + where + Self: Sized, + { + let index = bytes.slice(0..8).get_u64_le(); + let term = bytes.slice(8..16).get_u64_le(); + let leader_id = bytes.slice(16..20).get_u32_le(); + let version = bytes.slice(20..24).get_u32_le(); + let flags = bytes.slice(24..32).get_u64_le(); + let timestamp = IggyTimestamp::from(bytes.slice(32..40).get_u64_le()); + let user_id = bytes.slice(40..44).get_u32_le(); + let code = bytes.slice(44..48).get_u32_le(); + let payload_length = bytes.slice(48..52).get_u32_le() as usize; + let payload = bytes.slice(52..52 + payload_length); + let context_length = bytes + .slice(52 + payload_length..56 + payload_length) + .get_u32_le() as usize; + let context = bytes.slice(56 + payload_length..56 + payload_length + context_length); + Ok(StateEntry { + index, + term, + leader_id, + version, + flags, + timestamp, + user_id, + code, + payload, + context, + }) + } +} diff --git a/server/src/state/models.rs b/server/src/state/models.rs new file mode 100644 index 000000000..485db021d --- /dev/null +++ b/server/src/state/models.rs @@ -0,0 +1,40 @@ +use bytes::{BufMut, Bytes, BytesMut}; +use iggy::bytes_serializable::BytesSerializable; +use iggy::error::IggyError; +use iggy::personal_access_tokens::create_personal_access_token::CreatePersonalAccessToken; +use serde::{Deserialize, Serialize}; +use std::str::from_utf8; + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +pub struct CreatePersonalAccessTokenWithHash { + pub command: CreatePersonalAccessToken, + pub hash: String, +} + +impl BytesSerializable for 
CreatePersonalAccessTokenWithHash { + fn as_bytes(&self) -> Bytes { + let mut bytes = BytesMut::new(); + let command_bytes = self.command.as_bytes(); + bytes.put_u32_le(command_bytes.len() as u32); + bytes.put_slice(&command_bytes); + bytes.put_u32_le(self.hash.len() as u32); + bytes.put_slice(self.hash.as_bytes()); + bytes.freeze() + } + + fn from_bytes(bytes: Bytes) -> Result + where + Self: Sized, + { + let mut position = 0; + let command_length = u32::from_le_bytes(bytes[position..position + 4].try_into()?); + position += 4; + let command_bytes = bytes.slice(position..position + command_length as usize); + position += command_length as usize; + let command = CreatePersonalAccessToken::from_bytes(command_bytes)?; + let hash_length = u32::from_le_bytes(bytes[position..position + 4].try_into()?); + position += 4; + let hash = from_utf8(&bytes[position..position + hash_length as usize])?.to_string(); + Ok(Self { command, hash }) + } +} diff --git a/server/src/state/system.rs b/server/src/state/system.rs new file mode 100644 index 000000000..b0b5d4acd --- /dev/null +++ b/server/src/state/system.rs @@ -0,0 +1,529 @@ +use crate::state::models::CreatePersonalAccessTokenWithHash; +use crate::state::StateEntry; +use crate::streaming::personal_access_tokens::personal_access_token::PersonalAccessToken; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::*; +use iggy::compression::compression_algorithm::CompressionAlgorithm; +use iggy::consumer_groups::create_consumer_group::CreateConsumerGroup; +use iggy::consumer_groups::delete_consumer_group::DeleteConsumerGroup; +use iggy::error::IggyError; +use iggy::identifier::{IdKind, Identifier}; +use iggy::models::permissions::Permissions; +use iggy::models::user_status::UserStatus; +use iggy::partitions::create_partitions::CreatePartitions; +use iggy::partitions::delete_partitions::DeletePartitions; +use iggy::personal_access_tokens::delete_personal_access_token::DeletePersonalAccessToken; +use 
iggy::streams::create_stream::CreateStream; +use iggy::streams::delete_stream::DeleteStream; +use iggy::streams::update_stream::UpdateStream; +use iggy::topics::create_topic::CreateTopic; +use iggy::topics::delete_topic::DeleteTopic; +use iggy::topics::update_topic::UpdateTopic; +use iggy::users::change_password::ChangePassword; +use iggy::users::create_user::CreateUser; +use iggy::users::delete_user::DeleteUser; +use iggy::users::update_permissions::UpdatePermissions; +use iggy::users::update_user::UpdateUser; +use iggy::utils::expiry::IggyExpiry; +use iggy::utils::timestamp::IggyTimestamp; +use iggy::utils::topic_size::MaxTopicSize; +use std::collections::HashMap; +use std::fmt::Display; +use tracing::{debug, error}; + +#[derive(Debug)] +pub struct SystemState { + pub streams: HashMap, + pub users: HashMap, +} + +#[derive(Debug)] +pub struct StreamState { + pub id: u32, + pub name: String, + pub created_at: IggyTimestamp, + pub topics: HashMap, + pub current_topic_id: u32, +} + +#[derive(Debug)] +pub struct TopicState { + pub id: u32, + pub name: String, + pub partitions: HashMap, + pub consumer_groups: HashMap, + pub compression_algorithm: CompressionAlgorithm, + pub message_expiry: IggyExpiry, + pub max_topic_size: MaxTopicSize, + pub replication_factor: Option, + pub created_at: IggyTimestamp, + pub current_consumer_group_id: u32, +} + +#[derive(Debug)] +pub struct PartitionState { + pub id: u32, + pub created_at: IggyTimestamp, +} + +#[derive(Debug)] +pub struct PersonalAccessTokenState { + pub name: String, + pub token_hash: String, + pub expiry_at: Option, +} + +#[derive(Debug)] +pub struct UserState { + pub id: u32, + pub username: String, + pub password_hash: String, + pub status: UserStatus, + pub permissions: Option, + pub personal_access_tokens: HashMap, +} + +#[derive(Debug)] +pub struct ConsumerGroupState { + pub id: u32, + pub name: String, +} + +// TODO: Consider handling stream and topic purge +impl SystemState { + pub async fn init(entries: Vec) 
-> Result { + let mut streams = HashMap::new(); + let mut users = HashMap::new(); + let mut current_stream_id = 0; + let mut current_user_id = 0; + for entry in entries { + debug!( + "Processing state entry with code: {}, name: {}", + entry.code, + get_name_from_code(entry.code).unwrap_or("invalid_command") + ); + match entry.code { + CREATE_STREAM_CODE => { + let command = CreateStream::from_bytes(entry.payload)?; + let stream_id = command.stream_id.unwrap_or_else(|| { + current_stream_id += 1; + current_stream_id + }); + let stream = StreamState { + id: stream_id, + name: command.name.clone(), + topics: HashMap::new(), + current_topic_id: 0, + created_at: entry.timestamp, + }; + streams.insert(stream.id, stream); + } + UPDATE_STREAM_CODE => { + let command = UpdateStream::from_bytes(entry.payload)?; + let stream_id = find_stream_id(&streams, &command.stream_id); + let stream = streams + .get_mut(&stream_id) + .unwrap_or_else(|| panic!("{}", format!("Stream: {stream_id} not found"))); + stream.name = command.name; + } + DELETE_STREAM_CODE => { + let command = DeleteStream::from_bytes(entry.payload)?; + let stream_id = find_stream_id(&streams, &command.stream_id); + streams.remove(&stream_id); + } + CREATE_TOPIC_CODE => { + let command = CreateTopic::from_bytes(entry.payload)?; + let stream_id = find_stream_id(&streams, &command.stream_id); + let stream = streams + .get_mut(&stream_id) + .unwrap_or_else(|| panic!("{}", format!("Stream: {stream_id} not found"))); + let topic_id = command.topic_id.unwrap_or_else(|| { + stream.current_topic_id += 1; + stream.current_topic_id + }); + let topic = TopicState { + id: topic_id, + name: command.name, + consumer_groups: HashMap::new(), + current_consumer_group_id: 0, + compression_algorithm: command.compression_algorithm, + message_expiry: command.message_expiry, + max_topic_size: command.max_topic_size, + replication_factor: command.replication_factor, + created_at: entry.timestamp, + partitions: if command.partitions_count 
> 0 { + let mut partitions = HashMap::new(); + for i in 1..=command.partitions_count { + partitions.insert( + i, + PartitionState { + id: i, + created_at: entry.timestamp, + }, + ); + } + partitions + } else { + HashMap::new() + }, + }; + stream.topics.insert(topic.id, topic); + } + UPDATE_TOPIC_CODE => { + let command = UpdateTopic::from_bytes(entry.payload)?; + let stream_id = find_stream_id(&streams, &command.stream_id); + let stream = streams + .get_mut(&stream_id) + .unwrap_or_else(|| panic!("{}", format!("Stream: {stream_id} not found"))); + let topic_id = find_topic_id(&stream.topics, &command.topic_id); + let topic = stream + .topics + .get_mut(&topic_id) + .unwrap_or_else(|| panic!("{}", format!("Topic: {topic_id} not found"))); + topic.name = command.name; + topic.compression_algorithm = command.compression_algorithm; + topic.message_expiry = command.message_expiry; + topic.max_topic_size = command.max_topic_size; + topic.replication_factor = command.replication_factor; + } + DELETE_TOPIC_CODE => { + let command = DeleteTopic::from_bytes(entry.payload)?; + let stream_id = find_stream_id(&streams, &command.stream_id); + let stream = streams + .get_mut(&stream_id) + .unwrap_or_else(|| panic!("{}", format!("Stream: {stream_id} not found"))); + let topic_id = find_topic_id(&stream.topics, &command.topic_id); + stream.topics.remove(&topic_id); + } + CREATE_PARTITIONS_CODE => { + let command = CreatePartitions::from_bytes(entry.payload)?; + let stream_id = find_stream_id(&streams, &command.stream_id); + let stream = streams + .get_mut(&stream_id) + .unwrap_or_else(|| panic!("{}", format!("Stream: {stream_id} not found"))); + let topic_id = find_topic_id(&stream.topics, &command.topic_id); + let topic = stream + .topics + .get_mut(&topic_id) + .unwrap_or_else(|| panic!("{}", format!("Topic: {topic_id} not found"))); + let last_partition_id = if topic.partitions.is_empty() { + 0 + } else { + topic + .partitions + .values() + .map(|p| p.id) + .max() + 
.unwrap_or_else(|| panic!("No partition found")) + }; + for i in 1..=command.partitions_count { + topic.partitions.insert( + last_partition_id + i, + PartitionState { + id: last_partition_id + i, + created_at: entry.timestamp, + }, + ); + } + } + DELETE_PARTITIONS_CODE => { + let command = DeletePartitions::from_bytes(entry.payload)?; + let stream_id = find_stream_id(&streams, &command.stream_id); + let stream = streams + .get_mut(&stream_id) + .unwrap_or_else(|| panic!("{}", format!("Stream: {stream_id} not found"))); + let topic_id = find_topic_id(&stream.topics, &command.topic_id); + let topic = stream + .topics + .get_mut(&topic_id) + .unwrap_or_else(|| panic!("{}", format!("Topic: {topic_id} not found"))); + if topic.partitions.is_empty() { + continue; + } + + let last_partition_id = topic + .partitions + .values() + .map(|p| p.id) + .max() + .unwrap_or_else(|| panic!("No partition found")); + for i in 0..command.partitions_count { + topic.partitions.remove(&(last_partition_id - i)); + } + } + CREATE_CONSUMER_GROUP_CODE => { + let command = CreateConsumerGroup::from_bytes(entry.payload)?; + let stream_id = find_stream_id(&streams, &command.stream_id); + let stream = streams + .get_mut(&stream_id) + .unwrap_or_else(|| panic!("{}", format!("Stream: {stream_id} not found"))); + let topic_id = find_topic_id(&stream.topics, &command.topic_id); + let topic = stream + .topics + .get_mut(&topic_id) + .unwrap_or_else(|| panic!("{}", format!("Topic: {topic_id} not found"))); + let consumer_group_id = command.group_id.unwrap_or_else(|| { + topic.current_consumer_group_id += 1; + topic.current_consumer_group_id + }); + let consumer_group = ConsumerGroupState { + id: consumer_group_id, + name: command.name, + }; + topic + .consumer_groups + .insert(consumer_group.id, consumer_group); + } + DELETE_CONSUMER_GROUP_CODE => { + let command = DeleteConsumerGroup::from_bytes(entry.payload)?; + let stream_id = find_stream_id(&streams, &command.stream_id); + let stream = streams + 
.get_mut(&stream_id) + .unwrap_or_else(|| panic!("{}", format!("Stream: {stream_id} not found"))); + let topic_id = find_topic_id(&stream.topics, &command.topic_id); + let topic = stream + .topics + .get_mut(&topic_id) + .unwrap_or_else(|| panic!("{}", format!("Topic: {topic_id} not found"))); + let consumer_group_id = + find_consumer_group_id(&topic.consumer_groups, &command.group_id); + topic.consumer_groups.remove(&consumer_group_id); + } + CREATE_USER_CODE => { + let command = CreateUser::from_bytes(entry.payload)?; + current_user_id += 1; + let user = UserState { + id: current_user_id, + username: command.username, + password_hash: command.password, // This is already hashed + status: command.status, + permissions: command.permissions, + personal_access_tokens: HashMap::new(), + }; + users.insert(user.id, user); + } + UPDATE_USER_CODE => { + let command = UpdateUser::from_bytes(entry.payload)?; + let user_id = find_user_id(&users, &command.user_id); + let user = users + .get_mut(&user_id) + .unwrap_or_else(|| panic!("{}", format!("User: {user_id} not found"))); + if let Some(username) = &command.username { + user.username.clone_from(username); + } + if let Some(status) = &command.status { + user.status = *status; + } + } + DELETE_USER_CODE => { + let command = DeleteUser::from_bytes(entry.payload)?; + let user_id = find_user_id(&users, &command.user_id); + users.remove(&user_id); + } + CHANGE_PASSWORD_CODE => { + let command = ChangePassword::from_bytes(entry.payload)?; + let user_id = find_user_id(&users, &command.user_id); + let user = users + .get_mut(&user_id) + .unwrap_or_else(|| panic!("{}", format!("User: {user_id} not found"))); + user.password_hash = command.new_password // This is already hashed + } + UPDATE_PERMISSIONS_CODE => { + let command = UpdatePermissions::from_bytes(entry.payload)?; + let user_id = find_user_id(&users, &command.user_id); + let user = users + .get_mut(&user_id) + .unwrap_or_else(|| panic!("{}", format!("User: {user_id} not 
found"))); + user.permissions = command.permissions; + } + CREATE_PERSONAL_ACCESS_TOKEN_CODE => { + let command = CreatePersonalAccessTokenWithHash::from_bytes(entry.payload)?; + let token_hash = command.hash; + let user_id = find_user_id(&users, &entry.user_id.try_into()?); + let user = users + .get_mut(&user_id) + .unwrap_or_else(|| panic!("{}", format!("User: {user_id} not found"))); + let expiry_at = PersonalAccessToken::calculate_expiry_at( + entry.timestamp, + command.command.expiry, + ); + if let Some(expiry_at) = expiry_at { + if expiry_at.to_micros() <= IggyTimestamp::now().to_micros() { + debug!("Personal access token: {token_hash} has already expired."); + continue; + } + } + + user.personal_access_tokens.insert( + command.command.name.clone(), + PersonalAccessTokenState { + name: command.command.name, + token_hash, + expiry_at, + }, + ); + } + DELETE_PERSONAL_ACCESS_TOKEN_CODE => { + let command = DeletePersonalAccessToken::from_bytes(entry.payload)?; + let user_id = find_user_id(&users, &entry.user_id.try_into()?); + let user = users + .get_mut(&user_id) + .unwrap_or_else(|| panic!("{}", format!("User: {user_id} not found"))); + user.personal_access_tokens.remove(&command.name); + } + code => { + error!("Unsupported state entry code: {code}"); + } + } + } + + let state = SystemState { streams, users }; + debug!("+++ State +++"); + debug!("{state}"); + debug!("+++ State +++"); + Ok(state) + } +} + +fn find_stream_id(streams: &HashMap, stream_id: &Identifier) -> u32 { + match stream_id.kind { + IdKind::Numeric => stream_id + .get_u32_value() + .unwrap_or_else(|_| panic!("{}", format!("Invalid stream ID: {stream_id}"))), + IdKind::String => { + let name = stream_id + .get_cow_str_value() + .unwrap_or_else(|_| panic!("{}", format!("Invalid stream name: {stream_id}"))); + let stream = streams + .values() + .find(|s| s.name == name) + .unwrap_or_else(|| panic!("{}", format!("Stream: {name} not found"))); + stream.id + } + } +} + +fn find_topic_id(topics: 
&HashMap, topic_id: &Identifier) -> u32 { + match topic_id.kind { + IdKind::Numeric => topic_id + .get_u32_value() + .unwrap_or_else(|_| panic!("{}", format!("Invalid topic ID: {topic_id}"))), + IdKind::String => { + let name = topic_id + .get_cow_str_value() + .unwrap_or_else(|_| panic!("{}", format!("Invalid topic name: {topic_id}"))); + let topic = topics + .values() + .find(|s| s.name == name) + .unwrap_or_else(|| panic!("{}", format!("Topic: {name} not found"))); + topic.id + } + } +} + +fn find_consumer_group_id(groups: &HashMap, group_id: &Identifier) -> u32 { + match group_id.kind { + IdKind::Numeric => group_id + .get_u32_value() + .unwrap_or_else(|_| panic!("{}", format!("Invalid group ID: {group_id}"))), + IdKind::String => { + let name = group_id + .get_cow_str_value() + .unwrap_or_else(|_| panic!("{}", format!("Invalid group name: {group_id}"))); + let group = groups + .values() + .find(|s| s.name == name) + .unwrap_or_else(|| panic!("{}", format!("Consumer group: {name} not found"))); + group.id + } + } +} + +fn find_user_id(users: &HashMap, user_id: &Identifier) -> u32 { + match user_id.kind { + IdKind::Numeric => user_id + .get_u32_value() + .unwrap_or_else(|_| panic!("{}", format!("Invalid user ID: {user_id}"))), + IdKind::String => { + let username = user_id + .get_cow_str_value() + .unwrap_or_else(|_| panic!("{}", format!("Invalid username: {user_id}"))); + let user = users + .values() + .find(|s| s.username == username) + .unwrap_or_else(|| panic!("{}", format!("User: {username} not found"))); + user.id + } + } +} + +impl Display for SystemState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Streams:")?; + for stream in self.streams.iter() { + write!(f, "\n================\n")?; + write!(f, "{}", stream.1)?; + } + write!(f, "Users:")?; + for user in self.users.iter() { + write!(f, "\n================\n")?; + write!(f, "{}", user.1)?; + } + Ok(()) + } +} + +impl Display for ConsumerGroupState { + fn 
fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "ConsumerGroup -> ID: {}, Name: {}", self.id, self.name) + } +} + +impl Display for UserState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let permissions = if let Some(permissions) = &self.permissions { + permissions.to_string() + } else { + "no_permissions".to_string() + }; + write!( + f, + "User -> ID: {}, Username: {}, Status: {}, Permissions: {}", + self.id, self.username, self.status, permissions + ) + } +} + +impl Display for StreamState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Stream -> ID: {}, Name: {}", self.id, self.name,)?; + for topic in self.topics.iter() { + write!(f, "\n {}", topic.1)?; + } + Ok(()) + } +} + +impl Display for TopicState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Topic -> ID: {}, Name: {}", self.id, self.name,)?; + for partition in self.partitions.iter() { + write!(f, "\n {}", partition.1)?; + } + write!(f, "\nConsumer Groups:")?; + for consumer_group in self.consumer_groups.iter() { + write!(f, "\n {}", consumer_group.1)?; + } + Ok(()) + } +} + +impl Display for PartitionState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Partition -> ID: {}, Created At: {}", + self.id, self.created_at + ) + } +} diff --git a/server/src/streaming/batching/message_batch.rs b/server/src/streaming/batching/message_batch.rs index 8985f138a..8cb2f8754 100644 --- a/server/src/streaming/batching/message_batch.rs +++ b/server/src/streaming/batching/message_batch.rs @@ -9,13 +9,14 @@ use iggy::error::IggyError::{ MissingLengthRetainedMessageBatch, MissingMaxTimestampRetainedMessageBatch, MissingPayloadRetainedMessageBatch, }; +use iggy::utils::timestamp::IggyTimestamp; use crate::streaming::sizeable::Sizeable; #[derive(Debug, Clone)] pub struct RetainedMessageBatch { pub base_offset: u64, pub last_offset_delta: u32, - pub 
max_timestamp: u64, + pub max_timestamp: IggyTimestamp, pub length: u32, pub bytes: Bytes, } @@ -24,7 +25,7 @@ impl RetainedMessageBatch { pub fn new( base_offset: u64, last_offset_delta: u32, - max_timestamp: u64, + max_timestamp: IggyTimestamp, length: u32, bytes: Bytes, ) -> Self { @@ -59,7 +60,7 @@ impl RetainedMessageBatch { bytes.put_u64_le(self.base_offset); bytes.put_u32_le(self.length); bytes.put_u32_le(self.last_offset_delta); - bytes.put_u64_le(self.max_timestamp); + bytes.put_u64_le(self.max_timestamp.into()); bytes.put_slice(&self.bytes); } } @@ -104,7 +105,7 @@ where pub struct RetainedMessageBatchBuilder { base_offset: Option, last_offset_delta: Option, - max_timestamp: Option, + max_timestamp: Option, length: Option, payload: Option, } @@ -130,7 +131,7 @@ impl RetainedMessageBatchBuilder { self } - pub fn max_timestamp(mut self, max_timestamp: u64) -> Self { + pub fn max_timestamp(mut self, max_timestamp: IggyTimestamp) -> Self { self.max_timestamp = Some(max_timestamp); self } diff --git a/server/src/streaming/models/messages.rs b/server/src/streaming/models/messages.rs index ef8b8525c..7a274d16b 100644 --- a/server/src/streaming/models/messages.rs +++ b/server/src/streaming/models/messages.rs @@ -4,6 +4,7 @@ use iggy::bytes_serializable::BytesSerializable; use iggy::error::IggyError; use iggy::models::messages::PolledMessage; use iggy::utils::checksum; +use iggy::utils::timestamp::IggyTimestamp; use iggy::{messages::send_messages::Message, models::messages::MessageState}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; @@ -21,7 +22,7 @@ pub struct PolledMessages { pub struct RetainedMessage { pub id: u128, pub offset: u64, - pub timestamp: u64, + pub timestamp: IggyTimestamp, pub checksum: u32, pub message_state: MessageState, pub headers: Option, @@ -47,7 +48,7 @@ impl TryFrom for PolledMessage { } impl RetainedMessage { - pub fn new(offset: u64, timestamp: u64, message: Message) -> Self { + pub fn new(offset: u64, timestamp: 
IggyTimestamp, message: Message) -> Self { RetainedMessage { offset, timestamp, @@ -72,7 +73,7 @@ impl RetainedMessage { bytes.put_u32_le(length); bytes.put_u64_le(offset); bytes.put_u8(message_state.as_code()); - bytes.put_u64_le(timestamp); + bytes.put_u64_le(timestamp.into()); bytes.put_u128_le(id); bytes.put_u32_le(checksum); if let Some(headers) = headers { @@ -89,6 +90,7 @@ impl RetainedMessage { let offset = u64::from_le_bytes(bytes[..8].try_into()?); let message_state = MessageState::from_code(bytes[8])?; let timestamp = u64::from_le_bytes(bytes[9..17].try_into()?); + let timestamp = timestamp.into(); let id = u128::from_le_bytes(bytes[17..33].try_into()?); let checksum = u32::from_le_bytes(bytes[33..37].try_into()?); let headers_length = u32::from_le_bytes(bytes[37..41].try_into()?); diff --git a/server/src/streaming/partitions/consumer_offsets.rs b/server/src/streaming/partitions/consumer_offsets.rs index f5a5b2369..3a70c635b 100644 --- a/server/src/streaming/partitions/consumer_offsets.rs +++ b/server/src/streaming/partitions/consumer_offsets.rs @@ -69,13 +69,8 @@ impl Partition { offset: u64, ) -> Result<(), IggyError> { let consumer_offsets = self.get_consumer_offsets(kind); - let consumer_offset = consumer_offsets - .get_mut(&consumer_id) - .map(|mut consumer_offset| { - consumer_offset.offset = offset; - consumer_offset.clone() - }); - if let Some(consumer_offset) = consumer_offset { + if let Some(mut consumer_offset) = consumer_offsets.get_mut(&consumer_id) { + consumer_offset.offset = offset; self.storage .partition .save_consumer_offset(&consumer_offset) @@ -83,14 +78,11 @@ impl Partition { return Ok(()); } - let consumer_offset = ConsumerOffset::new( - kind, - consumer_id, - offset, - self.stream_id, - self.topic_id, - self.partition_id, - ); + let path = match kind { + ConsumerKind::Consumer => &self.consumer_offsets_path, + ConsumerKind::ConsumerGroup => &self.consumer_group_offsets_path, + }; + let consumer_offset = ConsumerOffset::new(kind, 
consumer_id, offset, path); self.storage .partition .save_consumer_offset(&consumer_offset) @@ -116,10 +108,14 @@ impl Partition { &self, kind: ConsumerKind, ) -> Result<(), IggyError> { + let path = match kind { + ConsumerKind::Consumer => &self.consumer_offsets_path, + ConsumerKind::ConsumerGroup => &self.consumer_group_offsets_path, + }; let loaded_consumer_offsets = self .storage .partition - .load_consumer_offsets(kind, self.stream_id, self.topic_id, self.partition_id) + .load_consumer_offsets(kind, path) .await?; let consumer_offsets = self.get_consumer_offsets(kind); for consumer_offset in loaded_consumer_offsets { diff --git a/server/src/streaming/partitions/messages.rs b/server/src/streaming/partitions/messages.rs index 3638d7658..3dba2fe41 100644 --- a/server/src/streaming/partitions/messages.rs +++ b/server/src/streaming/partitions/messages.rs @@ -24,7 +24,7 @@ impl Partition { pub async fn get_messages_by_timestamp( &self, - timestamp: u64, + timestamp: IggyTimestamp, count: u32, ) -> Result, IggyError> { trace!( @@ -63,7 +63,9 @@ impl Partition { let time_indexes = segment.time_indexes.as_ref().unwrap(); let time_index = time_indexes .iter() - .rposition(|time_index| time_index.timestamp <= timestamp) + .rposition(|time_index| { + time_index.timestamp.to_micros() <= timestamp.to_micros() + }) .map(|idx| time_indexes[idx]); if time_index.is_none() { continue; @@ -87,7 +89,7 @@ impl Partition { .get_messages_by_offset(start_offset, count) .await? .into_iter() - .filter(|msg| msg.timestamp >= timestamp) + .filter(|msg| msg.timestamp.to_micros() >= timestamp.to_micros()) .take(count as usize) .collect()); } @@ -101,20 +103,20 @@ impl Partition { .get_messages_by_offset(start_offset, adjusted_count) .await? 
.into_iter() - .filter(|msg| msg.timestamp >= timestamp) + .filter(|msg| msg.timestamp.to_micros() >= timestamp.to_micros()) .take(count as usize) .collect()) } fn calculate_adjusted_timestamp_message_count( &self, count: u32, - timestamp: u64, - timestamp_from_index: u64, + timestamp: IggyTimestamp, + timestamp_from_index: IggyTimestamp, ) -> u32 { if self.avg_timestamp_delta.as_micros() == 0 { return count; } - let timestamp_diff = timestamp - timestamp_from_index; + let timestamp_diff = timestamp.to_micros() - timestamp_from_index.to_micros(); // This approximation is not exact, but it's good enough for the usage of this function let overfetch_value = ((timestamp_diff as f64 / self.avg_timestamp_delta.as_micros() as f64) * 1.35).ceil() @@ -399,8 +401,8 @@ impl Partition { let mut messages_count = 0u32; // assume that messages have monotonic timestamps - let mut max_timestamp = 0; - let mut min_timestamp = 0; + let mut max_timestamp = IggyTimestamp::zero(); + let mut min_timestamp = IggyTimestamp::zero(); let mut buffer = BytesMut::with_capacity(batch_size as usize); let mut batch_builder = RetainedMessageBatch::builder(); @@ -414,7 +416,7 @@ impl Partition { ); continue; } - max_timestamp = IggyTimestamp::now().to_micros(); + max_timestamp = IggyTimestamp::now(); if messages_count == 0 { min_timestamp = max_timestamp; @@ -426,7 +428,7 @@ impl Partition { } } else { for message in messages { - max_timestamp = IggyTimestamp::now().to_micros(); + max_timestamp = IggyTimestamp::now(); if messages_count == 0 { min_timestamp = max_timestamp; @@ -441,8 +443,10 @@ impl Partition { return Ok(()); } - let avg_timestamp_delta = - Duration::from_micros((max_timestamp - min_timestamp) / messages_count as u64).into(); + let avg_timestamp_delta = Duration::from_micros( + (max_timestamp.to_micros() - min_timestamp.to_micros()) / messages_count as u64, + ) + .into(); let min_alpha: f64 = 0.3; let max_alpha: f64 = 0.7; @@ -517,6 +521,7 @@ impl Partition { #[cfg(test)] mod tests { 
+ use iggy::utils::expiry::IggyExpiry; use std::sync::atomic::{AtomicU32, AtomicU64}; use super::*; @@ -587,12 +592,13 @@ mod tests { with_segment, config, storage, - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), + IggyTimestamp::now(), ) } } diff --git a/server/src/streaming/partitions/partition.rs b/server/src/streaming/partitions/partition.rs index 66e862a99..b6752b338 100644 --- a/server/src/streaming/partitions/partition.rs +++ b/server/src/streaming/partitions/partition.rs @@ -8,6 +8,7 @@ use crate::streaming::storage::SystemStorage; use dashmap::DashMap; use iggy::consumer::ConsumerKind; use iggy::utils::duration::IggyDuration; +use iggy::utils::expiry::IggyExpiry; use iggy::utils::timestamp::IggyTimestamp; use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; use std::sync::Arc; @@ -17,14 +18,17 @@ pub struct Partition { pub stream_id: u32, pub topic_id: u32, pub partition_id: u32, - pub path: String, + pub partition_path: String, + pub offsets_path: String, + pub consumer_offsets_path: String, + pub consumer_group_offsets_path: String, pub current_offset: u64, pub cache: Option>>, pub cached_memory_tracker: Option>, pub message_deduplicator: Option, pub unsaved_messages_count: u32, pub should_increment_offset: bool, - pub created_at: u64, + pub created_at: IggyTimestamp, pub avg_timestamp_delta: IggyDuration, pub messages_count_of_parent_stream: Arc, pub messages_count_of_parent_topic: Arc, @@ -33,7 +37,7 @@ pub struct Partition { pub size_of_parent_topic: Arc, pub size_bytes: Arc, pub segments_count_of_parent_stream: Arc, - pub(crate) message_expiry: Option, + pub(crate) message_expiry: IggyExpiry, pub(crate) consumer_offsets: DashMap, pub(crate) consumer_group_offsets: DashMap, pub(crate) segments: Vec, @@ -46,37 +50,18 @@ pub struct ConsumerOffset { pub kind: ConsumerKind, pub consumer_id: u32, pub offset: u64, - pub 
key: String, + pub path: String, } impl ConsumerOffset { - pub fn new( - kind: ConsumerKind, - consumer_id: u32, - offset: u64, - stream_id: u32, - topic_id: u32, - partition_id: u32, - ) -> ConsumerOffset { + pub fn new(kind: ConsumerKind, consumer_id: u32, offset: u64, path: &str) -> ConsumerOffset { ConsumerOffset { - key: format!( - "{}:{consumer_id}", - Self::get_key_prefix(kind, stream_id, topic_id, partition_id) - ), kind, consumer_id, offset, + path: format!("{path}/{consumer_id}"), } } - - pub fn get_key_prefix( - kind: ConsumerKind, - stream_id: u32, - topic_id: u32, - partition_id: u32, - ) -> String { - format!("{kind}_offsets:{stream_id}:{topic_id}:{partition_id}") - } } impl Partition { @@ -88,14 +73,20 @@ impl Partition { with_segment: bool, config: Arc, storage: Arc, - message_expiry: Option, + message_expiry: IggyExpiry, messages_count_of_parent_stream: Arc, messages_count_of_parent_topic: Arc, size_of_parent_stream: Arc, size_of_parent_topic: Arc, segments_count_of_parent_stream: Arc, + created_at: IggyTimestamp, ) -> Partition { - let path = config.get_partition_path(stream_id, topic_id, partition_id); + let partition_path = config.get_partition_path(stream_id, topic_id, partition_id); + let offsets_path = config.get_offsets_path(stream_id, topic_id, partition_id); + let consumer_offsets_path = + config.get_consumer_offsets_path(stream_id, topic_id, partition_id); + let consumer_group_offsets_path = + config.get_consumer_group_offsets_path(stream_id, topic_id, partition_id); let (cached_memory_tracker, messages) = match config.cache.enabled { false => (None, None), true => ( @@ -108,7 +99,10 @@ impl Partition { stream_id, topic_id, partition_id, - path, + partition_path, + offsets_path, + consumer_offsets_path, + consumer_group_offsets_path, message_expiry, cache: messages, cached_memory_tracker, @@ -137,7 +131,7 @@ impl Partition { consumer_group_offsets: DashMap::new(), config, storage, - created_at: IggyTimestamp::now().to_micros(), + 
created_at, avg_timestamp_delta: IggyDuration::default(), size_of_parent_stream, size_of_parent_topic, @@ -183,6 +177,9 @@ mod tests { use crate::configs::system::{CacheConfig, SystemConfig}; use crate::streaming::partitions::partition::Partition; use crate::streaming::storage::tests::get_test_system_storage; + use iggy::utils::duration::IggyDuration; + use iggy::utils::expiry::IggyExpiry; + use iggy::utils::timestamp::IggyTimestamp; use std::sync::atomic::{AtomicU32, AtomicU64}; use std::sync::Arc; @@ -195,7 +192,7 @@ mod tests { let with_segment = true; let config = Arc::new(SystemConfig::default()); let path = config.get_partition_path(stream_id, topic_id, partition_id); - let message_expiry = Some(10); + let message_expiry = IggyExpiry::ExpireDuration(IggyDuration::from(10)); let partition = Partition::create( stream_id, topic_id, @@ -209,12 +206,13 @@ mod tests { Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), + IggyTimestamp::now(), ); assert_eq!(partition.stream_id, stream_id); assert_eq!(partition.topic_id, topic_id); assert_eq!(partition.partition_id, partition_id); - assert_eq!(partition.path, path); + assert_eq!(partition.partition_path, path); assert_eq!(partition.current_offset, 0); assert_eq!(partition.unsaved_messages_count, 0); assert_eq!(partition.segments.len(), 1); @@ -242,12 +240,13 @@ mod tests { ..Default::default() }), storage, - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), + IggyTimestamp::now(), ); assert!(partition.cache.is_none()); } @@ -263,12 +262,13 @@ mod tests { false, Arc::new(SystemConfig::default()), storage, - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU32::new(0)), + IggyTimestamp::now(), ); assert!(partition.segments.is_empty()); } diff 
--git a/server/src/streaming/partitions/persistence.rs b/server/src/streaming/partitions/persistence.rs index 41692f363..75764e80b 100644 --- a/server/src/streaming/partitions/persistence.rs +++ b/server/src/streaming/partitions/persistence.rs @@ -1,13 +1,13 @@ use std::sync::atomic::Ordering; +use crate::state::system::PartitionState; use crate::streaming::partitions::partition::Partition; -use iggy::consumer::ConsumerKind; use iggy::error::IggyError; impl Partition { - pub async fn load(&mut self) -> Result<(), IggyError> { + pub async fn load(&mut self, state: PartitionState) -> Result<(), IggyError> { let storage = self.storage.clone(); - storage.partition.load(self).await + storage.partition.load(self, state).await } pub async fn persist(&self) -> Result<(), IggyError> { @@ -27,6 +27,8 @@ impl Partition { self.current_offset = 0; self.unsaved_messages_count = 0; self.should_increment_offset = false; + self.consumer_offsets.clear(); + self.consumer_group_offsets.clear(); if let Some(cache) = self.cache.as_mut() { cache.purge(); } @@ -38,21 +40,11 @@ impl Partition { self.segments.clear(); self.storage .partition - .delete_consumer_offsets( - ConsumerKind::Consumer, - self.stream_id, - self.topic_id, - self.partition_id, - ) + .delete_consumer_offsets(&self.consumer_offsets_path) .await?; self.storage .partition - .delete_consumer_offsets( - ConsumerKind::ConsumerGroup, - self.stream_id, - self.topic_id, - self.partition_id, - ) + .delete_consumer_offsets(&self.consumer_group_offsets_path) .await?; self.add_persisted_segment(0).await?; diff --git a/server/src/streaming/partitions/segments.rs b/server/src/streaming/partitions/segments.rs index e9bc3886a..c536243b8 100644 --- a/server/src/streaming/partitions/segments.rs +++ b/server/src/streaming/partitions/segments.rs @@ -3,6 +3,7 @@ use std::sync::atomic::Ordering; use crate::streaming::partitions::partition::Partition; use crate::streaming::segments::segment::Segment; use iggy::error::IggyError; +use 
iggy::utils::timestamp::IggyTimestamp; use tracing::info; pub struct DeletedSegment { @@ -23,7 +24,7 @@ impl Partition { &mut self.segments } - pub async fn get_expired_segments_start_offsets(&self, now: u64) -> Vec { + pub async fn get_expired_segments_start_offsets(&self, now: IggyTimestamp) -> Vec { let mut expired_segments = Vec::new(); for segment in &self.segments { if segment.is_closed && segment.is_expired(now).await { diff --git a/server/src/streaming/partitions/storage.rs b/server/src/streaming/partitions/storage.rs index 30055cee5..400e8e367 100644 --- a/server/src/streaming/partitions/storage.rs +++ b/server/src/streaming/partitions/storage.rs @@ -1,190 +1,54 @@ -use crate::compat::message_converter::MessageFormatConverter; +use crate::compat::message_conversion::message_converter::MessageFormatConverter; +use crate::state::system::PartitionState; use crate::streaming::partitions::partition::{ConsumerOffset, Partition}; +use crate::streaming::persistence::persister::Persister; use crate::streaming::segments::segment::{Segment, LOG_EXTENSION}; -use crate::streaming::storage::{PartitionStorage, Storage}; +use crate::streaming::storage::PartitionStorage; +use crate::streaming::utils::file; use anyhow::Context; use async_trait::async_trait; use iggy::consumer::ConsumerKind; use iggy::error::IggyError; -use serde::{Deserialize, Serialize}; -use sled::Db; use std::path::Path; use std::sync::atomic::Ordering; use std::sync::Arc; use tokio::fs; use tokio::fs::create_dir; +use tokio::io::AsyncReadExt; use tracing::{error, info, trace, warn}; #[derive(Debug)] pub struct FilePartitionStorage { - db: Arc, + persister: Arc, } impl FilePartitionStorage { - pub fn new(db: Arc) -> Self { - Self { db } + pub fn new(persister: Arc) -> Self { + Self { persister } } } - unsafe impl Send for FilePartitionStorage {} unsafe impl Sync for FilePartitionStorage {} #[async_trait] impl PartitionStorage for FilePartitionStorage { - async fn save_consumer_offset(&self, offset: 
&ConsumerOffset) -> Result<(), IggyError> { - // The stored value is just the offset, so we don't need to serialize the whole struct. - // It should be as fast and lightweight as possible. - // As described in the docs, sled works better with big-endian byte order. - if let Err(err) = self - .db - .insert(&offset.key, &offset.offset.to_be_bytes()) - .with_context(|| { - format!( - "Failed to save consumer offset: {}, key: {}", - offset.offset, offset.key - ) - }) - { - return Err(IggyError::CannotSaveResource(err)); - } - - trace!( - "Stored consumer offset value: {} for {} with ID: {}", - offset.offset, - offset.kind, - offset.consumer_id - ); - Ok(()) - } - - async fn load_consumer_offsets( - &self, - kind: ConsumerKind, - stream_id: u32, - topic_id: u32, - partition_id: u32, - ) -> Result, IggyError> { - let mut consumer_offsets = Vec::new(); - let key_prefix = format!( - "{}:", - ConsumerOffset::get_key_prefix(kind, stream_id, topic_id, partition_id) - ); - for data in self.db.scan_prefix(&key_prefix) { - let consumer_offset = match data.with_context(|| { - format!( - "Failed to load consumer offset, when searching by key: {}", - key_prefix - ) - }) { - Ok((key, value)) => { - let key = String::from_utf8(key.to_vec()).unwrap(); - let offset = u64::from_be_bytes(value.as_ref().try_into().unwrap()); - let consumer_id = key.split(':').last().unwrap().parse::().unwrap(); - ConsumerOffset { - key, - kind, - consumer_id, - offset, - } - } - Err(err) => { - return Err(IggyError::CannotLoadResource(err)); - } - }; - consumer_offsets.push(consumer_offset); - } - - consumer_offsets.sort_by(|a, b| a.consumer_id.cmp(&b.consumer_id)); - Ok(consumer_offsets) - } - - async fn delete_consumer_offsets( + async fn load( &self, - kind: ConsumerKind, - stream_id: u32, - topic_id: u32, - partition_id: u32, + partition: &mut Partition, + state: PartitionState, ) -> Result<(), IggyError> { - let consumer_offset_key_prefix = format!( - "{}:", - ConsumerOffset::get_key_prefix(kind, 
stream_id, topic_id, partition_id) - ); - - for data in self.db.scan_prefix(&consumer_offset_key_prefix) { - match data.with_context(|| { - format!( - "Failed to delete consumer offset, when searching by key: {}", - consumer_offset_key_prefix - ) - }) { - Ok((key, _)) => { - if let Err(err) = self.db.remove(&key).with_context(|| { - format!("Failed to delete consumer offset, key: {:?}", key) - }) { - return Err(IggyError::CannotLoadResource(err)); - } - } - Err(err) => { - return Err(IggyError::CannotLoadResource(err)); - } - } - } - - Ok(()) - } -} - -#[derive(Debug, Serialize, Deserialize)] -struct PartitionData { - created_at: u64, -} - -#[async_trait] -impl Storage for FilePartitionStorage { - async fn load(&self, partition: &mut Partition) -> Result<(), IggyError> { info!( "Loading partition with ID: {} for stream with ID: {} and topic with ID: {}, for path: {} from disk...", - partition.partition_id, partition.stream_id, partition.topic_id, partition.path - ); - let dir_entries = fs::read_dir(&partition.path).await; - if let Err(err) = fs::read_dir(&partition.path) - .await - .with_context(|| format!("Failed to read partition with ID: {} for stream with ID: {} and topic with ID: {} and path: {}", partition.partition_id, partition.stream_id, partition.topic_id, partition.path)) - { - return Err(IggyError::CannotReadPartitions(err)); - } - - let key = get_partition_key( - partition.stream_id, - partition.topic_id, - partition.partition_id, + partition.partition_id, partition.stream_id, partition.topic_id, partition.partition_path ); - let partition_data = match self - .db - .get(&key) - .with_context(|| format!("Failed to load partition with key: {}", key)) - { - Ok(partition_data) => { - if let Some(partition_data) = partition_data { - let partition_data = rmp_serde::from_slice::(&partition_data) - .with_context(|| { - format!("Failed to deserialize partition with key: {}", key) - }); - if let Err(err) = partition_data { - return 
Err(IggyError::CannotDeserializeResource(err)); - } else { - partition_data.unwrap() - } - } else { - return Err(IggyError::ResourceNotFound(key)); - } - } - Err(err) => { - return Err(IggyError::CannotLoadResource(err)); + partition.created_at = state.created_at; + let dir_entries = fs::read_dir(&partition.partition_path).await; + if let Err(err) = fs::read_dir(&partition.partition_path) + .await + .with_context(|| format!("Failed to read partition with ID: {} for stream with ID: {} and topic with ID: {} and path: {}", partition.partition_id, partition.stream_id, partition.topic_id, partition.partition_path)) + { + return Err(IggyError::CannotReadPartitions(err)); } - }; - - partition.created_at = partition_data.created_at; let mut dir_entries = dir_entries.unwrap(); while let Some(dir_entry) = dir_entries.next_entry().await.unwrap_or(None) { @@ -246,11 +110,11 @@ impl Storage for FilePartitionStorage { Err(err) if idx + 1 == samplers_count => { // Didn't find any message format, return an error return Err(IggyError::CannotLoadResource(anyhow::anyhow!(err) - .context(format!( - "Failed to find a valid message format, when trying to perform a conversion for partition with ID: {} and segment with start offset: {}.", - partition.partition_id, - start_offset - )))); + .context(format!( + "Failed to find a valid message format, when trying to perform a conversion for partition with ID: {} and segment with start offset: {}.", + partition.partition_id, + start_offset + )))); } _ => {} } @@ -336,7 +200,9 @@ impl Storage for FilePartitionStorage { "Saving partition with start ID: {} for stream with ID: {} and topic with ID: {}...", partition.partition_id, partition.stream_id, partition.topic_id ); - if !Path::new(&partition.path).exists() && create_dir(&partition.path).await.is_err() { + if !Path::new(&partition.partition_path).exists() + && create_dir(&partition.partition_path).await.is_err() + { return Err(IggyError::CannotCreatePartitionDirectory( 
partition.partition_id, partition.stream_id, @@ -344,49 +210,55 @@ impl Storage for FilePartitionStorage { )); } - let key = get_partition_key( - partition.stream_id, - partition.topic_id, - partition.partition_id, - ); - match rmp_serde::to_vec(&PartitionData { - created_at: partition.created_at, - }) - .with_context(|| format!("Failed to serialize partition with key: {}", key)) + if !Path::new(&partition.offsets_path).exists() + && create_dir(&partition.offsets_path).await.is_err() { - Ok(data) => { - if let Err(err) = self - .db - .insert(&key, data) - .with_context(|| format!("Failed to insert partition with key: {}", key)) - { - return Err(IggyError::CannotSaveResource(err)); - } - } - Err(err) => { - return Err(IggyError::CannotSerializeResource(err)); - } + error!( + "Failed to create offsets directory for partition with ID: {} for stream with ID: {} and topic with ID: {}.", + partition.partition_id, partition.stream_id, partition.topic_id + ); + return Err(IggyError::CannotCreatePartition( + partition.partition_id, + partition.stream_id, + partition.topic_id, + )); } - if let Err(err) = self - .db - .insert( - &key, - rmp_serde::to_vec(&PartitionData { - created_at: partition.created_at, - }) - .unwrap(), - ) - .with_context(|| format!("Failed to insert partition with key: {}", key)) + if !Path::new(&partition.consumer_offsets_path).exists() + && create_dir(&partition.consumer_offsets_path).await.is_err() + { + error!( + "Failed to create consumer offsets directory for partition with ID: {} for stream with ID: {} and topic with ID: {}.", + partition.partition_id, partition.stream_id, partition.topic_id + ); + return Err(IggyError::CannotCreatePartition( + partition.partition_id, + partition.stream_id, + partition.topic_id, + )); + } + + if !Path::new(&partition.consumer_group_offsets_path).exists() + && create_dir(&partition.consumer_group_offsets_path) + .await + .is_err() { - return Err(IggyError::CannotSaveResource(err)); + error!( + "Failed to create 
consumer group offsets directory for partition with ID: {} for stream with ID: {} and topic with ID: {}.", + partition.partition_id, partition.stream_id, partition.topic_id + ); + return Err(IggyError::CannotCreatePartition( + partition.partition_id, + partition.stream_id, + partition.topic_id, + )); } for segment in partition.get_segments() { segment.persist().await?; } - info!("Saved partition with start ID: {} for stream with ID: {} and topic with ID: {}, path: {}.", partition.partition_id, partition.stream_id, partition.topic_id, partition.path); + info!("Saved partition with start ID: {} for stream with ID: {} and topic with ID: {}, path: {}.", partition.partition_id, partition.stream_id, partition.topic_id, partition.partition_path); Ok(()) } @@ -396,29 +268,9 @@ impl Storage for FilePartitionStorage { "Deleting partition with ID: {} for stream with ID: {} and topic with ID: {}...", partition.partition_id, partition.stream_id, partition.topic_id, ); - if self - .db - .remove(get_partition_key( - partition.stream_id, - partition.topic_id, - partition.partition_id, - )) - .is_err() - { - return Err(IggyError::CannotDeletePartition( - partition.partition_id, - partition.topic_id, - partition.stream_id, - )); - } if let Err(err) = self - .delete_consumer_offsets( - ConsumerKind::Consumer, - partition.stream_id, - partition.topic_id, - partition.partition_id, - ) + .delete_consumer_offsets(&partition.consumer_offsets_path) .await { error!("Cannot delete consumer offsets for partition with ID: {} for topic with ID: {} for stream with ID: {}. 
Error: {}", partition.partition_id, partition.topic_id, partition.stream_id, err); @@ -430,12 +282,7 @@ impl Storage for FilePartitionStorage { } if let Err(err) = self - .delete_consumer_offsets( - ConsumerKind::ConsumerGroup, - partition.stream_id, - partition.topic_id, - partition.partition_id, - ) + .delete_consumer_offsets(&partition.consumer_group_offsets_path) .await { error!("Cannot delete consumer group offsets for partition with ID: {} for topic with ID: {} for stream with ID: {}. Error: {}", partition.partition_id, partition.topic_id, partition.stream_id, err); @@ -446,8 +293,8 @@ impl Storage for FilePartitionStorage { )); } - if fs::remove_dir_all(&partition.path).await.is_err() { - error!("Cannot delete partition directory: {} for partition with ID: {} for topic with ID: {} for stream with ID: {}.", partition.path, partition.partition_id, partition.topic_id, partition.stream_id); + if fs::remove_dir_all(&partition.partition_path).await.is_err() { + error!("Cannot delete partition directory: {} for partition with ID: {} for topic with ID: {} for stream with ID: {}.", partition.partition_path, partition.partition_id, partition.topic_id, partition.stream_id); return Err(IggyError::CannotDeletePartitionDirectory( partition.partition_id, partition.stream_id, @@ -460,11 +307,100 @@ impl Storage for FilePartitionStorage { ); Ok(()) } -} -fn get_partition_key(stream_id: u32, topic_id: u32, partition_id: u32) -> String { - format!( - "streams:{}:topics:{}:partitions:{}", - stream_id, topic_id, partition_id - ) + async fn save_consumer_offset(&self, offset: &ConsumerOffset) -> Result<(), IggyError> { + self.persister + .overwrite(&offset.path, &offset.offset.to_le_bytes()) + .await?; + trace!( + "Stored consumer offset value: {} for {} with ID: {}, path: {}", + offset.offset, + offset.kind, + offset.consumer_id, + offset.path + ); + Ok(()) + } + + async fn load_consumer_offsets( + &self, + kind: ConsumerKind, + path: &str, + ) -> Result, IggyError> { + 
trace!("Loading consumer offsets from path: {path}..."); + let dir_entries = fs::read_dir(&path).await; + if dir_entries.is_err() { + return Err(IggyError::CannotReadConsumerOffsets(path.to_owned())); + } + + let mut consumer_offsets = Vec::new(); + let mut dir_entries = dir_entries.unwrap(); + while let Some(dir_entry) = dir_entries.next_entry().await.unwrap_or(None) { + let metadata = dir_entry.metadata().await; + if metadata.is_err() { + break; + } + + if metadata.unwrap().is_dir() { + continue; + } + + let name = dir_entry.file_name().into_string().unwrap(); + let consumer_id = name.parse::(); + if consumer_id.is_err() { + error!("Invalid consumer ID file with name: '{}'.", name); + continue; + } + + let path = dir_entry.path(); + let path = path.to_str(); + if path.is_none() { + error!("Invalid consumer ID path for file with name: '{}'.", name); + continue; + } + + let path = path.unwrap().to_string(); + let consumer_id = consumer_id.unwrap(); + let mut file = file::open(&path).await?; + let offset = file.read_u64_le().await?; + + consumer_offsets.push(ConsumerOffset { + kind, + consumer_id, + offset, + path, + }); + } + + consumer_offsets.sort_by(|a, b| a.consumer_id.cmp(&b.consumer_id)); + Ok(consumer_offsets) + } + + async fn delete_consumer_offsets(&self, path: &str) -> Result<(), IggyError> { + if !Path::new(path).exists() { + trace!("Consumer offsets directory does not exist: {path}."); + return Ok(()); + } + + if fs::remove_dir_all(path).await.is_err() { + error!("Cannot delete consumer offsets directory: {}.", path); + return Err(IggyError::CannotDeleteConsumerOffsetsDirectory( + path.to_owned(), + )); + } + Ok(()) + } + + async fn delete_consumer_offset(&self, path: &str) -> Result<(), IggyError> { + if !Path::new(path).exists() { + trace!("Consumer offset file does not exist: {path}."); + return Ok(()); + } + + if fs::remove_file(path).await.is_err() { + error!("Cannot delete consumer offset file: {path}."); + return 
Err(IggyError::CannotDeleteConsumerOffsetFile(path.to_owned())); + } + Ok(()) + } } diff --git a/server/src/streaming/personal_access_tokens/mod.rs b/server/src/streaming/personal_access_tokens/mod.rs index 52fe20fba..61feb0317 100644 --- a/server/src/streaming/personal_access_tokens/mod.rs +++ b/server/src/streaming/personal_access_tokens/mod.rs @@ -1,2 +1 @@ pub mod personal_access_token; -pub mod storage; diff --git a/server/src/streaming/personal_access_tokens/personal_access_token.rs b/server/src/streaming/personal_access_tokens/personal_access_token.rs index ee06c3154..e546b1400 100644 --- a/server/src/streaming/personal_access_tokens/personal_access_token.rs +++ b/server/src/streaming/personal_access_tokens/personal_access_token.rs @@ -1,61 +1,92 @@ use crate::streaming::utils::hash; use iggy::models::user_info::UserId; +use iggy::utils::expiry::IggyExpiry; use iggy::utils::text::as_base64; +use iggy::utils::timestamp::IggyTimestamp; use ring::rand::SecureRandom; -use serde::{Deserialize, Serialize}; const SIZE: usize = 50; -#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq)] +#[derive(Debug)] pub struct PersonalAccessToken { pub user_id: UserId, pub name: String, pub token: String, - pub expiry: Option, + pub expiry_at: Option, } impl PersonalAccessToken { // Raw token is generated and returned only once - pub fn new(user_id: UserId, name: &str, now: u64, expiry: Option) -> (Self, String) { + pub fn new( + user_id: UserId, + name: &str, + now: IggyTimestamp, + expiry: IggyExpiry, + ) -> (Self, String) { let mut buffer: [u8; SIZE] = [0; SIZE]; let system_random = ring::rand::SystemRandom::new(); system_random.fill(&mut buffer).unwrap(); let token = as_base64(&buffer); let token_hash = Self::hash_token(&token); - let expiry = expiry.map(|e| now + e as u64 * 1_000_000); ( Self { user_id, name: name.to_string(), token: token_hash, - expiry, + expiry_at: Self::calculate_expiry_at(now, expiry), }, token, ) } - pub fn is_expired(&self, now: u64) -> 
bool { - match self.expiry { - Some(expiry) => now > expiry, + pub fn raw( + user_id: UserId, + name: &str, + token_hash: &str, + expiry_at: Option, + ) -> Self { + Self { + user_id, + name: name.into(), + token: token_hash.into(), + expiry_at, + } + } + + pub fn is_expired(&self, now: IggyTimestamp) -> bool { + match self.expiry_at { None => false, + Some(expiry_at) => expiry_at.to_micros() <= now.to_micros(), } } pub fn hash_token(token: &str) -> String { hash::calculate_256(token.as_bytes()) } + + pub fn calculate_expiry_at(now: IggyTimestamp, expiry: IggyExpiry) -> Option { + match expiry { + IggyExpiry::ExpireDuration(expiry) => { + Some(IggyTimestamp::from(now.to_micros() + expiry.as_micros())) + } + IggyExpiry::NeverExpire => None, + } + } } #[cfg(test)] mod tests { use super::*; + use iggy::utils::duration::IggyDuration; use iggy::utils::timestamp::IggyTimestamp; + #[test] fn personal_access_token_should_be_created_with_random_secure_value_and_hashed_successfully() { let user_id = 1; - let now = IggyTimestamp::now().to_micros(); + let now = IggyTimestamp::now(); let name = "test_token"; - let (personal_access_token, raw_token) = PersonalAccessToken::new(user_id, name, now, None); + let (personal_access_token, raw_token) = + PersonalAccessToken::new(user_id, name, now, IggyExpiry::NeverExpire); assert_eq!(personal_access_token.name, name); assert!(!personal_access_token.token.is_empty()); assert!(!raw_token.is_empty()); @@ -69,10 +100,12 @@ mod tests { #[test] fn personal_access_token_should_be_expired_given_passed_expiry() { let user_id = 1; - let now = IggyTimestamp::now().to_micros(); - let expiry = 1; + let now = IggyTimestamp::now(); + let expiry_ms = 10; + let expiry = IggyExpiry::ExpireDuration(IggyDuration::from(expiry_ms)); let name = "test_token"; - let (personal_access_token, _) = PersonalAccessToken::new(user_id, name, now, Some(expiry)); - assert!(personal_access_token.is_expired(now + expiry as u64 * 1_000_000 + 1)); + let 
(personal_access_token, _) = PersonalAccessToken::new(user_id, name, now, expiry); + let later = IggyTimestamp::from(now.to_micros() + expiry_ms + 1); + assert!(personal_access_token.is_expired(later)); } } diff --git a/server/src/streaming/personal_access_tokens/storage.rs b/server/src/streaming/personal_access_tokens/storage.rs deleted file mode 100644 index 74a7f0559..000000000 --- a/server/src/streaming/personal_access_tokens/storage.rs +++ /dev/null @@ -1,210 +0,0 @@ -use crate::streaming::personal_access_tokens::personal_access_token::PersonalAccessToken; -use crate::streaming::storage::{PersonalAccessTokenStorage, Storage}; -use anyhow::Context; -use async_trait::async_trait; -use iggy::error::IggyError; -use iggy::models::user_info::UserId; -use sled::Db; -use std::str::from_utf8; -use std::sync::Arc; -use tracing::info; - -const KEY_PREFIX: &str = "personal_access_token"; - -#[derive(Debug)] -pub struct FilePersonalAccessTokenStorage { - db: Arc, -} - -impl FilePersonalAccessTokenStorage { - pub fn new(db: Arc) -> Self { - Self { db } - } -} - -unsafe impl Send for FilePersonalAccessTokenStorage {} -unsafe impl Sync for FilePersonalAccessTokenStorage {} - -#[async_trait] -impl PersonalAccessTokenStorage for FilePersonalAccessTokenStorage { - async fn load_all(&self) -> Result, IggyError> { - let mut personal_access_tokens = Vec::new(); - for data in self.db.scan_prefix(format!("{}:token:", KEY_PREFIX)) { - let personal_access_token = match data - .with_context(|| format!("Failed to load personal access token, when searching by key: {}", KEY_PREFIX)){ - Ok((_, value)) => match rmp_serde::from_slice::(&value) - .with_context(|| format!("Failed to deserialize personal access token, when searching by key: {}", KEY_PREFIX)){ - Ok(personal_access_token) => personal_access_token, - Err(err) => { - return Err(IggyError::CannotDeserializeResource(err)); - } - }, - Err(err) => { - return Err(IggyError::CannotLoadResource(err)); - } - }; - 
personal_access_tokens.push(personal_access_token); - } - - Ok(personal_access_tokens) - } - - async fn load_for_user(&self, user_id: UserId) -> Result, IggyError> { - let mut personal_access_tokens = Vec::new(); - let key = format!("{}:user:{}:", KEY_PREFIX, user_id); - for data in self.db.scan_prefix(&key) { - match data.with_context(|| { - format!( - "Failed to load personal access token, for user ID: {}", - user_id - ) - }) { - Ok((_, value)) => { - let token = from_utf8(&value)?; - let personal_access_token = self.load_by_token(token).await?; - personal_access_tokens.push(personal_access_token); - } - Err(err) => { - return Err(IggyError::CannotLoadResource(err)); - } - }; - } - - Ok(personal_access_tokens) - } - - async fn load_by_token(&self, token: &str) -> Result { - let key = get_key(token); - return match self - .db - .get(&key) - .with_context(|| format!("Failed to load personal access token, token: {}", token)) - { - Ok(personal_access_token) => { - if let Some(personal_access_token) = personal_access_token { - let personal_access_token = - rmp_serde::from_slice::(&personal_access_token) - .with_context(|| "Failed to deserialize personal access token"); - if let Err(err) = personal_access_token { - Err(IggyError::CannotDeserializeResource(err)) - } else { - Ok(personal_access_token.unwrap()) - } - } else { - Err(IggyError::ResourceNotFound(key)) - } - } - Err(err) => Err(IggyError::CannotLoadResource(err)), - }; - } - - async fn load_by_name( - &self, - user_id: UserId, - name: &str, - ) -> Result { - let key = get_name_key(user_id, name); - return match self.db.get(&key).with_context(|| { - format!( - "Failed to load personal access token, token_name: {}, user_id: {}", - name, user_id - ) - }) { - Ok(token) => { - if let Some(token) = token { - let token = from_utf8(&token) - .with_context(|| "Failed to deserialize personal access token"); - if let Err(err) = token { - Err(IggyError::CannotDeserializeResource(err)) - } else { - 
Ok(self.load_by_token(token.unwrap()).await?) - } - } else { - Err(IggyError::ResourceNotFound(key)) - } - } - Err(err) => Err(IggyError::CannotLoadResource(err)), - }; - } - - async fn delete_for_user(&self, user_id: UserId, name: &str) -> Result<(), IggyError> { - let personal_access_token = self.load_by_name(user_id, name).await?; - info!("Deleting personal access token with name: {name} for user with ID: {user_id}..."); - let key = get_name_key(user_id, name); - if let Err(err) = self - .db - .remove(key) - .with_context(|| "Failed to delete personal access token") - { - return Err(IggyError::CannotDeleteResource(err)); - } - let key = get_key(&personal_access_token.token); - if let Err(err) = self - .db - .remove(key) - .with_context(|| "Failed to delete personal access token") - { - return Err(IggyError::CannotDeleteResource(err)); - } - info!("Deleted personal access token with name: {name} for user with ID: {user_id}."); - Ok(()) - } -} - -#[async_trait] -impl Storage for FilePersonalAccessTokenStorage { - async fn load(&self, personal_access_token: &mut PersonalAccessToken) -> Result<(), IggyError> { - self.load_by_name(personal_access_token.user_id, &personal_access_token.name) - .await?; - Ok(()) - } - - async fn save(&self, personal_access_token: &PersonalAccessToken) -> Result<(), IggyError> { - let key = get_key(&personal_access_token.token); - match rmp_serde::to_vec(&personal_access_token) - .with_context(|| "Failed to serialize personal access token") - { - Ok(data) => { - if let Err(err) = self - .db - .insert(key, data) - .with_context(|| "Failed to save personal access token") - { - return Err(IggyError::CannotSaveResource(err)); - } - if let Err(err) = self - .db - .insert( - get_name_key(personal_access_token.user_id, &personal_access_token.name), - personal_access_token.token.as_bytes(), - ) - .with_context(|| "Failed to save personal access token") - { - return Err(IggyError::CannotSaveResource(err)); - } - } - Err(err) => { - return 
Err(IggyError::CannotSerializeResource(err)); - } - } - - info!( - "Saved personal access token for user with ID: {}.", - personal_access_token.user_id - ); - Ok(()) - } - - async fn delete(&self, personal_access_token: &PersonalAccessToken) -> Result<(), IggyError> { - self.delete_for_user(personal_access_token.user_id, &personal_access_token.name) - .await - } -} - -fn get_key(token_hash: &str) -> String { - format!("{}:token:{}", KEY_PREFIX, token_hash) -} - -fn get_name_key(user_id: UserId, name: &str) -> String { - format!("{}:user:{}:{}", KEY_PREFIX, user_id, name) -} diff --git a/server/src/streaming/segments/index.rs b/server/src/streaming/segments/index.rs index 04cb32511..2c8cd0e20 100644 --- a/server/src/streaming/segments/index.rs +++ b/server/src/streaming/segments/index.rs @@ -74,6 +74,7 @@ mod tests { use super::*; use crate::configs::system::{SegmentConfig, SystemConfig}; use crate::streaming::storage::tests::get_test_system_storage; + use iggy::utils::expiry::IggyExpiry; use std::sync::atomic::AtomicU64; use std::sync::Arc; @@ -98,7 +99,7 @@ mod tests { start_offset, config, storage, - None, + IggyExpiry::NeverExpire, Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), Arc::new(AtomicU64::new(0)), diff --git a/server/src/streaming/segments/messages.rs b/server/src/streaming/segments/messages.rs index ca3347c44..620427aaa 100644 --- a/server/src/streaming/segments/messages.rs +++ b/server/src/streaming/segments/messages.rs @@ -7,6 +7,7 @@ use crate::streaming::segments::time_index::TimeIndex; use crate::streaming::sizeable::Sizeable; use bytes::BufMut; use iggy::error::IggyError; +use iggy::utils::timestamp::IggyTimestamp; use std::sync::atomic::Ordering; use std::sync::Arc; use tracing::{trace, warn}; @@ -248,7 +249,7 @@ impl Segment { fn store_offset_and_timestamp_index_for_batch( &mut self, batch_last_offset: u64, - batch_max_timestamp: u64, + batch_max_timestamp: IggyTimestamp, ) { let relative_offset = (batch_last_offset - 
self.start_offset) as u32; match (&mut self.indexes, &mut self.time_indexes) { @@ -282,7 +283,8 @@ impl Segment { self.unsaved_indexes.put_u32_le(relative_offset); self.unsaved_indexes.put_u32_le(self.size_bytes); self.unsaved_timestamps.put_u32_le(relative_offset); - self.unsaved_timestamps.put_u64_le(batch_max_timestamp); + self.unsaved_timestamps + .put_u64_le(batch_max_timestamp.into()); } pub async fn persist_messages(&mut self) -> Result { diff --git a/server/src/streaming/segments/segment.rs b/server/src/streaming/segments/segment.rs index 799da84f4..b731cf32d 100644 --- a/server/src/streaming/segments/segment.rs +++ b/server/src/streaming/segments/segment.rs @@ -1,11 +1,11 @@ -use crate::compat::binary_schema::BinarySchema; -use crate::compat::chunks_error::IntoTryChunksError; -use crate::compat::conversion_writer::ConversionWriter; -use crate::compat::message_converter::MessageFormatConverterPersister; -use crate::compat::message_stream::MessageStream; -use crate::compat::snapshots::retained_batch_snapshot::RetainedMessageBatchSnapshot; -use crate::compat::streams::retained_batch::RetainedBatchWriter; -use crate::compat::streams::retained_message::RetainedMessageStream; +use crate::compat::message_conversion::binary_schema::BinarySchema; +use crate::compat::message_conversion::chunks_error::IntoTryChunksError; +use crate::compat::message_conversion::conversion_writer::ConversionWriter; +use crate::compat::message_conversion::message_converter::MessageFormatConverterPersister; +use crate::compat::message_conversion::message_stream::MessageStream; +use crate::compat::message_conversion::snapshots::retained_batch_snapshot::RetainedMessageBatchSnapshot; +use crate::compat::message_conversion::streams::retained_batch::RetainedBatchWriter; +use crate::compat::message_conversion::streams::retained_message::RetainedMessageStream; use crate::configs::system::SystemConfig; use crate::streaming::batching::message_batch::RetainedMessageBatch; use 
crate::streaming::segments::index::Index; @@ -15,6 +15,7 @@ use crate::streaming::storage::SystemStorage; use crate::streaming::utils::file; use futures::{pin_mut, TryStreamExt}; use iggy::error::IggyError; +use iggy::utils::expiry::IggyExpiry; use iggy::utils::timestamp::IggyTimestamp; use std::sync::atomic::AtomicU64; use std::sync::Arc; @@ -38,6 +39,7 @@ pub struct Segment { pub log_path: String, pub time_index_path: String, pub size_bytes: u32, + pub max_size_bytes: u32, pub size_of_parent_stream: Arc, pub size_of_parent_topic: Arc, pub size_of_parent_partition: Arc, @@ -45,7 +47,7 @@ pub struct Segment { pub messages_count_of_parent_topic: Arc, pub messages_count_of_parent_partition: Arc, pub is_closed: bool, - pub(crate) message_expiry: Option, + pub(crate) message_expiry: IggyExpiry, pub(crate) unsaved_batches: Option>>, pub(crate) config: Arc, pub(crate) indexes: Option>, @@ -64,7 +66,7 @@ impl Segment { start_offset: u64, config: Arc, storage: Arc, - message_expiry: Option, + message_expiry: IggyExpiry, size_of_parent_stream: Arc, size_of_parent_topic: Arc, size_of_parent_partition: Arc, @@ -85,6 +87,7 @@ impl Segment { index_path: Self::get_index_path(&path), time_index_path: Self::get_time_index_path(&path), size_bytes: 0, + max_size_bytes: config.segment.size.as_bytes_u64() as u32, message_expiry, indexes: match config.segment.cache_indexes { true => Some(Vec::new()), @@ -110,32 +113,32 @@ impl Segment { } pub async fn is_full(&self) -> bool { - if self.size_bytes >= self.config.segment.size.as_bytes_u64() as u32 { + if self.size_bytes >= self.max_size_bytes { return true; } - self.is_expired(IggyTimestamp::now().to_micros()).await + self.is_expired(IggyTimestamp::now()).await } - pub async fn is_expired(&self, now: u64) -> bool { - if self.message_expiry.is_none() { - return false; - } + pub async fn is_expired(&self, now: IggyTimestamp) -> bool { + match self.message_expiry { + IggyExpiry::NeverExpire => false, + IggyExpiry::ExpireDuration(expiry) => 
{ + let last_messages = self.get_messages(self.current_offset, 1).await; + if last_messages.is_err() { + return false; + } - let last_messages = self.get_messages(self.current_offset, 1).await; - if last_messages.is_err() { - return false; - } + let last_messages = last_messages.unwrap(); + if last_messages.is_empty() { + return false; + } - let last_messages = last_messages.unwrap(); - if last_messages.is_empty() { - return false; + let last_message = &last_messages[0]; + let last_message_timestamp: IggyTimestamp = last_message.timestamp; + last_message_timestamp.to_micros() + expiry.as_micros() <= now.to_micros() + } } - - let last_message = &last_messages[0]; - // Message expiry is in seconds, and timestamp is in microseconds - let message_expiry = (self.message_expiry.unwrap() * 1000000) as u64; - (last_message.timestamp + message_expiry) <= now } fn get_log_path(path: &str) -> String { @@ -228,6 +231,7 @@ mod tests { use super::*; use crate::configs::system::SegmentConfig; use crate::streaming::storage::tests::get_test_system_storage; + use iggy::utils::duration::IggyDuration; #[tokio::test] async fn should_be_created_given_valid_parameters() { @@ -241,7 +245,7 @@ mod tests { let log_path = Segment::get_log_path(&path); let index_path = Segment::get_index_path(&path); let time_index_path = Segment::get_time_index_path(&path); - let message_expiry = Some(10); + let message_expiry = IggyExpiry::ExpireDuration(IggyDuration::from(10)); let size_of_parent_stream = Arc::new(AtomicU64::new(0)); let size_of_parent_topic = Arc::new(AtomicU64::new(0)); let size_of_parent_partition = Arc::new(AtomicU64::new(0)); @@ -297,7 +301,7 @@ mod tests { }, ..Default::default() }); - let message_expiry = None; + let message_expiry = IggyExpiry::NeverExpire; let size_of_parent_stream = Arc::new(AtomicU64::new(0)); let size_of_parent_topic = Arc::new(AtomicU64::new(0)); let size_of_parent_partition = Arc::new(AtomicU64::new(0)); @@ -338,7 +342,7 @@ mod tests { }, ..Default::default() 
}); - let message_expiry = None; + let message_expiry = IggyExpiry::NeverExpire; let size_of_parent_stream = Arc::new(AtomicU64::new(0)); let size_of_parent_topic = Arc::new(AtomicU64::new(0)); let size_of_parent_partition = Arc::new(AtomicU64::new(0)); diff --git a/server/src/streaming/segments/storage.rs b/server/src/streaming/segments/storage.rs index ba3277f25..72a1e41fb 100644 --- a/server/src/streaming/segments/storage.rs +++ b/server/src/streaming/segments/storage.rs @@ -6,7 +6,7 @@ use crate::streaming::segments::index::{Index, IndexRange}; use crate::streaming::segments::segment::Segment; use crate::streaming::segments::time_index::TimeIndex; use crate::streaming::sizeable::Sizeable; -use crate::streaming::storage::{SegmentStorage, Storage}; +use crate::streaming::storage::SegmentStorage; use crate::streaming::utils::file; use crate::streaming::utils::head_tail_buf::HeadTailBuffer; use anyhow::Context; @@ -15,6 +15,7 @@ use bytes::{BufMut, BytesMut}; use iggy::error::IggyError; use iggy::utils::byte_size::IggyByteSize; use iggy::utils::checksum; +use iggy::utils::timestamp::IggyTimestamp; use std::io::SeekFrom; use std::path::Path; use std::sync::atomic::Ordering; @@ -42,9 +43,8 @@ impl FileSegmentStorage { unsafe impl Send for FileSegmentStorage {} unsafe impl Sync for FileSegmentStorage {} -// TODO: Split into smaller components. 
#[async_trait] -impl Storage for FileSegmentStorage { +impl SegmentStorage for FileSegmentStorage { async fn load(&self, segment: &mut Segment) -> Result<(), IggyError> { info!( "Loading segment from disk for start offset: {} and partition with ID: {} for topic with ID: {} and stream with ID: {} ...", @@ -208,10 +208,7 @@ impl Storage for FileSegmentStorage { ); Ok(()) } -} -#[async_trait] -impl SegmentStorage for FileSegmentStorage { async fn load_message_batches( &self, segment: &Segment, @@ -448,7 +445,7 @@ impl SegmentStorage for FileSegmentStorage { async fn try_load_time_index_for_timestamp( &self, segment: &Segment, - timestamp: u64, + timestamp: IggyTimestamp, ) -> Result, IggyError> { trace!("Loading time indexes from file..."); let file = file::open(&segment.time_index_path).await?; @@ -463,13 +460,13 @@ impl SegmentStorage for FileSegmentStorage { let mut idx_pred = HeadTailBuffer::new(); loop { let offset = reader.read_u32_le().await?; - let time = reader.read_u64_le().await?; + let time = reader.read_u64_le().await?.into(); let idx = TimeIndex { relative_offset: offset, timestamp: time, }; idx_pred.push(idx); - if time >= timestamp { + if time.to_micros() >= timestamp.to_micros() { return Ok(idx_pred.tail()); } read_bytes += TIME_INDEX_SIZE as usize; @@ -499,13 +496,17 @@ impl SegmentStorage for FileSegmentStorage { ); error })?; - let timestamp = reader.read_u64().await.map_err(|error| { - error!( - "Cannot read timestamp from index file for offset: {}. Error: {}", - offset, &error - ); - error - })?; + let timestamp = reader + .read_u64() + .await + .map_err(|error| { + error!( + "Cannot read timestamp from index file for offset: {}. Error: {}", + offset, &error + ); + error + })? 
+ .into(); indexes.push(TimeIndex { relative_offset: offset, timestamp, @@ -540,7 +541,7 @@ impl SegmentStorage for FileSegmentStorage { file.seek(SeekFrom::Start(last_index_position as u64)) .await?; let index_offset = file.read_u32_le().await?; - let timestamp = file.read_u64_le().await?; + let timestamp = file.read_u64_le().await?.into(); let index = TimeIndex { relative_offset: index_offset, timestamp, @@ -603,7 +604,8 @@ async fn load_batches_by_range( let max_timestamp = reader .read_u64_le() .await - .map_err(|_| IggyError::CannotReadMaxTimestamp)?; + .map_err(|_| IggyError::CannotReadMaxTimestamp)? + .into(); let last_offset = batch_base_offset + (last_offset_delta as u64); let index_last_offset = index_range.end.relative_offset as u64 + segment.start_offset; @@ -662,7 +664,8 @@ async fn load_messages_by_size( let max_timestamp = reader .read_u64_le() .await - .map_err(|_| IggyError::CannotReadMaxTimestamp)?; + .map_err(|_| IggyError::CannotReadMaxTimestamp)? + .into(); let payload_len = batch_length as usize; let mut payload = BytesMut::with_capacity(payload_len); diff --git a/server/src/streaming/segments/time_index.rs b/server/src/streaming/segments/time_index.rs index 6c43a6439..afb4103a4 100644 --- a/server/src/streaming/segments/time_index.rs +++ b/server/src/streaming/segments/time_index.rs @@ -1,11 +1,14 @@ +use iggy::utils::timestamp::IggyTimestamp; + #[derive(Debug, Default, Eq, Clone, Copy)] pub struct TimeIndex { pub relative_offset: u32, - pub timestamp: u64, + pub timestamp: IggyTimestamp, } impl PartialEq for TimeIndex { fn eq(&self, other: &Self) -> bool { - self.relative_offset == other.relative_offset && self.timestamp == other.timestamp + self.relative_offset == other.relative_offset + && self.timestamp.to_micros() == other.timestamp.to_micros() } } diff --git a/server/src/streaming/storage.rs b/server/src/streaming/storage.rs index 61071cfce..292f8e586 100644 --- a/server/src/streaming/storage.rs +++ b/server/src/streaming/storage.rs @@ 
-1,9 +1,9 @@ use super::batching::message_batch::RetainedMessageBatch; +use crate::configs::system::SystemConfig; +use crate::state::system::{PartitionState, StreamState, TopicState}; use crate::streaming::partitions::partition::{ConsumerOffset, Partition}; use crate::streaming::partitions::storage::FilePartitionStorage; use crate::streaming::persistence::persister::Persister; -use crate::streaming::personal_access_tokens::personal_access_token::PersonalAccessToken; -use crate::streaming::personal_access_tokens::storage::FilePersonalAccessTokenStorage; use crate::streaming::segments::index::{Index, IndexRange}; use crate::streaming::segments::segment::Segment; use crate::streaming::segments::storage::FileSegmentStorage; @@ -12,88 +12,56 @@ use crate::streaming::streams::storage::FileStreamStorage; use crate::streaming::streams::stream::Stream; use crate::streaming::systems::info::SystemInfo; use crate::streaming::systems::storage::FileSystemInfoStorage; -use crate::streaming::topics::consumer_group::ConsumerGroup; use crate::streaming::topics::storage::FileTopicStorage; use crate::streaming::topics::topic::Topic; -use crate::streaming::users::storage::FileUserStorage; -use crate::streaming::users::user::User; use async_trait::async_trait; use iggy::consumer::ConsumerKind; use iggy::error::IggyError; -use iggy::models::user_info::UserId; -use sled::Db; +use iggy::utils::timestamp::IggyTimestamp; use std::fmt::{Debug, Formatter}; use std::sync::Arc; #[async_trait] -pub trait Storage: Sync + Send { - async fn load(&self, component: &mut T) -> Result<(), IggyError>; - async fn save(&self, component: &T) -> Result<(), IggyError>; - async fn delete(&self, component: &T) -> Result<(), IggyError>; +pub trait SystemInfoStorage: Sync + Send { + async fn load(&self) -> Result; + async fn save(&self, system_info: &SystemInfo) -> Result<(), IggyError>; } #[async_trait] -pub trait SystemInfoStorage: Storage {} - -#[async_trait] -pub trait UserStorage: Storage { - async fn 
load_by_id(&self, id: UserId) -> Result; - async fn load_by_username(&self, username: &str) -> Result; - async fn load_all(&self) -> Result, IggyError>; +pub trait StreamStorage: Send + Sync { + async fn load(&self, stream: &mut Stream, state: StreamState) -> Result<(), IggyError>; + async fn save(&self, stream: &Stream) -> Result<(), IggyError>; + async fn delete(&self, stream: &Stream) -> Result<(), IggyError>; } #[async_trait] -pub trait PersonalAccessTokenStorage: Storage { - async fn load_all(&self) -> Result, IggyError>; - async fn load_for_user(&self, user_id: UserId) -> Result, IggyError>; - async fn load_by_token(&self, token: &str) -> Result; - async fn load_by_name( - &self, - user_id: UserId, - name: &str, - ) -> Result; - async fn delete_for_user(&self, user_id: UserId, name: &str) -> Result<(), IggyError>; +pub trait TopicStorage: Send + Sync { + async fn load(&self, topic: &mut Topic, state: TopicState) -> Result<(), IggyError>; + async fn save(&self, topic: &Topic) -> Result<(), IggyError>; + async fn delete(&self, topic: &Topic) -> Result<(), IggyError>; } #[async_trait] -pub trait StreamStorage: Storage {} - -#[async_trait] -pub trait TopicStorage: Storage { - async fn save_consumer_group( - &self, - topic: &Topic, - consumer_group: &ConsumerGroup, - ) -> Result<(), IggyError>; - async fn load_consumer_groups(&self, topic: &Topic) -> Result, IggyError>; - async fn delete_consumer_group( - &self, - topic: &Topic, - consumer_group: &ConsumerGroup, - ) -> Result<(), IggyError>; -} - -#[async_trait] -pub trait PartitionStorage: Storage { +pub trait PartitionStorage: Send + Sync { + async fn load(&self, partition: &mut Partition, state: PartitionState) + -> Result<(), IggyError>; + async fn save(&self, partition: &Partition) -> Result<(), IggyError>; + async fn delete(&self, partition: &Partition) -> Result<(), IggyError>; async fn save_consumer_offset(&self, offset: &ConsumerOffset) -> Result<(), IggyError>; async fn load_consumer_offsets( &self, 
kind: ConsumerKind, - stream_id: u32, - topic_id: u32, - partition_id: u32, + path: &str, ) -> Result, IggyError>; - async fn delete_consumer_offsets( - &self, - kind: ConsumerKind, - stream_id: u32, - topic_id: u32, - partition_id: u32, - ) -> Result<(), IggyError>; + async fn delete_consumer_offsets(&self, path: &str) -> Result<(), IggyError>; + async fn delete_consumer_offset(&self, path: &str) -> Result<(), IggyError>; } #[async_trait] -pub trait SegmentStorage: Storage { +pub trait SegmentStorage: Send + Sync { + async fn load(&self, segment: &mut Segment) -> Result<(), IggyError>; + async fn save(&self, segment: &Segment) -> Result<(), IggyError>; + async fn delete(&self, segment: &Segment) -> Result<(), IggyError>; async fn load_message_batches( &self, segment: &Segment, @@ -122,7 +90,7 @@ pub trait SegmentStorage: Storage { async fn try_load_time_index_for_timestamp( &self, segment: &Segment, - timestamp: u64, + timestamp: IggyTimestamp, ) -> Result, IggyError>; async fn load_all_time_indexes(&self, segment: &Segment) -> Result, IggyError>; async fn load_last_time_index(&self, segment: &Segment) @@ -133,24 +101,25 @@ pub trait SegmentStorage: Storage { #[derive(Debug)] pub struct SystemStorage { pub info: Arc, - pub user: Arc, - pub personal_access_token: Arc, pub stream: Arc, pub topic: Arc, pub partition: Arc, pub segment: Arc, + pub persister: Arc, } impl SystemStorage { - pub fn new(db: Arc, persister: Arc) -> Self { + pub fn new(config: Arc, persister: Arc) -> Self { Self { - info: Arc::new(FileSystemInfoStorage::new(db.clone())), - user: Arc::new(FileUserStorage::new(db.clone())), - personal_access_token: Arc::new(FilePersonalAccessTokenStorage::new(db.clone())), - stream: Arc::new(FileStreamStorage::new(db.clone())), - topic: Arc::new(FileTopicStorage::new(db.clone())), - partition: Arc::new(FilePartitionStorage::new(db.clone())), + info: Arc::new(FileSystemInfoStorage::new( + config.get_state_info_path(), + persister.clone(), + )), + stream: 
Arc::new(FileStreamStorage), + topic: Arc::new(FileTopicStorage), + partition: Arc::new(FilePartitionStorage::new(persister.clone())), segment: Arc::new(FileSegmentStorage::new(persister.clone())), + persister, } } } @@ -161,18 +130,6 @@ impl Debug for dyn SystemInfoStorage { } } -impl Debug for dyn UserStorage { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "UserStorage") - } -} - -impl Debug for dyn PersonalAccessTokenStorage { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "PersonalAccessTokenStorage") - } -} - impl Debug for dyn StreamStorage { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "StreamStorage") @@ -209,119 +166,42 @@ pub(crate) mod tests { use async_trait::async_trait; use std::sync::Arc; + struct TestPersister {} struct TestSystemInfoStorage {} - struct TestUserStorage {} - struct TestPersonalAccessTokenStorage {} struct TestStreamStorage {} struct TestTopicStorage {} struct TestPartitionStorage {} struct TestSegmentStorage {} #[async_trait] - impl Storage for TestSystemInfoStorage { - async fn load(&self, _system_info: &mut SystemInfo) -> Result<(), IggyError> { - Ok(()) - } - - async fn save(&self, _system_info: &SystemInfo) -> Result<(), IggyError> { - Ok(()) - } - - async fn delete(&self, _system_info: &SystemInfo) -> Result<(), IggyError> { - Ok(()) - } - } - - #[async_trait] - impl SystemInfoStorage for TestSystemInfoStorage {} - - #[async_trait] - impl Storage for TestUserStorage { - async fn load(&self, _user: &mut User) -> Result<(), IggyError> { - Ok(()) - } - - async fn save(&self, _user: &User) -> Result<(), IggyError> { - Ok(()) - } - - async fn delete(&self, _user: &User) -> Result<(), IggyError> { - Ok(()) - } - } - - #[async_trait] - impl UserStorage for TestUserStorage { - async fn load_by_id(&self, _id: UserId) -> Result { - Ok(User::default()) - } - - async fn load_by_username(&self, _username: &str) -> Result { - Ok(User::default()) - } - - async fn 
load_all(&self) -> Result, IggyError> { - Ok(vec![]) - } - } - - #[async_trait] - impl Storage for TestPersonalAccessTokenStorage { - async fn load( - &self, - _personal_access_token: &mut PersonalAccessToken, - ) -> Result<(), IggyError> { + impl Persister for TestPersister { + async fn append(&self, _path: &str, _bytes: &[u8]) -> Result<(), IggyError> { Ok(()) } - async fn save( - &self, - _personal_access_token: &PersonalAccessToken, - ) -> Result<(), IggyError> { + async fn overwrite(&self, _path: &str, _bytes: &[u8]) -> Result<(), IggyError> { Ok(()) } - async fn delete( - &self, - _personal_access_token: &PersonalAccessToken, - ) -> Result<(), IggyError> { + async fn delete(&self, _path: &str) -> Result<(), IggyError> { Ok(()) } } #[async_trait] - impl PersonalAccessTokenStorage for TestPersonalAccessTokenStorage { - async fn load_all(&self) -> Result, IggyError> { - Ok(vec![]) - } - - async fn load_for_user( - &self, - _user_id: UserId, - ) -> Result, IggyError> { - Ok(vec![]) - } - - async fn load_by_token(&self, _token: &str) -> Result { - Ok(PersonalAccessToken::default()) + impl SystemInfoStorage for TestSystemInfoStorage { + async fn load(&self) -> Result { + Ok(SystemInfo::default()) } - async fn load_by_name( - &self, - _user_id: UserId, - _name: &str, - ) -> Result { - Ok(PersonalAccessToken::default()) - } - - async fn delete_for_user(&self, _user_id: UserId, _name: &str) -> Result<(), IggyError> { + async fn save(&self, _system_info: &SystemInfo) -> Result<(), IggyError> { Ok(()) } } #[async_trait] - impl Storage for TestStreamStorage { - async fn load(&self, _stream: &mut Stream) -> Result<(), IggyError> { + impl StreamStorage for TestStreamStorage { + async fn load(&self, _stream: &mut Stream, _state: StreamState) -> Result<(), IggyError> { Ok(()) } @@ -334,11 +214,9 @@ pub(crate) mod tests { } } - impl StreamStorage for TestStreamStorage {} - #[async_trait] - impl Storage for TestTopicStorage { - async fn load(&self, _topic: &mut Topic) -> 
Result<(), IggyError> { + impl TopicStorage for TestTopicStorage { + async fn load(&self, _topic: &mut Topic, _state: TopicState) -> Result<(), IggyError> { Ok(()) } @@ -352,36 +230,14 @@ pub(crate) mod tests { } #[async_trait] - impl TopicStorage for TestTopicStorage { - async fn save_consumer_group( - &self, - _topic: &Topic, - _consumer_group: &ConsumerGroup, - ) -> Result<(), IggyError> { - Ok(()) - } - - async fn load_consumer_groups( - &self, - _topic: &Topic, - ) -> Result, IggyError> { - Ok(vec![]) - } - - async fn delete_consumer_group( + impl PartitionStorage for TestPartitionStorage { + async fn load( &self, - _topic: &Topic, - _consumer_group: &ConsumerGroup, + _partition: &mut Partition, + _state: PartitionState, ) -> Result<(), IggyError> { Ok(()) } - } - - #[async_trait] - impl Storage for TestPartitionStorage { - async fn load(&self, _partition: &mut Partition) -> Result<(), IggyError> { - Ok(()) - } async fn save(&self, _partition: &Partition) -> Result<(), IggyError> { Ok(()) @@ -390,10 +246,7 @@ pub(crate) mod tests { async fn delete(&self, _partition: &Partition) -> Result<(), IggyError> { Ok(()) } - } - #[async_trait] - impl PartitionStorage for TestPartitionStorage { async fn save_consumer_offset(&self, _offset: &ConsumerOffset) -> Result<(), IggyError> { Ok(()) } @@ -401,26 +254,22 @@ pub(crate) mod tests { async fn load_consumer_offsets( &self, _kind: ConsumerKind, - _stream_id: u32, - _topic_id: u32, - _partition_id: u32, + _path: &str, ) -> Result, IggyError> { Ok(vec![]) } - async fn delete_consumer_offsets( - &self, - _kind: ConsumerKind, - _stream_id: u32, - _topic_id: u32, - _partition_id: u32, - ) -> Result<(), IggyError> { + async fn delete_consumer_offsets(&self, _path: &str) -> Result<(), IggyError> { + Ok(()) + } + + async fn delete_consumer_offset(&self, _path: &str) -> Result<(), IggyError> { Ok(()) } } #[async_trait] - impl Storage for TestSegmentStorage { + impl SegmentStorage for TestSegmentStorage { async fn load(&self, 
_segment: &mut Segment) -> Result<(), IggyError> { Ok(()) } @@ -432,10 +281,7 @@ pub(crate) mod tests { async fn delete(&self, _segment: &Segment) -> Result<(), IggyError> { Ok(()) } - } - #[async_trait] - impl SegmentStorage for TestSegmentStorage { async fn load_message_batches( &self, _segment: &Segment, @@ -488,7 +334,7 @@ pub(crate) mod tests { async fn try_load_time_index_for_timestamp( &self, _segment: &Segment, - _timestamp: u64, + _timestamp: IggyTimestamp, ) -> Result, IggyError> { Ok(None) } @@ -515,12 +361,11 @@ pub(crate) mod tests { pub fn get_test_system_storage() -> SystemStorage { SystemStorage { info: Arc::new(TestSystemInfoStorage {}), - user: Arc::new(TestUserStorage {}), - personal_access_token: Arc::new(TestPersonalAccessTokenStorage {}), stream: Arc::new(TestStreamStorage {}), topic: Arc::new(TestTopicStorage {}), partition: Arc::new(TestPartitionStorage {}), segment: Arc::new(TestSegmentStorage {}), + persister: Arc::new(TestPersister {}), } } } diff --git a/server/src/streaming/streams/persistence.rs b/server/src/streaming/streams/persistence.rs index 6d9246dae..ca0bbb2f7 100644 --- a/server/src/streaming/streams/persistence.rs +++ b/server/src/streaming/streams/persistence.rs @@ -1,10 +1,11 @@ +use crate::state::system::StreamState; use crate::streaming::streams::stream::Stream; use iggy::error::IggyError; impl Stream { - pub async fn load(&mut self) -> Result<(), IggyError> { + pub async fn load(&mut self, state: StreamState) -> Result<(), IggyError> { let storage = self.storage.clone(); - storage.stream.load(self).await + storage.stream.load(self, state).await } pub async fn persist(&self) -> Result<(), IggyError> { diff --git a/server/src/streaming/streams/storage.rs b/server/src/streaming/streams/storage.rs index eacbfa864..544389e74 100644 --- a/server/src/streaming/streams/storage.rs +++ b/server/src/streaming/streams/storage.rs @@ -1,36 +1,26 @@ -use crate::streaming::storage::{Storage, StreamStorage}; +use 
crate::state::system::StreamState; +use crate::streaming::storage::StreamStorage; use crate::streaming::streams::stream::Stream; use crate::streaming::topics::topic::Topic; -use anyhow::Context; use async_trait::async_trait; use futures::future::join_all; use iggy::error::IggyError; use iggy::utils::timestamp::IggyTimestamp; use serde::{Deserialize, Serialize}; -use sled::Db; +use std::collections::HashSet; use std::path::Path; use std::sync::Arc; use tokio::fs; use tokio::fs::create_dir; use tokio::sync::Mutex; -use tracing::{error, info}; +use tracing::{error, info, warn}; #[derive(Debug)] -pub struct FileStreamStorage { - db: Arc, -} - -impl FileStreamStorage { - pub fn new(db: Arc) -> Self { - Self { db } - } -} +pub struct FileStreamStorage; unsafe impl Send for FileStreamStorage {} unsafe impl Sync for FileStreamStorage {} -impl StreamStorage for FileStreamStorage {} - #[derive(Debug, Serialize, Deserialize)] struct StreamData { name: String, @@ -38,46 +28,13 @@ struct StreamData { } #[async_trait] -impl Storage for FileStreamStorage { - async fn load(&self, stream: &mut Stream) -> Result<(), IggyError> { +impl StreamStorage for FileStreamStorage { + async fn load(&self, stream: &mut Stream, mut state: StreamState) -> Result<(), IggyError> { info!("Loading stream with ID: {} from disk...", stream.stream_id); if !Path::new(&stream.path).exists() { return Err(IggyError::StreamIdNotFound(stream.stream_id)); } - let key = get_key(stream.stream_id); - let stream_data = match self.db.get(&key).with_context(|| { - format!( - "Failed to load stream with ID: {}, key: {}", - stream.stream_id, key - ) - }) { - Ok(stream_data) => { - if let Some(stream_data) = stream_data { - let stream_data = rmp_serde::from_slice::(&stream_data) - .with_context(|| { - format!( - "Failed to deserialize stream with ID: {}, key: {}", - stream.stream_id, key - ) - }); - match stream_data { - Ok(stream_data) => stream_data, - Err(err) => { - return 
Err(IggyError::CannotDeserializeResource(err)); - } - } - } else { - return Err(IggyError::ResourceNotFound(key)); - } - } - Err(err) => { - return Err(IggyError::CannotLoadResource(err)); - } - }; - - stream.name = stream_data.name; - stream.created_at = stream_data.created_at; let mut unloaded_topics = Vec::new(); let dir_entries = fs::read_dir(&stream.topics_path).await; if dir_entries.is_err() { @@ -94,9 +51,23 @@ impl Storage for FileStreamStorage { } let topic_id = topic_id.unwrap(); + let topic_state = state.topics.get(&topic_id); + if topic_state.is_none() { + let stream_id = stream.stream_id; + error!("Topic with ID: '{topic_id}' for stream with ID: '{stream_id}' was not found in state, but exists on disk and will be removed."); + if let Err(error) = fs::remove_dir_all(&dir_entry.path()).await { + error!("Cannot remove topic directory: {error}"); + } else { + warn!("Topic with ID: '{topic_id}' for stream with ID: '{stream_id}' was removed."); + } + continue; + } + + let topic_state = topic_state.unwrap(); let topic = Topic::empty( stream.stream_id, topic_id, + &topic_state.name, stream.size_bytes.clone(), stream.messages_count.clone(), stream.segments_count.clone(), @@ -106,12 +77,32 @@ impl Storage for FileStreamStorage { unloaded_topics.push(topic); } + let state_topic_ids = state.topics.keys().copied().collect::>(); + let unloaded_topic_ids = unloaded_topics + .iter() + .map(|topic| topic.topic_id) + .collect::>(); + let missing_ids = state_topic_ids + .difference(&unloaded_topic_ids) + .copied() + .collect::>(); + if missing_ids.is_empty() { + info!( + "All topics for stream with ID: '{}' found on disk were found in state.", + stream.stream_id + ); + } else { + error!("Topics with IDs: '{missing_ids:?}' for stream with ID: '{}' were not found on disk.", stream.stream_id); + return Err(IggyError::MissingTopics(stream.stream_id)); + } + let loaded_topics = Arc::new(Mutex::new(Vec::new())); let mut load_topics = Vec::new(); for mut topic in 
unloaded_topics { let loaded_topics = loaded_topics.clone(); - let load_stream = tokio::spawn(async move { - match topic.load().await { + let topic_state = state.topics.remove(&topic.topic_id).unwrap(); + let load_topic = tokio::spawn(async move { + match topic.load(topic_state).await { Ok(_) => loaded_topics.lock().await.push(topic), Err(error) => error!( "Failed to load topic with ID: {} for stream with ID: {}. Error: {}", @@ -119,7 +110,7 @@ impl Storage for FileStreamStorage { ), } }); - load_topics.push(load_stream); + load_topics.push(load_topic); } join_all(load_topics).await; @@ -169,27 +160,6 @@ impl Storage for FileStreamStorage { )); } - let key = get_key(stream.stream_id); - match rmp_serde::to_vec(&StreamData { - name: stream.name.clone(), - created_at: stream.created_at, - }) - .with_context(|| format!("Failed to serialize stream with key: {}", key)) - { - Ok(data) => { - if let Err(err) = self - .db - .insert(&key, data) - .with_context(|| format!("Failed to insert stream with key: {}", key)) - { - return Err(IggyError::CannotSaveResource(err)); - } - } - Err(err) => { - return Err(IggyError::CannotSerializeResource(err)); - } - } - info!("Saved stream with ID: {}.", stream.stream_id); Ok(()) @@ -197,14 +167,6 @@ impl Storage for FileStreamStorage { async fn delete(&self, stream: &Stream) -> Result<(), IggyError> { info!("Deleting stream with ID: {}...", stream.stream_id); - let key = get_key(stream.stream_id); - if let Err(err) = self - .db - .remove(&key) - .with_context(|| format!("Failed to delete stream with key: {}", key)) - { - return Err(IggyError::CannotDeleteResource(err)); - } if fs::remove_dir_all(&stream.path).await.is_err() { return Err(IggyError::CannotDeleteStreamDirectory(stream.stream_id)); } @@ -212,7 +174,3 @@ impl Storage for FileStreamStorage { Ok(()) } } - -fn get_key(stream_id: u32) -> String { - format!("streams:{}", stream_id) -} diff --git a/server/src/streaming/streams/stream.rs b/server/src/streaming/streams/stream.rs 
index 969ab681b..a46e75911 100644 --- a/server/src/streaming/streams/stream.rs +++ b/server/src/streaming/streams/stream.rs @@ -25,8 +25,13 @@ pub struct Stream { } impl Stream { - pub fn empty(id: u32, config: Arc, storage: Arc) -> Self { - Stream::create(id, "", config, storage) + pub fn empty( + id: u32, + name: &str, + config: Arc, + storage: Arc, + ) -> Self { + Stream::create(id, name, config, storage) } pub fn create( diff --git a/server/src/streaming/streams/topics.rs b/server/src/streaming/streams/topics.rs index c27759fae..1e86e726e 100644 --- a/server/src/streaming/streams/topics.rs +++ b/server/src/streaming/streams/topics.rs @@ -4,8 +4,9 @@ use iggy::compression::compression_algorithm::CompressionAlgorithm; use iggy::error::IggyError; use iggy::identifier::{IdKind, Identifier}; use iggy::locking::IggySharedMutFn; -use iggy::utils::byte_size::IggyByteSize; +use iggy::utils::expiry::IggyExpiry; use iggy::utils::text; +use iggy::utils::topic_size::MaxTopicSize; use std::sync::atomic::Ordering; use tracing::{debug, info}; @@ -20,9 +21,9 @@ impl Stream { topic_id: Option, name: &str, partitions_count: u32, - message_expiry: Option, + message_expiry: IggyExpiry, compression_algorithm: CompressionAlgorithm, - max_topic_size: Option, + max_topic_size: MaxTopicSize, replication_factor: u8, ) -> Result<(), IggyError> { let name = text::to_lowercase_non_whitespace(name); @@ -51,7 +52,6 @@ impl Stream { return Err(IggyError::TopicIdAlreadyExists(id, self.stream_id)); } - // TODO: check if max_topic_size is not lower than system.segment.size let topic = Topic::create( self.stream_id, id, @@ -79,9 +79,9 @@ impl Stream { &mut self, id: &Identifier, name: &str, - message_expiry: Option, + message_expiry: IggyExpiry, compression_algorithm: CompressionAlgorithm, - max_topic_size: Option, + max_topic_size: MaxTopicSize, replication_factor: u8, ) -> Result<(), IggyError> { let topic_id; @@ -112,6 +112,15 @@ impl Stream { self.topics_ids.remove(&old_topic_name.clone()); 
self.topics_ids.insert(updated_name.clone(), topic_id); let topic = self.get_topic_mut(id)?; + + let max_topic_size = match max_topic_size { + MaxTopicSize::ServerDefault => match topic.config.retention_policy.max_topic_size { + MaxTopicSize::ServerDefault => MaxTopicSize::get_server_default(), + value => value, + }, + value => value, + }; + topic.name = updated_name; topic.message_expiry = message_expiry; topic.compression_algorithm = compression_algorithm; @@ -124,7 +133,6 @@ impl Stream { } topic.max_topic_size = max_topic_size; topic.replication_factor = replication_factor; - topic.persist().await?; info!("Updated topic: {topic}"); } @@ -227,6 +235,7 @@ mod tests { use super::*; use crate::configs::system::SystemConfig; use crate::streaming::storage::tests::get_test_system_storage; + use iggy::utils::byte_size::IggyByteSize; use std::sync::Arc; #[tokio::test] @@ -235,9 +244,9 @@ mod tests { let stream_name = "test_stream"; let topic_id = 2; let topic_name = "test_topic"; - let message_expiry = Some(10); + let message_expiry = IggyExpiry::NeverExpire; let compression_algorithm = CompressionAlgorithm::None; - let max_topic_size = Some(IggyByteSize::from(100)); + let max_topic_size = MaxTopicSize::Custom(IggyByteSize::from(100)); let config = Arc::new(SystemConfig::default()); let storage = Arc::new(get_test_system_storage()); let mut stream = Stream::create(stream_id, stream_name, config, storage); diff --git a/server/src/streaming/systems/info.rs b/server/src/streaming/systems/info.rs index 29518b19e..b3f291f98 100644 --- a/server/src/streaming/systems/info.rs +++ b/server/src/streaming/systems/info.rs @@ -1,4 +1,5 @@ use crate::streaming::systems::system::System; +use crate::versioning::SemanticVersion; use iggy::error::IggyError; use serde::{Deserialize, Serialize}; use std::collections::hash_map::DefaultHasher; @@ -7,8 +8,6 @@ use std::hash::{Hash, Hasher}; use std::str::FromStr; use tracing::info; -const VERSION: &str = env!("CARGO_PKG_VERSION"); - 
#[derive(Debug, Serialize, Deserialize, Default)] pub struct SystemInfo { pub version: Version, @@ -29,52 +28,55 @@ pub struct Migration { pub applied_at: u64, } -#[derive(Debug)] -pub struct SemanticVersion { - pub major: u32, - pub minor: u32, - pub patch: u32, -} - impl System { pub(crate) async fn load_version(&mut self) -> Result<(), IggyError> { - info!("Loading system info..."); - let mut system_info = SystemInfo::default(); - if let Err(err) = self.storage.info.load(&mut system_info).await { - match err { - IggyError::ResourceNotFound(_) => { - info!("System info not found, creating..."); - self.update_system_info(&mut system_info).await?; - } - _ => return Err(err), + let current_version = SemanticVersion::current()?; + let mut system_info; + let load_system_info = self.storage.info.load().await; + if load_system_info.is_err() { + let error = load_system_info.err().unwrap(); + if let IggyError::ResourceNotFound(_) = error { + info!("System info not found, creating..."); + system_info = SystemInfo::default(); + self.update_system_info(&mut system_info, ¤t_version) + .await?; + } else { + return Err(error); } + } else { + system_info = load_system_info.unwrap(); } info!("Loaded {system_info}"); - let current_version = SemanticVersion::from_str(VERSION)?; let loaded_version = SemanticVersion::from_str(&system_info.version.version)?; if current_version.is_equal_to(&loaded_version) { info!("System version {current_version} is up to date."); } else if current_version.is_greater_than(&loaded_version) { info!("System version {current_version} is greater than {loaded_version}, checking the available migrations..."); - self.update_system_info(&mut system_info).await?; + self.update_system_info(&mut system_info, ¤t_version) + .await?; } else { info!("System version {current_version} is lower than {loaded_version}, possible downgrade."); - self.update_system_info(&mut system_info).await?; + self.update_system_info(&mut system_info, ¤t_version) + .await?; } Ok(()) } - 
async fn update_system_info(&self, system_info: &mut SystemInfo) -> Result<(), IggyError> { - system_info.update_version(VERSION); + async fn update_system_info( + &self, + system_info: &mut SystemInfo, + version: &SemanticVersion, + ) -> Result<(), IggyError> { + system_info.update_version(version); self.storage.info.save(system_info).await?; Ok(()) } } impl SystemInfo { - pub fn update_version(&mut self, version: &str) { + pub fn update_version(&mut self, version: &SemanticVersion) { self.version.version = version.to_string(); let mut hasher = DefaultHasher::new(); self.version.hash.hash(&mut hasher); @@ -108,71 +110,3 @@ impl Display for SystemInfo { write!(f, "system info, {}", self.version) } } - -impl FromStr for SemanticVersion { - type Err = IggyError; - fn from_str(s: &str) -> Result { - let mut version = s.split('.'); - let major = version.next().unwrap().parse::()?; - let minor = version.next().unwrap().parse::()?; - let patch = version.next().unwrap().parse::()?; - Ok(SemanticVersion { - major, - minor, - patch, - }) - } -} - -impl SemanticVersion { - pub fn is_equal_to(&self, other: &SemanticVersion) -> bool { - self.major == other.major && self.minor == other.minor && self.patch == other.patch - } - - pub fn is_greater_than(&self, other: &SemanticVersion) -> bool { - if self.major > other.major { - return true; - } - if self.major < other.major { - return false; - } - - if self.minor > other.minor { - return true; - } - if self.minor < other.minor { - return false; - } - - if self.patch > other.patch { - return true; - } - if self.patch < other.patch { - return false; - } - - false - } -} - -impl Display for SemanticVersion { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{major}.{minor}.{patch}", - major = self.major, - minor = self.minor, - patch = self.patch - ) - } -} - -mod tests { - #[test] - fn should_load_the_expected_version_from_package_definition() { - use super::VERSION; - - const 
CARGO_TOML_VERSION: &str = env!("CARGO_PKG_VERSION"); - assert_eq!(VERSION, CARGO_TOML_VERSION); - } -} diff --git a/server/src/streaming/systems/personal_access_tokens.rs b/server/src/streaming/systems/personal_access_tokens.rs index 0399f9d13..b307cd2e9 100644 --- a/server/src/streaming/systems/personal_access_tokens.rs +++ b/server/src/streaming/systems/personal_access_tokens.rs @@ -3,6 +3,7 @@ use crate::streaming::session::Session; use crate::streaming::systems::system::System; use crate::streaming::users::user::User; use iggy::error::IggyError; +use iggy::utils::expiry::IggyExpiry; use iggy::utils::text; use iggy::utils::timestamp::IggyTimestamp; use tracing::{error, info}; @@ -11,51 +12,48 @@ impl System { pub async fn get_personal_access_tokens( &self, session: &Session, - ) -> Result, IggyError> { + ) -> Result, IggyError> { self.ensure_authenticated(session)?; let user_id = session.get_user_id(); + let user = self.get_user(&user_id.try_into()?)?; info!("Loading personal access tokens for user with ID: {user_id}...",); - let personal_access_tokens = self - .storage - .personal_access_token - .load_for_user(user_id) - .await?; + let personal_access_tokens: Vec<_> = user.personal_access_tokens.values().collect(); info!( - "Loaded {count} personal access tokens for user with ID: {user_id}.", - count = personal_access_tokens.len(), + "Loaded {} personal access tokens for user with ID: {user_id}.", + personal_access_tokens.len(), ); Ok(personal_access_tokens) } pub async fn create_personal_access_token( - &self, + &mut self, session: &Session, name: &str, - expiry: Option, + expiry: IggyExpiry, ) -> Result { self.ensure_authenticated(session)?; let user_id = session.get_user_id(); - let max_token_per_user = self.personal_access_token.max_tokens_per_user; - let name = text::to_lowercase_non_whitespace(name); - let personal_access_tokens = self - .storage - .personal_access_token - .load_for_user(user_id) - .await?; - if personal_access_tokens.len() as u32 >= 
max_token_per_user { - error!( - "User with ID: {} has reached the maximum number of personal access tokens: {}.", - user_id, max_token_per_user, + let identifier = user_id.try_into()?; + { + let user = self.get_user(&identifier)?; + let max_token_per_user = self.personal_access_token.max_tokens_per_user; + if user.personal_access_tokens.len() as u32 >= max_token_per_user { + error!( + "User with ID: {user_id} has reached the maximum number of personal access tokens: {max_token_per_user}.", ); - return Err(IggyError::PersonalAccessTokensLimitReached( - user_id, - max_token_per_user, - )); + return Err(IggyError::PersonalAccessTokensLimitReached( + user_id, + max_token_per_user, + )); + } } - if personal_access_tokens - .iter() - .any(|personal_access_token| personal_access_token.name == name) + let user = self.get_user_mut(&identifier)?; + let name = text::to_lowercase_non_whitespace(name); + if user + .personal_access_tokens + .values() + .any(|pat| pat.name == name) { error!("Personal access token: {name} for user with ID: {user_id} already exists."); return Err(IggyError::PersonalAccessTokenAlreadyExists(name, user_id)); @@ -63,28 +61,39 @@ impl System { info!("Creating personal access token: {name} for user with ID: {user_id}..."); let (personal_access_token, token) = - PersonalAccessToken::new(user_id, &name, IggyTimestamp::now().to_micros(), expiry); - self.storage - .personal_access_token - .save(&personal_access_token) - .await?; + PersonalAccessToken::new(user_id, &name, IggyTimestamp::now(), expiry); + user.personal_access_tokens + .insert(personal_access_token.token.clone(), personal_access_token); info!("Created personal access token: {name} for user with ID: {user_id}."); Ok(token) } pub async fn delete_personal_access_token( - &self, + &mut self, session: &Session, name: &str, ) -> Result<(), IggyError> { self.ensure_authenticated(session)?; let user_id = session.get_user_id(); + let user = self.get_user_mut(&user_id.try_into()?)?; let name = 
text::to_lowercase_non_whitespace(name); + let token; + + { + let pat = user + .personal_access_tokens + .iter() + .find(|(_, pat)| pat.name == name); + if pat.is_none() { + error!("Personal access token: {name} for user with ID: {user_id} does not exist.",); + return Err(IggyError::ResourceNotFound(name)); + } + + token = pat.unwrap().1.token.clone(); + } + info!("Deleting personal access token: {name} for user with ID: {user_id}..."); - self.storage - .personal_access_token - .delete_for_user(user_id, &name) - .await?; + user.personal_access_tokens.remove(&token); info!("Deleted personal access token: {name} for user with ID: {user_id}."); Ok(()) } @@ -93,29 +102,34 @@ impl System { &self, token: &str, session: Option<&Session>, - ) -> Result { + ) -> Result<&User, IggyError> { let token_hash = PersonalAccessToken::hash_token(token); - let personal_access_token = self - .storage - .personal_access_token - .load_by_token(&token_hash) - .await?; - if personal_access_token.is_expired(IggyTimestamp::now().to_micros()) { + let mut personal_access_token = None; + for user in self.users.values() { + if let Some(pat) = user.personal_access_tokens.get(&token_hash) { + personal_access_token = Some(pat); + break; + } + } + + if personal_access_token.is_none() { + error!("Personal access token: {} does not exist.", token); + return Err(IggyError::ResourceNotFound(token.to_owned())); + } + + let personal_access_token = personal_access_token.unwrap(); + if personal_access_token.is_expired(IggyTimestamp::now()) { error!( "Personal access token: {} for user with ID: {} has expired.", personal_access_token.name, personal_access_token.user_id ); return Err(IggyError::PersonalAccessTokenExpired( - personal_access_token.name, + personal_access_token.name.clone(), personal_access_token.user_id, )); } - let user = self - .storage - .user - .load_by_id(personal_access_token.user_id) - .await?; + let user = self.get_user(&personal_access_token.user_id.try_into()?)?; 
self.login_user_with_credentials(&user.username, None, session) .await } diff --git a/server/src/streaming/systems/storage.rs b/server/src/streaming/systems/storage.rs index 1d1f330f6..8e3291977 100644 --- a/server/src/streaming/systems/storage.rs +++ b/server/src/streaming/systems/storage.rs @@ -1,91 +1,53 @@ -use crate::streaming::storage::{Storage, SystemInfoStorage}; +use crate::streaming::persistence::persister::Persister; +use crate::streaming::storage::SystemInfoStorage; use crate::streaming::systems::info::SystemInfo; +use crate::streaming::utils::file; use anyhow::Context; use async_trait::async_trait; +use bytes::{BufMut, BytesMut}; use iggy::error::IggyError; -use sled::Db; use std::sync::Arc; +use tokio::io::AsyncReadExt; use tracing::info; -const KEY: &str = "system"; - #[derive(Debug)] pub struct FileSystemInfoStorage { - db: Arc, + persister: Arc, + path: String, } impl FileSystemInfoStorage { - pub fn new(db: Arc) -> Self { - Self { db } + pub fn new(path: String, persister: Arc) -> Self { + Self { path, persister } } } - unsafe impl Send for FileSystemInfoStorage {} unsafe impl Sync for FileSystemInfoStorage {} -impl SystemInfoStorage for FileSystemInfoStorage {} - #[async_trait] -impl Storage for FileSystemInfoStorage { - async fn load(&self, system_info: &mut SystemInfo) -> Result<(), IggyError> { - let data = match self - .db - .get(KEY) - .with_context(|| "Failed to load system info") - { - Ok(data) => { - if let Some(data) = data { - let data = rmp_serde::from_slice::(&data) - .with_context(|| "Failed to deserialize system info"); - if let Err(err) = data { - return Err(IggyError::CannotDeserializeResource(err)); - } else { - data.unwrap() - } - } else { - return Err(IggyError::ResourceNotFound(KEY.to_string())); - } - } - Err(err) => { - return Err(IggyError::CannotLoadResource(err)); - } - }; +impl SystemInfoStorage for FileSystemInfoStorage { + async fn load(&self) -> Result { + let file = file::open(&self.path).await; + if file.is_err() { 
+ return Err(IggyError::ResourceNotFound(self.path.to_owned())); + } - system_info.version = data.version; - system_info.migrations = data.migrations; - Ok(()) + let mut file = file.unwrap(); + let file_size = file.metadata().await?.len() as usize; + let mut buffer = BytesMut::with_capacity(file_size); + buffer.put_bytes(0, file_size); + file.read_exact(&mut buffer).await?; + bincode::deserialize(&buffer) + .with_context(|| "Failed to deserialize system info") + .map_err(IggyError::CannotDeserializeResource) } async fn save(&self, system_info: &SystemInfo) -> Result<(), IggyError> { - match rmp_serde::to_vec(&system_info).with_context(|| "Failed to serialize system info") { - Ok(data) => { - if let Err(err) = self - .db - .insert(KEY, data) - .with_context(|| "Failed to save system info") - { - return Err(IggyError::CannotSaveResource(err)); - } - } - Err(err) => { - return Err(IggyError::CannotSerializeResource(err)); - } - } - + let data = bincode::serialize(&system_info) + .with_context(|| "Failed to serialize system info") + .map_err(IggyError::CannotSerializeResource)?; + self.persister.overwrite(&self.path, &data).await?; info!("Saved system info, {}", system_info); Ok(()) } - - async fn delete(&self, _: &SystemInfo) -> Result<(), IggyError> { - if let Err(err) = self - .db - .remove(KEY) - .with_context(|| "Failed to delete system info") - { - return Err(IggyError::CannotDeleteResource(err)); - } - - info!("Deleted system info"); - Ok(()) - } } diff --git a/server/src/streaming/systems/streams.rs b/server/src/streaming/systems/streams.rs index e1bc407bb..862d30d6c 100644 --- a/server/src/streaming/systems/streams.rs +++ b/server/src/streaming/systems/streams.rs @@ -1,3 +1,4 @@ +use crate::state::system::StreamState; use crate::streaming::session::Session; use crate::streaming::streams::stream::Stream; use crate::streaming::systems::system::System; @@ -7,14 +8,19 @@ use iggy::identifier::{IdKind, Identifier}; use iggy::locking::IggySharedMutFn; use 
iggy::utils::text; use std::cell::RefCell; +use std::collections::{HashMap, HashSet}; use std::sync::atomic::{AtomicU32, Ordering}; +use tokio::fs; use tokio::fs::read_dir; -use tracing::{error, info}; +use tracing::{error, info, warn}; static CURRENT_STREAM_ID: AtomicU32 = AtomicU32::new(1); impl System { - pub(crate) async fn load_streams(&mut self) -> Result<(), IggyError> { + pub(crate) async fn load_streams( + &mut self, + streams: Vec, + ) -> Result<(), IggyError> { info!("Loading streams from disk..."); let mut unloaded_streams = Vec::new(); let dir_entries = read_dir(&self.config.get_streams_path()).await; @@ -28,19 +34,61 @@ impl System { let name = dir_entry.file_name().into_string().unwrap(); let stream_id = name.parse::(); if stream_id.is_err() { - error!("Invalid stream ID file with name: '{}'.", name); + error!("Invalid stream ID file with name: '{name}'."); continue; } let stream_id = stream_id.unwrap(); - let stream = Stream::empty(stream_id, self.config.clone(), self.storage.clone()); + let stream_state = streams.iter().find(|s| s.id == stream_id); + if stream_state.is_none() { + error!("Stream with ID: '{stream_id}' was not found in state, but exists on disk and will be removed."); + if let Err(error) = fs::remove_dir_all(&dir_entry.path()).await { + error!("Cannot remove stream directory: {error}"); + } else { + warn!("Stream with ID: '{stream_id}' was removed."); + } + continue; + } + + let stream_state = stream_state.unwrap(); + let mut stream = Stream::empty( + stream_id, + &stream_state.name, + self.config.clone(), + self.storage.clone(), + ); + stream.created_at = stream_state.created_at; unloaded_streams.push(stream); } + let state_stream_ids = streams + .iter() + .map(|stream| stream.id) + .collect::>(); + let unloaded_stream_ids = unloaded_streams + .iter() + .map(|stream| stream.stream_id) + .collect::>(); + let missing_ids = state_stream_ids + .difference(&unloaded_stream_ids) + .copied() + .collect::>(); + if missing_ids.is_empty() { + 
info!("All streams found on disk were found in state."); + } else { + error!("Streams with IDs: '{missing_ids:?}' were not found on disk."); + return Err(IggyError::MissingStreams); + } + + let mut streams_states = streams + .into_iter() + .map(|s| (s.id, s)) + .collect::>(); let loaded_streams = RefCell::new(Vec::new()); let load_stream_tasks = unloaded_streams.into_iter().map(|mut stream| { + let state = streams_states.remove(&stream.stream_id).unwrap(); let load_stream_task = async { - stream.load().await?; + stream.load(state).await?; loaded_streams.borrow_mut().push(stream); Result::<(), IggyError>::Ok(()) }; @@ -292,8 +340,10 @@ mod tests { use super::*; use crate::configs::server::PersonalAccessTokenConfig; use crate::configs::system::SystemConfig; + use crate::state::{State, StateEntry}; use crate::streaming::storage::tests::get_test_system_storage; use crate::streaming::users::user::User; + use async_trait::async_trait; use iggy::users::defaults::{DEFAULT_ROOT_PASSWORD, DEFAULT_ROOT_USERNAME}; use std::{ net::{Ipv4Addr, SocketAddr}, @@ -306,15 +356,22 @@ mod tests { let stream_name = "test"; let config = Arc::new(SystemConfig::default()); let storage = get_test_system_storage(); - let mut system = - System::create(config, storage, None, PersonalAccessTokenConfig::default()); + let mut system = System::create( + config, + storage, + Arc::new(TestState::default()), + PersonalAccessTokenConfig::default(), + ); let root = User::root(DEFAULT_ROOT_USERNAME, DEFAULT_ROOT_PASSWORD); + let permissions = root.permissions.clone(); let session = Session::new( 1, root.id, SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 1234), ); - system.permissioner.init_permissions_for_user(root); + system + .permissioner + .init_permissions_for_user(root.id, permissions); system .create_stream(&session, Some(stream_id), stream_name) .await @@ -332,4 +389,22 @@ mod tests { assert_eq!(stream.stream_id, stream_id); assert_eq!(stream.name, stream_name); } + + #[derive(Debug, Default)] + 
struct TestState {} + + #[async_trait] + impl State for TestState { + async fn init(&self) -> Result, IggyError> { + Ok(Vec::new()) + } + + async fn load_entries(&self) -> Result, IggyError> { + Ok(Vec::new()) + } + + async fn apply(&self, _: u32, _: u32, _: &[u8], _: Option<&[u8]>) -> Result<(), IggyError> { + Ok(()) + } + } } diff --git a/server/src/streaming/systems/system.rs b/server/src/streaming/systems/system.rs index cdaeef846..fbafce079 100644 --- a/server/src/streaming/systems/system.rs +++ b/server/src/streaming/systems/system.rs @@ -10,7 +10,6 @@ use crate::streaming::streams::stream::Stream; use crate::streaming::users::permissioner::Permissioner; use iggy::error::IggyError; use iggy::utils::crypto::{Aes256GcmEncryptor, Encryptor}; -use sled::Db; use std::collections::HashMap; use std::path::Path; use std::sync::Arc; @@ -18,8 +17,15 @@ use tokio::fs::{create_dir, remove_dir_all}; use tokio::time::Instant; use tracing::{info, trace}; +use crate::compat; +use crate::state::file::FileState; +use crate::state::system::SystemState; +use crate::state::State; +use crate::streaming::users::user::User; +use crate::versioning::SemanticVersion; use iggy::locking::IggySharedMut; use iggy::locking::IggySharedMutFn; +use iggy::models::user_info::UserId; use keepcalm::{SharedMut, SharedReadLock, SharedWriteLock}; #[derive(Debug)] @@ -57,11 +63,12 @@ pub struct System { pub(crate) storage: Arc, pub(crate) streams: HashMap, pub(crate) streams_ids: HashMap, + pub(crate) users: HashMap, pub(crate) config: Arc, pub(crate) client_manager: IggySharedMut, pub(crate) encryptor: Option>, pub(crate) metrics: Metrics, - pub(crate) db: Option>, + pub(crate) state: Arc, pub personal_access_token: PersonalAccessTokenConfig, } @@ -70,29 +77,21 @@ pub struct System { const CACHE_OVER_EVICTION_FACTOR: u64 = 5; impl System { - pub fn new( - config: Arc, - db: Option>, - pat_config: PersonalAccessTokenConfig, - ) -> System { - let db = match db { - Some(db) => db, - None => { - let db = 
sled::open(config.get_database_path()); - if db.is_err() { - panic!("Cannot open database at: {}", config.get_database_path()); - } - Arc::new(db.unwrap()) - } - }; + pub fn new(config: Arc, pat_config: PersonalAccessTokenConfig) -> System { + let version = SemanticVersion::current().expect("Invalid version"); let persister: Arc = match config.partition.enforce_fsync { true => Arc::new(FileWithSyncPersister {}), false => Arc::new(FilePersister {}), }; + let state = Arc::new(FileState::new( + &config.get_state_log_path(), + &version, + persister.clone(), + )); Self::create( - config, - SystemStorage::new(db.clone(), persister), - Some(db), + config.clone(), + SystemStorage::new(config, persister), + state, pat_config, ) } @@ -100,7 +99,7 @@ impl System { pub fn create( config: Arc, storage: SystemStorage, - db: Option>, + state: Arc, pat_config: PersonalAccessTokenConfig, ) -> System { info!( @@ -121,18 +120,23 @@ impl System { client_manager: IggySharedMut::new(ClientManager::default()), permissioner: Permissioner::default(), metrics: Metrics::init(), - db, + users: HashMap::new(), + state, personal_access_token: pat_config, } } pub async fn init(&mut self) -> Result<(), IggyError> { let system_path = self.config.get_system_path(); - if !Path::new(&system_path).exists() && create_dir(&system_path).await.is_err() { return Err(IggyError::CannotCreateBaseDirectory(system_path)); } + let state_path = self.config.get_state_path(); + if !Path::new(&state_path).exists() && create_dir(&state_path).await.is_err() { + return Err(IggyError::CannotCreateStateDirectory(state_path)); + } + let streams_path = self.config.get_streams_path(); if !Path::new(&streams_path).exists() && create_dir(&streams_path).await.is_err() { return Err(IggyError::CannotCreateStreamsDirectory(streams_path)); @@ -151,10 +155,24 @@ impl System { "Initializing system, data will be stored at: {}", self.config.get_system_path() ); + + if self.config.database.is_some() { + 
compat::storage_conversion::init( + self.config.clone(), + self.state.clone(), + self.storage.clone(), + ) + .await?; + } + + let state_entries = self.state.init().await?; + let system_state = SystemState::init(state_entries).await?; let now = Instant::now(); self.load_version().await?; - self.load_users().await?; - self.load_streams().await?; + self.load_users(system_state.users.into_values().collect()) + .await?; + self.load_streams(system_state.streams.into_values().collect()) + .await?; info!("Initialized system in {} ms.", now.elapsed().as_millis()); Ok(()) } diff --git a/server/src/streaming/systems/topics.rs b/server/src/streaming/systems/topics.rs index 94f285394..5db5f2398 100644 --- a/server/src/streaming/systems/topics.rs +++ b/server/src/streaming/systems/topics.rs @@ -5,7 +5,8 @@ use iggy::compression::compression_algorithm::CompressionAlgorithm; use iggy::error::IggyError; use iggy::identifier::Identifier; use iggy::locking::IggySharedMutFn; -use iggy::utils::byte_size::IggyByteSize; +use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; impl System { pub fn find_topic( @@ -42,9 +43,9 @@ impl System { topic_id: Option, name: &str, partitions_count: u32, - message_expiry: Option, + message_expiry: IggyExpiry, compression_algorithm: CompressionAlgorithm, - max_topic_size: Option, + max_topic_size: MaxTopicSize, replication_factor: Option, ) -> Result<(), IggyError> { self.ensure_authenticated(session)?; @@ -78,9 +79,9 @@ impl System { stream_id: &Identifier, topic_id: &Identifier, name: &str, - message_expiry: Option, + message_expiry: IggyExpiry, compression_algorithm: CompressionAlgorithm, - max_topic_size: Option, + max_topic_size: MaxTopicSize, replication_factor: Option, ) -> Result<(), IggyError> { self.ensure_authenticated(session)?; diff --git a/server/src/streaming/systems/users.rs b/server/src/streaming/systems/users.rs index b8419d08a..36522aeb4 100644 --- a/server/src/streaming/systems/users.rs +++ 
b/server/src/streaming/systems/users.rs @@ -1,12 +1,17 @@ +use crate::state::system::UserState; +use crate::streaming::personal_access_tokens::personal_access_token::PersonalAccessToken; use crate::streaming::session::Session; use crate::streaming::systems::system::System; use crate::streaming::users::user::User; use crate::streaming::utils::crypto; +use iggy::bytes_serializable::BytesSerializable; +use iggy::command::CREATE_USER_CODE; use iggy::error::IggyError; use iggy::identifier::{IdKind, Identifier}; use iggy::locking::IggySharedMutFn; use iggy::models::permissions::Permissions; use iggy::models::user_status::UserStatus; +use iggy::users::create_user::CreateUser; use iggy::users::defaults::*; use iggy::utils::text; use std::env; @@ -17,21 +22,57 @@ static USER_ID: AtomicU32 = AtomicU32::new(1); const MAX_USERS: usize = u32::MAX as usize; impl System { - pub(crate) async fn load_users(&mut self) -> Result<(), IggyError> { + pub(crate) async fn load_users(&mut self, users: Vec) -> Result<(), IggyError> { info!("Loading users..."); - let mut users = self.storage.user.load_all().await?; if users.is_empty() { info!("No users found, creating the root user..."); let root = Self::create_root_user(); - self.storage.user.save(&root).await?; + let command = CreateUser { + username: root.username.clone(), + password: root.password.clone(), + status: root.status, + permissions: root.permissions.clone(), + }; + self.state + .apply(CREATE_USER_CODE, 0, &command.as_bytes(), None) + .await?; + + self.users.insert(root.id, root); info!("Created the root user."); - users = self.storage.user.load_all().await?; } - let users_count = users.len(); - let current_user_id = users.iter().map(|user| user.id).max().unwrap_or(1); + for user_state in users.into_iter() { + let mut user = User::with_password( + user_state.id, + &user_state.username, + user_state.password_hash, + user_state.status, + user_state.permissions, + ); + + user.personal_access_tokens = user_state + 
.personal_access_tokens + .into_values() + .map(|token| { + ( + token.token_hash.clone(), + PersonalAccessToken::raw( + user_state.id, + &token.name, + &token.token_hash, + token.expiry_at, + ), + ) + }) + .collect(); + self.users.insert(user_state.id, user); + } + + let users_count = self.users.len(); + let current_user_id = self.users.keys().max().unwrap_or(&1); USER_ID.store(current_user_id + 1, Ordering::SeqCst); - self.permissioner.init(users); + self.permissioner + .init(&self.users.values().collect::>()); self.metrics.increment_users(users_count as u32); info!("Initialized {} user(s).", users_count); Ok(()) @@ -70,13 +111,9 @@ impl System { User::root(&username, &password) } - pub async fn find_user( - &self, - session: &Session, - user_id: &Identifier, - ) -> Result { + pub fn find_user(&self, session: &Session, user_id: &Identifier) -> Result<&User, IggyError> { self.ensure_authenticated(session)?; - let user = self.get_user(user_id).await?; + let user = self.get_user(user_id)?; let session_user_id = session.get_user_id(); if user.id != session_user_id { self.permissioner.get_user(session_user_id)?; @@ -85,27 +122,44 @@ impl System { Ok(user) } - pub async fn get_user(&self, user_id: &Identifier) -> Result { - Ok(match user_id.kind { - IdKind::Numeric => { - self.storage - .user - .load_by_id(user_id.get_u32_value()?) - .await? + pub fn get_user(&self, user_id: &Identifier) -> Result<&User, IggyError> { + match user_id.kind { + IdKind::Numeric => self + .users + .get(&user_id.get_u32_value()?) 
+ .ok_or(IggyError::ResourceNotFound(user_id.to_string())), + IdKind::String => { + let username = user_id.get_cow_str_value()?; + self.users + .iter() + .find(|(_, user)| user.username == username) + .map(|(_, user)| user) + .ok_or(IggyError::ResourceNotFound(user_id.to_string())) } + } + } + + pub fn get_user_mut(&mut self, user_id: &Identifier) -> Result<&mut User, IggyError> { + match user_id.kind { + IdKind::Numeric => self + .users + .get_mut(&user_id.get_u32_value()?) + .ok_or(IggyError::ResourceNotFound(user_id.to_string())), IdKind::String => { - self.storage - .user - .load_by_username(&user_id.get_cow_str_value()?) - .await? + let username = user_id.get_cow_str_value()?; + self.users + .iter_mut() + .find(|(_, user)| user.username == username) + .map(|(_, user)| user) + .ok_or(IggyError::ResourceNotFound(user_id.to_string())) } - }) + } } - pub async fn get_users(&self, session: &Session) -> Result, IggyError> { + pub async fn get_users(&self, session: &Session) -> Result, IggyError> { self.ensure_authenticated(session)?; self.permissioner.get_users(session.get_user_id())?; - self.storage.user.load_all().await + Ok(self.users.values().collect()) } pub async fn create_user( @@ -119,21 +173,22 @@ impl System { self.ensure_authenticated(session)?; self.permissioner.create_user(session.get_user_id())?; let username = text::to_lowercase_non_whitespace(username); - if self.storage.user.load_by_username(&username).await.is_ok() { + if self.users.iter().any(|(_, user)| user.username == username) { error!("User: {username} already exists."); return Err(IggyError::UserAlreadyExists); } - if self.storage.user.load_all().await?.len() > MAX_USERS { + if self.users.len() >= MAX_USERS { error!("Available users limit reached."); return Err(IggyError::UsersLimitReached); } let user_id = USER_ID.fetch_add(1, Ordering::SeqCst); info!("Creating user: {username} with ID: {user_id}..."); - let user = User::new(user_id, &username, password, status, permissions); - 
self.storage.user.save(&user).await?; - self.permissioner.init_permissions_for_user(user); + let user = User::new(user_id, &username, password, status, permissions.clone()); + self.permissioner + .init_permissions_for_user(user_id, permissions); + self.users.insert(user.id, user); info!("Created user: {username} with ID: {user_id}."); self.metrics.increment_users(1); Ok(()) @@ -145,41 +200,58 @@ impl System { user_id: &Identifier, ) -> Result { self.ensure_authenticated(session)?; - self.permissioner.delete_user(session.get_user_id())?; - let user = self.get_user(user_id).await?; - if user.is_root() { - error!("Cannot delete the root user."); - return Err(IggyError::CannotDeleteUser(user.id)); + let existing_user_id; + let existing_username; + { + self.permissioner.delete_user(session.get_user_id())?; + let user = self.get_user(user_id)?; + if user.is_root() { + error!("Cannot delete the root user."); + return Err(IggyError::CannotDeleteUser(user.id)); + } + + existing_user_id = user.id; + existing_username = user.username.clone(); } - info!("Deleting user: {} with ID: {user_id}...", user.username); - self.storage.user.delete(&user).await?; - self.permissioner.delete_permissions_for_user(user.id); + info!("Deleting user: {existing_username} with ID: {user_id}..."); + let user = self + .users + .remove(&existing_user_id) + .ok_or(IggyError::ResourceNotFound(user_id.to_string()))?; + self.permissioner + .delete_permissions_for_user(existing_user_id); let mut client_manager = self.client_manager.write().await; - client_manager.delete_clients_for_user(user.id).await?; - info!("Deleted user: {} with ID: {user_id}.", user.username); + client_manager + .delete_clients_for_user(existing_user_id) + .await?; + info!("Deleted user: {existing_username} with ID: {user_id}."); self.metrics.decrement_users(1); Ok(user) } pub async fn update_user( - &self, + &mut self, session: &Session, user_id: &Identifier, username: Option, status: Option, - ) -> Result { + ) -> Result<&User, 
IggyError> { self.ensure_authenticated(session)?; self.permissioner.update_user(session.get_user_id())?; - let mut user = self.get_user(user_id).await?; - if let Some(username) = username { + + if let Some(username) = username.clone() { let username = text::to_lowercase_non_whitespace(&username); - let existing_user = self.storage.user.load_by_username(&username).await; + let user = self.get_user(user_id)?; + let existing_user = self.get_user(&username.clone().try_into()?); if existing_user.is_ok() && existing_user.unwrap().id != user.id { error!("User: {username} already exists."); return Err(IggyError::UserAlreadyExists); } - self.storage.user.delete(&user).await?; + } + + let user = self.get_user_mut(user_id)?; + if let Some(username) = username { user.username = username; } @@ -187,8 +259,6 @@ impl System { user.status = status; } - info!("Updating user: {} with ID: {}...", user.username, user.id); - self.storage.user.save(&user).await?; info!("Updated user: {} with ID: {}.", user.username, user.id); Ok(user) } @@ -200,43 +270,50 @@ impl System { permissions: Option, ) -> Result<(), IggyError> { self.ensure_authenticated(session)?; - self.permissioner - .update_permissions(session.get_user_id())?; - let mut user = self.get_user(user_id).await?; - if user.is_root() { - error!("Cannot change the root user permissions."); - return Err(IggyError::CannotChangePermissions(user.id)); + + { + self.permissioner + .update_permissions(session.get_user_id())?; + let user = self.get_user(user_id)?; + if user.is_root() { + error!("Cannot change the root user permissions."); + return Err(IggyError::CannotChangePermissions(user.id)); + } + + self.permissioner + .update_permissions_for_user(user.id, permissions.clone()); + } + + { + let user = self.get_user_mut(user_id)?; + user.permissions = permissions; + info!( + "Updated permissions for user: {} with ID: {user_id}.", + user.username + ); } - user.permissions = permissions; - let username = user.username.clone(); - info!( - 
"Updating permissions for user: {} with ID: {user_id}...", - username - ); - self.storage.user.save(&user).await?; - self.permissioner.update_permissions_for_user(user); - info!( - "Updated permissions for user: {} with ID: {user_id}.", - username - ); Ok(()) } pub async fn change_password( - &self, + &mut self, session: &Session, user_id: &Identifier, current_password: &str, new_password: &str, ) -> Result<(), IggyError> { self.ensure_authenticated(session)?; - let mut user = self.get_user(user_id).await?; - let session_user_id = session.get_user_id(); - if user.id != session_user_id { - self.permissioner.change_password(session_user_id)?; + + { + let user = self.get_user(user_id)?; + let session_user_id = session.get_user_id(); + if user.id != session_user_id { + self.permissioner.change_password(session_user_id)?; + } } + let user = self.get_user_mut(user_id)?; if !crypto::verify_password(current_password, &user.password) { error!( "Invalid current password for user: {} with ID: {user_id}.", @@ -245,12 +322,7 @@ impl System { return Err(IggyError::InvalidCredentials); } - info!( - "Changing password for user: {} with ID: {user_id}...", - user.username - ); user.password = crypto::hash_password(new_password); - self.storage.user.save(&user).await?; info!( "Changed password for user: {} with ID: {user_id}.", user.username @@ -263,7 +335,7 @@ impl System { username: &str, password: &str, session: Option<&Session>, - ) -> Result { + ) -> Result<&User, IggyError> { self.login_user_with_credentials(username, Some(password), session) .await } @@ -273,8 +345,8 @@ impl System { username: &str, password: Option<&str>, session: Option<&Session>, - ) -> Result { - let user = match self.storage.user.load_by_username(username).await { + ) -> Result<&User, IggyError> { + let user = match self.get_user(&username.try_into()?) 
{ Ok(user) => user, Err(_) => { error!("Cannot login user: {username} (not found)."); @@ -323,9 +395,7 @@ impl System { pub async fn logout_user(&self, session: &Session) -> Result<(), IggyError> { self.ensure_authenticated(session)?; - let user = self - .get_user(&Identifier::numeric(session.get_user_id())?) - .await?; + let user = self.get_user(&Identifier::numeric(session.get_user_id())?)?; info!( "Logging out user: {} with ID: {}...", user.username, user.id diff --git a/server/src/streaming/topics/consumer_groups.rs b/server/src/streaming/topics/consumer_groups.rs index 126eea587..ebc278073 100644 --- a/server/src/streaming/topics/consumer_groups.rs +++ b/server/src/streaming/topics/consumer_groups.rs @@ -2,6 +2,7 @@ use crate::streaming::topics::consumer_group::ConsumerGroup; use crate::streaming::topics::topic::Topic; use iggy::error::IggyError; use iggy::identifier::{IdKind, Identifier}; +use iggy::locking::IggySharedMutFn; use iggy::utils::text; use std::sync::atomic::Ordering; use tokio::sync::RwLock; @@ -105,11 +106,6 @@ impl Topic { self.consumer_groups.insert(id, RwLock::new(consumer_group)); self.consumer_groups_ids.insert(name, id); let consumer_group = self.get_consumer_group_by_id(id)?; - let consumer_group_guard = consumer_group.read().await; - self.storage - .topic - .save_consumer_group(self, &consumer_group_guard) - .await?; info!( "Created consumer group with ID: {} for topic with ID: {} and stream with ID: {}.", id, self.topic_id, self.stream_id @@ -137,17 +133,22 @@ impl Topic { let consumer_group = consumer_group.read().await; let group_id = consumer_group.group_id; self.consumer_groups_ids.remove(&consumer_group.name); - self.storage - .topic - .delete_consumer_group(self, &consumer_group) - .await?; - let current_group_id = self.current_consumer_group_id.load(Ordering::SeqCst); if current_group_id > group_id { self.current_consumer_group_id .store(group_id, Ordering::SeqCst); } + for (_, partition) in self.partitions.iter() { + let 
partition = partition.read().await; + if let Some((_, offset)) = partition.consumer_group_offsets.remove(&group_id) { + self.storage + .partition + .delete_consumer_offset(&offset.path) + .await?; + } + } + info!( "Deleted consumer group with ID: {} from topic with ID: {} and stream with ID: {}.", id, self.topic_id, self.stream_id @@ -194,6 +195,8 @@ mod tests { use crate::configs::system::SystemConfig; use crate::streaming::storage::tests::get_test_system_storage; use iggy::compression::compression_algorithm::CompressionAlgorithm; + use iggy::utils::expiry::IggyExpiry; + use iggy::utils::topic_size::MaxTopicSize; use std::sync::atomic::{AtomicU32, AtomicU64}; use std::sync::Arc; @@ -363,9 +366,9 @@ mod tests { size_of_parent_stream, messages_count_of_parent_stream, segments_count_of_parent_stream, - None, + IggyExpiry::NeverExpire, compression_algorithm, - None, + MaxTopicSize::ServerDefault, 1, ) .unwrap() diff --git a/server/src/streaming/topics/messages.rs b/server/src/streaming/topics/messages.rs index a2862dfac..736faad2b 100644 --- a/server/src/streaming/topics/messages.rs +++ b/server/src/streaming/topics/messages.rs @@ -10,10 +10,12 @@ use iggy::locking::IggySharedMutFn; use iggy::messages::poll_messages::{PollingKind, PollingStrategy}; use iggy::messages::send_messages::{Message, Partitioning, PartitioningKind}; use iggy::models::messages::PolledMessages; +use iggy::utils::expiry::IggyExpiry; +use iggy::utils::timestamp::IggyTimestamp; use std::collections::HashMap; use std::sync::atomic::Ordering; use std::sync::Arc; -use tracing::{info, trace, warn}; +use tracing::{debug, info, trace, warn}; impl Topic { pub fn get_messages_count(&self) -> u64 { @@ -45,7 +47,11 @@ impl Topic { let value = strategy.value; let messages = match strategy.kind { PollingKind::Offset => partition.get_messages_by_offset(value, count).await, - PollingKind::Timestamp => partition.get_messages_by_timestamp(value, count).await, + PollingKind::Timestamp => { + partition + 
.get_messages_by_timestamp(value.into(), count) + .await + } PollingKind::First => partition.get_first_messages(count).await, PollingKind::Last => partition.get_last_messages(count).await, PollingKind::Next => partition.get_next_messages(consumer, count).await, @@ -186,6 +192,14 @@ impl Topic { .get_newest_messages_by_size(size_to_fetch_from_disk) .await?; + if messages.is_empty() { + debug!( + "No messages found on disk for partition ID: {}, topic ID: {}, stream ID: {}, offset: 0 to {}", + partition.partition_id, partition.topic_id, partition.stream_id, end_offset + ); + continue; + } + let sum: u64 = messages.iter().map(|m| m.get_size_bytes() as u64).sum(); if !Self::cache_integrity_check(&messages) { warn!( @@ -230,21 +244,18 @@ impl Topic { pub async fn get_expired_segments_start_offsets_per_partition( &self, - now: u64, + now: IggyTimestamp, ) -> HashMap> { let mut expired_segments = HashMap::new(); - if self.message_expiry.is_none() { - return expired_segments; - } - - for (_, partition) in self.partitions.iter() { - let partition = partition.read().await; - let segments = partition.get_expired_segments_start_offsets(now).await; - if !segments.is_empty() { - expired_segments.insert(partition.partition_id, segments); + if let IggyExpiry::ExpireDuration(_) = self.message_expiry { + for (_, partition) in self.partitions.iter() { + let partition = partition.read().await; + let segments = partition.get_expired_segments_start_offsets(now).await; + if !segments.is_empty() { + expired_segments.insert(partition.partition_id, segments); + } } } - expired_segments } } @@ -256,6 +267,7 @@ mod tests { use crate::streaming::storage::tests::get_test_system_storage; use bytes::Bytes; use iggy::compression::compression_algorithm::CompressionAlgorithm; + use iggy::utils::topic_size::MaxTopicSize; use std::sync::atomic::AtomicU32; use std::sync::atomic::AtomicU64; use std::sync::Arc; @@ -378,9 +390,9 @@ mod tests { size_of_parent_stream, messages_count_of_parent_stream, 
segments_count_of_parent_stream, - None, + IggyExpiry::NeverExpire, compression_algorithm, - None, + MaxTopicSize::ServerDefault, 1, ) .unwrap() diff --git a/server/src/streaming/topics/partitions.rs b/server/src/streaming/topics/partitions.rs index c2336675b..26a1831df 100644 --- a/server/src/streaming/topics/partitions.rs +++ b/server/src/streaming/topics/partitions.rs @@ -3,6 +3,7 @@ use crate::streaming::topics::topic::Topic; use iggy::error::IggyError; use iggy::locking::IggySharedMut; use iggy::locking::IggySharedMutFn; +use iggy::utils::timestamp::IggyTimestamp; const MAX_PARTITIONS_COUNT: u32 = 100_000; @@ -40,6 +41,7 @@ impl Topic { self.size_of_parent_stream.clone(), self.size_bytes.clone(), self.segments_count_of_parent_stream.clone(), + IggyTimestamp::now(), ); self.partitions .insert(partition_id, IggySharedMut::new(partition)); diff --git a/server/src/streaming/topics/persistence.rs b/server/src/streaming/topics/persistence.rs index 6738c01c1..ecb63d7fa 100644 --- a/server/src/streaming/topics/persistence.rs +++ b/server/src/streaming/topics/persistence.rs @@ -1,27 +1,12 @@ -use crate::streaming::topics::consumer_group::ConsumerGroup; +use crate::state::system::TopicState; use crate::streaming::topics::topic::Topic; use iggy::error::IggyError; use iggy::locking::IggySharedMutFn; -use tokio::sync::RwLock; impl Topic { - pub async fn load(&mut self) -> Result<(), IggyError> { + pub async fn load(&mut self, state: TopicState) -> Result<(), IggyError> { let storage = self.storage.clone(); - storage.topic.load(self).await?; - let consumer_groups = storage.topic.load_consumer_groups(self).await?; - for consumer_group in consumer_groups { - self.consumer_groups_ids - .insert(consumer_group.name.clone(), consumer_group.group_id); - self.consumer_groups.insert( - consumer_group.group_id, - RwLock::new(ConsumerGroup::new( - self.topic_id, - consumer_group.group_id, - &consumer_group.name, - self.get_partitions_count(), - )), - ); - } + storage.topic.load(self, 
state).await?; Ok(()) } diff --git a/server/src/streaming/topics/storage.rs b/server/src/streaming/topics/storage.rs index 12d7be083..466eb2148 100644 --- a/server/src/streaming/topics/storage.rs +++ b/server/src/streaming/topics/storage.rs @@ -1,34 +1,25 @@ +use crate::state::system::TopicState; use crate::streaming::partitions::partition::Partition; -use crate::streaming::storage::{Storage, TopicStorage}; +use crate::streaming::storage::TopicStorage; use crate::streaming::topics::consumer_group::ConsumerGroup; use crate::streaming::topics::topic::Topic; use anyhow::Context; use async_trait::async_trait; use futures::future::join_all; -use iggy::compression::compression_algorithm::CompressionAlgorithm; use iggy::error::IggyError; use iggy::locking::IggySharedMut; use iggy::locking::IggySharedMutFn; -use iggy::utils::byte_size::IggyByteSize; use serde::{Deserialize, Serialize}; -use sled::Db; +use std::collections::HashSet; use std::path::Path; use std::sync::Arc; use tokio::fs; use tokio::fs::create_dir; -use tokio::sync::Mutex; -use tracing::{error, info}; +use tokio::sync::{Mutex, RwLock}; +use tracing::{error, info, warn}; #[derive(Debug)] -pub struct FileTopicStorage { - db: Arc, -} - -impl FileTopicStorage { - pub fn new(db: Arc) -> Self { - Self { db } - } -} +pub struct FileTopicStorage; unsafe impl Send for FileTopicStorage {} unsafe impl Sync for FileTopicStorage {} @@ -41,152 +32,33 @@ struct ConsumerGroupData { #[async_trait] impl TopicStorage for FileTopicStorage { - async fn save_consumer_group( - &self, - topic: &Topic, - consumer_group: &ConsumerGroup, - ) -> Result<(), IggyError> { - let key = get_consumer_group_key(topic.stream_id, topic.topic_id, consumer_group.group_id); - match rmp_serde::to_vec(&ConsumerGroupData { - id: consumer_group.group_id, - name: consumer_group.name.clone(), - }) - .with_context(|| format!("Failed to serialize consumer group with key: {}", key)) - { - Ok(data) => { - if let Err(err) = self - .db - .insert(&key, data) - 
.with_context(|| format!("Failed to insert consumer group with key: {}", key)) - { - return Err(IggyError::CannotSaveResource(err)); - } - } - Err(err) => { - return Err(IggyError::CannotSerializeResource(err)); - } + async fn load(&self, topic: &mut Topic, mut state: TopicState) -> Result<(), IggyError> { + info!("Loading topic {} from disk...", topic); + if !Path::new(&topic.path).exists() { + return Err(IggyError::TopicIdNotFound(topic.topic_id, topic.stream_id)); } - Ok(()) - } - - async fn load_consumer_groups(&self, topic: &Topic) -> Result, IggyError> { - info!("Loading consumer groups for topic {} from disk...", topic); + topic.created_at = state.created_at; + topic.message_expiry = state.message_expiry; + topic.compression_algorithm = state.compression_algorithm; + topic.max_topic_size = state.max_topic_size; + topic.replication_factor = state.replication_factor.unwrap_or(1); - let key_prefix = get_consumer_groups_key_prefix(topic.stream_id, topic.topic_id); - let mut consumer_groups = Vec::new(); - for data in self.db.scan_prefix(format!("{}:", key_prefix)) { - let consumer_group = match data.with_context(|| { - format!( - "Failed to load consumer group when searching for key: {}", - key_prefix - ) - }) { - Ok((_, value)) => match rmp_serde::from_slice::(&value) - .with_context(|| { - format!( - "Failed to deserialize consumer group with key: {}", - key_prefix - ) - }) { - Ok(user) => user, - Err(err) => { - return Err(IggyError::CannotDeserializeResource(err)); - } - }, - Err(err) => { - return Err(IggyError::CannotLoadResource(err)); - } - }; + for consumer_group in state.consumer_groups.into_values() { let consumer_group = ConsumerGroup::new( topic.topic_id, consumer_group.id, &consumer_group.name, topic.get_partitions_count(), ); - consumer_groups.push(consumer_group); - } - - Ok(consumer_groups) - } - - async fn delete_consumer_group( - &self, - topic: &Topic, - consumer_group: &ConsumerGroup, - ) -> Result<(), IggyError> { - let key = 
get_consumer_group_key(topic.stream_id, topic.topic_id, consumer_group.group_id); - match self - .db - .remove(&key) - .with_context(|| format!("Failed to delete consumer group with key: {}", key)) - { - Ok(_) => { - info!( - "Consumer group with ID: {} for topic with ID: {} and stream with ID: {} was deleted.", - consumer_group.group_id, topic.topic_id, topic.stream_id - ); - Ok(()) - } - Err(err) => { - return Err(IggyError::CannotDeleteResource(err)); - } - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -struct TopicData { - name: String, - created_at: u64, - message_expiry: Option, - compression_algorithm: CompressionAlgorithm, - max_topic_size: Option, - replication_factor: u8, -} - -#[async_trait] -impl Storage for FileTopicStorage { - async fn load(&self, topic: &mut Topic) -> Result<(), IggyError> { - info!("Loading topic {} from disk...", topic); - if !Path::new(&topic.path).exists() { - return Err(IggyError::TopicIdNotFound(topic.topic_id, topic.stream_id)); + topic + .consumer_groups + .insert(consumer_group.group_id, RwLock::new(consumer_group)); } - let key = get_topic_key(topic.stream_id, topic.topic_id); - let topic_data = match self - .db - .get(&key) - .with_context(|| format!("Failed to load topic with key: {}", key)) - { - Ok(data) => { - if let Some(topic_data) = data { - let topic_data = rmp_serde::from_slice::(&topic_data) - .with_context(|| format!("Failed to deserialize topic with key: {}", key)); - if let Err(err) = topic_data { - return Err(IggyError::CannotDeserializeResource(err)); - } else { - topic_data.unwrap() - } - } else { - return Err(IggyError::ResourceNotFound(key)); - } - } - Err(err) => { - return Err(IggyError::CannotLoadResource(err)); - } - }; - - topic.name = topic_data.name; - topic.created_at = topic_data.created_at; - topic.message_expiry = topic_data.message_expiry; - topic.compression_algorithm = topic_data.compression_algorithm; - topic.max_topic_size = topic_data.max_topic_size; - topic.replication_factor = 
topic_data.replication_factor; - let dir_entries = fs::read_dir(&topic.partitions_path).await .with_context(|| format!("Failed to read partition with ID: {} for stream with ID: {} for topic with ID: {} and path: {}", - topic.topic_id, topic.stream_id, topic.topic_id, &topic.partitions_path)); + topic.topic_id, topic.stream_id, topic.topic_id, &topic.partitions_path)); if let Err(err) = dir_entries { return Err(IggyError::CannotReadPartitions(err)); } @@ -207,6 +79,20 @@ impl Storage for FileTopicStorage { } let partition_id = partition_id.unwrap(); + let partition_state = state.partitions.get(&partition_id); + if partition_state.is_none() { + let stream_id = topic.stream_id; + let topic_id = topic.topic_id; + error!("Partition with ID: '{partition_id}' for stream with ID: '{stream_id}' and topic with ID: '{topic_id}' was not found in state, but exists on disk and will be removed."); + if let Err(error) = fs::remove_dir_all(&dir_entry.path()).await { + error!("Cannot remove partition directory: {error}"); + } else { + warn!("Partition with ID: '{partition_id}' for stream with ID: '{stream_id}' and topic with ID: '{topic_id}' was removed."); + } + continue; + } + + let partition_state = partition_state.unwrap(); let partition = Partition::create( topic.stream_id, topic.topic_id, @@ -220,18 +106,45 @@ impl Storage for FileTopicStorage { topic.size_of_parent_stream.clone(), topic.size_bytes.clone(), topic.segments_count_of_parent_stream.clone(), + partition_state.created_at, ); unloaded_partitions.push(partition); } + let state_partition_ids = state.partitions.keys().copied().collect::>(); + let unloaded_partition_ids = unloaded_partitions + .iter() + .map(|partition| partition.partition_id) + .collect::>(); + let missing_ids = state_partition_ids + .difference(&unloaded_partition_ids) + .copied() + .collect::>(); + if missing_ids.is_empty() { + info!( + "All partitions for topic with ID: '{}' for stream with ID: '{}' found on disk were found in state.", + 
topic.topic_id, topic.stream_id + ); + } else { + error!( + "Partitions with IDs: '{missing_ids:?}' for topic with ID: '{topic_id}' for stream with ID: '{stream_id}' were not found on disk.", + topic_id = topic.topic_id, stream_id = topic.stream_id + ); + return Err(IggyError::MissingPartitions( + topic.topic_id, + topic.stream_id, + )); + } + let stream_id = topic.stream_id; let topic_id = topic.topic_id; let loaded_partitions = Arc::new(Mutex::new(Vec::new())); let mut load_partitions = Vec::new(); for mut partition in unloaded_partitions { let loaded_partitions = loaded_partitions.clone(); + let partition_state = state.partitions.remove(&partition.partition_id).unwrap(); let load_partition = tokio::spawn(async move { - match partition.load().await { + match partition.load(partition_state).await { Ok(_) => { loaded_partitions.lock().await.push(partition); } @@ -252,7 +165,6 @@ impl Storage for FileTopicStorage { .insert(partition.partition_id, IggySharedMut::new(partition)); } - self.load_consumer_groups(topic).await?; topic.load_messages_from_disk_to_cache().await?; info!("Loaded topic {topic}"); @@ -278,31 +190,6 @@ impl Storage for FileTopicStorage { )); } - let key = get_topic_key(topic.stream_id, topic.topic_id); - match rmp_serde::to_vec(&TopicData { - name: topic.name.clone(), - created_at: topic.created_at, - message_expiry: topic.message_expiry, - compression_algorithm: topic.compression_algorithm, - max_topic_size: topic.max_topic_size, - replication_factor: topic.replication_factor, - }) - .with_context(|| format!("Failed to serialize topic with key: {key}")) - { - Ok(data) => { - if let Err(err) = self - .db - .insert(&key, data) - .with_context(|| format!("Failed to insert topic with key: {key}")) - { - return Err(IggyError::CannotSaveResource(err)); - } - } - Err(err) => { - return Err(IggyError::CannotSerializeResource(err)); - } - } - info!( "Saving {} partition(s) for topic {topic}...", topic.partitions.len() @@ -319,18 +206,6 @@ impl Storage for 
FileTopicStorage { async fn delete(&self, topic: &Topic) -> Result<(), IggyError> { info!("Deleting topic {topic}..."); - let key = get_topic_key(topic.stream_id, topic.topic_id); - if let Err(err) = self - .db - .remove(&key) - .with_context(|| format!("Failed to delete topic with key: {key}")) - { - return Err(IggyError::CannotDeleteResource(err)); - } - for consumer_group in topic.consumer_groups.values() { - let consumer_group = consumer_group.read().await; - self.delete_consumer_group(topic, &consumer_group).await?; - } if fs::remove_dir_all(&topic.path).await.is_err() { return Err(IggyError::CannotDeleteTopicDirectory( topic.topic_id, @@ -347,18 +222,3 @@ impl Storage for FileTopicStorage { Ok(()) } } - -fn get_topic_key(stream_id: u32, topic_id: u32) -> String { - format!("streams:{}:topics:{}", stream_id, topic_id) -} - -fn get_consumer_group_key(stream_id: u32, topic_id: u32, group_id: u32) -> String { - format!( - "{}:{group_id}", - get_consumer_groups_key_prefix(stream_id, topic_id) - ) -} - -fn get_consumer_groups_key_prefix(stream_id: u32, topic_id: u32) -> String { - format!("streams:{stream_id}:topics:{topic_id}:consumer_groups") -} diff --git a/server/src/streaming/topics/topic.rs b/server/src/streaming/topics/topic.rs index 4e42c51ff..3d17e2eec 100644 --- a/server/src/streaming/topics/topic.rs +++ b/server/src/streaming/topics/topic.rs @@ -7,7 +7,9 @@ use iggy::compression::compression_algorithm::CompressionAlgorithm; use iggy::error::IggyError; use iggy::locking::IggySharedMut; use iggy::utils::byte_size::IggyByteSize; +use iggy::utils::expiry::IggyExpiry; use iggy::utils::timestamp::IggyTimestamp; +use iggy::utils::topic_size::MaxTopicSize; use std::collections::HashMap; use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; use std::sync::Arc; @@ -32,17 +34,19 @@ pub struct Topic { pub(crate) consumer_groups_ids: HashMap, pub(crate) current_consumer_group_id: AtomicU32, pub(crate) current_partition_id: AtomicU32, - pub message_expiry: Option, 
+ pub message_expiry: IggyExpiry, pub compression_algorithm: CompressionAlgorithm, - pub max_topic_size: Option, + pub max_topic_size: MaxTopicSize, pub replication_factor: u8, - pub created_at: u64, + pub created_at: IggyTimestamp, } impl Topic { + #[allow(clippy::too_many_arguments)] pub fn empty( stream_id: u32, topic_id: u32, + name: &str, size_of_parent_stream: Arc, messages_count_of_parent_stream: Arc, segments_count_of_parent_stream: Arc, @@ -52,16 +56,16 @@ impl Topic { Topic::create( stream_id, topic_id, - "", + name, 0, config, storage, size_of_parent_stream, messages_count_of_parent_stream, segments_count_of_parent_stream, - None, + IggyExpiry::NeverExpire, Default::default(), - None, + MaxTopicSize::ServerDefault, 1, ) .unwrap() @@ -78,9 +82,9 @@ impl Topic { size_of_parent_stream: Arc, messages_count_of_parent_stream: Arc, segments_count_of_parent_stream: Arc, - message_expiry: Option, + message_expiry: IggyExpiry, compression_algorithm: CompressionAlgorithm, - max_topic_size: Option, + max_topic_size: MaxTopicSize, replication_factor: u8, ) -> Result { let path = config.get_topic_path(stream_id, topic_id); @@ -102,21 +106,21 @@ impl Topic { consumer_groups_ids: HashMap::new(), current_consumer_group_id: AtomicU32::new(1), current_partition_id: AtomicU32::new(1), - message_expiry: match message_expiry { - Some(expiry) => match expiry { - 0 => None, - _ => Some(expiry), - }, - None => match config.retention_policy.message_expiry.as_secs() { - 0 => None, - expiry => Some(expiry), - }, + message_expiry: match config.retention_policy.message_expiry { + IggyExpiry::NeverExpire => message_expiry, + value => value, }, compression_algorithm, - max_topic_size, + max_topic_size: match max_topic_size { + MaxTopicSize::ServerDefault => match config.retention_policy.max_topic_size { + MaxTopicSize::ServerDefault => MaxTopicSize::get_server_default(), + value => value, + }, + value => value, + }, replication_factor, config, - created_at: 
IggyTimestamp::now().to_micros(), + created_at: IggyTimestamp::now(), }; topic.add_partitions(partitions_count)?; @@ -145,17 +149,13 @@ impl Topic { impl fmt::Display for Topic { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result { - let max_topic_size = match self.max_topic_size { - Some(size) => size.as_human_string_with_zero_as_unlimited(), - None => "unlimited".to_owned(), - }; write!(f, "ID: {}, ", self.topic_id)?; write!(f, "stream ID: {}, ", self.stream_id)?; write!(f, "name: {}, ", self.name)?; write!(f, "path: {}, ", self.path)?; - write!(f, "partitions count: {:?}, ", self.partitions.len())?; - write!(f, "message expiry (s): {:?}, ", self.message_expiry)?; - write!(f, "max topic size (B): {:?}, ", max_topic_size)?; + write!(f, "partitions count: {}, ", self.partitions.len())?; + write!(f, "message expiry: {}, ", self.message_expiry)?; + write!(f, "max topic size: {}, ", self.max_topic_size)?; write!(f, "replication factor: {}, ", self.replication_factor) } } @@ -175,9 +175,9 @@ mod tests { let topic_id = 2; let name = "test"; let partitions_count = 3; - let message_expiry = 10; + let message_expiry = IggyExpiry::NeverExpire; let compression_algorithm = CompressionAlgorithm::None; - let max_topic_size = IggyByteSize::from_str("2 GB").unwrap(); + let max_topic_size = MaxTopicSize::Custom(IggyByteSize::from_str("2 GB").unwrap()); let replication_factor = 1; let config = Arc::new(SystemConfig::default()); let path = config.get_topic_path(stream_id, topic_id); @@ -195,9 +195,9 @@ mod tests { messages_count_of_parent_stream, size_of_parent_stream, segments_count_of_parent_stream, - Some(message_expiry), + message_expiry, compression_algorithm, - Some(max_topic_size), + max_topic_size, replication_factor, ) .unwrap(); @@ -207,7 +207,7 @@ mod tests { assert_eq!(topic.path, path); assert_eq!(topic.name, name); assert_eq!(topic.partitions.len(), partitions_count as usize); - assert_eq!(topic.message_expiry, Some(message_expiry)); + 
assert_eq!(topic.message_expiry, message_expiry); for (id, partition) in topic.partitions { let partition = partition.read().await; diff --git a/server/src/streaming/users/mod.rs b/server/src/streaming/users/mod.rs index bdf85c0e1..87b87323e 100644 --- a/server/src/streaming/users/mod.rs +++ b/server/src/streaming/users/mod.rs @@ -1,4 +1,3 @@ pub mod permissioner; pub mod permissioner_rules; -pub mod storage; pub mod user; diff --git a/server/src/streaming/users/permissioner.rs b/server/src/streaming/users/permissioner.rs index f6a0c8326..babfa801d 100644 --- a/server/src/streaming/users/permissioner.rs +++ b/server/src/streaming/users/permissioner.rs @@ -1,5 +1,5 @@ use crate::streaming::users::user::User; -use iggy::models::permissions::{GlobalPermissions, StreamPermissions}; +use iggy::models::permissions::{GlobalPermissions, Permissions, StreamPermissions}; use iggy::models::user_info::UserId; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; @@ -15,29 +15,29 @@ pub struct Permissioner { } impl Permissioner { - pub fn init(&mut self, users: Vec) { + pub fn init(&mut self, users: &[&User]) { for user in users { - self.init_permissions_for_user(user); + self.init_permissions_for_user(user.id, user.permissions.clone()); } } - pub fn init_permissions_for_user(&mut self, user: User) { - if user.permissions.is_none() { + pub fn init_permissions_for_user(&mut self, user_id: UserId, permissions: Option) { + if permissions.is_none() { return; } - let permissions = user.permissions.unwrap(); + let permissions = permissions.unwrap(); if permissions.global.poll_messages { self.users_that_can_poll_messages_from_all_streams - .insert(user.id); + .insert(user_id); } if permissions.global.send_messages { self.users_that_can_send_messages_to_all_streams - .insert(user.id); + .insert(user_id); } - self.users_permissions.insert(user.id, permissions.global); + self.users_permissions.insert(user_id, permissions.global); if permissions.streams.is_none() 
{ return; } @@ -46,22 +46,26 @@ impl Permissioner { for (stream_id, stream) in streams { if stream.poll_messages { self.users_that_can_poll_messages_from_specific_streams - .insert((user.id, stream_id)); + .insert((user_id, stream_id)); } if stream.send_messages { self.users_that_can_send_messages_to_specific_streams - .insert((user.id, stream_id)); + .insert((user_id, stream_id)); } self.users_streams_permissions - .insert((user.id, stream_id), stream); + .insert((user_id, stream_id), stream); } } - pub fn update_permissions_for_user(&mut self, user: User) { - self.delete_permissions_for_user(user.id); - self.init_permissions_for_user(user); + pub fn update_permissions_for_user( + &mut self, + user_id: UserId, + permissions: Option, + ) { + self.delete_permissions_for_user(user_id); + self.init_permissions_for_user(user_id, permissions); } pub fn delete_permissions_for_user(&mut self, user_id: UserId) { diff --git a/server/src/streaming/users/storage.rs b/server/src/streaming/users/storage.rs deleted file mode 100644 index fad24c91f..000000000 --- a/server/src/streaming/users/storage.rs +++ /dev/null @@ -1,193 +0,0 @@ -use crate::streaming::storage::{Storage, UserStorage}; -use crate::streaming::users::user::User; -use anyhow::Context; -use async_trait::async_trait; -use iggy::error::IggyError; -use iggy::models::user_info::UserId; -use sled::Db; -use std::sync::Arc; -use tracing::info; - -const KEY_PREFIX: &str = "users"; - -#[derive(Debug)] -pub struct FileUserStorage { - db: Arc, -} - -impl FileUserStorage { - pub fn new(db: Arc) -> Self { - Self { db } - } -} - -unsafe impl Send for FileUserStorage {} -unsafe impl Sync for FileUserStorage {} - -#[async_trait] -impl UserStorage for FileUserStorage { - async fn load_by_id(&self, id: UserId) -> Result { - let mut user = User::empty(id); - self.load(&mut user).await?; - Ok(user) - } - - async fn load_by_username(&self, username: &str) -> Result { - let user_id_key = get_id_key(username); - let user_id = 
self.db.get(&user_id_key).with_context(|| { - format!( - "Failed to load user with key: {}, username: {}", - user_id_key, username - ) - }); - match user_id { - Ok(user_id) => { - if let Some(user_id) = user_id { - let user_id = u32::from_le_bytes(user_id.as_ref().try_into()?); - let mut user = User::empty(user_id); - self.load(&mut user).await?; - Ok(user) - } else { - Err(IggyError::ResourceNotFound(user_id_key)) - } - } - Err(err) => Err(IggyError::CannotLoadResource(err)), - } - } - - async fn load_all(&self) -> Result, IggyError> { - let mut users = Vec::new(); - for data in self.db.scan_prefix(format!("{}:", KEY_PREFIX)) { - let user = match data.with_context(|| { - format!( - "Failed to load user, when searching for key: {}", - KEY_PREFIX - ) - }) { - Ok((_, value)) => match rmp_serde::from_slice::(&value).with_context(|| { - format!( - "Failed to deserialize user, when searching for key: {}", - KEY_PREFIX - ) - }) { - Ok(user) => user, - Err(err) => { - return Err(IggyError::CannotDeserializeResource(err)); - } - }, - Err(err) => { - return Err(IggyError::CannotLoadResource(err)); - } - }; - users.push(user); - } - - Ok(users) - } -} - -#[async_trait] -impl Storage for FileUserStorage { - async fn load(&self, user: &mut User) -> Result<(), IggyError> { - let key = get_key(user.id); - let user_data = match self.db.get(&key).with_context(|| { - format!( - "Failed to load user with key: {}, username: {}", - key, user.username - ) - }) { - Ok(data) => { - if let Some(user_data) = data { - user_data - } else { - return Err(IggyError::ResourceNotFound(key)); - } - } - Err(err) => { - return Err(IggyError::CannotLoadResource(err)); - } - }; - - let user_data = rmp_serde::from_slice::(&user_data) - .with_context(|| format!("Failed to deserialize user with key: {}", key)); - match user_data { - Ok(user_data) => { - user.status = user_data.status; - user.username = user_data.username; - user.password = user_data.password; - user.created_at = user_data.created_at; - 
user.permissions = user_data.permissions; - Ok(()) - } - Err(err) => { - return Err(IggyError::CannotDeserializeResource(err)); - } - } - } - - async fn save(&self, user: &User) -> Result<(), IggyError> { - let key = get_key(user.id); - match rmp_serde::to_vec(&user) - .with_context(|| format!("Failed to serialize user with key: {}", key)) - { - Ok(data) => { - if let Err(err) = self - .db - .insert(&key, data) - .with_context(|| format!("Failed to insert user with key: {}", key)) - { - return Err(IggyError::CannotSaveResource(err)); - } - if let Err(err) = self - .db - .insert(get_id_key(&user.username), &user.id.to_le_bytes()) - .with_context(|| { - format!( - "Failed to insert user with ID: {} key: {}", - &user.id, - get_id_key(&user.username) - ) - }) - { - return Err(IggyError::CannotSaveResource(err)); - } - } - Err(err) => { - return Err(IggyError::CannotSerializeResource(err)); - } - } - - info!("Saved user with ID: {}.", user.id); - Ok(()) - } - - async fn delete(&self, user: &User) -> Result<(), IggyError> { - info!("Deleting user with ID: {}...", user.id); - let key = get_key(user.id); - if let Err(err) = self - .db - .remove(&key) - .with_context(|| format!("Failed to delete user with ID: {}, key: {}", user.id, key)) - { - return Err(IggyError::CannotDeleteResource(err)); - } else { - let key = get_id_key(&user.username); - if let Err(err) = self.db.remove(&key).with_context(|| { - format!("Failed to delete user with ID: {}, key : {}", user.id, key) - }) { - return Err(IggyError::CannotDeleteResource(err)); - } else { - info!("Deleted user with ID: {}.", user.id); - Ok(()) - } - } - } -} - -fn get_key(user_id: UserId) -> String { - format!("{}:{}", KEY_PREFIX, user_id) -} - -fn get_id_key(username: &str) -> String { - format!("{}_id:{}", KEY_PREFIX, username) -} diff --git a/server/src/streaming/users/user.rs b/server/src/streaming/users/user.rs index 41bfdabd6..7e89887cd 100644 --- a/server/src/streaming/users/user.rs +++ 
b/server/src/streaming/users/user.rs @@ -1,18 +1,20 @@ +use crate::streaming::personal_access_tokens::personal_access_token::PersonalAccessToken; use crate::streaming::utils::crypto; use iggy::models::user_status::UserStatus; use iggy::models::{permissions::Permissions, user_info::UserId}; use iggy::users::defaults::*; use iggy::utils::timestamp::IggyTimestamp; -use serde::{Deserialize, Serialize}; +use std::collections::HashMap; -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug)] pub struct User { pub id: UserId, pub status: UserStatus, pub username: String, pub password: String, - pub created_at: u64, + pub created_at: IggyTimestamp, pub permissions: Option, + pub personal_access_tokens: HashMap, } impl Default for User { @@ -22,8 +24,9 @@ impl Default for User { status: UserStatus::Active, username: "user".to_string(), password: "secret".to_string(), - created_at: IggyTimestamp::now().to_micros(), + created_at: IggyTimestamp::now(), permissions: None, + personal_access_tokens: HashMap::new(), } } } @@ -42,14 +45,31 @@ impl User { password: &str, status: UserStatus, permissions: Option, + ) -> Self { + Self::with_password( + id, + username, + crypto::hash_password(password), + status, + permissions, + ) + } + + pub fn with_password( + id: u32, + username: &str, + password: String, + status: UserStatus, + permissions: Option, ) -> Self { Self { id, - username: username.to_string(), - password: crypto::hash_password(password), - created_at: IggyTimestamp::now().to_micros(), + username: username.into(), + password, + created_at: IggyTimestamp::now(), status, permissions, + personal_access_tokens: HashMap::new(), } } @@ -87,7 +107,7 @@ mod tests { &user.password )); assert_eq!(user.status, UserStatus::Active); - assert!(user.created_at > 0); + assert!(user.created_at.to_micros() > 0); } #[test] diff --git a/server/src/versioning.rs b/server/src/versioning.rs new file mode 100644 index 000000000..edb07f5e8 --- /dev/null +++ b/server/src/versioning.rs @@ -0,0 
+1,99 @@ +use iggy::error::IggyError; +use std::fmt::Display; +use std::str::FromStr; + +const VERSION: &str = env!("CARGO_PKG_VERSION"); + +#[derive(Debug)] +pub struct SemanticVersion { + pub major: u32, + pub minor: u32, + pub patch: u32, +} + +impl FromStr for SemanticVersion { + type Err = IggyError; + fn from_str(s: &str) -> Result { + let mut version = s.split('.'); + let major = version.next().unwrap().parse::()?; + let minor = version.next().unwrap().parse::()?; + let patch = version.next().unwrap().parse::()?; + Ok(SemanticVersion { + major, + minor, + patch, + }) + } +} + +impl SemanticVersion { + pub fn current() -> Result { + if let Ok(version) = VERSION.parse::() { + return Ok(version); + } + + Err(IggyError::InvalidVersion(VERSION.into())) + } + + pub fn is_equal_to(&self, other: &SemanticVersion) -> bool { + self.major == other.major && self.minor == other.minor && self.patch == other.patch + } + + pub fn is_greater_than(&self, other: &SemanticVersion) -> bool { + if self.major > other.major { + return true; + } + if self.major < other.major { + return false; + } + + if self.minor > other.minor { + return true; + } + if self.minor < other.minor { + return false; + } + + if self.patch > other.patch { + return true; + } + if self.patch < other.patch { + return false; + } + + false + } + + pub fn get_numeric_version(&self) -> Result { + let major = self.major; + let minor = format!("{:03}", self.minor); + let patch = format!("{:03}", self.patch); + if let Ok(version) = format!("{major}{minor}{patch}").parse::() { + return Ok(version); + } + + Err(IggyError::InvalidVersion(self.to_string())) + } +} + +impl Display for SemanticVersion { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{major}.{minor}.{patch}", + major = self.major, + minor = self.minor, + patch = self.patch + ) + } +} + +mod tests { + #[test] + fn should_load_the_expected_version_from_package_definition() { + use super::VERSION; + + const 
CARGO_TOML_VERSION: &str = env!("CARGO_PKG_VERSION"); + assert_eq!(VERSION, CARGO_TOML_VERSION); + } +} diff --git a/tools/src/data-seeder/seeder.rs b/tools/src/data-seeder/seeder.rs index f6259d3de..6c9f6f7f2 100644 --- a/tools/src/data-seeder/seeder.rs +++ b/tools/src/data-seeder/seeder.rs @@ -4,6 +4,7 @@ use iggy::error::IggyError; use iggy::messages::send_messages::{Message, Partitioning}; use iggy::models::header::{HeaderKey, HeaderValue}; use iggy::utils::expiry::IggyExpiry; +use iggy::utils::topic_size::MaxTopicSize; use rand::Rng; use std::collections::HashMap; use std::str::FromStr; @@ -39,7 +40,7 @@ async fn create_topics(client: &IggyClient) -> Result<(), IggyError> { None, None, IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await?; @@ -52,7 +53,7 @@ async fn create_topics(client: &IggyClient) -> Result<(), IggyError> { None, None, IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await?; @@ -65,7 +66,7 @@ async fn create_topics(client: &IggyClient) -> Result<(), IggyError> { None, None, IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await?; @@ -78,7 +79,7 @@ async fn create_topics(client: &IggyClient) -> Result<(), IggyError> { None, None, IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await?; @@ -91,7 +92,7 @@ async fn create_topics(client: &IggyClient) -> Result<(), IggyError> { None, None, IggyExpiry::NeverExpire, - None, + MaxTopicSize::ServerDefault, ) .await?; }