diff --git a/Cargo.lock b/Cargo.lock
index c0cb3360d8..7924a60153 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -193,6 +193,7 @@ dependencies = [
  "config-utils",
  "control-macro",
  "eyre",
+ "fluence-keypair",
  "fluence-libp2p",
  "fs-utils",
  "futures",
diff --git a/aquamarine/Cargo.toml b/aquamarine/Cargo.toml
index 6e011344ad..f266b8c7bc 100644
--- a/aquamarine/Cargo.toml
+++ b/aquamarine/Cargo.toml
@@ -21,6 +21,7 @@ key-manager = { workspace = true}
 avm-server = { workspace = true }
 libp2p = { workspace = true }
+fluence-keypair = { workspace = true }
 futures = { workspace = true }
 log = { workspace = true }
diff --git a/aquamarine/src/plumber.rs b/aquamarine/src/plumber.rs
index 1c7d76523e..fc772b5110 100644
--- a/aquamarine/src/plumber.rs
+++ b/aquamarine/src/plumber.rs
@@ -276,6 +276,7 @@ mod tests {
     use std::{sync::Arc, task::Context};
 
     use avm_server::{AVMMemoryStats, AVMOutcome, CallResults, ParticleParameters};
+    use fluence_keypair::KeyPair;
     use fluence_libp2p::RandomPeerId;
     use futures::future::BoxFuture;
     use futures::task::noop_waker_ref;
@@ -377,7 +378,7 @@
         let builtin_mock = Arc::new(MockF);
         let key_manager = KeyManager::new(
             "keypair".into(),
-            RandomPeerId::random(),
+            KeyPair::generate_ed25519(),
             RandomPeerId::random(),
             RandomPeerId::random(),
         );
diff --git a/crates/key-manager/src/key_manager.rs b/crates/key-manager/src/key_manager.rs
index 639063936d..1208af0c7c 100644
--- a/crates/key-manager/src/key_manager.rs
+++ b/crates/key-manager/src/key_manager.rs
@@ -43,6 +43,7 @@ pub struct KeyManager {
     host_peer_id: PeerId,
     // temporary public, will refactor
     pub insecure_keypair: KeyPair,
+    pub root_keypair: KeyPair,
     management_peer_id: PeerId,
     builtins_management_peer_id: PeerId,
 }
@@ -50,7 +51,7 @@ pub struct KeyManager {
 impl KeyManager {
     pub fn new(
         keypairs_dir: PathBuf,
-        host_peer_id: PeerId,
+        root_keypair: KeyPair,
         management_peer_id: PeerId,
         builtins_management_peer_id: PeerId,
     ) -> Self {
@@ -59,12 +60,13 @@ impl KeyManager {
             worker_ids: Arc::new(Default::default()),
             worker_creators: Arc::new(Default::default()),
             keypairs_dir,
-            host_peer_id,
+            host_peer_id: root_keypair.get_peer_id(),
             insecure_keypair: KeyPair::from_secret_key(
                 INSECURE_KEYPAIR_SEED.collect(),
                 KeyFormat::Ed25519,
             )
             .expect("error creating insecure keypair"),
+            root_keypair,
             management_peer_id,
             builtins_management_peer_id,
         };
@@ -149,11 +151,15 @@
     }
 
     pub fn get_worker_keypair(&self, worker_id: PeerId) -> Result<KeyPair, KeyManagerError> {
-        self.worker_keypairs
-            .read()
-            .get(&worker_id)
-            .cloned()
-            .ok_or(KeyManagerError::KeypairNotFound(worker_id))
+        if self.is_host(worker_id) {
+            Ok(self.root_keypair.clone())
+        } else {
+            self.worker_keypairs
+                .read()
+                .get(&worker_id)
+                .cloned()
+                .ok_or(KeyManagerError::KeypairNotFound(worker_id))
+        }
     }
 
     pub fn get_worker_creator(&self, worker_id: PeerId) -> Result<PeerId, KeyManagerError> {
diff --git a/crates/particle-node-tests/tests/builtin.rs b/crates/particle-node-tests/tests/builtin.rs
index f82f4ffd6a..1a5703cf6f 100644
--- a/crates/particle-node-tests/tests/builtin.rs
+++ b/crates/particle-node-tests/tests/builtin.rs
@@ -1345,7 +1345,10 @@ fn sign_verify() {
                 (call relay ("registry" "get_record_bytes") ["key_id" "" [] [] 1 []] data)
                 (seq
                     (call relay ("sig" "sign") [data] sig_result)
-                    (call relay ("sig" "verify") [sig_result.$.signature.[0]! data] result)
+                    (xor
+                        (call relay ("sig" "verify") [sig_result.$.signature.[0]! data] result)
+                        (call %init_peer_id% ("op" "return") [sig_result.$.error])
+                    )
                 )
             )
             (call %init_peer_id% ("op" "return") [data sig_result result])
diff --git a/crates/particle-node-tests/tests/spells.rs b/crates/particle-node-tests/tests/spells.rs
index bf9a701b50..8d1dd99a9c 100644
--- a/crates/particle-node-tests/tests/spells.rs
+++ b/crates/particle-node-tests/tests/spells.rs
@@ -25,7 +25,6 @@ use serde_json::{json, Value as JValue};
 use connected_client::ConnectedClient;
 use created_swarm::{make_swarms, make_swarms_with_builtins};
 use fluence_spell_dtos::trigger_config::TriggerConfig;
-use log_utils::enable_logs;
 use service_modules::load_module;
 use spell_event_bus::api::{TriggerInfo, TriggerInfoAqua, MAX_PERIOD_SEC};
 use test_utils::create_service;
@@ -446,8 +445,6 @@ fn spell_install_fail_end_sec_past() {
 // In this case we don't schedule a spell and return error.
 #[test]
 fn spell_install_fail_end_sec_before_start() {
-    enable_logs();
-
     let swarms = make_swarms(1);
     let mut client = ConnectedClient::connect_to(swarms[0].multiaddr.clone())
         .wrap_err("connect client")
         .unwrap();
@@ -603,7 +600,6 @@ fn spell_remove() {
 
 #[test]
 fn spell_remove_by_alias() {
-    enable_logs();
     let swarms = make_swarms(1);
 
     let mut client = ConnectedClient::connect_to(swarms[0].multiaddr.clone())
@@ -695,9 +691,12 @@ fn spell_remove_spell_as_service() {
 #[test]
 fn spell_remove_service_as_spell() {
     let swarms = make_swarms(1);
-    let mut client = ConnectedClient::connect_to(swarms[0].multiaddr.clone())
-        .wrap_err("connect client")
-        .unwrap();
+    let mut client = ConnectedClient::connect_with_keypair(
+        swarms[0].multiaddr.clone(),
+        Some(swarms[0].management_keypair.clone()),
+    )
+    .wrap_err("connect client")
+    .unwrap();
 
     let service = create_service(
         &mut client,
@@ -714,10 +713,7 @@
     client.send_particle(
         r#"
         (xor
-            (seq
-                (call relay ("worker" "create") [] worker)
-                (call worker ("spell" "remove") [service_id])
-            )
+            (call relay ("spell" "remove") [service_id])
             (call client ("return" "") [%last_error%.$.message])
         )
        "#,
@@ -1393,9 +1389,12 @@ fn spell_create_worker_twice() {
 fn spell_install_root_scope() {
     let swarms = make_swarms(1);
 
-    let mut client = ConnectedClient::connect_to(swarms[0].multiaddr.clone())
-        .wrap_err("connect client")
-        .unwrap();
+    let mut client = ConnectedClient::connect_with_keypair(
+        swarms[0].multiaddr.clone(),
+        Some(swarms[0].management_keypair.clone()),
+    )
+    .wrap_err("connect client")
+    .unwrap();
 
     let script = r#"(call %init_peer_id% ("getDataSrv" "spell_id") [] spell_id)"#;
@@ -1415,9 +1414,9 @@
         (seq
             (seq
                 (call relay ("spell" "install") [script data config] spell_id)
-                (call relay ("worker" "get_peer_id") [] worker_peer_id)
+                (call relay ("srv" "info") [spell_id] info)
             )
-            (call client ("return" "") [spell_id worker_peer_id])
+            (call client ("return" "") [spell_id info.$.worker_id])
         )"#,
         data.clone(),
     );
@@ -1426,7 +1425,7 @@
     let spell_id = response[0].as_str().unwrap().to_string();
     assert_ne!(spell_id.len(), 0);
     let worker_id = response[1].as_str().unwrap().to_string();
-    assert_ne!(worker_id, client.node.to_base58());
+    assert_eq!(worker_id, client.node.to_base58());
 }
 
 #[test]
diff --git a/crates/peer-metrics/src/services_metrics/external.rs b/crates/peer-metrics/src/services_metrics/external.rs
index 2f46b7089c..ec5616c2f3 100644
--- a/crates/peer-metrics/src/services_metrics/external.rs
+++ b/crates/peer-metrics/src/services_metrics/external.rs
@@ -24,7 +24,7 @@ impl Encode for ServiceType {
         let label = match self {
             ServiceType::Builtin => "builtin",
             ServiceType::Service(Some(x)) => x,
-            ServiceType::Service(_) => "service",
+            ServiceType::Service(_) => "non-aliased-services",
         };
         writer.write_all(label.as_bytes())?;
diff --git a/particle-builtins/src/builtins.rs b/particle-builtins/src/builtins.rs
index 96f20cade7..940c116e31 100644
--- a/particle-builtins/src/builtins.rs
+++ b/particle-builtins/src/builtins.rs
@@ -23,7 +23,7 @@ use std::str::FromStr;
 use std::time::{Duration, Instant};
 
 use derivative::Derivative;
-use fluence_keypair::{KeyPair, Signature};
+use fluence_keypair::Signature;
 use humantime_serde::re::humantime::format_duration as pretty;
 use libp2p::{core::Multiaddr, kad::kbucket::Key, kad::K_VALUE, PeerId};
 use multihash::{Code, MultihashDigest, MultihashGeneric};
@@ -72,8 +72,6 @@ pub struct Builtins<C> {
     pub management_peer_id: PeerId,
     pub builtins_management_peer_id: PeerId,
     pub local_peer_id: PeerId,
-    #[derivative(Debug = "ignore")]
-    pub root_keypair: KeyPair,
 
     pub modules: ModuleRepository,
     pub services: ParticleAppServices,
@@ -98,7 +96,6 @@ where
         node_info: NodeInfo,
         config: ServicesConfig,
         services_metrics: ServicesMetrics,
-        root_keypair: KeyPair,
         key_manager: KeyManager,
     ) -> Self {
         let modules_dir = &config.modules_dir;
@@ -123,7 +120,6 @@
             management_peer_id,
             builtins_management_peer_id,
             local_peer_id,
-            root_keypair,
             modules,
             services,
             node_info,
@@ -188,7 +184,7 @@
             ("kad", "neigh_with_addrs") => wrap(self.neighborhood_with_addresses(args).await),
             ("kad", "merge") => wrap(self.kad_merge(args.function_args)),
 
-            ("srv", "list") => ok(self.list_services()),
+            ("srv", "list") => ok(self.list_services(particle)),
             ("srv", "create") => wrap(self.create_service(args, particle)),
             ("srv", "get_interface") => wrap(self.get_interface(args, particle)),
             ("srv", "resolve_alias") => wrap(self.resolve_alias(args, particle)),
@@ -828,8 +824,8 @@ where
         Ok(())
     }
 
-    fn list_services(&self) -> JValue {
-        JValue::Array(self.services.list_services())
+    fn list_services(&self, params: ParticleParams) -> JValue {
+        JValue::Array(self.services.list_services(params.host_id))
     }
 
     fn call_service(&self, function_args: Args, particle: ParticleParams) -> FunctionOutcome {
@@ -951,13 +947,8 @@ where
             return Err(JError::new(format!("expected tetraplet for a scalar argument, got tetraplet for an array: {tetraplet:?}, tetraplets")));
         }
 
-            if params.host_id == self.local_peer_id {
-                json!(self.root_keypair.sign(&data)?.to_vec())
-            } else {
-                // if this call is initiated by the worker on this worker as host_id and init_peer_id
-                let keypair = self.key_manager.get_worker_keypair(params.init_peer_id)?;
-                json!(keypair.sign(&data)?.to_vec())
-            }
+            let keypair = self.key_manager.get_worker_keypair(params.host_id)?;
+            json!(keypair.sign(&data)?.to_vec())
         };
 
         match result {
@@ -979,23 +970,13 @@ where
         let mut args = args.function_args.into_iter();
         let signature: Vec<u8> = Args::next("signature", &mut args)?;
         let data: Vec<u8> = Args::next("data", &mut args)?;
-        let signature =
-            Signature::from_bytes(self.root_keypair.public().get_key_format(), signature);
+        let pk = self
+            .key_manager
+            .get_worker_keypair(params.host_id)?
+            .public();
+        let signature = Signature::from_bytes(pk.get_key_format(), signature);
 
-        // TODO: move root_keypair to key_manager and unify verification
-        if params.host_id == self.local_peer_id {
-            Ok(JValue::Bool(
-                self.root_keypair.public().verify(&data, &signature).is_ok(),
-            ))
-        } else {
-            Ok(JValue::Bool(
-                self.key_manager
-                    .get_worker_keypair(params.host_id)?
-                    .public()
-                    .verify(&data, &signature)
-                    .is_ok(),
-            ))
-        }
+        Ok(JValue::Bool(pk.verify(&data, &signature).is_ok()))
     }
 
     fn get_peer_id(&self, params: ParticleParams) -> Result<JValue, JError> {
diff --git a/particle-node/src/node.rs b/particle-node/src/node.rs
index 32769b9087..640b074c4a 100644
--- a/particle-node/src/node.rs
+++ b/particle-node/src/node.rs
@@ -19,7 +19,6 @@ use std::{io, net::SocketAddr};
 
 use async_std::task;
 use eyre::WrapErr;
-use fluence_keypair::KeyPair;
 use futures::{
     channel::{mpsc::unbounded, oneshot},
     select,
@@ -111,7 +110,7 @@ impl<RT: AquaRuntime> Node<RT> {
         let key_manager = KeyManager::new(
             config.dir_config.keypairs_base_dir.clone(),
-            to_peer_id(&key_pair),
+            key_pair.clone().try_into()?,
             config.management_peer_id,
             builtins_peer_id,
         );
@@ -203,7 +202,6 @@
             services_config,
             script_storage_api,
             services_metrics,
-            config.node_config.root_key_pair.clone(),
             key_manager.clone(),
         ));
@@ -319,7 +317,6 @@
         services_config: ServicesConfig,
         script_storage_api: ScriptStorageApi,
         services_metrics: ServicesMetrics,
-        root_keypair: KeyPair,
         key_manager: KeyManager,
     ) -> Builtins<Connectivity> {
         let node_info = NodeInfo {
@@ -334,7 +331,6 @@
             node_info,
             services_config,
             services_metrics,
-            root_keypair,
             key_manager,
         )
     }
diff --git a/particle-services/src/app_service.rs b/particle-services/src/app_service.rs
index 25b7376a14..6bec59f063 100644
--- a/particle-services/src/app_service.rs
+++ b/particle-services/src/app_service.rs
@@ -35,7 +35,6 @@ pub fn create_app_service(
     blueprint_id: String,
     service_id: String,
     aliases: Vec<String>,
-    root_aliases: Vec<String>,
     owner_id: PeerId,
     worker_id: PeerId,
     metrics: Option<&ServicesMetrics>,
@@ -65,14 +64,8 @@
         .map_err(ServiceError::Engine)?;
 
     // Save created service to disk, so it is recreated on restart
-    let persisted = PersistedService::new(
-        service_id,
-        blueprint_id,
-        aliases,
-        root_aliases,
-        owner_id,
-        worker_id,
-    );
+    let persisted =
+        PersistedService::new(service_id, blueprint_id, aliases, owner_id, worker_id);
     persist_service(&config.services_dir, persisted)?;
 
     service
diff --git a/particle-services/src/app_services.rs b/particle-services/src/app_services.rs
index 5b31a7ab46..d8cd5e1ddf 100644
--- a/particle-services/src/app_services.rs
+++ b/particle-services/src/app_services.rs
@@ -56,26 +56,17 @@ pub struct Service {
     pub service: Mutex<AppService>,
     pub blueprint_id: String,
     pub owner_id: PeerId,
-    pub worker_aliases: Vec<String>,
+    pub aliases: Vec<String>,
     pub worker_id: PeerId,
-    pub root_aliases: Vec<String>,
 }
 
 impl Service {
     pub fn remove_alias(&mut self, alias: &str) {
-        self.worker_aliases.retain(|a| a.ne(alias));
+        self.aliases.retain(|a| a.ne(alias));
     }
 
     pub fn add_alias(&mut self, alias: String) {
-        self.worker_aliases.push(alias);
-    }
-
-    pub fn remove_root_alias(&mut self, alias: &str) {
-        self.root_aliases.retain(|a| a.ne(alias));
-    }
-
-    pub fn add_root_alias(&mut self, alias: String) {
-        self.root_aliases.push(alias);
+        self.aliases.push(alias);
     }
 }
@@ -183,17 +174,16 @@
     pub fn create_service(
         &self,
         blueprint_id: String,
-        init_peer_id: PeerId,
+        owner_id: PeerId,
         worker_id: PeerId,
     ) -> Result<String, ServiceError> {
         let service_id = uuid::Uuid::new_v4().to_string();
         self.create_service_inner(
             blueprint_id,
-            init_peer_id,
+            owner_id,
             worker_id,
             service_id.clone(),
             vec![],
-            vec![],
         )?;
         Ok(service_id)
     }
@@ -217,8 +207,7 @@
             "id": service_id,
             "blueprint_id": service.blueprint_id,
             "owner_id": service.owner_id.to_string(),
-            "worker_aliases": service.worker_aliases,
-            "aliases": service.root_aliases,
+            "aliases": service.aliases,
             "worker_id": service.worker_id.to_string()
         }))
     }
@@ -292,14 +281,8 @@
         }
         let service = self.services.write().remove(&service_id).unwrap();
 
-        if let Some(aliases) = self.aliases.write().get_mut(&worker_id) {
-            for alias in service.worker_aliases.iter() {
-                aliases.remove(alias);
-            }
-        }
-
-        if let Some(aliases) = self.aliases.write().get_mut(&self.config.local_peer_id) {
-            for alias in service.root_aliases.iter() {
+        if let Some(aliases) = self.aliases.write().get_mut(&service.worker_id) {
+            for alias in service.aliases.iter() {
                 aliases.remove(alias);
             }
         }
@@ -354,13 +337,7 @@
         //     ));
         // }
 
-        let service_type = ServiceType::Service(
-            service
-                .root_aliases
-                .first()
-                .or_else(|| service.worker_aliases.first())
-                .cloned(),
-        );
+        let service_type = ServiceType::Service(service.aliases.first().cloned());
 
         // TODO: move particle vault creation to aquamarine::particle_functions
         self.create_vault(&particle.id)?;
@@ -509,7 +486,7 @@
             .get_mut(&service_id)
             .ok_or_else(|| ServiceError::NoSuchService(service_id.clone()))?;
 
-        if !is_root_scope && service.worker_id != worker_id {
+        if service.worker_id != worker_id {
             // service is deployed on another worker_id
             return Err(ServiceError::AliasWrongWorkerId {
                 service_id,
@@ -517,28 +494,25 @@
                 worker_id,
             });
         }
 
-        if is_root_scope {
-            service.add_root_alias(alias.clone());
-        } else {
-            service.add_alias(alias.clone());
-        }
+        service.add_alias(alias.clone());
 
         let persisted_new = PersistedService::from_service(service_id.clone(), service);
 
         // Find a service with the same alias if any
-        let previous_owner_id: Option<_> =
-            try { self.aliases.read().get(&worker_id)?.get(&alias).cloned()? };
+        let previous_owner_id: Option<_> = try {
+            self.aliases
+                .read()
+                .get(&service.worker_id)?
+                .get(&alias)
+                .cloned()?
+        };
 
         // If there is such a service remove the alias from its list of aliases
         let previous_owner = try {
             let previous_owner_id = previous_owner_id?;
             let previous_owner_service = services.get_mut(&previous_owner_id)?;
-            if is_root_scope {
-                previous_owner_service.remove_root_alias(&alias);
-            } else {
-                previous_owner_service.remove_alias(&alias);
-            }
+            previous_owner_service.remove_alias(&alias);
             PersistedService::from_service(previous_owner_id, previous_owner_service)
         };
@@ -606,6 +580,31 @@
         Ok(service.owner_id)
     }
 
+    pub fn check_service_worker_id(
+        &self,
+        id_or_alias: String,
+        worker_id: PeerId,
+    ) -> Result<(), ServiceError> {
+        let services = self.services.read();
+        let (service, _) = get_service(
+            &services,
+            &self.aliases.read(),
+            worker_id,
+            self.config.local_peer_id,
+            id_or_alias.clone(),
+        )
+        .map_err(ServiceError::NoSuchService)?;
+
+        if service.worker_id != worker_id {
+            Err(ServiceError::CallServiceFailedWrongWorker {
+                service_id: id_or_alias,
+                worker_id,
+            })
+        } else {
+            Ok(())
+        }
+    }
+
     pub fn get_interface(
         &self,
         service_id: String,
@@ -633,17 +632,17 @@
     }
 
     // TODO: move JSON serialization to builtins
-    pub fn list_services(&self) -> Vec<JValue> {
+    pub fn list_services(&self, worker_id: PeerId) -> Vec<JValue> {
         let services = self.services.read();
         let services = services
             .iter()
+            .filter(|(_, srv)| srv.worker_id.eq(&worker_id))
             .map(|(id, srv)| {
                 json!({
                     "id": id,
                     "blueprint_id": srv.blueprint_id,
                     "owner_id": srv.owner_id.to_string(),
-                    "worker_aliases": srv.worker_aliases,
-                    "aliases": srv.root_aliases,
+                    "aliases": srv.aliases,
                     "worker_id": srv.worker_id.to_string()
                 })
             })
@@ -705,7 +704,6 @@
                 worker_id,
                 s.service_id.clone(),
                 s.aliases.clone(),
-                s.root_aliases.clone(),
             );
             let replaced = match result {
                 Ok(replaced) => replaced,
@@ -716,39 +714,20 @@
                 }
             };
 
-            // worker_aliases
-            if worker_id != self.config.local_peer_id {
-                let mut binding = self.aliases.write();
-                let aliases = binding.entry(worker_id).or_default();
-                for alias in s.aliases.into_iter() {
-                    let old = aliases.insert(alias.clone(), s.service_id.clone());
-                    if let Some(old) = old {
-                        log::warn!(
-                            "Alias `{}` is the same for {} and {}",
-                            alias,
-                            old,
-                            s.service_id
-                        );
-                    }
+            let mut binding = self.aliases.write();
+            let aliases = binding.entry(worker_id).or_default();
+            for alias in s.aliases.iter() {
+                let old = aliases.insert(alias.clone(), s.service_id.clone());
+                if let Some(old) = old {
+                    log::warn!(
+                        "Alias `{}` is the same for {} and {}",
+                        alias,
+                        old,
+                        s.service_id
+                    );
                 }
             }
 
-            // root_aliases
-            {
-                let mut binding = self.aliases.write();
-                let aliases = binding.entry(self.config.local_peer_id).or_default();
-                for alias in s.root_aliases.iter() {
-                    let old = aliases.insert(alias.clone(), s.service_id.clone());
-                    if let Some(old) = old {
-                        log::warn!(
-                            "Alias `{}` is the same for {} and {}",
-                            alias,
-                            old,
-                            s.service_id
-                        );
-                    }
-                }
-            }
             debug_assert!(
                 replaced.is_none(),
                 "shouldn't replace any existing services"
             );
@@ -758,7 +737,7 @@
                 "Persisted service {} created in {}, aliases: {:?}",
                 s.service_id,
                 pretty(start.elapsed()),
-                s.root_aliases
+                s.aliases
             );
         }
     }
@@ -769,8 +748,7 @@
         owner_id: PeerId,
         worker_id: PeerId,
         service_id: String,
-        worker_aliases: Vec<String>,
-        root_aliases: Vec<String>,
+        aliases: Vec<String>,
     ) -> Result<Option<Service>, ServiceError> {
         let creation_start_time = Instant::now();
         let service = create_app_service(
             &self.modules,
             blueprint_id.clone(),
             service_id.clone(),
-            worker_aliases.clone(),
-            root_aliases.clone(),
+            aliases.clone(),
             owner_id,
             worker_id,
             self.metrics.as_ref(),
         })?;
         let stats = service.module_memory_stats();
         let stats = ServiceMemoryStat::new(&stats);
-        let service_type = ServiceType::Service(root_aliases.first().cloned());
+        let service_type = ServiceType::Service(aliases.first().cloned());
         let service = Service {
             service: Mutex::new(service),
             blueprint_id,
             owner_id,
-            worker_aliases,
+            aliases,
             worker_id,
-            root_aliases,
         };
 
         let replaced = self.services.write().insert(service_id.clone(), service);
@@ -1042,7 +1018,7 @@ mod tests {
         let services = pas.services.read();
         let service_1 = services.get(&service_id1).unwrap();
         // the service's alias list must contain the alias
-        assert_eq!(service_1.root_aliases, vec![alias.to_string()]);
+        assert_eq!(service_1.aliases, vec![alias.to_string()]);
 
         let persisted_services: Vec<_> =
             load_persisted_services(&pas.config.services_dir, local_pid)
                 .collect::<Result<Vec<_>, _>>()
                 .unwrap();
         let persisted_service_1 = persisted_services
             .iter()
             .find(|s| s.service_id == service_id1)
             .unwrap();
 
         // the persisted service's alias list must contain the alias
-        assert_eq!(persisted_service_1.root_aliases, vec![alias.to_string()]);
+        assert_eq!(persisted_service_1.aliases, vec![alias.to_string()]);
     }
 
     #[test]
@@ -1093,9 +1069,9 @@ mod tests {
         let service_1 = services.get(&service_id1).unwrap();
         let service_2 = services.get(&service_id2).unwrap();
         // the first service's alias list must not contain the alias
-        assert_eq!(service_1.root_aliases, Vec::<String>::new());
+        assert_eq!(service_1.aliases, Vec::<String>::new());
         // the second service's alias list must contain the alias
-        assert_eq!(service_2.root_aliases, vec![alias.to_string()]);
+        assert_eq!(service_2.aliases, vec![alias.to_string()]);
 
         let persisted_services: Vec<_> =
             load_persisted_services(&pas.config.services_dir, local_pid)
                 .collect::<Result<Vec<_>, _>>()
                 .unwrap();
         let persisted_service_1 = persisted_services
             .iter()
             .find(|s| s.service_id == service_id1)
             .unwrap();
         let persisted_service_2 = persisted_services
             .iter()
             .find(|s| s.service_id == service_id2)
             .unwrap();
         // the first persisted service's alias list must not contain the alias
-        assert_eq!(persisted_service_1.root_aliases, Vec::<String>::new());
+        assert_eq!(persisted_service_1.aliases, Vec::<String>::new());
         // the second persisted service's alias list must contain the alias
-        assert_eq!(persisted_service_2.root_aliases, vec![alias.to_string()]);
+        assert_eq!(persisted_service_2.aliases, vec![alias.to_string()]);
     }
 
     #[test]
@@ -1137,8 +1113,8 @@ mod tests {
             .unwrap();
         let services = pas.services.read();
         let service_1 = services.get(&service_id1).unwrap();
-        assert_eq!(service_1.root_aliases.len(), 1);
-        assert_eq!(service_1.root_aliases[0], alias);
+        assert_eq!(service_1.aliases.len(), 1);
+        assert_eq!(service_1.aliases[0], alias);
 
         let persisted_services: Vec<_> =
             load_persisted_services(&pas.config.services_dir, local_pid)
                 .collect::<Result<Vec<_>, _>>()
                 .unwrap();
         let persisted_service_1 = persisted_services.first().unwrap();
-        assert_eq!(service_1.worker_aliases, persisted_service_1.aliases);
-        assert_eq!(service_1.root_aliases, persisted_service_1.root_aliases);
+        assert_eq!(service_1.aliases, persisted_service_1.aliases);
+        assert_eq!(service_1.aliases, persisted_service_1.aliases);
         assert_eq!(service_1.blueprint_id, persisted_service_1.blueprint_id);
         assert_eq!(service_id1, persisted_service_1.service_id);
         assert_eq!(service_1.owner_id, persisted_service_1.owner_id);
diff --git a/particle-services/src/persistence.rs b/particle-services/src/persistence.rs
index df34815943..b08ad9cc13 100644
--- a/particle-services/src/persistence.rs
+++ b/particle-services/src/persistence.rs
@@ -44,9 +44,6 @@ pub struct PersistedService {
     #[serde(default)]
     #[serde(with = "peerid_serializer_opt")]
     pub worker_id: Option<PeerId>,
-    #[serde(default)]
-    // Old versions of PersistedService may omit `root_aliases` field, tolerate that
-    pub root_aliases: Vec<String>,
 }
 
 impl PersistedService {
@@ -54,7 +51,6 @@
         service_id: String,
         blueprint_id: String,
         aliases: Vec<String>,
-        root_aliases: Vec<String>,
         owner_id: PeerId,
         worker_id: PeerId,
     ) -> Self {
@@ -64,7 +60,6 @@
             aliases,
             owner_id,
             worker_id: Some(worker_id),
-            root_aliases,
         }
     }
@@ -72,8 +67,7 @@
         PersistedService::new(
             service_id,
             service.blueprint_id.clone(),
-            service.worker_aliases.clone(),
-            service.root_aliases.clone(),
+            service.aliases.clone(),
             service.owner_id,
             service.worker_id,
         )
diff --git a/sorcerer/src/spells.rs b/sorcerer/src/spells.rs
index a1fe073644..866018fe86 100644
--- a/sorcerer/src/spells.rs
+++ b/sorcerer/src/spells.rs
@@ -40,17 +40,21 @@
     let init_data: JValue = Args::next("data", &mut args)?;
     let user_config: TriggerConfig = Args::next("config", &mut args)?;
     let config = api::from_user_config(user_config.clone())?;
+    let init_peer_id = params.init_peer_id;
 
-    let worker_id = if key_manager.is_host(params.host_id) {
-        // direct hosting
-        let deal_id = KeyManager::generate_deal_id(params.init_peer_id);
-        match key_manager.get_worker_id(deal_id.clone()) {
-            Ok(id) => id,
-            Err(_) => key_manager.create_worker(Some(deal_id), params.init_peer_id)?,
-        }
-    } else {
-        params.host_id
-    };
+    let is_management = key_manager.is_management(init_peer_id);
+    if key_manager.is_host(params.host_id) && !is_management {
+        return Err(JError::new("Failed to install spell in the root scope, only management peer id can install top-level spells"));
+    }
+
+    let worker_id = params.host_id;
+    let worker_creator = key_manager.get_worker_creator(params.host_id)?;
+
+    let is_worker = init_peer_id == worker_id;
+    let is_worker_creator = init_peer_id == worker_creator;
+    if !is_management && !is_worker && !is_worker_creator {
+        return Err(JError::new(format!("Failed to install spell on {worker_id}, spell can be installed by worker creator {worker_creator}, worker itself {worker_id} or peer manager; init_peer_id={init_peer_id}")));
+    }
 
     let spell_id = services.create_service(spell_storage.get_blueprint(), worker_id, worker_id)?;
     spell_storage.register_spell(spell_id.clone());
@@ -149,18 +153,18 @@
     let spell_id: String = Args::next("spell_id", &mut args)?;
 
     let worker_id = params.host_id;
+    services.check_service_worker_id(spell_id.clone(), worker_id)?;
+
     let init_peer_id = params.init_peer_id;
-    let spell_owner = services.get_service_owner(spell_id.clone(), params.host_id)?;
     let worker_creator = key_manager.get_worker_creator(worker_id)?;
 
-    let is_spell_owner = init_peer_id == spell_owner;
     let is_worker_creator = init_peer_id == worker_creator;
     let is_worker = init_peer_id == worker_id;
     let is_management = key_manager.is_management(init_peer_id);
 
-    if !is_spell_owner && !is_worker_creator && !is_worker && !is_management {
+    if !is_worker_creator && !is_worker && !is_management {
         return Err(JError::new(format!(
-            "Failed to remove spell {spell_id}, spell can be removed by spell owner {spell_owner}, worker creator {worker_creator}, worker itself {worker_id} or peer manager"
+            "Failed to remove spell {spell_id}, spell can be removed by worker creator {worker_creator}, worker itself {worker_id} or peer manager"
         )));
     }
@@ -195,18 +199,18 @@
     let spell_id: String = Args::next("spell_id", &mut args)?;
 
     let worker_id = params.host_id;
+    services.check_service_worker_id(spell_id.clone(), worker_id)?;
+
     let init_peer_id = params.init_peer_id;
-    let spell_owner = services.get_service_owner(spell_id.clone(), worker_id)?;
     let worker_creator = key_manager.get_worker_creator(worker_id)?;
 
-    let is_spell_owner = init_peer_id == spell_owner;
     let is_worker_creator = init_peer_id == worker_creator;
     let is_worker = init_peer_id == worker_id;
     let is_management = key_manager.is_management(init_peer_id);
 
-    if !is_spell_owner && !is_worker_creator && !is_worker && !is_management {
+    if !is_worker_creator && !is_worker && !is_management {
        return Err(JError::new(format!(
-            "Failed to update spell config {spell_id}, spell config can be updated by spell owner {spell_owner}, worker creator {worker_creator}, worker itself {worker_id} or peer manager; init_peer_id={init_peer_id}"
+            "Failed to update spell config {spell_id}, spell config can be updated by worker creator {worker_creator}, worker itself {worker_id} or peer manager; init_peer_id={init_peer_id}"
        )));
    }
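
Taken together, the key-manager and particle-builtins hunks above collapse the old two-branch signing logic into a single lookup: KeyManager now owns the node's root_keypair, and get_worker_keypair resolves the host peer id to that root keypair before consulting the per-worker map, which is what lets the sig.sign and sig.verify builtins key off params.host_id alone. Below is a minimal, self-contained sketch of that resolution order, not the patched code itself: PeerId, KeyPair, and the error type are simplified stand-ins, and the real worker_keypairs map sits behind an RwLock.

    use std::collections::HashMap;

    // Simplified stand-ins for the patch's PeerId / KeyPair / KeyManagerError.
    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct PeerId(u64);

    #[derive(Clone, Debug)]
    struct KeyPair(&'static str);

    #[derive(Debug)]
    struct KeypairNotFound(PeerId);

    struct KeyManager {
        host_peer_id: PeerId,
        root_keypair: KeyPair,
        worker_keypairs: HashMap<PeerId, KeyPair>,
    }

    impl KeyManager {
        fn is_host(&self, peer_id: PeerId) -> bool {
            self.host_peer_id == peer_id
        }

        // Mirrors the patched get_worker_keypair: the host id resolves to the
        // root keypair; any other id falls back to the per-worker map.
        fn get_worker_keypair(&self, worker_id: PeerId) -> Result<KeyPair, KeypairNotFound> {
            if self.is_host(worker_id) {
                Ok(self.root_keypair.clone())
            } else {
                self.worker_keypairs
                    .get(&worker_id)
                    .cloned()
                    .ok_or(KeypairNotFound(worker_id))
            }
        }
    }

    fn main() {
        let (host, worker) = (PeerId(0), PeerId(1));
        let km = KeyManager {
            host_peer_id: host,
            root_keypair: KeyPair("root"),
            worker_keypairs: HashMap::from([(worker, KeyPair("worker-1"))]),
        };
        // sign/verify now perform exactly this lookup with params.host_id:
        assert_eq!(km.get_worker_keypair(host).unwrap().0, "root");
        assert_eq!(km.get_worker_keypair(worker).unwrap().0, "worker-1");
        assert!(km.get_worker_keypair(PeerId(2)).is_err());
    }

Because the host id resolves to the root keypair inside the key manager, callers handle the root scope and worker scopes through one code path, which is also why the patch can delete root_keypair from Builtins and Node entirely.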