From a272e011a35447b4720e09dc1e45ba2a0d668948 Mon Sep 17 00:00:00 2001 From: Brooks Townsend Date: Mon, 8 Apr 2024 13:15:18 -0400 Subject: [PATCH] chore: rename actor to component a bunch Signed-off-by: Brooks Townsend --- README.md | 4 +- nottests/command_consumer_integration.rs | 24 +-- nottests/command_worker_integration.rs | 24 +-- nottests/e2e_multitenant.rs | 23 ++- nottests/e2e_upgrades.rs | 12 +- nottests/event_consumer_integration.rs | 8 +- oam/README.md | 2 +- oam/custom.yaml | 2 +- oam/echo.yaml | 2 +- oam/kvcounter.yaml | 2 +- oam/kvcounter_old.yaml | 2 +- oam/oam.schema.json | 20 +-- oam/simple1.yaml | 2 +- oam/simple2.yaml | 2 +- src/commands/mod.rs | 24 +-- src/events/data.rs | 2 +- src/events/types.rs | 10 +- src/model/mod.rs | 22 +-- src/scaler/daemonscaler/mod.rs | 98 +++++------ src/scaler/daemonscaler/provider.rs | 4 +- src/scaler/manager.rs | 42 ++--- src/scaler/spreadscaler/link.rs | 40 ++--- src/scaler/spreadscaler/mod.rs | 160 ++++++++--------- src/scaler/spreadscaler/provider.rs | 24 +-- src/server/handlers.rs | 8 +- src/storage/mod.rs | 2 +- src/storage/reaper.rs | 88 +++++----- src/storage/snapshot.rs | 6 +- src/storage/state.rs | 73 ++++---- src/workers/command.rs | 4 +- src/workers/event.rs | 179 +++++++++++--------- test/data/all_hosts.yaml | 2 +- test/data/complex.yaml | 2 +- test/data/duplicate_component.yaml | 10 +- test/data/duplicate_id1.yaml | 2 +- test/data/duplicate_id2.yaml | 4 +- test/data/duplicate_linkdef.yaml | 10 +- test/data/events.json | 4 +- test/data/host_stop.yaml | 2 +- test/data/incorrect_component.yaml | 10 +- test/data/long_image_refs.yaml | 2 +- test/data/lotta_actors.yaml | 2 +- test/data/missing_capability_component.yaml | 2 +- test/data/outdatedapp.yaml | 6 +- test/data/simple.yaml | 2 +- test/data/simple2.yaml | 2 +- test/data/upgradedapp.yaml | 6 +- test/data/upgradedapp2.yaml | 6 +- tests/storage_nats_kv.rs | 58 +++---- 49 files changed, 535 insertions(+), 512 deletions(-) diff --git a/README.md b/README.md index 436b10e2..cb01ccc6 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ metadata: spec: components: - name: echo - type: actor + type: component properties: image: wasmcloud.azurecr.io/echo:0.3.7 traits: @@ -101,7 +101,7 @@ them. 
Try changing the manifest you created above by updating the number of echo spec: components: - name: echo - type: actor + type: component properties: image: wasmcloud.azurecr.io/echo:0.3.5 traits: diff --git a/nottests/command_consumer_integration.rs b/nottests/command_consumer_integration.rs index 65af1023..f43d12cb 100644 --- a/nottests/command_consumer_integration.rs +++ b/nottests/command_consumer_integration.rs @@ -15,8 +15,8 @@ async fn test_consumer_stream() { // Publish a whole bunch of commands to the stream wrapper - .publish_command(ScaleActor { - actor_id: None, + .publish_command(ScaleComponent { + component_id: None, reference: "foobar".to_string(), host_id: "fakehost".to_string(), count: 3, @@ -34,7 +34,7 @@ async fn test_consumer_stream() { .await; wrapper .publish_command(PutLinkdef { - actor_id: "foobar".to_string(), + component_id: "foobar".to_string(), provider_id: "fakehost".to_string(), contract_id: "wasmcloud:httpserver".to_string(), model_name: "fake".into(), @@ -44,7 +44,7 @@ async fn test_consumer_stream() { // Make sure we get the right data back, in the right order let mut cmd = wrapper.wait_for_command().await; - if let Command::ScaleActor(actor) = cmd.as_ref() { + if let Command::ScaleComponent(actor) = cmd.as_ref() { assert_eq!( actor.reference, "foobar", @@ -98,8 +98,8 @@ async fn test_consumer_stream() { .expect("Should be able to publish data"); wrapper - .publish_command(ScaleActor { - actor_id: Some("foobar".to_string()), + .publish_command(ScaleComponent { + component_id: Some("foobar".to_string()), reference: "foobarref".to_string(), host_id: "fakehost".to_string(), count: 0, @@ -109,9 +109,9 @@ async fn test_consumer_stream() { .await; let mut cmd = wrapper.wait_for_command().await; - if let Command::ScaleActor(actor) = cmd.as_ref() { + if let Command::ScaleComponent(actor) = cmd.as_ref() { assert_eq!( - actor.actor_id, + actor.component_id, Some("foobar".to_string()), "Expected to get a valid stop actor command, got command: {:?}", cmd.as_ref() @@ -128,8 +128,8 @@ async fn test_nack_and_rereceive() { let mut wrapper = StreamWrapper::new("nack_and_rereceive".into(), None).await; // Send an event wrapper - .publish_command(ScaleActor { - actor_id: None, + .publish_command(ScaleComponent { + component_id: None, reference: "foobar".to_string(), host_id: "fakehost".to_string(), count: 3, @@ -141,7 +141,7 @@ async fn test_nack_and_rereceive() { // Get the event and then nack it let mut cmd = wrapper.wait_for_command().await; // Make sure we got the right event - if let Command::ScaleActor(actor) = cmd.as_ref() { + if let Command::ScaleComponent(actor) = cmd.as_ref() { assert_eq!( actor.reference, "foobar", @@ -154,7 +154,7 @@ async fn test_nack_and_rereceive() { cmd.nack().await; // Now do it again and make sure we get the same event - if let Command::ScaleActor(actor) = wrapper.wait_for_command().await.as_ref() { + if let Command::ScaleComponent(actor) = wrapper.wait_for_command().await.as_ref() { assert_eq!( actor.reference, "foobar", diff --git a/nottests/command_worker_integration.rs b/nottests/command_worker_integration.rs index fff02558..2a5daa3c 100644 --- a/nottests/command_worker_integration.rs +++ b/nottests/command_worker_integration.rs @@ -43,8 +43,8 @@ async fn test_commands() { // Start an actor wrapper - .publish_command(ScaleActor { - actor_id: Some(ECHO_ACTOR_ID.to_string()), + .publish_command(ScaleComponent { + component_id: Some(ECHO_ACTOR_ID.to_string()), reference: "wasmcloud.azurecr.io/echo:0.3.4".to_string(), host_id: 
host_id.clone(), count: 2, @@ -172,7 +172,7 @@ async fn test_commands() { // Put a linkdef wrapper .publish_command(PutLinkdef { - actor_id: ECHO_ACTOR_ID.to_owned(), + component_id: ECHO_ACTOR_ID.to_owned(), provider_id: HTTP_SERVER_PROVIDER_ID.to_owned(), link_name: wadm::DEFAULT_LINK_NAME.to_owned(), contract_id: "wasmcloud:httpserver".to_string(), @@ -195,7 +195,7 @@ async fn test_commands() { inventory .into_iter() .find(|ld| { - ld.actor_id == ECHO_ACTOR_ID + ld.component_id == ECHO_ACTOR_ID && ld.provider_id == HTTP_SERVER_PROVIDER_ID && ld.contract_id == "wasmcloud:httpserver" }) @@ -204,7 +204,7 @@ async fn test_commands() { // Delete the linkdef wrapper .publish_command(DeleteLinkdef { - actor_id: ECHO_ACTOR_ID.to_owned(), + component_id: ECHO_ACTOR_ID.to_owned(), provider_id: HTTP_SERVER_PROVIDER_ID.to_owned(), link_name: wadm::DEFAULT_LINK_NAME.to_owned(), contract_id: "wasmcloud:httpserver".to_string(), @@ -225,7 +225,7 @@ async fn test_commands() { // We could have more than one link due to local testing, so search for the proper link assert!( !inventory.into_iter().any(|ld| { - ld.actor_id == ECHO_ACTOR_ID + ld.component_id == ECHO_ACTOR_ID && ld.provider_id == HTTP_SERVER_PROVIDER_ID && ld.contract_id == "wasmcloud:httpserver" }), @@ -262,8 +262,8 @@ async fn test_commands() { // Stop the actor wrapper - .publish_command(ScaleActor { - actor_id: Some(ECHO_ACTOR_ID.to_owned()), + .publish_command(ScaleComponent { + component_id: Some(ECHO_ACTOR_ID.to_owned()), reference: "wasmcloud.azurecr.io/echo:0.3.4".to_string(), count: 0, host_id: host_id.clone(), @@ -335,8 +335,8 @@ async fn test_annotation_stop() { // Start an actor wrapper - .publish_command(ScaleActor { - actor_id: Some(ECHO_ACTOR_ID.to_string()), + .publish_command(ScaleComponent { + component_id: Some(ECHO_ACTOR_ID.to_string()), reference: "wasmcloud.azurecr.io/echo:0.3.4".to_string(), host_id: host_id.clone(), count: 2, @@ -387,8 +387,8 @@ async fn test_annotation_stop() { // Stop the managed actors wrapper - .publish_command(ScaleActor { - actor_id: Some(ECHO_ACTOR_ID.to_owned()), + .publish_command(ScaleComponent { + component_id: Some(ECHO_ACTOR_ID.to_owned()), reference: "wasmcloud.azurecr.io/echo:0.3.4".to_string(), count: 0, host_id: host_id.clone(), diff --git a/nottests/e2e_multitenant.rs b/nottests/e2e_multitenant.rs index 84fa1ea5..13f3b227 100644 --- a/nottests/e2e_multitenant.rs +++ b/nottests/e2e_multitenant.rs @@ -172,7 +172,7 @@ async fn test_basic_separation(client_info: &ClientInfo) -> anyhow::Result<()> { .map_err(|e| anyhow::anyhow!("{e:?}"))?; if !links.iter().any(|ld| { - ld.actor_id == ECHO_ACTOR_ID + ld.component_id == ECHO_ACTOR_ID && ld.provider_id == HTTP_SERVER_PROVIDER_ID && ld.contract_id == "wasmcloud:httpserver" }) { @@ -207,7 +207,7 @@ async fn test_basic_separation(client_info: &ClientInfo) -> anyhow::Result<()> { .map_err(|e| anyhow::anyhow!("{e:?}"))?; if !links.iter().any(|ld| { - ld.actor_id == MESSAGE_PUB_ACTOR_ID + ld.component_id == MESSAGE_PUB_ACTOR_ID && ld.provider_id == HTTP_SERVER_PROVIDER_ID && ld.contract_id == "wasmcloud:httpserver" }) { @@ -217,7 +217,7 @@ async fn test_basic_separation(client_info: &ClientInfo) -> anyhow::Result<()> { ) } if !links.iter().any(|ld| { - ld.actor_id == MESSAGE_PUB_ACTOR_ID + ld.component_id == MESSAGE_PUB_ACTOR_ID && ld.provider_id == NATS_PROVIDER_ID && ld.contract_id == "wasmcloud:messaging" }) { @@ -247,7 +247,7 @@ async fn test_basic_separation(client_info: &ClientInfo) -> anyhow::Result<()> { .map_err(|e| 
anyhow::anyhow!("{e:?}"))?; if links.iter().any(|ld| { - ld.actor_id == ECHO_ACTOR_ID + ld.component_id == ECHO_ACTOR_ID && ld.provider_id == HTTP_SERVER_PROVIDER_ID && ld.contract_id == "wasmcloud:httpserver" }) { @@ -279,7 +279,7 @@ async fn test_basic_separation(client_info: &ClientInfo) -> anyhow::Result<()> { .map_err(|e| anyhow::anyhow!("{e:?}"))?; if links.iter().any(|ld| { - ld.actor_id == MESSAGE_PUB_ACTOR_ID + ld.component_id == MESSAGE_PUB_ACTOR_ID && ld.provider_id == HTTP_SERVER_PROVIDER_ID && ld.contract_id == "wasmcloud:httpserver" }) { @@ -289,7 +289,7 @@ async fn test_basic_separation(client_info: &ClientInfo) -> anyhow::Result<()> { ) } if links.iter().any(|ld| { - ld.actor_id == MESSAGE_PUB_ACTOR_ID + ld.component_id == MESSAGE_PUB_ACTOR_ID && ld.provider_id == NATS_PROVIDER_ID && ld.contract_id == "wasmcloud:messaging" }) { @@ -302,9 +302,14 @@ async fn test_basic_separation(client_info: &ClientInfo) -> anyhow::Result<()> { check_status(&stream, LATTICE_EAST, "echo-simple", StatusType::Deployed) .await .unwrap(); - check_status(&stream, LATTICE_WEST, "messaging-simple", StatusType::Deployed) - .await - .unwrap(); + check_status( + &stream, + LATTICE_WEST, + "messaging-simple", + StatusType::Deployed, + ) + .await + .unwrap(); Ok(()) }) diff --git a/nottests/e2e_upgrades.rs b/nottests/e2e_upgrades.rs index 08afd4f9..02edbd2c 100644 --- a/nottests/e2e_upgrades.rs +++ b/nottests/e2e_upgrades.rs @@ -149,7 +149,7 @@ async fn test_upgrade(client_info: &ClientInfo) { println!("Links: {:?}", links); if !links.iter().any(|ld| { - ld.actor_id == ECHO_ACTOR_ID + ld.component_id == ECHO_ACTOR_ID && ld.provider_id == HTTP_SERVER_PROVIDER_ID && ld.contract_id == "wasmcloud:httpserver" && ld @@ -165,7 +165,7 @@ async fn test_upgrade(client_info: &ClientInfo) { } if !links.iter().any(|ld| { - ld.actor_id == KV_COUNTER_ACTOR_ID + ld.component_id == KV_COUNTER_ACTOR_ID && ld.provider_id == KV_REDIS_PROVIDER_ID && ld.contract_id == "wasmcloud:keyvalue" && ld @@ -266,7 +266,7 @@ async fn test_upgrade(client_info: &ClientInfo) { .map_err(|e| anyhow::anyhow!("{e:?}"))?; if !links.iter().any(|ld| { - ld.actor_id == ECHO_ACTOR_ID + ld.component_id == ECHO_ACTOR_ID && ld.provider_id == HTTP_SERVER_PROVIDER_ID && ld.contract_id == "wasmcloud:httpserver" && ld @@ -282,7 +282,7 @@ async fn test_upgrade(client_info: &ClientInfo) { } if links.iter().any(|ld| { - ld.actor_id == KV_COUNTER_ACTOR_ID + ld.component_id == KV_COUNTER_ACTOR_ID && ld.provider_id == KV_REDIS_PROVIDER_ID && ld.contract_id == "wasmcloud:keyvalue" }) { @@ -378,7 +378,7 @@ async fn test_upgrade(client_info: &ClientInfo) { .map_err(|e| anyhow::anyhow!("{e:?}"))?; if !links.iter().any(|ld| { - ld.actor_id == ECHO_ACTOR_ID + ld.component_id == ECHO_ACTOR_ID && ld.provider_id == HTTP_SERVER_PROVIDER_ID && ld.contract_id == "wasmcloud:httpserver" && ld @@ -394,7 +394,7 @@ async fn test_upgrade(client_info: &ClientInfo) { } if links.iter().any(|ld| { - ld.actor_id == KV_COUNTER_ACTOR_ID + ld.component_id == KV_COUNTER_ACTOR_ID && ld.provider_id == KV_REDIS_PROVIDER_ID && ld.contract_id == "wasmcloud:keyvalue" }) { diff --git a/nottests/event_consumer_integration.rs b/nottests/event_consumer_integration.rs index 11d4928e..e961e97c 100644 --- a/nottests/event_consumer_integration.rs +++ b/nottests/event_consumer_integration.rs @@ -146,9 +146,9 @@ async fn test_event_stream() -> Result<()> { let mut evt = wait_for_event(&mut stream, LINK_OPERATION_TIMEOUT_DURATION).await; if let Event::LinkdefSet(link) = evt.as_ref() { assert_eq!( - 
link.linkdef.actor_id, ECHO_ACTOR_ID, + link.linkdef.component_id, ECHO_ACTOR_ID, "Expected to get a linkdef event for the right actor and provider, got actor ID: {}", - link.linkdef.actor_id, + link.linkdef.component_id, ); assert_eq!( link.linkdef.provider_id, HTTP_SERVER_PROVIDER_ID, @@ -178,9 +178,9 @@ async fn test_event_stream() -> Result<()> { let mut evt = wait_for_event(&mut stream, LINK_OPERATION_TIMEOUT_DURATION).await; if let Event::LinkdefDeleted(link) = evt.as_ref() { assert_eq!( - link.linkdef.actor_id, ECHO_ACTOR_ID, + link.linkdef.component_id, ECHO_ACTOR_ID, "Expected to get a linkdef event for the right actor and provider, got actor ID: {}", - link.linkdef.actor_id, + link.linkdef.component_id, ); assert_eq!( link.linkdef.provider_id, HTTP_SERVER_PROVIDER_ID, diff --git a/oam/README.md b/oam/README.md index 86454a53..e7a082fc 100644 --- a/oam/README.md +++ b/oam/README.md @@ -31,7 +31,7 @@ metadata: spec: components: - name: userinfo - type: actor + type: component properties: image: wasmcloud.azurecr.io/fake:1 traits: diff --git a/oam/custom.yaml b/oam/custom.yaml index e32b4f2a..d721ce21 100644 --- a/oam/custom.yaml +++ b/oam/custom.yaml @@ -8,7 +8,7 @@ metadata: spec: components: - name: userinfo - type: actor + type: component properties: image: wasmcloud.azurecr.io/fake:1 traits: diff --git a/oam/echo.yaml b/oam/echo.yaml index 00f0abb2..49485a28 100644 --- a/oam/echo.yaml +++ b/oam/echo.yaml @@ -8,7 +8,7 @@ metadata: spec: components: - name: echo - type: actor + type: component properties: image: wasmcloud.azurecr.io/echo:0.3.7 traits: diff --git a/oam/kvcounter.yaml b/oam/kvcounter.yaml index cf010dca..071dc446 100644 --- a/oam/kvcounter.yaml +++ b/oam/kvcounter.yaml @@ -8,7 +8,7 @@ metadata: spec: components: - name: kvcounter - type: actor + type: component properties: image: file:///Users/brooks/github.com/wasmcloud/wadm/kvc/build/http_hello_world_s.wasm traits: diff --git a/oam/kvcounter_old.yaml b/oam/kvcounter_old.yaml index d50fc5b8..0b605019 100644 --- a/oam/kvcounter_old.yaml +++ b/oam/kvcounter_old.yaml @@ -9,7 +9,7 @@ metadata: spec: components: - name: kvcounter - type: actor + type: component properties: image: file://./build/http_hello_world_s.wasm traits: diff --git a/oam/oam.schema.json b/oam/oam.schema.json index e157aeb3..611091af 100644 --- a/oam/oam.schema.json +++ b/oam/oam.schema.json @@ -56,7 +56,7 @@ "type": "object", "anyOf": [ { - "$ref": "#/definitions/actorComponent" + "$ref": "#/definitions/wasmComponent" }, { "$ref": "#/definitions/providerComponent" @@ -107,22 +107,22 @@ "required": ["name", "type"], "additionalProperties": false }, - "actorComponent": { + "wasmComponent": { "type": "object", - "description": "This section defines the instances of actors to create with this application configuration.", + "description": "This section defines the instances of components to create with this application configuration.", "properties": { "name": { "type": "string", - "description": "The name of the actor to create an instance of." + "description": "The name of the component to create an instance of." 
}, "type": { - "description": "The type of instance : actor.", - "const": "actor" + "description": "The type of instance : component.", + "const": "component" }, "properties": { "type": "object", "description": "Overrides of parameters that are exposed by the application scope type defined in 'type'.", - "$ref": "#/definitions/actorProperties" + "$ref": "#/definitions/componentProperties" }, "traits": { "type": "array", @@ -163,19 +163,19 @@ "required": ["name", "type", "properties"], "additionalProperties": true }, - "actorProperties": { + "componentProperties": { "type": "object", "description": "Values supplied to parameters that are used to override the parameters exposed by other types.", "properties": { "image": { "type": "string", - "description": "The image reference to use for the actor.", + "description": "The image reference to use for the component.", "$comment": "Some systems have upper bounds for name length. Do we limit here?", "maxLength": 512 }, "id": { "type": "string", - "description": "The component identifier to use for the actor. Will be autogenerated if not supplied.", + "description": "The component identifier to use for the component. Will be autogenerated if not supplied.", "maxLength": 64 } }, diff --git a/oam/simple1.yaml b/oam/simple1.yaml index 916c5586..21056631 100644 --- a/oam/simple1.yaml +++ b/oam/simple1.yaml @@ -8,7 +8,7 @@ metadata: spec: components: - name: userinfo - type: actor + type: component properties: image: wasmcloud.azurecr.io/fake:1 traits: diff --git a/oam/simple2.yaml b/oam/simple2.yaml index 20499c82..88860e37 100644 --- a/oam/simple2.yaml +++ b/oam/simple2.yaml @@ -8,7 +8,7 @@ metadata: spec: components: - name: userinfo - type: actor + type: component properties: image: wasmcloud.azurecr.io/fake:1 traits: diff --git a/src/commands/mod.rs b/src/commands/mod.rs index 4dcaa574..2b5def0a 100644 --- a/src/commands/mod.rs +++ b/src/commands/mod.rs @@ -27,7 +27,7 @@ macro_rules! from_impl { /// All possible compensatory commands for a lattice #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] pub enum Command { - ScaleActor(ScaleActor), + ScaleComponent(ScaleComponent), StartProvider(StartProvider), StopProvider(StopProvider), PutLink(PutLink), @@ -78,15 +78,15 @@ impl Command { } } -/// Struct for the ScaleActor command +/// Struct for the ScaleComponent command #[derive(Clone, Debug, Serialize, Deserialize, Default, Eq)] -pub struct ScaleActor { - /// The ID of the actor to scale. This should be computed by wadm as a combination - /// of the manifest name and the actor name. - pub actor_id: String, - /// The host id on which to scale the actors +pub struct ScaleComponent { + /// The ID of the component to scale. This should be computed by wadm as a combination + /// of the manifest name and the component name. 
+ pub component_id: String, + /// The host id on which to scale the components pub host_id: String, - /// The number of actors to scale to + /// The number of components to scale to pub count: u32, /// The OCI or bindle reference to scale pub reference: String, @@ -96,11 +96,11 @@ pub struct ScaleActor { pub annotations: BTreeMap, } -from_impl!(ScaleActor); +from_impl!(ScaleComponent); -impl PartialEq for ScaleActor { +impl PartialEq for ScaleComponent { fn eq(&self, other: &Self) -> bool { - self.actor_id == other.actor_id + self.component_id == other.component_id && self.host_id == other.host_id && self.count == other.count && self.model_name == other.model_name @@ -116,7 +116,7 @@ pub struct StartProvider { /// The ID of the provider to scale. This should be computed by wadm as a combination /// of the manifest name and the provider name. pub provider_id: String, - /// The host id on which to start the actor(s) + /// The host id on which to start the provider pub host_id: String, /// The name of the model/manifest that generated this command pub model_name: String, diff --git a/src/events/data.rs index 374dde44..2b6d0b30 100644 --- a/src/events/data.rs +++ b/src/events/data.rs @@ -49,7 +49,7 @@ pub struct ProviderHealthCheckInfo { } #[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq, Eq)] -pub struct ActorClaims { +pub struct ComponentClaims { pub call_alias: Option, #[serde(default)] pub expires_human: String, diff --git a/src/events/types.rs index d2c38cc4..b1bcebc6 100644 --- a/src/events/types.rs +++ b/src/events/types.rs @@ -307,7 +307,7 @@ pub struct ActorsStarted { pub annotations: BTreeMap, // Commented out for now because the host broke it and we actually don't use this right now // pub api_version: usize, - pub claims: ActorClaims, + pub claims: ComponentClaims, pub image_ref: String, pub count: usize, // TODO: Parse as nkey? @@ -365,10 +365,10 @@ event_impl!( #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] pub struct ComponentScaled { pub annotations: BTreeMap, - pub claims: Option, + pub claims: Option, pub image_ref: String, pub max_instances: usize, - pub actor_id: String, + pub component_id: String, #[serde(default)] pub host_id: String, } @@ -383,10 +383,10 @@ event_impl!( #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] pub struct ComponentScaleFailed { pub annotations: BTreeMap, - pub claims: Option, + pub claims: Option, pub image_ref: String, pub max_instances: usize, - pub actor_id: String, + pub component_id: String, #[serde(default)] pub host_id: String, pub error: String, diff --git a/src/model/mod.rs index 4e6905a8..dec31a88 100644 --- a/src/model/mod.rs +++ b/src/model/mod.rs @@ -104,14 +104,14 @@ pub struct Component { #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] #[serde(tag = "type")] pub enum Properties { - #[serde(rename = "actor", alias = "component")] - Actor { properties: ActorProperties }, + #[serde(rename = "component", alias = "actor")] + Component { properties: ComponentProperties }, #[serde(rename = "capability")] Capability { properties: CapabilityProperties }, } #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] pub struct ComponentProperties { /// The image reference to use pub image: String, /// The component ID to use for this actor.
If not supplied, it will be generated @@ -218,7 +218,7 @@ pub struct Trait { impl Trait { /// Helper that creates a new linkdef type trait with the given properties - pub fn new_linkdef(props: LinkProperty) -> Trait { + pub fn new_link(props: LinkProperty) -> Trait { Trait { trait_type: LINK_TRAIT.to_owned(), properties: TraitProperty::Link(props), @@ -271,7 +271,7 @@ impl From for TraitProperty { } } -/// Properties for the config list associated with actors, providers, and link definitions +/// Properties for the config list associated with components, providers, and links /// /// ## Usage /// Defining a config block, like so: @@ -406,7 +406,7 @@ mod test { .spec .components .into_iter() - .find(|comp| matches!(comp.properties, Properties::Actor { .. })) + .find(|comp| matches!(comp.properties, Properties::Component { .. })) .expect("Should be able to find actor component"); let traits = actor_component.traits.expect("Should have Vec of traits"); assert!( @@ -492,7 +492,7 @@ mod test { .spec .components .iter() - .filter(|component| matches!(component.properties, Properties::Actor { .. })) + .filter(|component| matches!(component.properties, Properties::Component { .. })) .count(), 1, "Should have found 1 actor property" @@ -518,7 +518,7 @@ mod test { .components .clone() .into_iter() - .find(|component| matches!(component.properties, Properties::Actor { .. })) + .find(|component| matches!(component.properties, Properties::Component { .. })) .expect("Should find actor component") .traits .expect("Should have traits object"); @@ -590,13 +590,13 @@ mod test { target_config: vec![], name: Some("default".to_string()), }; - let trait_item = Trait::new_linkdef(linkdefprop); + let trait_item = Trait::new_link(linkdefprop); trait_vec.push(trait_item); let mut component_vec: Vec = Vec::new(); let component_item = Component { name: "userinfo".to_string(), - properties: Properties::Actor { - properties: ActorProperties { + properties: Properties::Component { + properties: ComponentProperties { image: "wasmcloud.azurecr.io/fake:1".to_string(), id: None, }, diff --git a/src/scaler/daemonscaler/mod.rs b/src/scaler/daemonscaler/mod.rs index a9533bdc..23b91d51 100644 --- a/src/scaler/daemonscaler/mod.rs +++ b/src/scaler/daemonscaler/mod.rs @@ -10,11 +10,11 @@ use crate::model::Spread; use crate::scaler::spreadscaler::{eligible_hosts, spreadscaler_annotations}; use crate::server::StatusInfo; use crate::{ - commands::{Command, ScaleActor}, + commands::{Command, ScaleComponent}, events::{Event, HostStarted, HostStopped}, model::{SpreadScalerProperty, TraitProperty}, scaler::Scaler, - storage::{Actor, Host, ReadStore}, + storage::{Component, Host, ReadStore}, }; pub mod provider; @@ -28,7 +28,7 @@ struct ActorSpreadConfig { /// OCI, Bindle, or File reference for an actor actor_reference: String, /// Unique component identifier for an actor - actor_id: String, + component_id: String, /// Lattice ID that this DaemonScaler monitors lattice_id: String, /// The name of the wadm model this DaemonScaler is under @@ -87,7 +87,7 @@ impl Scaler for ActorDaemonScaler { match event { // TODO: React to ComponentScaleFailed with an exponential backoff, can't just immediately retry since that // would cause a very tight loop of failures - Event::ComponentScaled(evt) if evt.actor_id == self.config.actor_id => { + Event::ComponentScaled(evt) if evt.component_id == self.config.component_id => { self.reconcile().await } Event::HostStopped(HostStopped { labels, .. 
}) @@ -114,15 +114,15 @@ impl Scaler for ActorDaemonScaler { async fn reconcile(&self) -> Result> { let hosts = self.store.list::(&self.config.lattice_id).await?; - let actor_id = &self.config.actor_id; - let actor = self + let component_id = &self.config.component_id; + let component = self .store - .get::(&self.config.lattice_id, actor_id) + .get::(&self.config.lattice_id, component_id) .await?; let mut spread_status = vec![]; - trace!(spread = ?self.config.spread_config.spread, ?actor_id, "Computing commands"); + trace!(spread = ?self.config.spread_config.spread, ?component_id, "Computing commands"); let commands = self .config .spread_config @@ -136,7 +136,7 @@ impl Scaler for ActorDaemonScaler { let actors_per_host = eligible_hosts .into_keys() .map(|id| { - let count = actor + let count = component .as_ref() .and_then(|actor| { actor.instances.get(&id.to_string()).map(|instances| { @@ -170,9 +170,9 @@ impl Scaler for ActorDaemonScaler { Ordering::Equal => None, // Scale actor can handle both up and down scaling Ordering::Less | Ordering::Greater => { - Some(Command::ScaleActor(ScaleActor { + Some(Command::ScaleComponent(ScaleComponent { reference: self.config.actor_reference.to_owned(), - actor_id: actor_id.to_owned(), + component_id: component_id.to_owned(), host_id: host_id.to_string(), count: self.config.spread_config.instances as u32, model_name: self.config.model_name.to_owned(), @@ -238,13 +238,13 @@ impl ActorDaemonScaler { pub fn new( store: S, actor_reference: String, - actor_id: String, + component_id: String, lattice_id: String, model_name: String, spread_config: SpreadScalerProperty, component_name: &str, ) -> Self { - let id = format!("{ACTOR_DAEMON_SCALER_TYPE}-{model_name}-{component_name}-{actor_id}"); + let id = format!("{ACTOR_DAEMON_SCALER_TYPE}-{model_name}-{component_name}-{component_id}"); // If no spreads are specified, an empty spread is sufficient to match _every_ host // in a lattice let spread_config = if spread_config.spread.is_empty() { @@ -259,7 +259,7 @@ impl ActorDaemonScaler { store, config: ActorSpreadConfig { actor_reference, - actor_id, + component_id, lattice_id, spread_config, model_name, @@ -290,7 +290,7 @@ mod test { model::{Spread, SpreadScalerProperty}, scaler::{daemonscaler::ActorDaemonScaler, manager::ScalerManager, Scaler}, server::StatusType, - storage::{Actor, Host, Store, WadmActorInfo}, + storage::{Component, Host, Store, WadmComponentInfo}, test_util::{NoopPublisher, TestLatticeSource, TestStore}, workers::{CommandPublisher, EventWorker, StatusPublisher}, }; @@ -301,7 +301,7 @@ mod test { async fn can_compute_spread_commands() -> Result<()> { let lattice_id = "one_host"; let actor_reference = "fakecloud.azurecr.io/echo:0.3.4".to_string(); - let actor_id = "fakecloud_azurecr_io_echo_0_3_4".to_string(); + let component_id = "fakecloud_azurecr_io_echo_0_3_4".to_string(); let host_id = "NASDASDIMAREALHOST"; let store = Arc::new(TestStore::default()); @@ -312,7 +312,7 @@ mod test { lattice_id, host_id.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::new(), providers: HashSet::new(), @@ -354,7 +354,7 @@ mod test { let daemonscaler = ActorDaemonScaler::new( store.clone(), actor_reference.to_string(), - actor_id.to_string(), + component_id.to_string(), lattice_id.to_string(), MODEL_NAME.to_string(), complex_spread, @@ -363,32 +363,32 @@ mod test { let cmds = daemonscaler.reconcile().await?; assert_eq!(cmds.len(), 4); - 
assert!(cmds.contains(&Command::ScaleActor(ScaleActor { - actor_id: actor_id.to_string(), + assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent { + component_id: component_id.to_string(), reference: actor_reference.to_string(), host_id: host_id.to_string(), count: 13, model_name: MODEL_NAME.to_string(), annotations: spreadscaler_annotations("ComplexOne", daemonscaler.id()) }))); - assert!(cmds.contains(&Command::ScaleActor(ScaleActor { - actor_id: actor_id.to_string(), + assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent { + component_id: component_id.to_string(), reference: actor_reference.to_string(), host_id: host_id.to_string(), count: 13, model_name: MODEL_NAME.to_string(), annotations: spreadscaler_annotations("ComplexTwo", daemonscaler.id()) }))); - assert!(cmds.contains(&Command::ScaleActor(ScaleActor { - actor_id: actor_id.to_string(), + assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent { + component_id: component_id.to_string(), reference: actor_reference.to_string(), host_id: host_id.to_string(), count: 13, model_name: MODEL_NAME.to_string(), annotations: spreadscaler_annotations("ComplexThree", daemonscaler.id()) }))); - assert!(cmds.contains(&Command::ScaleActor(ScaleActor { - actor_id: actor_id.to_string(), + assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent { + component_id: component_id.to_string(), reference: actor_reference.to_string(), host_id: host_id.to_string(), count: 13, @@ -493,7 +493,7 @@ mod test { .store( lattice_id, echo_id.to_string(), - Actor { + Component { id: echo_id.to_string(), name: "Echo".to_string(), issuer: "AASDASDASDASD".to_string(), @@ -501,7 +501,7 @@ mod test { ( host_id_one.to_string(), // One instance on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 1, annotations: spreadscaler_annotations( "RunInFakeCloud", @@ -512,7 +512,7 @@ mod test { ( host_id_two.to_string(), // 103 instances on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 103, annotations: spreadscaler_annotations( "RunInRealCloud", @@ -523,7 +523,7 @@ mod test { ( host_id_three.to_string(), // 400 instances on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 400, annotations: spreadscaler_annotations( "RunInPurgatoryCloud", @@ -541,7 +541,7 @@ mod test { .store( lattice_id, blobby_id.to_string(), - Actor { + Component { id: blobby_id.to_string(), name: "Blobby".to_string(), issuer: "AASDASDASDASD".to_string(), @@ -549,7 +549,7 @@ mod test { ( host_id_one.to_string(), // 3 instances on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 3, annotations: spreadscaler_annotations( "CrossRegionCustom", @@ -560,7 +560,7 @@ mod test { ( host_id_two.to_string(), // 19 instances on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 19, annotations: spreadscaler_annotations( "CrossRegionReal", @@ -579,7 +579,7 @@ mod test { lattice_id, host_id_one.to_string(), Host { - actors: HashMap::from_iter([ + components: HashMap::from_iter([ (echo_id.to_string(), 1), (blobby_id.to_string(), 3), ("MSOMEOTHERACTOR".to_string(), 3), @@ -603,7 +603,7 @@ mod test { lattice_id, host_id_two.to_string(), Host { - actors: HashMap::from_iter([ + components: HashMap::from_iter([ (echo_id.to_string(), 103), (blobby_id.to_string(), 19), ]), @@ -627,7 +627,7 @@ mod test { lattice_id, host_id_three.to_string(), Host { - actors: 
HashMap::from_iter([(echo_id.to_string(), 400)]), + components: HashMap::from_iter([(echo_id.to_string(), 400)]), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "purgatory".to_string()), @@ -649,7 +649,7 @@ mod test { for cmd in cmds.iter() { match cmd { - Command::ScaleActor(scale) => + Command::ScaleComponent(scale) => { #[allow(clippy::if_same_then_else)] if scale.host_id == *host_id_one { @@ -670,7 +670,7 @@ mod test { let mut cmds = blobby_daemonscaler.reconcile().await?; assert_eq!(cmds.len(), 2); cmds.sort_by(|a, b| match (a, b) { - (Command::ScaleActor(a), Command::ScaleActor(b)) => a.host_id.cmp(&b.host_id), + (Command::ScaleComponent(a), Command::ScaleComponent(b)) => a.host_id.cmp(&b.host_id), _ => panic!("Unexpected command in daemonscaler list"), }); @@ -679,14 +679,14 @@ mod test { cmds_iter.next().expect("one command"), cmds_iter.next().expect("two commands"), ) { - (Command::ScaleActor(scale1), Command::ScaleActor(scale2)) => { + (Command::ScaleComponent(scale1), Command::ScaleComponent(scale2)) => { assert_eq!(scale1.host_id, host_id_three.to_string()); assert_eq!(scale1.count, 3); assert_eq!(scale1.reference, blobby_ref); assert_eq!(scale2.host_id, host_id_two.to_string()); assert_eq!(scale2.count, 3); - assert_eq!(scale2.actor_id, blobby_id.to_string()); + assert_eq!(scale2.component_id, blobby_id.to_string()); } _ => panic!("Unexpected commands in daemonscaler list"), } @@ -769,7 +769,7 @@ mod test { .store( lattice_id, blobby_id.to_string(), - Actor { + Component { id: blobby_id.to_string(), name: "Blobby".to_string(), issuer: "AASDASDASDASD".to_string(), @@ -777,7 +777,7 @@ mod test { ( host_id_one.to_string(), // 10 instances on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 10, annotations: spreadscaler_annotations( "HighAvailability", @@ -788,7 +788,7 @@ mod test { ( host_id_two.to_string(), // 10 instances on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 10, annotations: spreadscaler_annotations( "HighAvailability", @@ -812,7 +812,7 @@ mod test { lattice_id, host_id_one.to_string(), Host { - actors: HashMap::from_iter([ + components: HashMap::from_iter([ (blobby_id.to_string(), 10), ("MSOMEOTHERACTOR".to_string(), 3), ]), @@ -835,7 +835,7 @@ mod test { lattice_id, host_id_two.to_string(), Host { - actors: HashMap::from_iter([(blobby_id.to_string(), 10)]), + components: HashMap::from_iter([(blobby_id.to_string(), 10)]), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "real".to_string()), @@ -924,7 +924,7 @@ mod test { for cmd in cmds.iter() { match cmd { - Command::ScaleActor(scale) => { + Command::ScaleComponent(scale) => { assert_eq!(scale.host_id, host_id_three.to_string()); assert_eq!(scale.count, 10); assert_eq!(scale.reference, blobby_ref); @@ -941,7 +941,7 @@ mod test { .store( lattice_id, blobby_id.to_string(), - Actor { + Component { id: blobby_id.to_string(), name: "Blobby".to_string(), issuer: "AASDASDASDASD".to_string(), @@ -949,7 +949,7 @@ mod test { ( host_id_one.to_string(), // 10 instances on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 10, annotations: spreadscaler_annotations( "HighAvailability", @@ -960,7 +960,7 @@ mod test { ( host_id_two.to_string(), // 10 instances on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 10, annotations: spreadscaler_annotations( 
"HighAvailability", diff --git a/src/scaler/daemonscaler/provider.rs b/src/scaler/daemonscaler/provider.rs index 31270e42..0177d25f 100644 --- a/src/scaler/daemonscaler/provider.rs +++ b/src/scaler/daemonscaler/provider.rs @@ -342,7 +342,7 @@ mod test { lattice_id, host_id_one.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("inda".to_string(), "cloud".to_string()), @@ -363,7 +363,7 @@ mod test { lattice_id, host_id_two.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("inda".to_string(), "cloud".to_string()), diff --git a/src/scaler/manager.rs b/src/scaler/manager.rs index afd2f081..33cdf55f 100644 --- a/src/scaler/manager.rs +++ b/src/scaler/manager.rs @@ -21,7 +21,7 @@ use tracing::{debug, error, instrument, trace, warn}; use crate::{ events::Event, model::{ - ActorProperties, CapabilityProperties, Component, Manifest, Properties, + CapabilityProperties, Component, ComponentProperties, Manifest, Properties, SpreadScalerProperty, Trait, TraitProperty, DAEMONSCALER_TRAIT, LINK_TRAIT, SPREADSCALER_TRAIT, }, @@ -580,15 +580,15 @@ where for component in components.iter() { let traits = component.traits.as_ref(); match &component.properties { - Properties::Actor { properties: props } => { + Properties::Component { properties: props } => { scalers.extend(traits.unwrap_or(&EMPTY_TRAIT_VEC).iter().filter_map(|trt| { - let actor_id = component_id(name, props.id.as_ref(), &props.image); + let component_id = compute_component_id(name, props.id.as_ref(), &props.image); match (trt.trait_type.as_str(), &trt.properties) { (SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p)) => { Some(Box::new(ActorSpreadScaler::new( snapshot_data.clone(), props.image.to_owned(), - actor_id, + component_id, lattice_id.to_owned(), name.to_owned(), p.to_owned(), @@ -599,7 +599,7 @@ where Some(Box::new(ActorDaemonScaler::new( snapshot_data.clone(), props.image.to_owned(), - actor_id, + component_id, lattice_id.to_owned(), name.to_owned(), p.to_owned(), @@ -613,14 +613,18 @@ where Properties::Capability { properties: CapabilityProperties { id, image, .. 
}, } - | Properties::Actor { - properties: ActorProperties { id, image }, + | Properties::Component { + properties: ComponentProperties { id, image }, } if component.name == p.target => { Some(Box::new(LinkScaler::new( snapshot_data.clone(), LinkScalerConfig { - source_id: actor_id.to_string(), - target: component_id(name, id.as_ref(), &image), + source_id: component_id.to_string(), + target: compute_component_id( + name, + id.as_ref(), + &image, + ), wit_namespace: p.namespace.to_owned(), wit_package: p.package.to_owned(), wit_interfaces: p.interfaces.to_owned(), @@ -644,7 +648,7 @@ where })) } Properties::Capability { properties: props } => { - let provider_id = component_id(name, props.id.as_ref(), &props.image); + let provider_id = compute_component_id(name, props.id.as_ref(), &props.image); let mut scaler_specified = false; if let Some(traits) = traits { scalers.extend(traits.iter().filter_map(|trt| { @@ -696,14 +700,14 @@ where (LINK_TRAIT, TraitProperty::Link(p)) => { components.iter().find_map(|component| { match &component.properties { - Properties::Actor { properties: cappy } + Properties::Component { properties: cappy } if component.name == p.target => { Some(Box::new(LinkScaler::new( snapshot_data.clone(), LinkScalerConfig { source_id: provider_id.to_string(), - target: component_id( + target: compute_component_id( name, cappy.id.as_ref(), &cappy.image, @@ -765,7 +769,7 @@ where /// Based on the name of the model and the optionally provided ID, returns a unique ID for the /// component that is a sanitized version of the component reference and model name, separated /// by a dash. -pub(crate) fn component_id( +pub(crate) fn compute_component_id( model_name: &str, component_id: Option<&String>, component_ref: &str, @@ -787,13 +791,13 @@ pub(crate) fn component_id( #[cfg(test)] mod test { - use crate::scaler::manager::component_id; + use crate::scaler::manager::compute_component_id; #[test] fn compute_proper_component_id() { // User supplied ID always takes precedence assert_eq!( - component_id( + compute_component_id( "mymodel", Some(&"myid".to_string()), "wasmcloud.azurecr.io/echo:0.3.4" @@ -801,7 +805,7 @@ mod test { "myid" ); assert_eq!( - component_id( + compute_component_id( "some model name with spaces cause yaml", Some(&"myid".to_string()), "wasmcloud.azurecr.io/echo:0.3.4" @@ -810,12 +814,12 @@ mod test { ); // Sanitize component reference assert_eq!( - component_id("mymodel", None, "wasmcloud.azurecr.io/echo:0.3.4"), + compute_component_id("mymodel", None, "wasmcloud.azurecr.io/echo:0.3.4"), "mymodel-wasmcloud_azurecr_io_echo_0_3_4" ); // Ensure we can support spaces in the model name, because YAML strings assert_eq!( - component_id( + compute_component_id( "some model name with spaces cause yaml", None, "wasmcloud.azurecr.io/echo:0.3.4" @@ -825,7 +829,7 @@ mod test { // Ensure we can support spaces in the model name, because YAML strings // Ensure we can support lowercasing the reference as well, just in case assert_eq!( - component_id("My ThInG", None, "file:///Users/me/thing.wasm"), + compute_component_id("My ThInG", None, "file:///Users/me/thing.wasm"), "my_thing-file____users_me_thing_wasm" ); } diff --git a/src/scaler/spreadscaler/link.rs b/src/scaler/spreadscaler/link.rs index 977f8e85..dd919c00 100644 --- a/src/scaler/spreadscaler/link.rs +++ b/src/scaler/spreadscaler/link.rs @@ -83,7 +83,7 @@ where async fn handle_event(&self, event: &Event) -> Result> { match event { // Trigger linkdef creation if this actor starts and belongs to this model - 
Event::ComponentScaled(evt) if evt.actor_id == self.config.source_id => { + Event::ComponentScaled(evt) if evt.component_id == self.config.source_id => { self.reconcile().await } Event::ProviderHealthCheckPassed(ProviderHealthCheckPassed { @@ -166,7 +166,7 @@ where // .then(|| { // trace!("Linkdef exists, but values are different, deleting and recreating"); // vec![Command::DeleteLinkdef(DeleteLinkdef { - // actor_id: actor_id.to_owned(), + // component_id: component_id.to_owned(), // provider_id: provider_id.to_owned(), // contract_id: self.config.provider_contract_id.to_owned(), // link_name: self.config.provider_link_name.to_owned(), @@ -180,7 +180,7 @@ where // } else if !exists || values_different { // trace!("Linkdef does not exist or needs to be recreated"); // commands.push(Command::PutLinkdef(PutLinkdef { - // actor_id: actor_id.to_owned(), + // component_id: component_id.to_owned(), // provider_id: provider_id.to_owned(), // link_name: self.config.provider_link_name.to_owned(), // contract_id: self.config.provider_contract_id.to_owned(), @@ -316,7 +316,7 @@ mod test { use crate::{ events::{ComponentScaled, ProviderHealthCheckInfo, ProviderInfo}, - storage::{Actor, Host, Provider, Store}, + storage::{Component, Host, Provider, Store}, test_util::{TestLatticeSource, TestStore}, APP_SPEC_ANNOTATION, }; @@ -327,7 +327,7 @@ mod test { .store( lattice_id, "actor".to_string(), - Actor { + Component { id: "actor".to_string(), reference: actor_ref.to_owned(), ..Default::default() @@ -354,7 +354,7 @@ mod test { async fn test_id_generator() { let lattice_id = "id_generator".to_string(); let actor_ref = "actor_ref".to_string(); - let actor_id = "actor_id".to_string(); + let component_id = "component_id".to_string(); let provider_ref = "provider_ref".to_string(); let provider_id = "provider_id".to_string(); @@ -371,7 +371,7 @@ mod test { create_store(&lattice_id, &actor_ref, &provider_ref).await, LinkScalerConfig { source_id: provider_id.clone(), - target: actor_id.clone(), + target: component_id.clone(), wit_namespace: "wit_namespace".to_string(), wit_package: "wit_package".to_string(), wit_interfaces: vec!["wit_interface".to_string()], @@ -385,7 +385,7 @@ mod test { ); let id = format!( - "{LINK_SCALER_TYPE}-{model_name}-{link_name}-{provider_id}-{actor_id}-{linkscaler_values_hash}", + "{LINK_SCALER_TYPE}-{model_name}-{link_name}-{provider_id}-{component_id}-{linkscaler_values_hash}", LINK_SCALER_TYPE = LINK_SCALER_TYPE, model_name = "model", link_name = "default", @@ -395,7 +395,7 @@ mod test { assert_eq!(scaler.id(), id, "LinkScaler ID should be the same when scalers have the same type, model name, provider link name, actor reference, provider reference, and values"); let id = format!( - "{LINK_SCALER_TYPE}-{model_name}-{link_name}-{actor_id}-{provider_id}-{linkscaler_values_hash}", + "{LINK_SCALER_TYPE}-{model_name}-{link_name}-{component_id}-{provider_id}-{linkscaler_values_hash}", LINK_SCALER_TYPE = LINK_SCALER_TYPE, model_name = "model", link_name = "default", @@ -411,7 +411,7 @@ mod test { let scaler = LinkScaler::new( create_store(&lattice_id, &actor_ref, &provider_ref).await, LinkScalerConfig { - source_id: actor_id.clone(), + source_id: component_id.clone(), target: provider_id.clone(), wit_namespace: "contr".to_string(), wit_package: "act".to_string(), @@ -426,7 +426,7 @@ mod test { ); let id = format!( - "{LINK_SCALER_TYPE}-{model_name}-{link_name}-{actor_id}-{provider_id}-{linkscaler_values_hash}", + 
"{LINK_SCALER_TYPE}-{model_name}-{link_name}-{component_id}-{provider_id}-{linkscaler_values_hash}", LINK_SCALER_TYPE = LINK_SCALER_TYPE, model_name = "model", link_name = "default", @@ -438,7 +438,7 @@ mod test { let scaler = LinkScaler::new( create_store(&lattice_id, &actor_ref, &provider_ref).await, LinkScalerConfig { - source_id: actor_id.clone(), + source_id: component_id.clone(), target: provider_id.clone(), wit_namespace: "contr".to_string(), wit_package: "act".to_string(), @@ -465,14 +465,14 @@ mod test { async fn test_no_linkdef() { let lattice_id = "no-linkdef".to_string(); let actor_ref = "actor_ref".to_string(); - let actor_id = "actor".to_string(); + let component_id = "actor".to_string(); let provider_ref = "provider_ref".to_string(); let provider_id = "provider".to_string(); let scaler = LinkScaler::new( create_store(&lattice_id, &actor_ref, &provider_ref).await, LinkScalerConfig { - source_id: actor_id.clone(), + source_id: component_id.clone(), target: provider_id.clone(), wit_namespace: "namespace".to_string(), wit_package: "package".to_string(), @@ -503,7 +503,7 @@ mod test { // let values = HashMap::from([("foo".to_string(), "bar".to_string())]); // let mut linkdef = LinkDefinition::default(); - // linkdef.actor_id = "actor".to_string(); + // linkdef.component_id = "actor".to_string(); // linkdef.provider_id = "provider".to_string(); // linkdef.contract_id = "contract".to_string(); // linkdef.link_name = "default".to_string(); @@ -534,12 +534,12 @@ mod test { async fn test_existing_linkdef() { let lattice_id = "existing-linkdef".to_string(); let actor_ref = "actor_ref".to_string(); - let actor_id = "actor".to_string(); + let component_id = "actor".to_string(); let provider_ref = "provider_ref".to_string(); let provider_id = "provider".to_string(); let linkdef = InterfaceLinkDefinition { - source_id: actor_id.to_string(), + source_id: component_id.to_string(), target: provider_id.to_string(), wit_namespace: "namespace".to_string(), wit_package: "package".to_string(), @@ -595,7 +595,7 @@ mod test { lattice_id, host_id_one.to_string(), Host { - actors: HashMap::from_iter([(echo_id.to_string(), 1)]), + components: HashMap::from_iter([(echo_id.to_string(), 1)]), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "fake".to_string()), @@ -663,7 +663,7 @@ mod test { .store( lattice_id, echo_id.to_string(), - Actor { + Component { id: echo_id.to_string(), reference: echo_ref.to_string(), ..Default::default() @@ -680,7 +680,7 @@ mod test { )]), claims: None, image_ref: echo_ref, - actor_id: echo_id.to_string(), + component_id: echo_id.to_string(), max_instances: 1, host_id: host_id_one.to_string(), })) diff --git a/src/scaler/spreadscaler/mod.rs b/src/scaler/spreadscaler/mod.rs index a417c1f8..6018eb40 100644 --- a/src/scaler/spreadscaler/mod.rs +++ b/src/scaler/spreadscaler/mod.rs @@ -9,11 +9,11 @@ use tracing::{instrument, trace, warn}; use crate::events::HostHeartbeat; use crate::server::StatusInfo; use crate::{ - commands::{Command, ScaleActor}, + commands::{Command, ScaleComponent}, events::{Event, HostStarted, HostStopped}, model::{Spread, SpreadScalerProperty, TraitProperty, DEFAULT_SPREAD_WEIGHT}, scaler::Scaler, - storage::{Actor, Host, ReadStore}, + storage::{Component, Host, ReadStore}, SCALER_KEY, }; @@ -31,7 +31,7 @@ struct ActorSpreadConfig { /// OCI, Bindle, or File reference for an actor actor_reference: String, /// Unique component identifier for an actor - actor_id: String, + component_id: String, /// Lattice ID that this 
SpreadScaler monitors lattice_id: String, /// The name of the wadm model this SpreadScaler is under @@ -82,7 +82,7 @@ impl Scaler for ActorSpreadScaler { match event { // TODO: React to ComponentScaleFailed with an exponential backoff, can't just immediately retry since that // would cause a very tight loop of failures - Event::ComponentScaled(evt) if evt.actor_id == self.config.actor_id => { + Event::ComponentScaled(evt) if evt.component_id == self.config.component_id => { self.reconcile().await } Event::HostStopped(HostStopped { labels, .. }) @@ -109,10 +109,10 @@ impl Scaler for ActorSpreadScaler { async fn reconcile(&self) -> Result> { let hosts = self.store.list::(&self.config.lattice_id).await?; - let actor_id = &self.config.actor_id; - let actor = self + let component_id = &self.config.component_id; + let component = self .store - .get::(&self.config.lattice_id, actor_id) + .get::(&self.config.lattice_id, component_id) .await?; let mut spread_status = vec![]; @@ -120,7 +120,7 @@ impl Scaler for ActorSpreadScaler { // NOTE(brooksmtownsend) it's easier to assign one host per list of requirements than // balance within those requirements. Users should be specific with their requirements // as wadm is not responsible for ambiguity, a future scaler like a DaemonScaler could handle this - trace!(spread_requirements = ?self.spread_requirements, ?actor_id, "Computing commands"); + trace!(spread_requirements = ?self.spread_requirements, ?component_id, "Computing commands"); let commands = self .spread_requirements .iter() @@ -132,7 +132,7 @@ impl Scaler for ActorSpreadScaler { // Parse the instances into a map of host_id -> number of running actors managed // by this scaler. Ignoring ones where we aren't running anything - let running_actors_per_host: HashMap<&String, usize> = actor + let running_actors_per_host: HashMap<&String, usize> = component .as_ref() .map(|actor| &actor.instances) .map(|instances| { @@ -164,8 +164,8 @@ impl Scaler for ActorSpreadScaler { // Start actors to reach desired instances Ordering::Less =>{ // Right now just start on the first available host. 
We can be smarter about it later - Some(vec![Command::ScaleActor(ScaleActor { - actor_id: actor_id.to_owned(), + Some(vec![Command::ScaleComponent(ScaleComponent { + component_id: component_id.to_owned(), reference: self.config.actor_reference.to_owned(), // SAFETY: We already checked that the list of hosts is not empty, so we can unwrap here host_id: eligible_hosts.keys().next().unwrap().to_string(), @@ -188,8 +188,8 @@ impl Scaler for ActorSpreadScaler { // Keep track of how many we've stopped, which will be the smaller of the current // instance count or the number we need to stop current_stopped += std::cmp::min(instance_count, remaining_to_stop); - commands.push(Command::ScaleActor(ScaleActor { - actor_id: actor_id.to_owned(), + commands.push(Command::ScaleComponent(ScaleComponent { + component_id: component_id.to_owned(), reference: self.config.actor_reference.to_owned(), host_id: host_id.to_owned(), count: count as u32, @@ -253,7 +253,7 @@ impl ActorSpreadScaler { pub fn new( store: S, actor_reference: String, - actor_id: String, + component_id: String, lattice_id: String, model_name: String, spread_config: SpreadScalerProperty, @@ -266,7 +266,7 @@ impl ActorSpreadScaler { spread_requirements: compute_spread(&spread_config), config: ActorSpreadConfig { actor_reference, - actor_id, + component_id, lattice_id, spread_config, model_name, @@ -403,7 +403,7 @@ mod test { spreadscaler::{spreadscaler_annotations, ActorSpreadScaler}, Scaler, }, - storage::{Actor, Host, Store, WadmActorInfo}, + storage::{Component, Host, Store, WadmComponentInfo}, test_util::{NoopPublisher, TestLatticeSource, TestStore}, workers::{CommandPublisher, EventWorker, StatusPublisher}, }; @@ -564,7 +564,7 @@ mod test { async fn can_compute_spread_commands() -> Result<()> { let lattice_id = "hoohah_multi_stop_actor"; let actor_reference = "fakecloud.azurecr.io/echo:0.3.4".to_string(); - let actor_id = "fakecloud_azurecr_io_echo_0_3_4".to_string(); + let component_id = "fakecloud_azurecr_io_echo_0_3_4".to_string(); let host_id = "NASDASDIMAREALHOST"; let store = Arc::new(TestStore::default()); @@ -575,7 +575,7 @@ mod test { lattice_id, host_id.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::new(), providers: HashSet::new(), @@ -621,7 +621,7 @@ mod test { let spreadscaler = ActorSpreadScaler::new( store.clone(), actor_reference.to_string(), - actor_id.to_string(), + component_id.to_string(), lattice_id.to_string(), MODEL_NAME.to_string(), complex_spread, @@ -630,24 +630,24 @@ mod test { let cmds = spreadscaler.reconcile().await?; assert_eq!(cmds.len(), 3); - assert!(cmds.contains(&Command::ScaleActor(ScaleActor { - actor_id: actor_id.to_string(), + assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent { + component_id: component_id.to_string(), reference: actor_reference.to_string(), host_id: host_id.to_string(), count: 10, model_name: MODEL_NAME.to_string(), annotations: spreadscaler_annotations("ComplexOne", spreadscaler.id()) }))); - assert!(cmds.contains(&Command::ScaleActor(ScaleActor { - actor_id: actor_id.to_string(), + assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent { + component_id: component_id.to_string(), reference: actor_reference.to_string(), host_id: host_id.to_string(), count: 8, model_name: MODEL_NAME.to_string(), annotations: spreadscaler_annotations("ComplexThree", spreadscaler.id()) }))); - assert!(cmds.contains(&Command::ScaleActor(ScaleActor { - actor_id: actor_id.to_string(), + 
assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent { + component_id: component_id.to_string(), reference: actor_reference.to_string(), host_id: host_id.to_string(), count: 85, @@ -752,7 +752,7 @@ mod test { .store( lattice_id, echo_id.to_string(), - Actor { + Component { id: echo_id.to_string(), name: "Echo".to_string(), issuer: "AASDASDASDASD".to_string(), @@ -760,7 +760,7 @@ mod test { ( host_id_one.to_string(), // One instance on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { annotations: spreadscaler_annotations( "RunInFakeCloud", echo_spreadscaler.id(), @@ -771,7 +771,7 @@ mod test { ( host_id_two.to_string(), // 103 instances on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { annotations: spreadscaler_annotations( "RunInRealCloud", echo_spreadscaler.id(), @@ -782,7 +782,7 @@ mod test { ( host_id_three.to_string(), // 400 instances on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { annotations: spreadscaler_annotations( "RunInPurgatoryCloud", echo_spreadscaler.id(), @@ -800,7 +800,7 @@ mod test { .store( lattice_id, blobby_id.to_string(), - Actor { + Component { id: blobby_id.to_string(), name: "Blobby".to_string(), issuer: "AASDASDASDASD".to_string(), @@ -808,7 +808,7 @@ mod test { ( host_id_one.to_string(), // 3 instances on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 3, annotations: spreadscaler_annotations( "CrossRegionCustom", @@ -819,7 +819,7 @@ mod test { ( host_id_two.to_string(), // 19 instances on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 19, annotations: spreadscaler_annotations( "CrossRegionReal", @@ -838,7 +838,7 @@ mod test { lattice_id, host_id_one.to_string(), Host { - actors: HashMap::from_iter([ + components: HashMap::from_iter([ (echo_id.to_string(), 1), (blobby_id.to_string(), 3), ("MSOMEOTHERACTOR".to_string(), 3), @@ -862,7 +862,7 @@ mod test { lattice_id, host_id_two.to_string(), Host { - actors: HashMap::from_iter([ + components: HashMap::from_iter([ (echo_id.to_string(), 103), (blobby_id.to_string(), 19), ]), @@ -886,7 +886,7 @@ mod test { lattice_id, host_id_three.to_string(), Host { - actors: HashMap::from_iter([(echo_id.to_string(), 400)]), + components: HashMap::from_iter([(echo_id.to_string(), 400)]), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "purgatory".to_string()), @@ -906,7 +906,7 @@ mod test { let mut cmds = echo_spreadscaler.reconcile().await?; assert_eq!(cmds.len(), 2); cmds.sort_by(|a, b| match (a, b) { - (Command::ScaleActor(a), Command::ScaleActor(b)) => a.host_id.cmp(&b.host_id), + (Command::ScaleComponent(a), Command::ScaleComponent(b)) => a.host_id.cmp(&b.host_id), _ => panic!("Unexpected commands in spreadscaler list"), }); @@ -915,14 +915,14 @@ mod test { cmds_iter.next().expect("one scale command"), cmds_iter.next().expect("two scale commands"), ) { - (Command::ScaleActor(scale1), Command::ScaleActor(scale2)) => { + (Command::ScaleComponent(scale1), Command::ScaleComponent(scale2)) => { assert_eq!(scale1.host_id, host_id_one.to_string()); assert_eq!(scale1.count, 206); assert_eq!(scale1.reference, echo_ref); assert_eq!(scale2.host_id, host_id_three.to_string()); assert_eq!(scale2.count, 103); - assert_eq!(scale2.actor_id, echo_id.to_string()); + assert_eq!(scale2.component_id, echo_id.to_string()); } _ => panic!("Unexpected commands in spreadscaler list"), 
} @@ -930,7 +930,7 @@ mod test { let mut cmds = blobby_spreadscaler.reconcile().await?; assert_eq!(cmds.len(), 2); cmds.sort_by(|a, b| match (a, b) { - (Command::ScaleActor(a), Command::ScaleActor(b)) => a.host_id.cmp(&b.host_id), + (Command::ScaleComponent(a), Command::ScaleComponent(b)) => a.host_id.cmp(&b.host_id), _ => panic!("Unexpected commands in spreadscaler list"), }); @@ -939,14 +939,14 @@ mod test { cmds_iter.next().expect("one scale command"), cmds_iter.next().expect("two scale commands"), ) { - (Command::ScaleActor(scale1), Command::ScaleActor(scale2)) => { + (Command::ScaleComponent(scale1), Command::ScaleComponent(scale2)) => { assert_eq!(scale1.host_id, host_id_three.to_string()); assert_eq!(scale1.count, 3); assert_eq!(scale1.reference, blobby_ref); assert_eq!(scale2.host_id, host_id_two.to_string()); assert_eq!(scale2.count, 3); - assert_eq!(scale2.actor_id, blobby_id.to_string()); + assert_eq!(scale2.component_id, blobby_id.to_string()); } _ => panic!("Unexpected commands in spreadscaler list"), } @@ -958,7 +958,7 @@ mod test { async fn can_handle_multiple_spread_matches() -> Result<()> { let lattice_id = "multiple_spread_matches"; let actor_reference = "fakecloud.azurecr.io/echo:0.3.4".to_string(); - let actor_id = "fakecloud_azurecr_io_echo_0_3_4".to_string(); + let component_id = "fakecloud_azurecr_io_echo_0_3_4".to_string(); let host_id = "NASDASDIMAREALHOST"; let store = Arc::new(TestStore::default()); @@ -986,7 +986,7 @@ mod test { let spreadscaler = ActorSpreadScaler::new( store.clone(), actor_reference.to_string(), - actor_id.to_string(), + component_id.to_string(), lattice_id.to_string(), MODEL_NAME.to_string(), real_spread, @@ -999,7 +999,7 @@ mod test { lattice_id, host_id.to_string(), Host { - actors: HashMap::from_iter([(actor_id.to_string(), 10)]), + components: HashMap::from_iter([(component_id.to_string(), 10)]), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("region".to_string(), "east".to_string()), @@ -1017,15 +1017,15 @@ mod test { store .store( lattice_id, - actor_id.to_string(), - Actor { - id: actor_id.to_string(), + component_id.to_string(), + Component { + id: component_id.to_string(), name: "Faketor".to_string(), issuer: "AASDASDASDASD".to_string(), instances: HashMap::from_iter([( host_id.to_string(), // 10 instances on this host under the first spread - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 10, annotations: spreadscaler_annotations("SimpleOne", spreadscaler.id()), }]), @@ -1039,16 +1039,16 @@ mod test { assert_eq!(cmds.len(), 2); // Should be enforcing 10 instances per spread - assert!(cmds.contains(&Command::ScaleActor(ScaleActor { - actor_id: "fakecloud_azurecr_io_echo_0_3_4".to_string(), + assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent { + component_id: "fakecloud_azurecr_io_echo_0_3_4".to_string(), reference: actor_reference.to_string(), host_id: host_id.to_string(), count: 15, model_name: MODEL_NAME.to_string(), annotations: spreadscaler_annotations("SimpleOne", spreadscaler.id()) }))); - assert!(cmds.contains(&Command::ScaleActor(ScaleActor { - actor_id: "fakecloud_azurecr_io_echo_0_3_4".to_string(), + assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent { + component_id: "fakecloud_azurecr_io_echo_0_3_4".to_string(), reference: actor_reference.to_string(), host_id: host_id.to_string(), count: 5, @@ -1063,7 +1063,7 @@ mod test { async fn calculates_proper_scale_commands() -> Result<()> { let lattice_id = "calculates_proper_scale_commands"; let 
actor_reference = "fakecloud.azurecr.io/echo:0.3.4".to_string(); - let actor_id = "fakecloud_azurecr_io_echo_0_3_4".to_string(); + let component_id = "fakecloud_azurecr_io_echo_0_3_4".to_string(); let host_id = "NASDASDIMAREALHOST"; let host_id2 = "NASDASDIMAREALHOST2"; @@ -1078,7 +1078,7 @@ mod test { let spreadscaler = ActorSpreadScaler::new( store.clone(), actor_reference.to_string(), - actor_id.to_string(), + component_id.to_string(), lattice_id.to_string(), MODEL_NAME.to_string(), real_spread, @@ -1093,7 +1093,7 @@ mod test { ( host_id.to_string(), Host { - actors: HashMap::from_iter([(actor_id.to_string(), 10)]), + components: HashMap::from_iter([(component_id.to_string(), 10)]), friendly_name: "hey".to_string(), labels: HashMap::new(), @@ -1107,7 +1107,7 @@ mod test { ( host_id2.to_string(), Host { - actors: HashMap::from_iter([(actor_id.to_string(), 10)]), + components: HashMap::from_iter([(component_id.to_string(), 10)]), friendly_name: "hey2".to_string(), labels: HashMap::new(), @@ -1125,22 +1125,22 @@ mod test { store .store( lattice_id, - actor_id.to_string(), - Actor { - id: actor_id.to_string(), + component_id.to_string(), + Component { + id: component_id.to_string(), name: "Faketor".to_string(), issuer: "AASDASDASDASD".to_string(), instances: HashMap::from_iter([ ( host_id.to_string(), - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 10, annotations: spreadscaler_annotations("default", spreadscaler.id()), }]), ), ( host_id2.to_string(), - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 10, annotations: spreadscaler_annotations("default", spreadscaler.id()), }]), @@ -1157,7 +1157,7 @@ mod test { assert_eq!(cmds.len(), 2); assert!( cmds.iter().any(|command| { - if let Command::ScaleActor(actor) = command { + if let Command::ScaleComponent(actor) = command { actor.host_id == host_id } else { false @@ -1167,7 +1167,7 @@ mod test { ); assert!( cmds.iter().any(|command| { - if let Command::ScaleActor(actor) = command { + if let Command::ScaleComponent(actor) = command { actor.host_id == host_id2 } else { false @@ -1180,16 +1180,16 @@ mod test { let cmds = spreadscaler.cleanup().await?; // Should stop 10 on each host - assert!(cmds.contains(&Command::ScaleActor(ScaleActor { - actor_id: actor_id.clone(), + assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent { + component_id: component_id.clone(), reference: actor_reference.clone(), host_id: host_id.to_string(), count: 0, model_name: MODEL_NAME.to_string(), annotations: spreadscaler_annotations("default", spreadscaler.id()) }))); - assert!(cmds.contains(&Command::ScaleActor(ScaleActor { - actor_id: actor_id.clone(), + assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent { + component_id: component_id.clone(), reference: actor_reference.clone(), host_id: host_id2.to_string(), count: 0, @@ -1274,7 +1274,7 @@ mod test { .store( lattice_id, blobby_id.to_string(), - Actor { + Component { id: blobby_id.to_string(), name: "Blobby".to_string(), issuer: "AASDASDASDASD".to_string(), @@ -1282,7 +1282,7 @@ mod test { ( host_id_one.to_string(), // 3 instances on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 3, annotations: spreadscaler_annotations( "CrossRegionCustom", @@ -1293,7 +1293,7 @@ mod test { ( host_id_two.to_string(), // 19 instances on this host - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 19, annotations: spreadscaler_annotations( "CrossRegionReal", 
@@ -1312,7 +1312,7 @@ mod test { lattice_id, host_id_one.to_string(), Host { - actors: HashMap::from_iter([ + components: HashMap::from_iter([ (blobby_id.to_string(), 3), ("MSOMEOTHERACTOR".to_string(), 3), ]), @@ -1335,7 +1335,7 @@ mod test { lattice_id, host_id_two.to_string(), Host { - actors: HashMap::from_iter([(blobby_id.to_string(), 19)]), + components: HashMap::from_iter([(blobby_id.to_string(), 19)]), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "real".to_string()), @@ -1356,7 +1356,7 @@ mod test { lattice_id, host_id_three.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "purgatory".to_string()), @@ -1411,7 +1411,7 @@ mod test { let mut cmds = blobby_spreadscaler.reconcile().await?; assert_eq!(cmds.len(), 2); cmds.sort_by(|a, b| match (a, b) { - (Command::ScaleActor(a), Command::ScaleActor(b)) => a.host_id.cmp(&b.host_id), + (Command::ScaleComponent(a), Command::ScaleComponent(b)) => a.host_id.cmp(&b.host_id), _ => panic!("Unexpected commands in spreadscaler list"), }); @@ -1420,21 +1420,21 @@ mod test { cmds_iter.next().expect("one scale command"), cmds_iter.next().expect("two scale commands"), ) { - (Command::ScaleActor(scale1), Command::ScaleActor(scale2)) => { + (Command::ScaleComponent(scale1), Command::ScaleComponent(scale2)) => { assert_eq!(scale1.host_id, host_id_three.to_string()); assert_eq!(scale1.count, 3); assert_eq!(scale1.reference, blobby_ref); assert_eq!(scale2.host_id, host_id_two.to_string()); assert_eq!(scale2.count, 3); - assert_eq!(scale2.actor_id, blobby_id.to_string()); + assert_eq!(scale2.component_id, blobby_id.to_string()); } _ => panic!("Unexpected commands in spreadscaler list"), } let modifying_event = ComponentScaled { annotations: spreadscaler_annotations("CrossRegionReal", blobby_spreadscaler.id()), - actor_id: blobby_id.to_string(), + component_id: blobby_id.to_string(), image_ref: blobby_ref.to_string(), host_id: host_id_two.to_string(), max_instances: 0, @@ -1455,7 +1455,7 @@ mod test { .await?; assert_eq!(cmds.len(), 2); cmds.sort_by(|a, b| match (a, b) { - (Command::ScaleActor(a), Command::ScaleActor(b)) => a.host_id.cmp(&b.host_id), + (Command::ScaleComponent(a), Command::ScaleComponent(b)) => a.host_id.cmp(&b.host_id), _ => panic!("Unexpected commands in spreadscaler list"), }); @@ -1464,14 +1464,14 @@ mod test { cmds_iter.next().expect("one scale command"), cmds_iter.next().expect("two scale commands"), ) { - (Command::ScaleActor(scale1), Command::ScaleActor(scale2)) => { + (Command::ScaleComponent(scale1), Command::ScaleComponent(scale2)) => { assert_eq!(scale1.host_id, host_id_three.to_string()); assert_eq!(scale1.count, 3); assert_eq!(scale1.reference, blobby_ref); assert_eq!(scale2.host_id, host_id_two.to_string()); assert_eq!(scale2.count, 3); - assert_eq!(scale2.actor_id, blobby_id.to_string()); + assert_eq!(scale2.component_id, blobby_id.to_string()); } _ => panic!("Unexpected commands in spreadscaler list"), } diff --git a/src/scaler/spreadscaler/provider.rs b/src/scaler/spreadscaler/provider.rs index 35c58d2a..839d72ce 100644 --- a/src/scaler/spreadscaler/provider.rs +++ b/src/scaler/spreadscaler/provider.rs @@ -406,7 +406,7 @@ mod test { lattice_id, host_id_one.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "fake".to_string()), @@ -426,7 +426,7 @@ mod test { 
lattice_id, host_id_two.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "real".to_string()), @@ -601,7 +601,7 @@ mod test { lattice_id, host_id_one.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "fake".to_string()), @@ -626,7 +626,7 @@ mod test { lattice_id, host_id_two.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "real".to_string()), @@ -645,7 +645,7 @@ mod test { lattice_id, host_id_three.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "inthemiddle".to_string()), @@ -664,7 +664,7 @@ mod test { lattice_id, host_id_four.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "fake".to_string()), @@ -820,7 +820,7 @@ mod test { lattice_id, host_id_one.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "fake".to_string()), @@ -840,7 +840,7 @@ mod test { lattice_id, host_id_two.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "real".to_string()), @@ -952,7 +952,7 @@ mod test { lattice_id, host_id_one.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "fake".to_string()), @@ -977,7 +977,7 @@ mod test { lattice_id, host_id_two.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "real".to_string()), @@ -1064,7 +1064,7 @@ mod test { lattice_id, host_id_one.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "fake".to_string()), @@ -1112,7 +1112,7 @@ mod test { lattice_id, host_id_two.to_string(), Host { - actors: HashMap::new(), + components: HashMap::new(), friendly_name: "hey".to_string(), labels: HashMap::from_iter([ ("cloud".to_string(), "fake".to_string()), diff --git a/src/server/handlers.rs b/src/server/handlers.rs index 912fbf22..a8d43d7d 100644 --- a/src/server/handlers.rs +++ b/src/server/handlers.rs @@ -11,8 +11,8 @@ use tracing::{debug, error, instrument, log::warn, trace}; use crate::{ model::{ - internal::StoredManifest, ActorProperties, CapabilityProperties, LinkProperty, Manifest, - Properties, Trait, TraitProperty, LATEST_VERSION, + internal::StoredManifest, CapabilityProperties, ComponentProperties, LinkProperty, + Manifest, Properties, Trait, TraitProperty, LATEST_VERSION, }, publisher::Publisher, server::StatusType, @@ -1014,8 +1014,8 @@ pub(crate) async fn validate_manifest(manifest: Manifest) -> anyhow::Result<()> } // Actor validation : Actors should have a unique identifier per manifest - if let Properties::Actor { - properties: ActorProperties { id: Some(id), .. }, + if let Properties::Component { + properties: ComponentProperties { id: Some(id), .. 
}, } = &component.properties { if !id_registry.insert(id.to_string()) { diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 37784c2a..66a2409a 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -7,7 +7,7 @@ pub mod reaper; pub(crate) mod snapshot; mod state;
-pub use state::{Actor, Host, Provider, ProviderStatus, WadmActorInfo};
+pub use state::{Component, Host, Provider, ProviderStatus, WadmComponentInfo};
/// A trait that must be implemented with a unique identifier for the given type. This is used in /// the construction of keys for a store diff --git a/src/storage/reaper.rs b/src/storage/reaper.rs index a3a1fe03..7f48e7d3 100644 --- a/src/storage/reaper.rs +++ b/src/storage/reaper.rs @@ -7,7 +7,7 @@ use chrono::{Duration, Utc}; use tokio::{task::JoinHandle, time}; use tracing::{debug, error, info, instrument, trace, warn};
-use super::{Actor, Host, Provider, Store};
+use super::{Component, Host, Provider, Store};
/// A struct that can reap various pieces of data from the given store pub struct Reaper { @@ -152,7 +152,7 @@ impl Undertaker { #[instrument(level = "debug", skip(self, hosts), fields(lattice_id = %self.lattice_id))] async fn reap_actors(&self, hosts: &HashMap<String, Host>) {
-        let actors = match self.store.list::<Actor>(&self.lattice_id).await {
+        let actors = match self.store.list::<Component>(&self.lattice_id).await {
Ok(n) => n, Err(e) => { error!(error = %e, "Error when fetching actors from store. Will retry on next tick"); @@ -160,20 +160,22 @@ impl Undertaker { } };
-        let (actors_to_remove, actors_to_update): (HashMap<String, Actor>, HashMap<String, Actor>) =
-            actors
-                .into_iter()
-                .map(|(id, mut actor)| {
-                    // Only keep the instances where the host exists and the actor is in its map
-                    actor.instances.retain(|host_id, _| {
-                        hosts
-                            .get(host_id)
-                            .map(|host| host.actors.contains_key(&actor.id))
-                            .unwrap_or(false)
-                    });
-                    (id, actor)
-                })
-                .partition(|(_, actor)| actor.instances.is_empty());
+        let (actors_to_remove, actors_to_update): (
+            HashMap<String, Component>,
+            HashMap<String, Component>,
+        ) = actors
+            .into_iter()
+            .map(|(id, mut actor)| {
+                // Only keep the instances where the host exists and the actor is in its map
+                actor.instances.retain(|host_id, _| {
+                    hosts
+                        .get(host_id)
+                        .map(|host| host.components.contains_key(&actor.id))
+                        .unwrap_or(false)
+                });
+                (id, actor)
+            })
+            .partition(|(_, actor)| actor.instances.is_empty());
debug!(to_remove = %actors_to_remove.len(), to_update = %actors_to_update.len(), "Filtered out list of actors to update and reap"); @@ -188,7 +190,7 @@ impl Undertaker { if let Err(e) = self .store
-            .delete_many::<Actor, _, _>(&self.lattice_id, actors_to_remove.keys())
+            .delete_many::<Component, _, _>(&self.lattice_id, actors_to_remove.keys())
.await { warn!(error = %e, "Error when deleting actors from store.
Will retry on next tick") @@ -251,7 +253,7 @@ mod test { }; use crate::{ - storage::{ProviderStatus, ReadStore, WadmActorInfo}, + storage::{ProviderStatus, ReadStore, WadmComponentInfo}, test_util::TestStore, }; @@ -260,7 +262,7 @@ mod test { let store = Arc::new(TestStore::default()); let lattice_id = "reaper"; - let actor_id = "testactor"; + let component_id = "testactor"; let host1_id = "host1"; let host2_id = "host2"; @@ -270,20 +272,20 @@ mod test { lattice_id, [ ( - actor_id.to_string(), - Actor { - id: actor_id.to_string(), + component_id.to_string(), + Component { + id: component_id.to_string(), instances: HashMap::from([ ( host1_id.to_string(), - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { annotations: BTreeMap::default(), count: 1, }]), ), ( host2_id.to_string(), - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { annotations: BTreeMap::default(), count: 1, }]), @@ -294,11 +296,11 @@ mod test { ), ( "idontexist".to_string(), - Actor { + Component { id: "idontexist".to_string(), instances: HashMap::from([( host1_id.to_string(), - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { annotations: BTreeMap::default(), count: 1, }]), @@ -331,7 +333,7 @@ mod test { ( host1_id.to_string(), Host { - actors: HashMap::from([(actor_id.to_string(), 1)]), + components: HashMap::from([(component_id.to_string(), 1)]), providers: HashSet::default(), id: host1_id.to_string(), last_seen: Utc::now(), @@ -341,7 +343,7 @@ mod test { ( host2_id.to_string(), Host { - actors: HashMap::from([(actor_id.to_string(), 1)]), + components: HashMap::from([(component_id.to_string(), 1)]), providers: HashSet::default(), id: host2_id.to_string(), // Make this host stick around for longer @@ -365,10 +367,10 @@ mod test { // Now check that the providers, actors, and hosts were reaped let hosts = store.list::(lattice_id).await.unwrap(); assert_eq!(hosts.len(), 1, "Only one host should be left"); - let actors = store.list::(lattice_id).await.unwrap(); + let actors = store.list::(lattice_id).await.unwrap(); assert_eq!(actors.len(), 1, "Only one actor should remain in the store"); actors - .get(actor_id) + .get(component_id) .expect("Should have the correct actor in the store"); assert!( @@ -382,7 +384,7 @@ mod test { let store = Arc::new(TestStore::default()); let lattice_id = "reaper"; - let actor_id = "testactor"; + let component_id = "testactor"; let host1_id = "host1"; let host2_id = "host2"; @@ -390,20 +392,20 @@ mod test { store .store( lattice_id, - actor_id.to_string(), - Actor { - id: actor_id.to_string(), + component_id.to_string(), + Component { + id: component_id.to_string(), instances: HashMap::from([ ( host1_id.to_string(), - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { annotations: BTreeMap::default(), count: 1, }]), ), ( host2_id.to_string(), - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { annotations: BTreeMap::default(), count: 1, }]), @@ -422,7 +424,7 @@ mod test { ( host1_id.to_string(), Host { - actors: HashMap::from([(actor_id.to_string(), 1)]), + components: HashMap::from([(component_id.to_string(), 1)]), providers: HashSet::default(), id: host1_id.to_string(), last_seen: Utc::now() + Duration::milliseconds(600), @@ -432,7 +434,7 @@ mod test { ( host2_id.to_string(), Host { - actors: HashMap::default(), + components: HashMap::default(), providers: HashSet::default(), id: host2_id.to_string(), last_seen: Utc::now() + Duration::milliseconds(600), @@ 
-453,17 +455,17 @@ mod test { tokio::time::sleep(wait).await; // Make sure we only have one instance of the actor left
-    let actors = store.list::<Actor>(lattice_id).await.unwrap();
-    let actor = actors
-        .get(actor_id)
+    let actors = store.list::<Component>(lattice_id).await.unwrap();
+    let component = actors
+        .get(component_id)
.expect("Should have the correct actor in the store"); assert_eq!(
-        actor.instances.len(),
+        component.instances.len(),
1, "Only one host should remain in instances" ); assert_eq!(
-        actor
+        component
.instances .get(host1_id) .expect("Should have instance left on the correct host") diff --git a/src/storage/snapshot.rs b/src/storage/snapshot.rs index e66e5923..cdb823c2 100644 --- a/src/storage/snapshot.rs +++ b/src/storage/snapshot.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use tokio::sync::RwLock; use wasmcloud_control_interface::InterfaceLinkDefinition;
-use crate::storage::{Actor, Host, Provider, ReadStore, StateKind};
+use crate::storage::{Component, Host, Provider, ReadStore, StateKind};
use crate::workers::LinkSource; // NOTE(thomastaylor312): This type is real ugly and we should probably find a better way to @@ -70,7 +70,7 @@ where .collect::<HashMap<_, _>>(); let actors = self .store
-            .list::<Actor>(&self.lattice_id)
+            .list::<Component>(&self.lattice_id)
.await? .into_iter() .map(|(key, val)| (key, serde_json::to_value(val).unwrap())) @@ -87,7 +87,7 @@ where { let mut stored_state = self.stored_state.write().await; stored_state.insert(Provider::KIND.to_owned(), providers);
-        stored_state.insert(Actor::KIND.to_owned(), actors);
+        stored_state.insert(Component::KIND.to_owned(), actors);
stored_state.insert(Host::KIND.to_owned(), hosts); } diff --git a/src/storage/state.rs b/src/storage/state.rs index 9b14a146..b898c37e 100644 --- a/src/storage/state.rs +++ b/src/storage/state.rs @@ -100,55 +100,55 @@ impl From<&ProviderStarted> for Provider { } }
-/// A representation of a unique actor (as defined by its annotations) and its count. This struct
+/// A representation of a unique component (as defined by its annotations) and its count. This struct
/// has a custom implementation of PartialEq and Hash that _only_ compares the annotations. This is
-/// not a very "pure" way of doing things, but it lets us access current counts of actors without
+/// not a very "pure" way of doing things, but it lets us access current counts of components without
/// having to do a bunch of extra work.
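///
/// A sketch of what this buys us (illustrative, not code from this change):
/// combined with the `Borrow` impl below, a `HashSet` of instance info can be
/// queried directly by an annotation map, since both hash the same data:
///
/// ```ignore
/// let annotations: BTreeMap<String, String> = BTreeMap::new();
/// if let Some(info) = instances.get(&annotations) {
///     // `info.count` is the running count for this exact annotation set
/// }
/// ```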
#[derive(Debug, Serialize, Deserialize, Clone, Default, Eq)]
-pub struct WadmActorInfo {
+pub struct WadmComponentInfo {
pub annotations: BTreeMap<String, String>, pub count: usize, }
-impl PartialEq for WadmActorInfo {
+impl PartialEq for WadmComponentInfo {
fn eq(&self, other: &Self) -> bool { self.annotations == other.annotations } }
-impl Hash for WadmActorInfo {
+impl Hash for WadmComponentInfo {
fn hash<H: Hasher>(&self, state: &mut H) { self.annotations.hash(state); } }
-impl Borrow<BTreeMap<String, String>> for WadmActorInfo {
+impl Borrow<BTreeMap<String, String>> for WadmComponentInfo {
fn borrow(&self) -> &BTreeMap<String, String> { &self.annotations } }
-/// A wasmCloud Actor
+/// A wasmCloud Component
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
-pub struct Actor {
-    /// ID of the actor, normally a public (n)key
+pub struct Component {
+    /// ID of the component
pub id: String,
-    /// Name of the actor
+    /// Name of the component
pub name: String,
-    /// Issuer of the (signed) actor
+    /// Issuer of the (signed) component
pub issuer: String,
-    /// All instances of this actor running in the lattice, keyed by the host ID and contains a hash
+    /// All instances of this component running in the lattice, keyed by the host ID and contains a hash
/// map of annotations -> count for each set of unique annotations
-    pub instances: HashMap<String, HashSet<WadmActorInfo>>,
+    pub instances: HashMap<String, HashSet<WadmComponentInfo>>,
-    /// The reference used to start the actor. Can be empty if it was started from a file
+    /// The reference used to start the component. Can be empty if it was started from a file
pub reference: String, }
-impl Actor {
+impl Component {
/// A helper method that returns the total count of running copies of this actor, regardless of /// which host they are running on pub fn count(&self) -> usize { @@ -168,20 +168,20 @@ impl Actor { } }
-impl StateKind for Actor {
-    const KIND: &'static str = "actor";
+impl StateKind for Component {
+    const KIND: &'static str = "component";
}
-impl From<ActorsStarted> for Actor {
+impl From<ActorsStarted> for Component {
fn from(value: ActorsStarted) -> Self {
-        Actor {
+        Component {
id: value.public_key, name: value.claims.name, issuer: value.claims.issuer, reference: value.image_ref, instances: HashMap::from_iter([( value.host_id,
-            HashSet::from_iter([WadmActorInfo {
+            HashSet::from_iter([WadmComponentInfo {
annotations: value.annotations, count: value.count, }]), @@ -190,16 +190,16 @@ impl From<ActorsStarted> for Actor { } }
-impl From<&ActorsStarted> for Actor {
+impl From<&ActorsStarted> for Component {
fn from(value: &ActorsStarted) -> Self {
-        Actor {
+        Component {
id: value.public_key.clone(), name: value.claims.name.clone(), issuer: value.claims.issuer.clone(), reference: value.image_ref.clone(), instances: HashMap::from_iter([( value.host_id.clone(),
-            HashSet::from_iter([WadmActorInfo {
+            HashSet::from_iter([WadmComponentInfo {
annotations: value.annotations.clone(), count: value.count, }]), @@ -208,17 +208,17 @@ impl From<&ActorsStarted> for Actor { } }
-impl From<ComponentScaled> for Actor {
+impl From<ComponentScaled> for Component {
fn from(value: ComponentScaled) -> Self { let (name, issuer) = value.claims.map(|c| (c.name, c.issuer)).unwrap_or_default();
-        Actor {
-            id: value.actor_id,
+        Component {
+            id: value.component_id,
name, issuer, reference: value.image_ref, instances: HashMap::from_iter([( value.host_id,
-            HashSet::from_iter([WadmActorInfo {
+            HashSet::from_iter([WadmComponentInfo {
annotations: value.annotations, count: value.max_instances, }]), @@ -227,10 +227,10 @@ impl From<ComponentScaled> for Actor { } }
-impl From<&ComponentScaled> for Actor {
+impl From<&ComponentScaled> for Component {
fn from(value: &ComponentScaled) -> Self {
-        Actor
{
-            id: value.actor_id.clone(),
+        Component {
+            id: value.component_id.clone(),
name: value .claims .as_ref() @@ -244,7 +244,7 @@ impl From<&ComponentScaled> for Actor { reference: value.image_ref.clone(), instances: HashMap::from_iter([( value.host_id.clone(),
-            HashSet::from_iter([WadmActorInfo {
+            HashSet::from_iter([WadmComponentInfo {
annotations: value.annotations.clone(), count: value.max_instances, }]), @@ -256,11 +256,8 @@ impl From<&ComponentScaled> for Actor { /// A wasmCloud host #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct Host {
-    /// A map of actor IDs to the number of instances of the actor running on the host
-    // NOTE(thomastaylor312): If we ever start storing a _ton_ of actors and it gets slow, we might
-    // want to consider switching out the hash algorithm to something like `ahash` to speed up
-    // lookups and deserialization
-    pub actors: HashMap<String, usize>,
+    /// A map of component IDs to the number of instances of the component running on the host
+    pub components: HashMap<String, usize>,
/// The randomly generated friendly name of the host pub friendly_name: String, @@ -343,7 +340,7 @@ impl From<HostHeartbeat> for Host { .collect(); Host {
-            actors,
+            components: actors,
friendly_name: value.friendly_name, labels: value.labels, providers, @@ -384,7 +381,7 @@ impl From<&HostHeartbeat> for Host { .collect(); Host {
-            actors,
+            components: actors,
friendly_name: value.friendly_name.clone(), labels: value.labels.clone(), providers, diff --git a/src/workers/command.rs b/src/workers/command.rs index bfb025ce..52780d33 100644 --- a/src/workers/command.rs +++ b/src/workers/command.rs @@ -31,7 +31,7 @@ impl Worker for CommandWorker { #[instrument(level = "trace", skip_all)] async fn do_work(&self, mut message: ScopedMessage<Command>) -> WorkResult<()> { let res = match message.as_ref() {
-            Command::ScaleActor(actor) => {
+            Command::ScaleComponent(actor) => {
trace!(command = ?actor, "Handling scale actor command"); // Order here is intentional to prevent scalers from overwriting managed annotations let mut annotations = actor.annotations.clone(); @@ -40,7 +40,7 @@ impl Worker for CommandWorker { .scale_component( &actor.host_id, &actor.reference,
-                        &actor.actor_id,
+                        &actor.component_id,
actor.count, Some(annotations.into_iter().collect()), // TODO(#252): Support config diff --git a/src/workers/event.rs b/src/workers/event.rs index 2157a624..7895a9c0 100644 --- a/src/workers/event.rs +++ b/src/workers/event.rs @@ -14,7 +14,7 @@ use crate::events::*; use crate::publisher::Publisher; use crate::scaler::manager::{ScalerList, ScalerManager}; use crate::server::StatusInfo;
-use crate::storage::{Actor, Host, Provider, ProviderStatus, Store, WadmActorInfo};
+use crate::storage::{Component, Host, Provider, ProviderStatus, Store, WadmComponentInfo};
use crate::APP_SPEC_ANNOTATION; use super::event_helpers::*; @@ -56,7 +56,7 @@ where // multiple error cases, it was just easier to catch it into an anyhow Error and then convert at // the end
-    #[instrument(level = "debug", skip(self, actor), fields(actor_id = %actor.public_key, host_id = %actor.host_id))]
+    #[instrument(level = "debug", skip(self, actor), fields(component_id = %actor.public_key, host_id = %actor.host_id))]
async fn handle_actors_started( &self, lattice_id: &str, @@ -66,10 +66,10 @@ where debug!("Fetching current data for actor"); // Because we could have created an actor from the host heartbeat, we just overwrite // everything except counts here
-        let mut actor_data = Actor::from(actor);
+        let mut actor_data = Component::from(actor);
if let
Some(mut current) = self .store
-            .get::<Actor>(lattice_id, &actor.public_key)
+            .get::<Component>(lattice_id, &actor.public_key)
.await? { trace!(actor = ?current, "Found existing actor data"); @@ -79,13 +79,13 @@ where // If an actor is already running on a host, update or add to the running count // where _all_ annotations match if let Some(instance) = current_instances.take(&actor.annotations) {
-                current_instances.insert(WadmActorInfo {
+                current_instances.insert(WadmComponentInfo {
count: instance.count + actor.count, annotations: instance.annotations, }); } else { // Otherwise add a new entry with the count of started actors
-                current_instances.insert(WadmActorInfo {
+                current_instances.insert(WadmComponentInfo {
count: actor.count, annotations: actor.annotations.clone(), }); @@ -93,7 +93,7 @@ where } else { current.instances.insert( actor.host_id.clone(),
-                HashSet::from([WadmActorInfo {
+                HashSet::from([WadmComponentInfo {
count: actor.count, annotations: actor.annotations.clone(), }]), @@ -108,7 +108,7 @@ where if let Some(mut host) = self.store.get::<Host>(lattice_id, &actor.host_id).await? { trace!(host = ?host, "Found existing host data");
-            host.actors
+            host.components
.entry(actor.public_key.clone()) .and_modify(|count| *count += actor.count) .or_insert(actor.count); @@ -124,7 +124,7 @@ where .map_err(anyhow::Error::from) }
-    #[instrument(level = "debug", skip(self, actor), fields(actor_id = %actor.public_key, host_id = %actor.host_id))]
+    #[instrument(level = "debug", skip(self, actor), fields(component_id = %actor.public_key, host_id = %actor.host_id))]
async fn handle_actors_stopped( &self, lattice_id: &str, @@ -134,7 +134,7 @@ where debug!("Fetching current data for actor"); if let Some(mut current) = self .store
-            .get::<Actor>(lattice_id, &actor.public_key)
+            .get::<Component>(lattice_id, &actor.public_key)
.await? { trace!(actor = ?current, "Found existing actor data"); @@ -154,7 +154,7 @@ where Some(current) => { // We aren't stopping everything, so just remove the stopped count from the // total
-                    current_instances.insert(WadmActorInfo {
+                    current_instances.insert(WadmComponentInfo {
count: current.count - actor.count, annotations: current.annotations, }); @@ -176,7 +176,7 @@ where if current.instances.is_empty() { trace!("Last actor instance was removed, removing actor from storage"); self.store
-                .delete::<Actor>(lattice_id, &actor.public_key)
+                .delete::<Component>(lattice_id, &actor.public_key)
.await } else { self.store @@ -188,12 +188,12 @@ where // Update actor count in the host if let Some(mut host) = self.store.get::<Host>(lattice_id, &actor.host_id).await? { trace!(host = ?host, "Found existing host data");
-            match host.actors.get(&actor.public_key) {
+            match host.components.get(&actor.public_key) {
Some(existing_count) if actor.count >= *existing_count => {
-                    host.actors.remove(&actor.public_key);
+                    host.components.remove(&actor.public_key);
} Some(existing_count) => {
-                    host.actors
+                    host.components
.insert(actor.public_key.to_owned(), *existing_count - actor.count); } // you cannot delete what doesn't exist @@ -208,7 +208,7 @@ where Ok(()) }
-    #[instrument(level = "debug", skip(self, actor), fields(actor_id = %actor.actor_id, host_id = %actor.host_id))]
+    #[instrument(level = "debug", skip(self, actor), fields(component_id = %actor.component_id, host_id = %actor.host_id))]
async fn handle_component_scaled( &self, lattice_id: &str, @@ -219,8 +219,12 @@ where // Update actor count in the actor state, adding to the state if it didn't exist or removing // if the scale is down to zero.
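// NOTE (illustrative sketch only, values hypothetical): the `replace` call in
// the branch below leans on the annotation-only PartialEq/Hash above. Replacing
// an entry that carries the same annotations but a different count swaps it in
// place of the old one:
//
//     let mut set = HashSet::new();
//     set.insert(WadmComponentInfo { count: 10, annotations: annotations.clone() });
//     set.replace(WadmComponentInfo { count: 500, annotations });
//     assert_eq!(set.len(), 1); // one entry remains, now with count == 500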
-        let mut actor_data = Actor::from(actor);
-        if let Some(mut current) = self.store.get::<Actor>(lattice_id, &actor.actor_id).await? {
+        let mut actor_data = Component::from(actor);
+        if let Some(mut current) = self
+            .store
+            .get::<Component>(lattice_id, &actor.component_id)
+            .await?
+        {
trace!(actor = ?current, "Found existing actor data"); match current.instances.get_mut(&actor.host_id) { @@ -230,7 +234,7 @@ where } // If an actor is already running on a host, update the running count to the scaled max_instances value Some(current_instances) => {
-                    current_instances.replace(WadmActorInfo {
+                    current_instances.replace(WadmComponentInfo {
count: actor.max_instances, annotations: actor.annotations.clone(), }); @@ -242,7 +246,7 @@ where None => { current.instances.insert( actor.host_id.clone(),
-                        HashSet::from([WadmActorInfo {
+                        HashSet::from([WadmComponentInfo {
count: actor.max_instances, annotations: actor.annotations.clone(), }]), @@ -259,10 +263,10 @@ where trace!(host = ?host, "Found existing host data"); if actor.max_instances == 0 {
-                host.actors.remove(&actor.actor_id);
+                host.components.remove(&actor.component_id);
} else {
-                host.actors
-                    .entry(actor.actor_id.clone())
+                host.components
+                    .entry(actor.component_id.clone())
.and_modify(|count| *count = actor.max_instances) .or_insert(actor.max_instances); } @@ -274,12 +278,12 @@ where if actor_data.instances.is_empty() { self.store
-                .delete::<Actor>(lattice_id, &actor.actor_id)
+                .delete::<Component>(lattice_id, &actor.component_id)
.await .map_err(anyhow::Error::from) } else { self.store
-                .store(lattice_id, actor.actor_id.clone(), actor_data)
+                .store(lattice_id, actor.component_id.clone(), actor_data)
.await .map_err(anyhow::Error::from) } @@ -344,16 +348,16 @@ where }; trace!("Fetching actors from store to remove stopped instances");
-        let all_actors = self.store.list::<Actor>(lattice_id).await?;
+        let all_components = self.store.list::<Component>(lattice_id).await?;
#[allow(clippy::type_complexity)] let (actors_to_update, actors_to_delete): (
-            Vec<(String, Actor)>,
-            Vec<(String, Actor)>,
-        ) = all_actors
+            Vec<(String, Component)>,
+            Vec<(String, Component)>,
+        ) = all_components
.into_iter() .filter_map(|(id, mut actor)| {
-                if current.actors.contains_key(&id) {
+                if current.components.contains_key(&id) {
actor.instances.remove(&current.id); Some((id, actor)) } else { @@ -366,7 +370,7 @@ where trace!("Removing actors with no more running instances"); self.store
-            .delete_many::<Actor, _, _>(lattice_id, actors_to_delete.into_iter().map(|(id, _)| id))
+            .delete_many::<Component, _, _>(
+                lattice_id,
+                actors_to_delete.into_iter().map(|(id, _)| id),
+            )
.await?; trace!("Fetching providers from store to remove stopped instances"); @@ -607,16 +614,16 @@ where // END HANDLER FUNCTIONS async fn populate_actor_info( &self,
-        actors: &HashMap<String, Actor>,
+        actors: &HashMap<String, Component>,
host_id: &str, instance_map: Vec<ActorDescription>,
-    ) -> anyhow::Result<HashMap<String, Actor>> {
+    ) -> anyhow::Result<HashMap<String, Component>> {
let claims = self.ctl_client.get_claims().await?; Ok(instance_map .into_iter() .map(|actor_description| {
-                let instance = HashSet::from_iter([WadmActorInfo {
+                let instance = HashSet::from_iter([WadmComponentInfo {
count: actor_description.max_instances as usize, annotations: actor_description .annotations @@ -627,19 +634,18 @@ where // Construct modified Actor with new instances included let mut new_instances = actor.instances.clone(); new_instances.insert(host_id.to_owned(), instance);
-                    let actor = Actor {
+                    let component = Component {
instances: new_instances, reference: actor_description.image_ref, name: actor_description.name.unwrap_or(actor.name.clone()),
..actor.clone() }; - (actor_description.id, actor) - // TODO(brooksmtownsend): missing claims isn't dire anymore + (actor_description.id, component) } else if let Some(claim) = claims.get(&actor_description.id) { ( actor_description.id.clone(), - Actor { + Component { id: actor_description.id, name: claim.name.to_owned(), issuer: claim.issuer.to_owned(), @@ -648,11 +654,11 @@ where }, ) } else { - warn!("Claims not found for actor on host, information is missing"); + debug!("Claims not found for component on host, component is unsigned"); ( actor_description.id.clone(), - Actor { + Component { id: actor_description.id, name: "".to_owned(), issuer: "".to_owned(), @@ -662,7 +668,7 @@ where ) } }) - .collect::>()) + .collect::>()) } #[instrument(level = "debug", skip(self, host), fields(host_id = %host.host_id))] @@ -673,7 +679,7 @@ where inventory_actors: &Vec, ) -> anyhow::Result<()> { debug!("Fetching current actor state"); - let actors = self.store.list::(lattice_id).await?; + let actors = self.store.list::(lattice_id).await?; // Compare stored Actors to the "true" list on this host, updating stored // Actors when they differ from the authoratative heartbeat @@ -1278,7 +1284,7 @@ mod test { /***********************************************************/ let actor1 = ActorsStarted { - claims: ActorClaims { + claims: ComponentClaims { call_alias: Some("Grand Moff".into()), issuer: "Sheev Palpatine".into(), name: "Grand Moff Tarkin".into(), @@ -1293,7 +1299,7 @@ mod test { }; let actor2 = ActorsStarted { - claims: ActorClaims { + claims: ComponentClaims { call_alias: Some("Darth".into()), issuer: "Sheev Palpatine".into(), name: "Darth Vader".into(), @@ -1314,7 +1320,7 @@ mod test { .await .expect("Should be able to handle actor event"); - let actors = store.list::(lattice_id).await.unwrap(); + let actors = store.list::(lattice_id).await.unwrap(); assert_eq!(actors.len(), 1, "Should only be 1 actor in state"); assert_actor(&actors, &actor1, &[(&host1_id, 1)]); @@ -1324,7 +1330,10 @@ mod test { .await .expect("Should be able to access store") .expect("Should have the host in the store"); - assert_eq!(*host.actors.get(&actor1.public_key).unwrap_or(&0), 1_usize); + assert_eq!( + *host.components.get(&actor1.public_key).unwrap_or(&0), + 1_usize + ); worker .handle_actors_started(lattice_id, &actor1) @@ -1358,7 +1367,7 @@ mod test { .await .expect("Should be able to handle actor event"); - let actors = store.list::(lattice_id).await.unwrap(); + let actors = store.list::(lattice_id).await.unwrap(); assert_eq!( actors.len(), 2, @@ -1375,7 +1384,7 @@ mod test { /***********************************************************/ let actor1_scaled = ComponentScaled { - claims: Some(ActorClaims { + claims: Some(ComponentClaims { call_alias: Some("Grand Moff".into()), issuer: "Sheev Palpatine".into(), name: "Grand Moff Tarkin".into(), @@ -1383,7 +1392,7 @@ mod test { ..Default::default() }), image_ref: "coruscant.galactic.empire/tarkin:0.1.0".into(), - actor_id: "TARKIN".into(), + component_id: "TARKIN".into(), host_id: host1_id.clone(), annotations: BTreeMap::default(), max_instances: 500, @@ -1392,12 +1401,12 @@ mod test { .handle_component_scaled(lattice_id, &actor1_scaled) .await .expect("Should be able to handle actor event"); - let actors = store.list::(lattice_id).await.unwrap(); + let actors = store.list::(lattice_id).await.unwrap(); let actor = actors.get("TARKIN").expect("Actor should exist in state"); let hosts = store.list::(lattice_id).await.unwrap(); let host = hosts.get(&host1_id).expect("Host 
should exist in state"); assert_eq!( - host.actors.get(&actor1_scaled.actor_id), + host.components.get(&actor1_scaled.component_id), Some(&500), "Actor count in host should be updated" ); @@ -1408,7 +1417,7 @@ mod test { ); let actor1_scaled = ComponentScaled { - claims: Some(ActorClaims { + claims: Some(ComponentClaims { call_alias: Some("Grand Moff".into()), issuer: "Sheev Palpatine".into(), name: "Grand Moff Tarkin".into(), @@ -1416,7 +1425,7 @@ mod test { ..Default::default() }), image_ref: "coruscant.galactic.empire/tarkin:0.1.0".into(), - actor_id: "TARKIN".into(), + component_id: "TARKIN".into(), host_id: host1_id.clone(), annotations: BTreeMap::default(), max_instances: 200, @@ -1425,12 +1434,12 @@ mod test { .handle_component_scaled(lattice_id, &actor1_scaled) .await .expect("Should be able to handle actor event"); - let actors = store.list::(lattice_id).await.unwrap(); + let actors = store.list::(lattice_id).await.unwrap(); let actor = actors.get("TARKIN").expect("Actor should exist in state"); let hosts = store.list::(lattice_id).await.unwrap(); let host = hosts.get(&host1_id).expect("Host should exist in state"); assert_eq!( - host.actors.get(&actor1_scaled.actor_id), + host.components.get(&actor1_scaled.component_id), Some(&200), "Actor count in host should be updated" ); @@ -1441,7 +1450,7 @@ mod test { ); let actor1_scaled = ComponentScaled { - claims: Some(ActorClaims { + claims: Some(ComponentClaims { call_alias: Some("Grand Moff".into()), issuer: "Sheev Palpatine".into(), name: "Grand Moff Tarkin".into(), @@ -1449,7 +1458,7 @@ mod test { ..Default::default() }), image_ref: "coruscant.galactic.empire/tarkin:0.1.0".into(), - actor_id: "TARKIN".into(), + component_id: "TARKIN".into(), host_id: host1_id.clone(), annotations: BTreeMap::default(), max_instances: 0, @@ -1458,12 +1467,12 @@ mod test { .handle_component_scaled(lattice_id, &actor1_scaled) .await .expect("Should be able to handle actor event"); - let actors = store.list::(lattice_id).await.unwrap(); + let actors = store.list::(lattice_id).await.unwrap(); let actor = actors.get("TARKIN").expect("Actor should exist in state"); let hosts = store.list::(lattice_id).await.unwrap(); let host = hosts.get(&host1_id).expect("Host should exist in state"); assert_eq!( - host.actors.get(&actor1_scaled.actor_id), + host.components.get(&actor1_scaled.component_id), None, "Actor in host should be removed" ); @@ -1474,7 +1483,7 @@ mod test { ); let actor1_scaled = ComponentScaled { - claims: Some(ActorClaims { + claims: Some(ComponentClaims { call_alias: Some("Grand Moff".into()), issuer: "Sheev Palpatine".into(), name: "Grand Moff Tarkin".into(), @@ -1482,7 +1491,7 @@ mod test { ..Default::default() }), image_ref: "coruscant.galactic.empire/tarkin:0.1.0".into(), - actor_id: "TARKIN".into(), + component_id: "TARKIN".into(), host_id: host1_id.clone(), annotations: BTreeMap::default(), max_instances: 1, @@ -1491,12 +1500,12 @@ mod test { .handle_component_scaled(lattice_id, &actor1_scaled) .await .expect("Should be able to handle actor event"); - let actors = store.list::(lattice_id).await.unwrap(); + let actors = store.list::(lattice_id).await.unwrap(); let actor = actors.get("TARKIN").expect("Actor should exist in state"); let hosts = store.list::(lattice_id).await.unwrap(); let host = hosts.get(&host1_id).expect("Host should exist in state"); assert_eq!( - host.actors.get(&actor1_scaled.actor_id), + host.components.get(&actor1_scaled.component_id), Some(&1), "Actor in host should be readded from scratch" ); @@ -1569,7 +1578,7 @@ 
mod test { assert_eq!(hosts.len(), 2, "Should only have 2 hosts"); let host = hosts.get(&host1_id).expect("Host should still exist"); assert_eq!( - host.actors.len(), + host.components.len(), 2, "Should have two different actors running" ); @@ -1580,7 +1589,7 @@ mod test { ); let host = hosts.get(&host2_id).expect("Host should still exist"); assert_eq!( - host.actors.len(), + host.components.len(), 2, "Should have two different actors running" ); @@ -1687,7 +1696,7 @@ mod test { assert_provider(&providers, &provider1, &[&host1_id]); assert_provider(&providers, &provider2, &[&host1_id, &host2_id]); - let actors = store.list::(lattice_id).await.unwrap(); + let actors = store.list::(lattice_id).await.unwrap(); assert_eq!(actors.len(), 2, "Should still have 2 actors in state"); assert_actor(&actors, &actor1, &[(&host1_id, 2), (&host2_id, 2)]); assert_actor(&actors, &actor2, &[(&host1_id, 2), (&host2_id, 2)]); @@ -1711,7 +1720,7 @@ mod test { .await .expect("Should be able to handle actor stop event"); - let actors = store.list::(lattice_id).await.unwrap(); + let actors = store.list::(lattice_id).await.unwrap(); assert_eq!(actors.len(), 2, "Should still have 2 actors in state"); assert_actor(&actors, &actor1, &[(&host2_id, 2)]); assert_actor(&actors, &actor2, &[(&host1_id, 2), (&host2_id, 2)]); @@ -1721,8 +1730,14 @@ mod test { .await .expect("Should be able to access store") .expect("Should have the host in the store"); - assert_eq!(*host.actors.get(&actor1.public_key).unwrap_or(&0), 2_usize); - assert_eq!(*host.actors.get(&actor2.public_key).unwrap_or(&0), 2_usize); + assert_eq!( + *host.components.get(&actor1.public_key).unwrap_or(&0), + 2_usize + ); + assert_eq!( + *host.components.get(&actor2.public_key).unwrap_or(&0), + 2_usize + ); // Now stop on the other let stopped2 = ActorsStopped { @@ -1735,7 +1750,7 @@ mod test { .await .expect("Should be able to handle actor stop event"); - let actors = store.list::(lattice_id).await.unwrap(); + let actors = store.list::(lattice_id).await.unwrap(); assert_eq!(actors.len(), 1, "Should only have 1 actor in state"); // Double check the the old one is still ok assert_actor(&actors, &actor2, &[(&host1_id, 2), (&host2_id, 2)]); @@ -1766,10 +1781,10 @@ mod test { let hosts = store.list::(lattice_id).await.unwrap(); assert_eq!(hosts.len(), 2, "Should only have 2 hosts"); let host = hosts.get(&host1_id).expect("Host should still exist"); - assert_eq!(host.actors.len(), 1, "Should have 1 actor running"); + assert_eq!(host.components.len(), 1, "Should have 1 actor running"); assert_eq!(host.providers.len(), 1, "Should have 1 provider running"); let host = hosts.get(&host2_id).expect("Host should still exist"); - assert_eq!(host.actors.len(), 1, "Should have 1 actor running"); + assert_eq!(host.components.len(), 1, "Should have 1 actor running"); assert_eq!( host.providers.len(), 1, @@ -1894,10 +1909,10 @@ mod test { let hosts = store.list::(lattice_id).await.unwrap(); assert_eq!(hosts.len(), 2, "Should only have 2 hosts"); let host = hosts.get(&host1_id).expect("Host should still exist"); - assert_eq!(host.actors.len(), 1, "Should have 1 actor running"); + assert_eq!(host.components.len(), 1, "Should have 1 actor running"); assert_eq!(host.providers.len(), 1, "Should have 1 provider running"); let host = hosts.get(&host2_id).expect("Host should still exist"); - assert_eq!(host.actors.len(), 1, "Should have 1 actor running"); + assert_eq!(host.components.len(), 1, "Should have 1 actor running"); assert_eq!( host.providers.len(), 1, @@ -1905,7 +1920,7 @@ mod 
test { ); // Double check providers and actors are the same - let actors = store.list::(lattice_id).await.unwrap(); + let actors = store.list::(lattice_id).await.unwrap(); assert_eq!(actors.len(), 1, "Should only have 1 actor in state"); assert_actor(&actors, &actor2, &[(&host1_id, 2), (&host2_id, 2)]); @@ -1932,7 +1947,7 @@ mod test { let hosts = store.list::(lattice_id).await.unwrap(); assert_eq!(hosts.len(), 1, "Should only have 1 host"); let host = hosts.get(&host2_id).expect("Host should still exist"); - assert_eq!(host.actors.len(), 1, "Should have 1 actor running"); + assert_eq!(host.components.len(), 1, "Should have 1 actor running"); assert_eq!( host.providers.len(), 1, @@ -1940,7 +1955,7 @@ mod test { ); // Double check providers and actors are the same - let actors = store.list::(lattice_id).await.unwrap(); + let actors = store.list::(lattice_id).await.unwrap(); assert_eq!(actors.len(), 1, "Should only have 1 actor in state"); assert_actor(&actors, &actor2, &[(&host2_id, 2)]); @@ -2085,7 +2100,7 @@ mod test { // We test that the host is created in other tests, so just check that the actors and // providers were created properly - let actors = store.list::(lattice_id).await.unwrap(); + let actors = store.list::(lattice_id).await.unwrap(); assert_eq!(actors.len(), 2, "Store should now have two actors"); let actor = actors.get(&actor1_id).expect("Actor should exist"); let expected = claims.get(&actor1_id).unwrap(); @@ -2262,11 +2277,11 @@ mod test { .store( lattice_id, "jabba".to_string(), - Actor { + Component { id: "jabba".to_string(), instances: HashMap::from([( host_id.to_string(), - HashSet::from_iter([WadmActorInfo { + HashSet::from_iter([WadmComponentInfo { count: 1, annotations: BTreeMap::default(), }]), @@ -2337,7 +2352,7 @@ mod test { .await .expect("Should be able to handle host heartbeat"); - let actors = store.list::(lattice_id).await.unwrap(); + let actors = store.list::(lattice_id).await.unwrap(); assert_eq!(actors.len(), 2, "Should have 2 actors in the store"); let actor = actors.get("jabba").expect("Actor should exist"); assert_eq!( @@ -2405,7 +2420,7 @@ mod test { } fn assert_actor( - actors: &HashMap, + actors: &HashMap, event: &ActorsStarted, expected_counts: &[(&str, usize)], ) { diff --git a/test/data/all_hosts.yaml b/test/data/all_hosts.yaml index 079f9c1a..bc3c7476 100644 --- a/test/data/all_hosts.yaml +++ b/test/data/all_hosts.yaml @@ -8,7 +8,7 @@ metadata: spec: components: - name: hello - type: actor + type: component properties: image: wasmcloud.azurecr.io/http-hello-world:0.1.0 traits: diff --git a/test/data/complex.yaml b/test/data/complex.yaml index fad0ca47..d7fa91c4 100644 --- a/test/data/complex.yaml +++ b/test/data/complex.yaml @@ -8,7 +8,7 @@ metadata: spec: components: - name: blobby - type: actor + type: component properties: image: wasmcloud.azurecr.io/blobby:0.1.0 id: littleblobbytables diff --git a/test/data/duplicate_component.yaml b/test/data/duplicate_component.yaml index 33a5cf17..6b1cd1a1 100644 --- a/test/data/duplicate_component.yaml +++ b/test/data/duplicate_component.yaml @@ -8,7 +8,7 @@ metadata: spec: components: - name: ui - type: actor + type: component properties: image: wasmcloud.azurecr.io/ui:0.3.2 traits: @@ -21,7 +21,7 @@ spec: app: petclinic - name: ui - type: actor + type: component properties: image: wasmcloud.azurecr.io/customers:0.3.1 traits: @@ -44,7 +44,7 @@ spec: app: petclinic - name: vets - type: actor + type: component properties: image: wasmcloud.azurecr.io/vets:0.3.1 traits: @@ -68,7 +68,7 @@ spec: app: 
petclinic

     - name: vets
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/visits:0.3.1
       traits:
@@ -92,7 +92,7 @@ spec:
                   app: petclinic

     - name: clinicapi
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/clinicapi:0.3.1
       traits:
diff --git a/test/data/duplicate_id1.yaml b/test/data/duplicate_id1.yaml
index 232a80d5..cac7ac1b 100644
--- a/test/data/duplicate_id1.yaml
+++ b/test/data/duplicate_id1.yaml
@@ -8,7 +8,7 @@ metadata:
 spec:
   components:
     - name: userinfo
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/fake:1
       traits:
diff --git a/test/data/duplicate_id2.yaml b/test/data/duplicate_id2.yaml
index 033a3947..b248201a 100644
--- a/test/data/duplicate_id2.yaml
+++ b/test/data/duplicate_id2.yaml
@@ -8,7 +8,7 @@ metadata:
 spec:
   components:
     - name: ui
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/ui:0.3.2
         id: ui
@@ -22,7 +22,7 @@ spec:
                   app: petclinic

     - name: ui2
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/ui:0.3.2
         id: ui
diff --git a/test/data/duplicate_linkdef.yaml b/test/data/duplicate_linkdef.yaml
index 014d4851..af61985d 100644
--- a/test/data/duplicate_linkdef.yaml
+++ b/test/data/duplicate_linkdef.yaml
@@ -8,7 +8,7 @@ metadata:
 spec:
   components:
     - name: ui
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/ui:0.3.2
       traits:
@@ -21,7 +21,7 @@ spec:
                   app: petclinic

     - name: customers
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/customers:0.3.1
       traits:
@@ -44,7 +44,7 @@ spec:
                   app: petclinic

     - name: vets
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/vets:0.3.1
       traits:
@@ -78,7 +78,7 @@ spec:
                   app: petclinic

     - name: visits
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/visits:0.3.1
       traits:
@@ -102,7 +102,7 @@ spec:
                   app: petclinic

     - name: clinicapi
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/clinicapi:0.3.1
       traits:
diff --git a/test/data/events.json b/test/data/events.json
index 4a9dc728..1ecd4363 100644
--- a/test/data/events.json
+++ b/test/data/events.json
@@ -20,7 +20,7 @@
       },
       "host_id": "NCBAWBHRT6JNSQIYQEURM3AQZ74I6HTQRRCFMTVPC3OAX4BUTCNTYADZ",
       "image_ref": "wasmcloud.azurecr.io/echo:0.3.4",
-      "actor_id": "wasmcloud_azurecr_io_echo_0_3_4",
+      "component_id": "wasmcloud_azurecr_io_echo_0_3_4",
       "max_instances": 10
     }
   },
@@ -36,7 +36,7 @@
       "error": "actor is already running with a different image reference `wasmcloud.azurecr.io/echo:0.3.4`",
       "host_id": "NCBAWBHRT6JNSQIYQEURM3AQZ74I6HTQRRCFMTVPC3OAX4BUTCNTYADZ",
       "image_ref": "wasmcloud.azurecr.io/echo:0.3.8",
-      "actor_id": "wasmcloud_azurecr_io_echo_0_3_4",
+      "component_id": "wasmcloud_azurecr_io_echo_0_3_4",
       "max_instances": 11,
       "public_key": "MBCFOPM6JW2APJLXJD3Z5O4CN7CPYJ2B4FTKLJUR5YR5MITIU7HD3WD5"
     }
diff --git a/test/data/host_stop.yaml b/test/data/host_stop.yaml
index 45060da0..83070cd0 100644
--- a/test/data/host_stop.yaml
+++ b/test/data/host_stop.yaml
@@ -8,7 +8,7 @@ metadata:
 spec:
   components:
     - name: hello
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/http-hello-world:0.1.0
       traits:
diff --git a/test/data/incorrect_component.yaml b/test/data/incorrect_component.yaml
index 17fb97c6..19620d62 100644
--- a/test/data/incorrect_component.yaml
+++ b/test/data/incorrect_component.yaml
@@ -8,7 +8,7 @@ metadata:
 spec:
   components:
     - name: ui
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/ui:0.3.2
       traits:
@@ -20,7 +20,7 @@ spec:
                   app: petclinic

     - name: ui
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/customers:0.3.1
       traits:
@@ -38,7 +38,7 @@ spec:
                   app: petclinic

     - name: vets
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/vets:0.3.1
       traits:
@@ -57,7 +57,7 @@ spec:
                   app: petclinic

     - name: vets
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/visits:0.3.1
       traits:
@@ -76,7 +76,7 @@ spec:
                   app: petclinic

     - name: clinicapi
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/clinicapi:0.3.1
       traits:
diff --git a/test/data/long_image_refs.yaml b/test/data/long_image_refs.yaml
index c6cc88b3..3b6d8dab 100644
--- a/test/data/long_image_refs.yaml
+++ b/test/data/long_image_refs.yaml
@@ -8,7 +8,7 @@ metadata:
 spec:
   components:
     - name: echo
-      type: actor
+      type: component
       properties:
         image: file:///some/path/to/another/path/that/is/very/long/and/would/normally/crash/the/thing/but/in/this/case/doesnt/because/the/size/is/changed.wasm
     - name: httpserver
diff --git a/test/data/lotta_actors.yaml b/test/data/lotta_actors.yaml
index 1fdd37d7..30dd3b68 100644
--- a/test/data/lotta_actors.yaml
+++ b/test/data/lotta_actors.yaml
@@ -8,7 +8,7 @@ metadata:
 spec:
   components:
     - name: hello
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/http-hello-world:0.1.0
       traits:
diff --git a/test/data/missing_capability_component.yaml b/test/data/missing_capability_component.yaml
index aa0c8e8d..01eb1a5e 100644
--- a/test/data/missing_capability_component.yaml
+++ b/test/data/missing_capability_component.yaml
@@ -8,7 +8,7 @@ metadata:
 spec:
   components:
     - name: echo
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/echo:0.3.7
       traits:
diff --git a/test/data/outdatedapp.yaml b/test/data/outdatedapp.yaml
index 22539fc3..96739732 100644
--- a/test/data/outdatedapp.yaml
+++ b/test/data/outdatedapp.yaml
@@ -16,7 +16,7 @@ spec:
   components:
     # Latest, no modifications needed, actor component
     - name: xkcd
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/xkcd:0.1.1
       traits:
@@ -30,7 +30,7 @@ spec:
               address: 0.0.0.0:8081
     # Old actor component
     - name: echo
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/echo:0.3.4
       traits:
@@ -45,7 +45,7 @@ spec:
               address: 0.0.0.0:8080
     # No longer needed actor component
     - name: kvcounter
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/kvcounter:0.4.0
       traits:
diff --git a/test/data/simple.yaml b/test/data/simple.yaml
index 3303ab05..4d995d20 100644
--- a/test/data/simple.yaml
+++ b/test/data/simple.yaml
@@ -8,7 +8,7 @@ metadata:
 spec:
   components:
     - name: hello
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/http-hello-world:0.1.0
         id: http_hello_world
diff --git a/test/data/simple2.yaml b/test/data/simple2.yaml
index 158ca0fd..54abac2b 100644
--- a/test/data/simple2.yaml
+++ b/test/data/simple2.yaml
@@ -8,7 +8,7 @@ metadata:
 spec:
   components:
     - name: messagepub
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/message-pub:0.1.3
       traits:
diff --git a/test/data/upgradedapp.yaml b/test/data/upgradedapp.yaml
index 457a6394..a232ae30 100644
--- a/test/data/upgradedapp.yaml
+++ b/test/data/upgradedapp.yaml
@@ -9,7 +9,7 @@ spec:
   components:
     # Latest, no modifications needed, actor component
    - name: xkcd
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/xkcd:0.1.1
       traits:
@@ -23,7 +23,7 @@ spec:
               address: 0.0.0.0:8081
     # Totally new actor component
     - name: messagepub
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/message-pub:0.1.3
       traits:
@@ -32,7 +32,7 @@ spec:
             instances: 1
     # Updated actor component
     - name: echo
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/echo:0.3.8
       traits:
diff --git a/test/data/upgradedapp2.yaml b/test/data/upgradedapp2.yaml
index cbf70e1b..b2887851 100644
--- a/test/data/upgradedapp2.yaml
+++ b/test/data/upgradedapp2.yaml
@@ -9,7 +9,7 @@ spec:
   components:
     # Latest, no modifications needed, actor component
     - name: xkcd
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/xkcd:0.1.1
       traits:
@@ -22,7 +22,7 @@ spec:
           values:
             address: 0.0.0.0:8081
     - name: messagepub
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/message-pub:0.1.3
       traits:
@@ -30,7 +30,7 @@ spec:
           properties:
             instances: 1
     - name: echo
-      type: actor
+      type: component
       properties:
         image: wasmcloud.azurecr.io/echo:0.3.8
       traits:
diff --git a/tests/storage_nats_kv.rs b/tests/storage_nats_kv.rs
index 8795a72b..23764de5 100644
--- a/tests/storage_nats_kv.rs
+++ b/tests/storage_nats_kv.rs
@@ -5,8 +5,8 @@ use chrono::Utc;
 use wadm::{
     events::ProviderInfo,
     storage::{
-        nats_kv::NatsKvStore, Actor, Host, Provider, ProviderStatus, ReadStore, Store as WadmStore,
-        WadmActorInfo,
+        nats_kv::NatsKvStore, Component, Host, Provider, ProviderStatus, ReadStore,
+        Store as WadmStore, WadmComponentInfo,
     },
 };
@@ -20,13 +20,13 @@ async fn test_round_trip() {

     let lattice_id = "roundtrip";

-    let actor1 = Actor {
+    let actor1 = Component {
         id: "testactor".to_string(),
         name: "Test Actor".to_string(),
         issuer: "afakekey".to_string(),
         instances: HashMap::from([(
             "testhost".to_string(),
-            HashSet::from_iter([WadmActorInfo {
+            HashSet::from_iter([WadmComponentInfo {
                 count: 1,
                 annotations: BTreeMap::new(),
             }]),
@@ -35,13 +35,13 @@
         ..Default::default()
     };

-    let actor2 = Actor {
+    let actor2 = Component {
         id: "anotheractor".to_string(),
         name: "Another Actor".to_string(),
         issuer: "afakekey".to_string(),
         instances: HashMap::from([(
             "testhost".to_string(),
-            HashSet::from_iter([WadmActorInfo {
+            HashSet::from_iter([WadmComponentInfo {
                 count: 1,
                 annotations: BTreeMap::new(),
             }]),
@@ -51,7 +51,7 @@
     };

     let host = Host {
-        actors: HashMap::from([("testactor".to_string(), 1)]),
+        components: HashMap::from([("testactor".to_string(), 1)]),
         id: "testhost".to_string(),
         providers: HashSet::from([ProviderInfo {
             provider_id: "testprovider".to_string(),
@@ -109,7 +109,7 @@
         "Provider should be correct"
     );

-    let stored_actor: Actor = store
+    let stored_actor: Component = store
         .get(lattice_id, &actor1.id)
         .await
         .expect("Unable to fetch stored actor")
@@ -123,7 +123,7 @@
         .expect("Should be able to add a new actor");

     let all_actors = store
-        .list::<Actor>(lattice_id)
+        .list::<Component>(lattice_id)
         .await
         .expect("Should be able to get all actors");
@@ -145,12 +145,12 @@
     // Delete one of the actors and make sure the data is correct
     store
-        .delete::<Actor>(lattice_id, &actor1.id)
+        .delete::<Component>(lattice_id, &actor1.id)
         .await
         .expect("Should be able to delete an actor");

     let all_actors = store
-        .list::<Actor>(lattice_id)
+        .list::<Component>(lattice_id)
         .await
         .expect("Should be able to get all actors");
@@ -174,7 +174,7 @@
     assert!(
         store
-            .get::<Actor>(lattice_id, "doesnotexist")
+            .get::<Component>(lattice_id, "doesnotexist")
             .await
             .expect("Should be able to query store")
             .is_none(),
@@ -202,13 +202,13 @@ async fn test_multiple_lattice() {
     let lattice_id1 = "multiple_lattice";
     let lattice_id2 = "other_lattice";

-    let actor1 = Actor {
+    let actor1 = Component {
         id: "testactor".to_string(),
         name: "Test Actor".to_string(),
         issuer: "afakekey".to_string(),
         instances: HashMap::from([(
             "testhost".to_string(),
-            HashSet::from_iter([WadmActorInfo {
+            HashSet::from_iter([WadmComponentInfo {
                 count: 1,
                 annotations: BTreeMap::new(),
             }]),
@@ -217,13 +217,13 @@
         ..Default::default()
     };

-    let actor2 = Actor {
+    let actor2 = Component {
         id: "anotheractor".to_string(),
         name: "Another Actor".to_string(),
         issuer: "afakekey".to_string(),
         instances: HashMap::from([(
             "testhost".to_string(),
-            HashSet::from_iter([WadmActorInfo {
+            HashSet::from_iter([WadmComponentInfo {
                 count: 1,
                 annotations: BTreeMap::new(),
             }]),
@@ -243,20 +243,20 @@
         .expect("Should be able to store data");

     let first = store
-        .list::<Actor>(lattice_id1)
+        .list::<Component>(lattice_id1)
         .await
         .expect("Should be able to list data");
     assert_eq!(first.len(), 1, "First lattice should have exactly 1 actor");
-    let actor = first
+    let component = first
         .get(&actor1.id)
         .expect("First lattice should have the right actor");
     assert_eq!(
-        actor.name, actor1.name,
+        component.name, actor1.name,
         "Should have returned the correct actor"
     );

     let second = store
-        .list::<Actor>(lattice_id2)
+        .list::<Component>(lattice_id2)
         .await
         .expect("Should be able to list data");
     assert_eq!(
@@ -264,11 +264,11 @@
         1,
         "Second lattice should have exactly 1 actor"
     );
-    let actor = second
+    let component = second
         .get(&actor2.id)
         .expect("Second lattice should have the right actor");
     assert_eq!(
-        actor.name, actor2.name,
+        component.name, actor2.name,
         "Should have returned the correct actor"
     );
 }
@@ -279,13 +279,13 @@ async fn test_store_and_delete_many() {

     let lattice_id = "storemany";

-    let actor1 = Actor {
+    let actor1 = Component {
         id: "testactor".to_string(),
         name: "Test Actor".to_string(),
         issuer: "afakekey".to_string(),
         instances: HashMap::from([(
             "testhost".to_string(),
-            HashSet::from_iter([WadmActorInfo {
+            HashSet::from_iter([WadmComponentInfo {
                 count: 1,
                 annotations: BTreeMap::new(),
             }]),
@@ -294,13 +294,13 @@
         ..Default::default()
     };

-    let actor2 = Actor {
+    let actor2 = Component {
         id: "anotheractor".to_string(),
         name: "Another Actor".to_string(),
         issuer: "afakekey".to_string(),
         instances: HashMap::from([(
             "testhost".to_string(),
-            HashSet::from_iter([WadmActorInfo {
+            HashSet::from_iter([WadmComponentInfo {
                 count: 1,
                 annotations: BTreeMap::new(),
             }]),
@@ -321,7 +321,7 @@
         .expect("Should be able to store multiple actors");

     let all_actors = store
-        .list::<Actor>(lattice_id)
+        .list::<Component>(lattice_id)
         .await
         .expect("Should be able to get all actors");
@@ -343,13 +343,13 @@
     // Now try to delete them all
     store
-        .delete_many::<Actor>(lattice_id, [&actor1.id, &actor2.id])
+        .delete_many::<Component>(lattice_id, [&actor1.id, &actor2.id])
         .await
         .expect("Should be able to delete many");

     // Double check that the list is empty now
     let all_actors = store
-        .list::<Actor>(lattice_id)
+        .list::<Component>(lattice_id)
         .await
         .expect("Should be able to get all actors");