diff --git a/infra/default-builds/dockerfiles/test-ds-echo/src/main.rs b/infra/default-builds/dockerfiles/test-ds-echo/src/main.rs
index 61e5e164c1..0403063124 100644
--- a/infra/default-builds/dockerfiles/test-ds-echo/src/main.rs
+++ b/infra/default-builds/dockerfiles/test-ds-echo/src/main.rs
@@ -96,7 +96,9 @@ async fn echo_tcp_server(port: u16) -> Result<()> {
 	let listener = TcpListener::bind(&addr).await.context("bind failed")?;
 
 	loop {
-		let (socket, _) = listener.accept().await.context("accept failed")?;
+		let (socket, addr) = listener.accept().await.context("accept failed")?;
+		println!("connection: {addr}");
+
 		tokio::spawn(async move {
 			let mut framed = Framed::new(socket, BytesCodec::new());
 
@@ -106,6 +108,8 @@ async fn echo_tcp_server(port: u16) -> Result<()> {
 				.await
 				.expect("write failed");
 			}
+
+			println!("connection closed: {addr}");
 		});
 	}
 }
diff --git a/infra/default-builds/outputs/test-ds-echo-tag.txt b/infra/default-builds/outputs/test-ds-echo-tag.txt
index f7340d8248..7683b05564 100644
--- a/infra/default-builds/outputs/test-ds-echo-tag.txt
+++ b/infra/default-builds/outputs/test-ds-echo-tag.txt
@@ -1 +1 @@
-test-ds-echo:1723071093
\ No newline at end of file
+test-ds-echo:1723150754
\ No newline at end of file
diff --git a/infra/default-builds/outputs/test-ds-echo.tar b/infra/default-builds/outputs/test-ds-echo.tar
index e44a4d5732..da64d48a28 100644
--- a/infra/default-builds/outputs/test-ds-echo.tar
+++ b/infra/default-builds/outputs/test-ds-echo.tar
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bb862183cdb553e5ce8a2505bb11ed8facc37eb9e767af77198f930ff6d34ceb
+oid sha256:9a369e5808750b3f431b3f24eced45cee717d9f4c3fd0259b6a511e2dea85ac1
 size 10588672
diff --git a/lib/chirp-workflow/core/src/ctx/workflow.rs b/lib/chirp-workflow/core/src/ctx/workflow.rs
index e50c22ec0d..fbc133c92e 100644
--- a/lib/chirp-workflow/core/src/ctx/workflow.rs
+++ b/lib/chirp-workflow/core/src/ctx/workflow.rs
@@ -1004,41 +1004,43 @@ impl WorkflowCtx {
 		Ok(signal)
 	}
 
-	/// Checks if the given signal exists in the database.
-	pub async fn query_signal<T: Listen>(&mut self) -> GlobalResult<Option<T>> {
-		let event = self.relevant_history().nth(self.location_idx);
-
-		// Signal received before
-		let signal = if let Some(event) = event {
-			tracing::debug!(name=%self.name, id=%self.workflow_id, "replaying signal");
-
-			// Validate history is consistent
-			let Event::Signal(signal) = event else {
-				return Err(WorkflowError::HistoryDiverged(format!(
-					"expected {event} at {}, found signal",
-					self.loc(),
-				)))
-				.map_err(GlobalError::raw);
-			};
-
-			Some(T::parse(&signal.name, signal.body.clone()).map_err(GlobalError::raw)?)
-		}
-		// Listen for new message
-		else {
-			let ctx = ListenCtx::new(self);
-
-			match T::listen(&ctx).await {
-				Ok(res) => Some(res),
-				Err(err) if matches!(err, WorkflowError::NoSignalFound(_)) => None,
-				Err(err) => return Err(err).map_err(GlobalError::raw),
-			}
-		};
-
-		// Move to next event
-		self.location_idx += 1;
-
-		Ok(signal)
-	}
+	// TODO: Currently implemented wrong, if no signal is received it should still write a signal row to the
+	// database so that upon replay it again receives no signal
+	// /// Checks if the given signal exists in the database.
+	// pub async fn query_signal<T: Listen>(&mut self) -> GlobalResult<Option<T>> {
+	// 	let event = self.relevant_history().nth(self.location_idx);
+
+	// 	// Signal received before
+	// 	let signal = if let Some(event) = event {
+	// 		tracing::debug!(name=%self.name, id=%self.workflow_id, "replaying signal");
+
+	// 		// Validate history is consistent
+	// 		let Event::Signal(signal) = event else {
+	// 			return Err(WorkflowError::HistoryDiverged(format!(
+	// 				"expected {event} at {}, found signal",
+	// 				self.loc(),
+	// 			)))
+	// 			.map_err(GlobalError::raw);
+	// 		};
+
+	// 		Some(T::parse(&signal.name, signal.body.clone()).map_err(GlobalError::raw)?)
+	// 	}
+	// 	// Listen for new message
+	// 	else {
+	// 		let ctx = ListenCtx::new(self);
+
+	// 		match T::listen(&ctx).await {
+	// 			Ok(res) => Some(res),
+	// 			Err(err) if matches!(err, WorkflowError::NoSignalFound(_)) => None,
+	// 			Err(err) => return Err(err).map_err(GlobalError::raw),
+	// 		}
+	// 	};
+
+	// 	// Move to next event
+	// 	self.location_idx += 1;
+
+	// 	Ok(signal)
+	// }
 
 	pub async fn msg<M>(&mut self, tags: serde_json::Value, body: M) -> GlobalResult<()>
 	where
diff --git a/svc/pkg/cluster/src/ops/server/resolve_for_ip.rs b/svc/pkg/cluster/src/ops/server/resolve_for_ip.rs
index 4fd032b583..86fb0537fe 100644
--- a/svc/pkg/cluster/src/ops/server/resolve_for_ip.rs
+++ b/svc/pkg/cluster/src/ops/server/resolve_for_ip.rs
@@ -32,6 +32,9 @@ pub async fn cluster_server_resolve_for_ip(
 		WHERE
 			($1 OR cloud_destroy_ts IS NULL) AND
 			public_ip = ANY($2)
+		-- When more than one record is returned per IP, sort by create_ts so that the first record is the
+		-- latest server created
+		ORDER BY create_ts DESC
 		",
 		input.include_destroyed,
 		input.ips
diff --git a/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/node_exporter.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/node_exporter.sh
index 740631a3b1..c2dde4c1ac 100644
--- a/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/node_exporter.sh
+++ b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/node_exporter.sh
@@ -31,7 +31,7 @@ User=node_exporter
 Group=node_exporter
 Type=simple
 # Reduce cardinality
-ExecStart=/usr/bin/node_exporter --collector.disable-defaults --collector.cpu --collector.netdev --collector.conntrack --collector.meminfo --collector.filesystem --collector.filesystem.mount-points-exclude=^/opt/nomad/
+ExecStart=/usr/bin/node_exporter --collector.disable-defaults --collector.cpu --collector.netdev --collector.conntrack --collector.meminfo --collector.filesystem --collector.filesystem.mount-points-exclude=^/opt/nomad/ --collector.netstat --collector.sockstat --collector.tcpstat --collector.network_route --collector.arp --collector.filefd --collector.interrupts --collector.softirqs --collector.processes
 Restart=always
 RestartSec=2
 
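The TODO on query_signal above points out why the method is disabled: on the no-signal path it only advanced location_idx without persisting anything, so a later replay that does find a signal would take a different branch than the original execution. The toy program below is a minimal, self-contained sketch of the replay-deterministic behavior the TODO describes. It is not the chirp-workflow API; every name in it (HistoryEvent, ToyWorkflowCtx, the live_signal parameter) is invented for illustration. The point is only that the absent outcome must be recorded in history so a replay returns the same result.

// Toy illustration of the replay-determinism issue described in the TODO above
// query_signal; not the chirp-workflow implementation.
enum HistoryEvent {
    Signal(String),
    NoSignal,
}

struct ToyWorkflowCtx {
    history: Vec<HistoryEvent>,
    location_idx: usize,
}

impl ToyWorkflowCtx {
    fn new() -> Self {
        Self {
            history: Vec::new(),
            location_idx: 0,
        }
    }

    // Replay-safe query: replays the recorded outcome if one exists, otherwise records
    // the live outcome, including the "no signal" case, before returning it.
    fn query_signal(&mut self, live_signal: Option<String>) -> Option<String> {
        let outcome = match self.history.get(self.location_idx) {
            // Replaying: return exactly what happened last time, even if a signal has
            // arrived since the original execution.
            Some(HistoryEvent::Signal(name)) => Some(name.clone()),
            Some(HistoryEvent::NoSignal) => None,
            // First execution: persist the outcome so a replay is deterministic.
            None => {
                let event = match &live_signal {
                    Some(name) => HistoryEvent::Signal(name.clone()),
                    None => HistoryEvent::NoSignal,
                };
                self.history.push(event);
                live_signal
            }
        };

        self.location_idx += 1;
        outcome
    }
}

fn main() {
    let mut ctx = ToyWorkflowCtx::new();

    // Original execution: no signal has been sent yet.
    assert_eq!(ctx.query_signal(None), None);

    // Replay from the start of the workflow: a signal is available now, but the recorded
    // NoSignal event wins, so the replay matches the original execution.
    ctx.location_idx = 0;
    assert_eq!(ctx.query_signal(Some("my-signal".to_string())), None);

    println!("replay matched original execution");
}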