Merge pull request #127 from pipeless-ai/add_redis_event_exporter
feat: Add redis event exporter
miguelaeh committed Jan 29, 2024
2 parents 9c09485 + ce1b28c commit 537e8e9
Showing 10 changed files with 267 additions and 57 deletions.
57 changes: 55 additions & 2 deletions pipeless/Cargo.lock

Some generated files are not rendered by default.

3 changes: 2 additions & 1 deletion pipeless/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "pipeless-ai"
version = "1.6.3"
version = "1.7.0"
edition = "2021"
authors = ["Miguel A. Cabrera Minagorri"]
description = "An open-source computer vision framework to build and deploy applications in minutes"
@@ -47,6 +47,7 @@ gstreamer-rtsp = "0.21.0"
inquire = "0.6.2"
tabled = "0.15.0"
ctrlc = "3.4.2"
redis = { version = "0.24.0", features = ["aio", "tokio-comp"] }

[dependencies.uuid]
version = "1.4.1"
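The new dependency pulls in the redis crate with its `aio` and `tokio-comp` features, i.e. the crate's async API driven by the Tokio runtime the node already uses. Not part of this commit: a minimal standalone sketch of what those features provide, with a placeholder URL.

// Hypothetical check of the `aio` + `tokio-comp` features: open an async
// connection on the Tokio runtime and issue a command.
#[tokio::main]
async fn main() -> redis::RedisResult<()> {
    let client = redis::Client::open("redis://127.0.0.1/")?; // placeholder URL
    let mut con = client.get_async_connection().await?;
    let pong: String = redis::cmd("PING").query_async(&mut con).await?;
    println!("Redis replied: {}", pong);
    Ok(())
}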
22 changes: 19 additions & 3 deletions pipeless/src/cli/start.rs
@@ -1,5 +1,5 @@
use pyo3;
use std::sync::Arc;
use std::{env, sync::Arc};
use tokio::sync::RwLock;
use gstreamer as gst;
use glib;
@@ -8,7 +8,7 @@ use ctrlc;

use crate as pipeless;

pub fn start_pipeless_node(stages_dir: &str) {
pub fn start_pipeless_node(project_dir: &str, export_redis_events: bool) {
ctrlc::set_handler(|| {
println!("Exiting...");
std::process::exit(0);
@@ -24,11 +24,27 @@ pub fn start_pipeless_node(stages_dir: &str) {
// Initialize Gstreamer
gst::init().expect("Unable to initialize gstreamer");

let frame_path_executor = Arc::new(RwLock::new(pipeless::stages::path::FramePathExecutor::new(stages_dir)));
let frame_path_executor = Arc::new(RwLock::new(pipeless::stages::path::FramePathExecutor::new(project_dir)));

// Init Tokio runtime
let tokio_rt = tokio::runtime::Runtime::new().expect("Unable to create Tokio runtime");
tokio_rt.block_on(async {
// Create event exporter when enabled
let event_exporter =
if export_redis_events {
let redis_url = env::var("PIPELESS_REDIS_URL")
.expect("Please export the PIPELESS_REDIS_URL environment variable in order to export events to Redis");
let redis_channel = env::var("PIPELESS_REDIS_CHANNEL")
.expect("Please export the PIPELESS_REDIS_CHANNEL environment variable in order to export events to Redis");
pipeless::event_exporters::EventExporter::new_redis_exporter(&redis_url, &redis_channel).await
} else {
pipeless::event_exporters::EventExporter::new_none_exporter()
};
{ // Context to lock the global event exporter in order to set it
let mut e_exp = pipeless::event_exporters::EVENT_EXPORTER.lock().await;
*e_exp = event_exporter;
}

let streams_table = Arc::new(RwLock::new(pipeless::config::streams::StreamsTable::new()));
let dispatcher = pipeless::dispatcher::Dispatcher::new(streams_table.clone());
let dispatcher_sender = dispatcher.get_sender().clone();
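start.rs now reads `PIPELESS_REDIS_URL` and `PIPELESS_REDIS_CHANNEL` and installs either a Redis exporter or a no-op exporter into the global `EVENT_EXPORTER` before the dispatcher starts. The module defining `EventExporter` and `EVENT_EXPORTER` (`pipeless/src/event_exporters/mod.rs`) is among the ten changed files but is not rendered on this page. The following is only a hypothetical sketch of what such a module could look like; `once_cell` and the Tokio mutex are assumptions, and only the names called from the rendered files are taken from the diff.

// Hypothetical reconstruction of pipeless/src/event_exporters/mod.rs (not rendered above).
use once_cell::sync::Lazy;
use tokio::sync::Mutex;
use redis::AsyncCommands;

pub mod events;

pub enum EventExporter {
    None,
    Redis { connection: redis::aio::Connection, channel: String },
}

impl EventExporter {
    pub fn new_none_exporter() -> Self {
        EventExporter::None
    }

    pub async fn new_redis_exporter(redis_url: &str, channel: &str) -> Self {
        let client = redis::Client::open(redis_url)
            .expect("Unable to create the Redis client, please check the provided URL");
        let connection = client.get_async_connection().await
            .expect("Unable to connect to the Redis server");
        EventExporter::Redis { connection, channel: channel.to_string() }
    }

    // Publish a message when the Redis exporter is enabled; no-op otherwise.
    pub async fn publish(&mut self, message: &str) {
        if let EventExporter::Redis { connection, channel } = self {
            let result: redis::RedisResult<()> =
                connection.publish(channel.as_str(), message).await;
            if let Err(err) = result {
                log::warn!("Error publishing event to Redis: {}", err);
            }
        }
    }
}

// Global exporter, set once from start.rs and locked wherever events are published.
pub static EVENT_EXPORTER: Lazy<Mutex<EventExporter>> =
    Lazy::new(|| Mutex::new(EventExporter::new_none_exporter()));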
98 changes: 55 additions & 43 deletions pipeless/src/dispatcher.rs
@@ -27,7 +27,9 @@ pub struct Dispatcher {
receiver: tokio_stream::wrappers::UnboundedReceiverStream<DispatcherEvent>,
}
impl Dispatcher {
pub fn new(streams_table: Arc<RwLock<pipeless::config::streams::StreamsTable>>) -> Self {
pub fn new(
streams_table: Arc<RwLock<pipeless::config::streams::StreamsTable>>,
) -> Self {
let (sender, receiver) = tokio::sync::mpsc::unbounded_channel::<DispatcherEvent>();
Self {
sender,
@@ -36,7 +38,6 @@ impl Dispatcher {
),
streams_table
}

}

pub fn get_sender(&self) -> tokio::sync::mpsc::UnboundedSender<DispatcherEvent> {
@@ -66,7 +67,7 @@ impl Dispatcher {

pub fn start(
dispatcher: Dispatcher,
frame_path_executor_arc: Arc<RwLock<pipeless::stages::path::FramePathExecutor>>
frame_path_executor_arc: Arc<RwLock<pipeless::stages::path::FramePathExecutor>>,
) {
let running_managers: Arc<RwLock<HashMap<uuid::Uuid, pipeless::pipeline::Manager>>> = Arc::new(RwLock::new(HashMap::new()));
let frame_path_executor_arc = frame_path_executor_arc.clone();
@@ -152,6 +153,7 @@ pub fn start(
new_manager.get_pipeline_id().await
) {
error!("Error adding new stream to the streams config table: {}", err);
pipeless::event_exporters::events::export_stream_start_error_event(entry.get_id()).await;
}
let mut managers_map_guard = running_managers.write().await;
managers_map_guard.insert(new_manager.get_pipeline_id().await, new_manager);
@@ -160,6 +162,7 @@
error!("Unable to create new pipeline: {}. Rolling back streams configuration.", err.to_string());
let removed = streams_table_guard.remove(entry.get_id());
if removed.is_none() { warn!("Error rolling back table, entry not found.") };
pipeless::event_exporters::events::export_stream_start_error_event(entry.get_id()).await;
}
}
},
@@ -195,50 +198,59 @@ pub fn start(
}
}
(This hunk is rendered here as the resulting handler; the removed version contained the same restart-policy logic, without the `stream_uuid` capture, the scoped write-lock block, and the final export call.)

DispatcherEvent::PipelineFinished(pipeline_id, finish_state) => {
    let mut stream_uuid: Option<uuid::Uuid> = None;
    { // context to release the write lock
        let mut table_write_guard = streams_table.write().await;
        let stream_entry_option = table_write_guard.find_by_pipeline_id_mut(pipeline_id);
        if let Some(entry) = stream_entry_option {
            stream_uuid = Some(entry.get_id());
            // Remove the pipeline from the stream entry since it finished
            entry.unassign_pipeline();

            // Update the target state of the stream based on the restart policy
            match entry.get_restart_policy() {
                pipeless::config::streams::RestartPolicy::Never => {
                    match finish_state {
                        pipeless::pipeline::PipelineEndReason::Completed => entry.set_target_state(pipeless::config::streams::StreamEntryState::Completed),
                        pipeless::pipeline::PipelineEndReason::Error => entry.set_target_state(pipeless::config::streams::StreamEntryState::Error),
                        pipeless::pipeline::PipelineEndReason::Updated => entry.set_target_state(pipeless::config::streams::StreamEntryState::Running),
                    }
                },
                pipeless::config::streams::RestartPolicy::Always => {
                    entry.set_target_state(pipeless::config::streams::StreamEntryState::Running);
                },
                pipeless::config::streams::RestartPolicy::OnError => {
                    if finish_state == pipeless::pipeline::PipelineEndReason::Error {
                        entry.set_target_state(pipeless::config::streams::StreamEntryState::Running);
                    } else {
                        entry.set_target_state(pipeless::config::streams::StreamEntryState::Error);
                    }
                },
                pipeless::config::streams::RestartPolicy::OnEos => {
                    if finish_state == pipeless::pipeline::PipelineEndReason::Completed {
                        entry.set_target_state(pipeless::config::streams::StreamEntryState::Running);
                    } else {
                        entry.set_target_state(pipeless::config::streams::StreamEntryState::Completed);
                    }
                },
            }

            // Create new event since we have modified the streams config table
            if let Err(err) = dispatcher_sender.send(DispatcherEvent::TableChange) {
                warn!("Unable to send dispatcher event for streams table changed. Error: {}", err.to_string());
            }
        } else {
            warn!("
                Unable to unassign pipeline for stream. Stream entry not found.
                Pipeline id: {}
            ", pipeline_id);
        }
    }

    pipeless::event_exporters::events::export_stream_finished_event(
        stream_uuid.unwrap_or_default(),
        finish_state.to_string().as_str()
    ).await;
}
}
}
49 changes: 49 additions & 0 deletions pipeless/src/event_exporters/events.rs
@@ -0,0 +1,49 @@
use std::fmt;
use log::warn;

pub enum EventType {
StreamStartError,
StreamFinished,
}
impl fmt::Display for EventType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
EventType::StreamStartError => write!(f, "StreamStartError"),
EventType::StreamFinished => write!(f, "StreamFinished"),
}
}
}

/*
* Exports a stream finished event to the external event exporter when it is enabled
*/
pub async fn export_stream_finished_event(stream_uuid: uuid::Uuid, stream_end_state: &str) {
let ext_event: serde_json::Value = serde_json::json!({
"type": EventType::StreamFinished.to_string(),
"end_state": stream_end_state,
"stream_uuid": stream_uuid.to_string(),
});
let ext_event_json_str = serde_json::to_string(&ext_event);
if let Ok(json_str) = ext_event_json_str {
super::EVENT_EXPORTER.lock().await.publish(&json_str).await;
} else {
warn!("Error serializing event to JSON string, skipping external publishing");
}
}

/*
* Exports a stream start error event to the external event exporter when it is enabled
*/
pub async fn export_stream_start_error_event(stream_uuid: uuid::Uuid) {
let ext_event: serde_json::Value = serde_json::json!({
"type": EventType::StreamStartError.to_string(),
"end_state": "error",
"stream_uuid": stream_uuid.to_string(),
});
let ext_event_json_str = serde_json::to_string(&ext_event);
if let Ok(json_str) = ext_event_json_str {
super::EVENT_EXPORTER.lock().await.publish(&json_str).await;
} else {
warn!("Error serializing event to JSON string, skipping external publishing");
}
}
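Each exported event is a JSON string published on the configured Redis channel, carrying the `type`, `end_state`, and `stream_uuid` fields built above. Not part of this commit: a hypothetical consumer that subscribes to the channel and decodes those payloads; the URL, the channel name, and the `PipelessEvent` struct are assumptions for illustration.

// Hypothetical subscriber for the events published by the exporter.
use futures_util::StreamExt;
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct PipelessEvent {
    #[serde(rename = "type")]
    event_type: String,        // "StreamStartError" | "StreamFinished"
    end_state: Option<String>, // e.g. "error", or the finish state string
    stream_uuid: String,
}

#[tokio::main]
async fn main() -> redis::RedisResult<()> {
    let client = redis::Client::open("redis://127.0.0.1/")?; // placeholder URL
    let mut pubsub = client.get_async_connection().await?.into_pubsub();
    pubsub.subscribe("pipeless-events").await?; // channel name is an assumption
    let mut messages = pubsub.on_message();
    while let Some(msg) = messages.next().await {
        let payload: String = msg.get_payload()?;
        match serde_json::from_str::<PipelessEvent>(&payload) {
            Ok(event) => println!("Received event: {:?}", event),
            Err(err) => eprintln!("Unparsable event payload: {}", err),
        }
    }
    Ok(())
}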