From 90afe6fcf7dbaec73e6a007b61c5c68d530ccc2b Mon Sep 17 00:00:00 2001 From: mirkoCrobu Date: Wed, 3 Dec 2025 08:43:49 +0100 Subject: [PATCH 1/2] update example_versions --- Taskfile.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Taskfile.yml b/Taskfile.yml index 10ef00c7..e8dc25db 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -8,7 +8,7 @@ vars: GOLANGCI_LINT_VERSION: v2.4.0 GOIMPORTS_VERSION: v0.29.0 DPRINT_VERSION: 0.48.0 - EXAMPLE_VERSION: "0.5.1" + EXAMPLE_VERSION: "0.6.0" RUNNER_VERSION: "0.5.0" VERSION: # if version is not passed we hack the semver by encoding the commit as pre-release sh: echo "${VERSION:-0.0.0-$(git rev-parse --short HEAD)}" From 1d80193c6291ca6198ec198dce21834d65fc3608 Mon Sep 17 00:00:00 2001 From: mirkoCrobu Date: Wed, 3 Dec 2025 08:55:06 +0100 Subject: [PATCH 2/2] update runner version --- Taskfile.yml | 2 +- .../app_bricks/air_quality_monitoring/API.md | 192 +++ .../arduino/app_bricks/arduino_cloud/API.md | 48 + .../app_bricks/audio_classification/API.md | 97 ++ .../app_bricks/camera_code_detection/API.md | 127 ++ .../arduino/app_bricks/cloud_llm/API.md | 120 ++ .../app_bricks/dbstorage_sqlstore/API.md | 202 +++ .../app_bricks/dbstorage_tsstore/API.md | 136 ++ .../app_bricks/image_classification/API.md | 71 + .../app_bricks/keyword_spotting/API.md | 63 + .../arduino/app_bricks/mood_detector/API.md | 36 + .../app_bricks/motion_detection/API.md | 48 + .../api-docs/arduino/app_bricks/mqtt/API.md | 76 ++ .../app_bricks/object_detection/API.md | 85 ++ .../arduino/app_bricks/streamlit_ui/API.md | 38 + .../vibration_anomaly_detection/API.md | 104 ++ .../video_imageclassification/API.md | 109 ++ .../app_bricks/video_objectdetection/API.md | 102 ++ .../visual_anomaly_detection/API.md | 98 ++ .../arduino/app_bricks/wave_generator/API.md | 151 +++ .../app_bricks/weather_forecast/API.md | 121 ++ .../api-docs/arduino/app_bricks/web_ui/API.md | 123 ++ .../arduino/app_peripherals/microphone/API.md | 116 ++ .../arduino/app_peripherals/speaker/API.md | 100 ++ .../arduino/app_peripherals/usb_camera/API.md | 80 ++ .../assets/0.6.0/bricks-list.yaml | 334 +++++ .../audio_classification/brick_compose.yaml | 24 + .../dbstorage_tsstore/brick_compose.yaml | 29 + .../image_classification/brick_compose.yaml | 24 + .../keyword_spotting/brick_compose.yaml | 24 + .../motion_detection/brick_compose.yaml | 24 + .../object_detection/brick_compose.yaml | 24 + .../brick_compose.yaml | 24 + .../brick_compose.yaml | 21 + .../video_object_detection/brick_compose.yaml | 21 + .../brick_compose.yaml | 24 + .../docs/arduino/arduino_cloud/README.md | 44 + .../arduino/audio_classification/README.md | 54 + .../arduino/camera_code_detection/README.md | 45 + .../0.6.0/docs/arduino/cloud_llm/README.md | 109 ++ .../docs/arduino/dbstorage_sqlstore/README.md | 68 + .../docs/arduino/dbstorage_tsstore/README.md | 54 + .../arduino/image_classification/README.md | 43 + .../docs/arduino/keyword_spotting/README.md | 62 + .../docs/arduino/mood_detector/README.md | 38 + .../docs/arduino/motion_detection/README.md | 113 ++ .../docs/arduino/object_detection/README.md | 45 + .../0.6.0/docs/arduino/streamlit_ui/README.md | 34 + .../vibration_anomaly_detection/README.md | 121 ++ .../video_image_classification/README.md | 52 + .../arduino/video_object_detection/README.md | 64 + .../visual_anomaly_detection/README.md | 62 + .../docs/arduino/wave_generator/README.md | 119 ++ .../docs/arduino/weather_forecast/README.md | 51 + .../0.6.0/docs/arduino/web_ui/README.md | 45 + 
.../arduino/arduino_cloud/1_led_blink.py | 25 + .../2_light_with_colors_monitor.py | 21 + .../3_light_with_colors_command.py | 31 + .../1_glass_breaking_from_mic.py | 13 + .../2_glass_breaking_from_file.py | 10 + .../camera_code_detection/1_detection.py | 22 + .../camera_code_detection/2_detection_list.py | 22 + .../3_detection_with_overrides.py | 24 + .../arduino/cloud_llm/1_simple_prompt.py | 24 + .../cloud_llm/2_streaming_responses.py | 25 + .../examples/arduino/cloud_llm/3_no_memory.py | 24 + .../store_and_read_example.py | 23 + .../arduino/dbstorage_tsstore/1_write_read.py | 21 + .../dbstorage_tsstore/2_read_all_samples.py | 32 + .../image_classification_example.py | 21 + .../arduino/keyword_spotting/1_hello_world.py | 14 + .../object_detection_example.py | 25 + .../object_detection_example.py | 25 + .../arduino/wave_generator/01_basic_tone.py | 34 + .../wave_generator/02_waveform_types.py | 41 + .../wave_generator/03_frequency_sweep.py | 51 + .../wave_generator/04_envelope_control.py | 63 + .../wave_generator/05_external_speaker.py | 77 ++ .../weather_forecast_by_city_example.py | 12 + .../weather_forecast_by_coords_example.py | 11 + .../examples/arduino/web_ui/1_serve_webapp.py | 13 + .../arduino/web_ui/2_serve_webapp_and_api.py | 13 + .../arduino/web_ui/3_connect_disconnect.py | 14 + .../examples/arduino/web_ui/4_on_message.py | 13 + .../examples/arduino/web_ui/5_send_message.py | 13 + .../assets/0.6.0/models-list.yaml | 1185 +++++++++++++++++ .../app_bricks/air_quality_monitoring/API.md | 192 +++ .../arduino/app_bricks/arduino_cloud/API.md | 48 + .../app_bricks/audio_classification/API.md | 97 ++ .../app_bricks/camera_code_detection/API.md | 127 ++ .../arduino/app_bricks/cloud_llm/API.md | 120 ++ .../app_bricks/dbstorage_sqlstore/API.md | 202 +++ .../app_bricks/dbstorage_tsstore/API.md | 136 ++ .../app_bricks/image_classification/API.md | 71 + .../app_bricks/keyword_spotting/API.md | 63 + .../arduino/app_bricks/mood_detector/API.md | 36 + .../app_bricks/motion_detection/API.md | 48 + .../api-docs/arduino/app_bricks/mqtt/API.md | 76 ++ .../app_bricks/object_detection/API.md | 85 ++ .../arduino/app_bricks/streamlit_ui/API.md | 38 + .../vibration_anomaly_detection/API.md | 104 ++ .../video_imageclassification/API.md | 109 ++ .../app_bricks/video_objectdetection/API.md | 102 ++ .../visual_anomaly_detection/API.md | 98 ++ .../arduino/app_bricks/wave_generator/API.md | 151 +++ .../app_bricks/weather_forecast/API.md | 121 ++ .../api-docs/arduino/app_bricks/web_ui/API.md | 123 ++ .../arduino/app_peripherals/microphone/API.md | 116 ++ .../arduino/app_peripherals/speaker/API.md | 100 ++ .../arduino/app_peripherals/usb_camera/API.md | 80 ++ .../testdata/assets/0.6.0/bricks-list.yaml | 334 +++++ .../audio_classification/brick_compose.yaml | 24 + .../dbstorage_tsstore/brick_compose.yaml | 29 + .../image_classification/brick_compose.yaml | 24 + .../keyword_spotting/brick_compose.yaml | 24 + .../motion_detection/brick_compose.yaml | 24 + .../object_detection/brick_compose.yaml | 24 + .../brick_compose.yaml | 24 + .../brick_compose.yaml | 21 + .../video_object_detection/brick_compose.yaml | 21 + .../brick_compose.yaml | 24 + .../docs/arduino/arduino_cloud/README.md | 44 + .../arduino/audio_classification/README.md | 54 + .../arduino/camera_code_detection/README.md | 45 + .../0.6.0/docs/arduino/cloud_llm/README.md | 109 ++ .../docs/arduino/dbstorage_sqlstore/README.md | 68 + .../docs/arduino/dbstorage_tsstore/README.md | 54 + .../arduino/image_classification/README.md | 43 + 
.../docs/arduino/keyword_spotting/README.md | 62 + .../docs/arduino/mood_detector/README.md | 38 + .../docs/arduino/motion_detection/README.md | 113 ++ .../docs/arduino/object_detection/README.md | 45 + .../0.6.0/docs/arduino/streamlit_ui/README.md | 34 + .../vibration_anomaly_detection/README.md | 121 ++ .../video_image_classification/README.md | 52 + .../arduino/video_object_detection/README.md | 64 + .../visual_anomaly_detection/README.md | 62 + .../docs/arduino/wave_generator/README.md | 119 ++ .../docs/arduino/weather_forecast/README.md | 51 + .../0.6.0/docs/arduino/web_ui/README.md | 45 + .../arduino/arduino_cloud/1_led_blink.py | 25 + .../2_light_with_colors_monitor.py | 21 + .../3_light_with_colors_command.py | 31 + .../1_glass_breaking_from_mic.py | 13 + .../2_glass_breaking_from_file.py | 10 + .../camera_code_detection/1_detection.py | 22 + .../camera_code_detection/2_detection_list.py | 22 + .../3_detection_with_overrides.py | 24 + .../arduino/cloud_llm/1_simple_prompt.py | 24 + .../cloud_llm/2_streaming_responses.py | 25 + .../examples/arduino/cloud_llm/3_no_memory.py | 24 + .../store_and_read_example.py | 23 + .../arduino/dbstorage_tsstore/1_write_read.py | 21 + .../dbstorage_tsstore/2_read_all_samples.py | 32 + .../image_classification_example.py | 21 + .../arduino/keyword_spotting/1_hello_world.py | 14 + .../object_detection_example.py | 25 + .../object_detection_example.py | 25 + .../arduino/wave_generator/01_basic_tone.py | 34 + .../wave_generator/02_waveform_types.py | 41 + .../wave_generator/03_frequency_sweep.py | 51 + .../wave_generator/04_envelope_control.py | 63 + .../wave_generator/05_external_speaker.py | 77 ++ .../weather_forecast_by_city_example.py | 12 + .../weather_forecast_by_coords_example.py | 11 + .../examples/arduino/web_ui/1_serve_webapp.py | 13 + .../arduino/web_ui/2_serve_webapp_and_api.py | 13 + .../arduino/web_ui/3_connect_disconnect.py | 14 + .../examples/arduino/web_ui/4_on_message.py | 13 + .../examples/arduino/web_ui/5_send_message.py | 13 + .../testdata/assets/0.6.0/models-list.yaml | 1185 +++++++++++++++++ 171 files changed, 12363 insertions(+), 1 deletion(-) create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/air_quality_monitoring/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/arduino_cloud/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/audio_classification/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/camera_code_detection/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/cloud_llm/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/dbstorage_sqlstore/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/dbstorage_tsstore/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/image_classification/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/keyword_spotting/API.md create mode 100644 
debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/mood_detector/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/motion_detection/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/mqtt/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/object_detection/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/streamlit_ui/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/vibration_anomaly_detection/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/video_imageclassification/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/video_objectdetection/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/visual_anomaly_detection/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/wave_generator/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/weather_forecast/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/web_ui/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_peripherals/microphone/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_peripherals/speaker/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_peripherals/usb_camera/API.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/bricks-list.yaml create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/audio_classification/brick_compose.yaml create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/dbstorage_tsstore/brick_compose.yaml create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/image_classification/brick_compose.yaml create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/keyword_spotting/brick_compose.yaml create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/motion_detection/brick_compose.yaml create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/object_detection/brick_compose.yaml create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/vibration_anomaly_detection/brick_compose.yaml create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/video_image_classification/brick_compose.yaml create 
mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/video_object_detection/brick_compose.yaml create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/visual_anomaly_detection/brick_compose.yaml create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/arduino_cloud/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/audio_classification/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/camera_code_detection/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/cloud_llm/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/dbstorage_sqlstore/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/dbstorage_tsstore/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/image_classification/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/keyword_spotting/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/mood_detector/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/motion_detection/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/object_detection/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/streamlit_ui/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/vibration_anomaly_detection/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/video_image_classification/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/video_object_detection/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/visual_anomaly_detection/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/wave_generator/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/weather_forecast/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/web_ui/README.md create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/arduino_cloud/1_led_blink.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/arduino_cloud/2_light_with_colors_monitor.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/arduino_cloud/3_light_with_colors_command.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/audio_classification/1_glass_breaking_from_mic.py 
create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/audio_classification/2_glass_breaking_from_file.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/camera_code_detection/1_detection.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/camera_code_detection/2_detection_list.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/camera_code_detection/3_detection_with_overrides.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/cloud_llm/1_simple_prompt.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/cloud_llm/2_streaming_responses.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/cloud_llm/3_no_memory.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/dbstorage_sqlstore/store_and_read_example.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/dbstorage_tsstore/1_write_read.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/dbstorage_tsstore/2_read_all_samples.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/image_classification/image_classification_example.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/keyword_spotting/1_hello_world.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/object_detection/object_detection_example.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/visual_anomaly_detection/object_detection_example.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/wave_generator/01_basic_tone.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/wave_generator/02_waveform_types.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/wave_generator/03_frequency_sweep.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/wave_generator/04_envelope_control.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/wave_generator/05_external_speaker.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/weather_forecast/weather_forecast_by_city_example.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/weather_forecast/weather_forecast_by_coords_example.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/web_ui/1_serve_webapp.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/web_ui/2_serve_webapp_and_api.py create 
mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/web_ui/3_connect_disconnect.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/web_ui/4_on_message.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/examples/arduino/web_ui/5_send_message.py create mode 100644 debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/models-list.yaml create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/air_quality_monitoring/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/arduino_cloud/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/audio_classification/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/camera_code_detection/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/cloud_llm/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/dbstorage_sqlstore/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/dbstorage_tsstore/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/image_classification/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/keyword_spotting/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/mood_detector/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/motion_detection/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/mqtt/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/object_detection/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/streamlit_ui/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/vibration_anomaly_detection/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/video_imageclassification/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/video_objectdetection/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/visual_anomaly_detection/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/wave_generator/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/weather_forecast/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_bricks/web_ui/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_peripherals/microphone/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_peripherals/speaker/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/api-docs/arduino/app_peripherals/usb_camera/API.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/bricks-list.yaml create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/compose/arduino/audio_classification/brick_compose.yaml create mode 100644 
internal/e2e/daemon/testdata/assets/0.6.0/compose/arduino/dbstorage_tsstore/brick_compose.yaml create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/compose/arduino/image_classification/brick_compose.yaml create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/compose/arduino/keyword_spotting/brick_compose.yaml create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/compose/arduino/motion_detection/brick_compose.yaml create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/compose/arduino/object_detection/brick_compose.yaml create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/compose/arduino/vibration_anomaly_detection/brick_compose.yaml create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/compose/arduino/video_image_classification/brick_compose.yaml create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/compose/arduino/video_object_detection/brick_compose.yaml create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/compose/arduino/visual_anomaly_detection/brick_compose.yaml create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/arduino_cloud/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/audio_classification/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/camera_code_detection/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/cloud_llm/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/dbstorage_sqlstore/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/dbstorage_tsstore/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/image_classification/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/keyword_spotting/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/mood_detector/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/motion_detection/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/object_detection/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/streamlit_ui/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/vibration_anomaly_detection/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/video_image_classification/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/video_object_detection/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/visual_anomaly_detection/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/wave_generator/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/weather_forecast/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/docs/arduino/web_ui/README.md create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/arduino_cloud/1_led_blink.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/arduino_cloud/2_light_with_colors_monitor.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/arduino_cloud/3_light_with_colors_command.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/audio_classification/1_glass_breaking_from_mic.py create mode 100644 
internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/audio_classification/2_glass_breaking_from_file.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/camera_code_detection/1_detection.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/camera_code_detection/2_detection_list.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/camera_code_detection/3_detection_with_overrides.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/cloud_llm/1_simple_prompt.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/cloud_llm/2_streaming_responses.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/cloud_llm/3_no_memory.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/dbstorage_sqlstore/store_and_read_example.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/dbstorage_tsstore/1_write_read.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/dbstorage_tsstore/2_read_all_samples.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/image_classification/image_classification_example.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/keyword_spotting/1_hello_world.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/object_detection/object_detection_example.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/visual_anomaly_detection/object_detection_example.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/wave_generator/01_basic_tone.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/wave_generator/02_waveform_types.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/wave_generator/03_frequency_sweep.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/wave_generator/04_envelope_control.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/wave_generator/05_external_speaker.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/weather_forecast/weather_forecast_by_city_example.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/weather_forecast/weather_forecast_by_coords_example.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/web_ui/1_serve_webapp.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/web_ui/2_serve_webapp_and_api.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/web_ui/3_connect_disconnect.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/web_ui/4_on_message.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/examples/arduino/web_ui/5_send_message.py create mode 100644 internal/e2e/daemon/testdata/assets/0.6.0/models-list.yaml diff --git a/Taskfile.yml b/Taskfile.yml index e8dc25db..5d7725b1 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -9,7 +9,7 @@ vars: GOIMPORTS_VERSION: v0.29.0 DPRINT_VERSION: 0.48.0 EXAMPLE_VERSION: "0.6.0" - RUNNER_VERSION: "0.5.0" + RUNNER_VERSION: "0.6.0" VERSION: # if version is not passed we hack the semver by encoding the commit as pre-release sh: echo "${VERSION:-0.0.0-$(git rev-parse --short HEAD)}" diff --git 
a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/air_quality_monitoring/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/air_quality_monitoring/API.md new file mode 100644 index 00000000..2264acdc --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/air_quality_monitoring/API.md @@ -0,0 +1,192 @@ +# air_quality_monitoring API Reference + +## Index + +- Class `AQILevel` +- Class `AirQualityData` +- Class `AirQualityMonitoring` +- Class `AirQualityLookupError` + +--- + +## `AQILevel` dataclass + +```python +class AQILevel() +``` + +Data class to represent AQI levels. + +### Attributes + +- **min_value** (*int*): Minimum AQI value for the level. +- **max_value** (*int*): Maximum AQI value for the level. +- **description** (*str*): Description of the AQI level. +- **color** (*str*): Color associated with the AQI level in hex. + + +--- + +## `AirQualityData` dataclass + +```python +class AirQualityData() +``` + +Data class to represent air quality data. + +### Attributes + +- **city** (*str*): Name of the city. +- **lat** (*float*): Latitude of the city. +- **lon** (*float*): Longitude of the city. +- **url** (*str*): URL for more information about the air quality data. +- **last_update** (*str*): Last update timestamp of the air quality data. +- **aqi** (*int*): Air Quality Index value. +- **dominantpol** (*str*): Dominant pollutant in the air. +- **iaqi** (*dict*): Individual AQI values for various pollutants. + +### Methods + +#### `pandas_dict()` + +Return the data as a dictionary suitable for pandas DataFrame. + + +--- + +## `AirQualityMonitoring` class + +```python +class AirQualityMonitoring(token: str) +``` + +Class to get air quality data from AQICN API. + +### Parameters + +- **token** (*str*): API token for AQICN service. + +### Raises + +- **ValueError**: If the token is not provided. + +### Methods + +#### `get_air_quality_by_city(city: str)` + +Get air quality data by city name. + +##### Parameters + +- **city** (*str*): Name of the city. + +##### Returns + +- (*AirQualityData*): Air quality assembled data. + +##### Raises + +- **AirQualityLookupError**: If the API request fails. + +#### `get_air_quality_by_coords(latitude: float, longitude: float)` + +Get air quality data by coordinates. + +##### Parameters + +- **latitude** (*float*): Latitude. +- **longitude** (*float*): Longitude. + +##### Returns + +- (*AirQualityData*): Air quality assembled data. + +##### Raises + +- **AirQualityLookupError**: If the API request fails. + +#### `get_air_quality_by_ip()` + +Get air quality data by IP address. + +##### Returns + +- (*AirQualityData*): Air quality assembled data. + +##### Raises + +- **AirQualityLookupError**: If the API request fails. + +#### `process(item: dict)` + +Process the input dictionary to get air quality data. + +##### Parameters + +- **item** (*dict*): Input dictionary containing either 'city', 'latitude' and 'longitude', or 'ip'. + +##### Returns + +- (*dict*): Air quality data. + +##### Raises + +- **ValueError**: If the input dictionary is not valid. + +#### `assemble_data(data: dict)` + +Create a payload for the air quality data. + +##### Parameters + +- **data** (*dict*): Air quality data. + +##### Returns + +- (*dict*): Payload with relevant air quality information. + +#### `map_aqi_level(aqi: int)` + +Returns AQILevel class matching provided AQI. 
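A minimal usage sketch for the class documented above. The import path (`arduino.app_bricks.air_quality_monitoring`) and the placeholder token are assumptions inferred from the asset layout, not stated in the API reference; the constructor and method signatures come from the reference itself.

```python
from arduino.app_bricks.air_quality_monitoring import (  # assumed import path
    AirQualityLookupError,
    AirQualityMonitoring,
)

monitor = AirQualityMonitoring(token="YOUR_AQICN_TOKEN")  # placeholder token

try:
    data = monitor.get_air_quality_by_city("Turin")
    print(f"{data.city}: AQI {data.aqi} (dominant pollutant: {data.dominantpol})")

    level = monitor.map_aqi_level(data.aqi)
    print(f"Level: {level.description} ({level.color})")
except AirQualityLookupError as err:
    # Raised when the AQICN request fails (invalid key, over quota, ...).
    print(f"Air quality lookup failed: {err}")
```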
+ + +--- + +## `AirQualityLookupError` class + +```python +class AirQualityLookupError(message: str, status: str) +``` + +Custom exception for air quality lookup errors. + +### Parameters + +- **message** (*str*): Error message. +- **status** (*str*): Status of the error, defaults to None. + +### Methods + +#### `from_api_response(cls, data: dict)` + +AirQualityLookupError error handling based on response provided by AQI API. + +Documented errors: +- {"status": "error", "data": "Invalid key"} +- {"status": "error", "data": "Unknown station"} +- {"status": "error", "data": "Over quota"} +- {"status": "error", "data": "Invalid query"} +- {"status": "error", "data": "Too Many Requests"} +- {"status": "error", "data": "IP not allowed"} +- {"status": "error", "data": "Unknown error"} +- {"status": "error", "data": {"message": "..."}} + +##### Parameters + +- **data** (*dict*): Response data from the AQI API. + +##### Returns + +- (*AirQualityLookupError*): An instance of AirQualityLookupError with the error message and status. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/arduino_cloud/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/arduino_cloud/API.md new file mode 100644 index 00000000..8023f784 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/arduino_cloud/API.md @@ -0,0 +1,48 @@ +# arduino_cloud API Reference + +## Index + +- Class `ArduinoCloud` + +--- + +## `ArduinoCloud` class + +```python +class ArduinoCloud(device_id: str, secret: str, server: str, port: int) +``` + +Arduino Cloud client for managing devices and data. + +### Parameters + +- **device_id** (*str*): The unique identifier for the device. +If omitted, uses ARDUINO_DEVICE_ID environment variable. +- **secret** (*str*): The password for Arduino Cloud authentication. +If omitted, uses ARDUINO_SECRET environment variable. +- **server** (*str*) (optional): The server address for Arduino Cloud (default: "iot.arduino.cc"). +- **port** (*int*) (optional): The port to connect to the Arduino Cloud server (default: 8884). + +### Raises + +- **ValueError**: If either device_id or secret is not provided explicitly or via environment variable. + +### Methods + +#### `start()` + +Start the Arduino IoT Cloud client. + +#### `loop()` + +Run a single iteration of the Arduino IoT Cloud client loop, processing commands and updating state. + +#### `register(aiotobj: str | Any)` + +Register a variable or object with the Arduino Cloud client. + +##### Parameters + +- **aiotobj** (*str | Any*): The variable name or object from which to derive the variable name to register. +- ****kwargs** (*Any*): Additional keyword arguments for registration. 
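A minimal lifecycle sketch (register → start → loop) for the `ArduinoCloud` class above. The import path and the `"led"` variable name are illustrative assumptions; as documented, `device_id` and `secret` may instead come from the `ARDUINO_DEVICE_ID` / `ARDUINO_SECRET` environment variables.

```python
import time

from arduino.app_bricks.arduino_cloud import ArduinoCloud  # assumed import path

# device_id / secret fall back to ARDUINO_DEVICE_ID / ARDUINO_SECRET if omitted.
cloud = ArduinoCloud(device_id="YOUR_DEVICE_ID", secret="YOUR_SECRET")

cloud.register("led")  # register a cloud variable by name
cloud.start()

while True:
    cloud.loop()       # process incoming commands and update state
    time.sleep(1)
```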
+ diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/audio_classification/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/audio_classification/API.md new file mode 100644 index 00000000..7573beee --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/audio_classification/API.md @@ -0,0 +1,97 @@ +# audio_classification API Reference + +## Index + +- Class `AudioClassificationException` +- Class `AudioClassification` + +--- + +## `AudioClassificationException` class + +```python +class AudioClassificationException() +``` + +Custom exception for AudioClassification errors. + + +--- + +## `AudioClassification` class + +```python +class AudioClassification(mic: Microphone, confidence: float) +``` + +AudioClassification module for detecting sounds and classifying audio using a specified model. + +### Parameters + +- **mic** (*Microphone*) (optional): Microphone instance used as the audio source. If None, a default Microphone will be initialized. +- **confidence** (*float*) (optional): Minimum confidence threshold (0.0–1.0) required +for a detection to be considered valid. Defaults to 0.8 (80%). + +### Raises + +- **ValueError**: If the model information cannot be retrieved, or if model parameters are missing or incomplete. + +### Methods + +#### `on_detect(class_name: str, callback: Callable[[], None])` + +Register a callback function to be invoked when a specific class is detected. + +##### Parameters + +- **class_name** (*str*): The class to check for in the classification results. +Must match one of the classes defined in the loaded model. +- **callback** (*callable*): Function to execute when the class is detected. +The callback must take no arguments and return None. + +##### Raises + +- **TypeError**: If `callback` is not callable. +- **ValueError**: If `callback` accepts any argument. + +#### `start()` + +Start real-time audio classification. + +Begins capturing audio from the configured microphone and +continuously classifies the incoming audio stream until stopped. + +#### `stop()` + +Stop real-time audio classification. + +Terminates audio capture and releases any associated resources. + +#### `classify_from_file(audio_path: str, confidence: float)` + +Classify audio content from a WAV file. + +Supported sample widths: + - 8-bit unsigned + - 16-bit signed + - 24-bit signed + - 32-bit signed + +##### Parameters + +- **audio_path** (*str*): Path to the `.wav` audio file to classify. +- **confidence** (*float*) (optional): Minimum confidence threshold (0.0–1.0) required +for a detection to be considered valid. Defaults to 0.8 (80%). + +##### Returns + +-: dict | None: A dictionary with keys: +- ``class_name`` (str): The detected sound class. +- ``confidence`` (float): Confidence score of the detection. +Returns None if no valid classification is found. + +##### Raises + +- **AudioClassificationException**: If the file cannot be found, read, or processed. +- **ValueError**: If the file uses an unsupported sample width. 
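A minimal sketch combining the two documented entry points of `AudioClassification`: one-shot classification of a WAV file and a real-time detection callback. The import path, the WAV file name, and the `glass_breaking` label are assumptions; the label must match a class defined in the loaded model.

```python
from arduino.app_bricks.audio_classification import (  # assumed import path
    AudioClassification,
    AudioClassificationException,
)

classifier = AudioClassification(confidence=0.8)

# One-shot classification of a local WAV file.
try:
    result = classifier.classify_from_file("glass_break.wav")
    if result is not None:
        print(f"{result['class_name']} ({result['confidence']:.2f})")
except AudioClassificationException as err:
    print(f"Could not classify file: {err}")

# Real-time detection: the callback takes no arguments and returns None.
def on_glass_breaking() -> None:
    print("Glass breaking detected!")

classifier.on_detect("glass_breaking", on_glass_breaking)
classifier.start()  # stream classification from the default Microphone
# ... call classifier.stop() to end capture and release resources.
```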
+ diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/camera_code_detection/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/camera_code_detection/API.md new file mode 100644 index 00000000..b1ebd3de --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/camera_code_detection/API.md @@ -0,0 +1,127 @@ +# camera_code_detection API Reference + +## Index + +- Class `CameraCodeDetection` +- Class `Detection` +- Function `utils.draw_bounding_box` + +--- + +## `CameraCodeDetection` class + +```python +class CameraCodeDetection(camera: USBCamera, detect_qr: bool, detect_barcode: bool) +``` + +Scans a camera video feed for QR codes and/or barcodes. + +### Methods + +#### `start()` + +Start the detector and begin scanning for codes. + +#### `stop()` + +Stop the detector and release resources. + +#### `on_detect(callback: Callable[[Image, list[Detection]], None] | Callable[[Image, Detection], None] | None)` + +Registers or removes a callback to be triggered on code detection. + +When a QR code or barcode is detected in the camera feed, the provided callback function will be invoked. +The callback function should accept the Image frame and a list[Detection] or Detection objects. If the former +is used, it will receive all detections at once. If the latter is used, it will be called once for each +detection. If None is provided, the callback will be removed. + +##### Parameters + +- **callback** (*Callable[[Image, list[Detection]], None]*): A callback that will be called every time a detection +is made with all the detections. +- **callback** (*Callable[[Image, Detection], None]*): A callback that will be called every time a detection is +made with a single detection. +- **callback** (*None*): To unregister the current callback, if any. + +##### Examples + +```python +def on_code_detected(frame: Image, detection: Detection): + print(f"Detected {detection.type} with content: {detection.content}") + # Here you can add your code to process the detected code, + # e.g., draw a bounding box, save it to a database or log it. + +detector.on_detect(on_code_detected) +``` +#### `on_frame(callback: Callable[[Image], None] | None)` + +Registers a callback function to be called when a new camera frame is captured. + +The callback function should accept the Image frame. +If None is provided, the callback is removed. + +##### Parameters + +- **callback** (*Callable[[Image], None]*): A callback that will be called with each captured frame. +- **callback** (*None*): Signals to remove the current callback, if any. + +#### `on_error(callback: Callable[[Exception], None] | None)` + +Registers a callback function to be called when an error occurs in the detector. + +The callback function should accept the exception as an argument. +If None is provided, the callback is removed. + +##### Parameters + +- **callback** (*Callable*): A callback that will be called with the exception raised in the detector. +- **callback** (*None*): Signals to remove the current callback, if any. + +#### `loop()` + +Main loop to capture frames and detect codes. + + +--- + +## `Detection` class + +```python +class Detection(content: str, type: str, coords: np.ndarray) +``` + +This class represents a single QR code or barcode detection result from a video frame. 
+ +This data structure holds the decoded content, the type of code, and its location +in the image as determined by the detection algorithm. + +### Attributes + +- **content** (*str*): The decoded string extracted from the QR code or barcode. +- **type** (*str*): The type of code detected, typically "QRCODE" or "BARCODE". +- **coords** (*np.ndarray*): A NumPy array of shape (4, 2) representing the four corner +points (x, y) of the detected code region in the image. + + +--- + +## `utils.draw_bounding_box` function + +```python +def draw_bounding_box(frame: Image, detection: Detection) +``` + +Draws a bounding box and label on an image for a detected QR code or barcode. + +This function overlays a green polygon around the detected code area and +adds a text label above (or below) the bounding box with the code type and content. + +### Parameters + +- **frame** (*Image*): The PIL Image object to draw on. This image will be modified in-place. +- **detection** (*Detection*): The detection result containing the code's content, type, and corner coordinates. + +### Returns + +- (*Image*): The annotated image with a bounding box and label drawn. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/cloud_llm/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/cloud_llm/API.md new file mode 100644 index 00000000..d2dd5903 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/cloud_llm/API.md @@ -0,0 +1,120 @@ +# cloud_llm API Reference + +## Index + +- Class `CloudLLM` +- Class `CloudModel` + +--- + +## `CloudLLM` class + +```python +class CloudLLM(api_key: str, model: Union[str, CloudModel], system_prompt: str, temperature: Optional[float], timeout: int) +``` + +A Brick for interacting with cloud-based Large Language Models (LLMs). + +This class wraps LangChain functionality to provide a simplified, unified interface +for chatting with models like Claude, GPT, and Gemini. It supports both synchronous +'one-shot' responses and streaming output, with optional conversational memory. + +### Parameters + +- **api_key** (*str*): The API access key for the target LLM service. Defaults to the +'API_KEY' environment variable. +- **model** (*Union[str, CloudModel]*): The model identifier. Accepts a `CloudModel` +enum member (e.g., `CloudModel.OPENAI_GPT`) or its corresponding raw string +value (e.g., `'gpt-4o-mini'`). Defaults to `CloudModel.ANTHROPIC_CLAUDE`. +- **system_prompt** (*str*): A system-level instruction that defines the AI's persona +and constraints (e.g., "You are a helpful assistant"). Defaults to empty. +- **temperature** (*Optional[float]*): The sampling temperature between 0.0 and 1.0. +Higher values make output more random/creative; lower values make it more +deterministic. Defaults to 0.7. +- **timeout** (*int*): The maximum duration in seconds to wait for a response before +timing out. Defaults to 30. + +### Raises + +- **ValueError**: If `api_key` is not provided (empty string). + +### Methods + +#### `with_memory(max_messages: int)` + +Enables conversational memory for this instance. + +Configures the Brick to retain a window of previous messages, allowing the +AI to maintain context across multiple interactions. + +##### Parameters + +- **max_messages** (*int*): The maximum number of messages (user + AI) to keep +in history. Older messages are discarded. Set to 0 to disable memory. 
+Defaults to 10. + +##### Returns + +- (*CloudLLM*): The current instance, allowing for method chaining. + +#### `chat(message: str)` + +Sends a message to the AI and blocks until the complete response is received. + +This method automatically manages conversation history if memory is enabled. + +##### Parameters + +- **message** (*str*): The input text prompt from the user. + +##### Returns + +- (*str*): The complete text response generated by the AI. + +##### Raises + +- **RuntimeError**: If the internal chain is not initialized or if the API request fails. + +#### `chat_stream(message: str)` + +Sends a message to the AI and yields response tokens as they are generated. + +This allows for processing or displaying the response in real-time (streaming). +The generation can be interrupted by calling `stop_stream()`. + +##### Parameters + +- **message** (*str*): The input text prompt from the user. + +##### Returns + +- (*str*): Chunks of text (tokens) from the AI response. + +##### Raises + +- **RuntimeError**: If the internal chain is not initialized or if the API request fails. +- **AlreadyGenerating**: If a streaming session is already active. + +#### `stop_stream()` + +Signals the active streaming generation to stop. + +This sets an internal flag that causes the `chat_stream` iterator to break +early. It has no effect if no stream is currently running. + +#### `clear_memory()` + +Clears the conversational memory history. + +Resets the stored context. This is useful for starting a new conversation +topic without previous context interfering. Only applies if memory is enabled. + + +--- + +## `CloudModel` class + +```python +class CloudModel() +``` + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/dbstorage_sqlstore/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/dbstorage_sqlstore/API.md new file mode 100644 index 00000000..7ba1dc29 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/dbstorage_sqlstore/API.md @@ -0,0 +1,202 @@ +# dbstorage_sqlstore API Reference + +## Index + +- Class `DBStorageSQLStoreError` +- Class `SQLStore` + +--- + +## `DBStorageSQLStoreError` class + +```python +class DBStorageSQLStoreError() +``` + +Exception raised for SQLite database operations errors. + +This exception is raised when database operations fail, such as connection +errors, SQL syntax errors, constraint violations, or table access issues. + + +--- + +## `SQLStore` class + +```python +class SQLStore(database_name: str) +``` + +SQLStore client for storing and retrieving data in a SQLite database. + +This class provides methods to create tables, insert, read, update, and delete records, +and execute raw SQL commands. It uses SQLite as the underlying database engine and +supports named access to columns using sqlite3.Row as the row factory. +It is designed to be thread-safe and can be used in multi-threaded applications. + +### Parameters + +- **database_name** (*str*) (optional): Name of the SQLite database file. +Defaults to "arduino.db". + +### Methods + +#### `start()` + +Open the SQLite database connection. + +This method establishes the database connection and should be called before +performing any database operations. The connection is thread-safe and enables +named column access using sqlite3.Row factory. 
+ +##### Raises + +- **DBStorageSQLStoreError**: If there is an error starting the SQLite connection. + +#### `stop()` + +Close the SQLite database connection. + +##### Raises + +- **DBStorageSQLStoreError**: If there is an error stopping the SQLite connection. + +#### `create_table(table: str, columns: dict[str, str])` + +Create a table in the SQLite database if it does not already exist. + +##### Parameters + +- **table** (*str*): Name of the table to create. +- **columns** (*dict[str, str]*): Dictionary mapping column names to SQL types. +Common types: "INTEGER", "REAL", "TEXT", "BLOB", "INTEGER PRIMARY KEY". + +##### Raises + +- **DBStorageSQLStoreError**: If there is an error creating the table. + +#### `drop_table(table: str)` + +Remove a table and all its data from the database. This permanently deletes the table and all its data. + +##### Parameters + +- **table** (*str*): Name of the table to drop. + +##### Raises + +- **DBStorageSQLStoreError**: If there is an error dropping the table. + +#### `store(table: str, data: dict[str, Any], create_table: bool)` + +Store data in the specified table with automatic table creation. By default, it creates the table if it doesn't exist. + +##### Parameters + +- **table** (*str*): Name of the table to store the record in. +- **data** (*dict[str, Any]*): Dictionary of column names and their values. +Supported types: int (INTEGER), float (REAL), str (TEXT), bytes (BLOB). +- **create_table** (*bool*) (optional): If True, create the table if it doesn't exist +using automatic type inference. Defaults to True. + +##### Raises + +- **DBStorageSQLStoreError**: If there is an error inserting data or creating the table. + +#### `read(table: str, columns: Optional[list], condition: Optional[str], order_by: Optional[str], limit: Optional[int])` + +Get data from the specified table with flexible filtering options. If the table does not exist, it returns an empty list. + +##### Parameters + +- **table** (*str*): Name of the table to read from. +- **columns** (*Optional[list]*) (optional): List of column names to select. +If None, selects all columns. Defaults to None. +- **condition** (*Optional[str]*) (optional): WHERE clause for filtering results +(e.g., "age > 18"). Defaults to None. +- **order_by** (*Optional[str]*) (optional): ORDER BY clause for sorting results +(e.g., "name ASC"). Defaults to None. +- **limit** (*Optional[int]*) (optional): Maximum number of rows to return. +Use -1 for no limit. Defaults to -1. + +##### Returns + +- (*list[dict[str, Any]]*): List of dictionaries representing the rows, where each +dictionary maps column names to their values. Empty list if table doesn't exist. + +##### Raises + +- **DBStorageSQLStoreError**: If there is an error reading data from the table. + +#### `update(table: str, data: dict[str, Any], condition: Optional[str])` + +Update data or records in the specified table. + +##### Parameters + +- **table** (*str*): Name of the table to update. +- **data** (*dict[str, Any]*): Dictionary of column names and their new values. +- **condition** (*Optional[str]*) (optional): WHERE clause for filtering which records +to update (e.g., "id = 1"). If empty, updates all records. Defaults to "". + +##### Raises + +- **DBStorageSQLStoreError**: If there is error updating data in the table. + +#### `delete(table: str, condition: Optional[str])` + +Delete data from the specified table. If no condition is provided, this will delete ALL records from the table. 
+ +##### Parameters + +- **table** (*str*): Name of the table to delete from. +- **condition** (*Optional[str]*) (optional): WHERE clause for filtering which records +to delete (e.g., "age < 18"). If empty, deletes all records. Defaults to "". + +##### Raises + +- **DBStorageSQLStoreError**: If there is an error deleting data from the table. + +#### `execute_sql(sql: str, args: Optional[tuple])` + +Execute a raw SQL command. + +##### Parameters + +- **sql** (*str*): The SQL command to execute. +- **args** (*Optional[tuple]*): Optional parameters for the SQL command. + +##### Returns + +-: list[dict[str, Any]] | None: A list of dictionaries representing the rows returned by the SQL command, +or None if the command does not return any rows. + +##### Raises + +- **DBStorageSQLStoreError**: If there is an error executing the SQL command. + +#### `create_or_replace_table(table: str, columns: dict[str, str], force_drop_table: bool)` + +Create or update a table in the SQLite database to match the provided schema. + +All schema changes (adding/removing/changing columns) are performed within a single transaction. +If any error occurs during the operation due to SQLite limitations or constraints, the transaction is rolled back +, and the table remains unchanged. If force_drop_table is True, after rollback, the table is dropped and recreated. + +If the table exists, it will add missing columns and remove extra columns unless they are not-simple columns. +(e.g., primary key, unique, indexed, or used in constraints/triggers/views). + +If a column's type has changed or if a column is not simple, it will raise an error unless +force_drop_table is True, in which case the table is dropped and recreated with the new schema, losing all +existing data in the table. + +##### Parameters + +- **table** (*str*): Name of the table to create or update. +- **columns** (*dict[str, str]*): Dictionary of column names and their SQL types. +- **force_drop_table** (*bool*): If True, always drop and recreate the table if schema change fails. + +##### Raises + +- **DBStorageSQLStoreError**: If there is an error creating or updating the table. All changes are rolled back. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/dbstorage_tsstore/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/dbstorage_tsstore/API.md new file mode 100644 index 00000000..de69bac8 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/dbstorage_tsstore/API.md @@ -0,0 +1,136 @@ +# dbstorage_tsstore API Reference + +## Index + +- Class `TimeSeriesStoreError` +- Class `TimeSeriesStore` + +--- + +## `TimeSeriesStoreError` class + +```python +class TimeSeriesStoreError() +``` + +Custom exception raised for TimeSeriesStore database operation errors. + + +--- + +## `TimeSeriesStore` class + +```python +class TimeSeriesStore(host: str, port: int, retention_days: int) +``` + +Time series database handler for storing and retrieving data using InfluxDB. + +This class extends the base InfluxDB handler and provides methods for writing samples to the database. +It allows writing and reading individual measurements with their values and timestamps. + +### Parameters + +- **host** (*str*) (optional): The hostname of the InfluxDB server. +Defaults to "dbstorage-influx". +- **port** (*int*) (optional): The port number of the InfluxDB server. +Defaults to 8086. 
+- **retention_days** (*int*) (optional): The number of days to retain data in the +InfluxDB bucket. Defaults to 7. + +### Methods + +#### `write_sample(measure: str, value: Any, ts: int, measurement_name: str)` + +Write a time series sample to the InfluxDB database. + +Stores a single data point with the specified measurement field, value, and timestamp. +If no timestamp is provided, the current time is used automatically. + +##### Parameters + +- **measure** (*str*): The name of the measurement field (e.g., "temperature", "humidity"). +This acts as the column name for the data point. +- **value** (*Any*): The numeric or string value to store. Supports int, float, str, and bool types. +- **ts** (*int*) (optional): The timestamp in milliseconds since epoch. +Defaults to 0 (current time). +- **measurement_name** (*str*) (optional): The measurement container name that groups +related fields together. Defaults to "arduino". + +##### Raises + +- **TimeSeriesStoreError**: If there is an error writing the sample to the InfluxDB database, +such as connection failures or invalid data types. + +#### `read_last_sample(measure: str, measurement_name: str, start_from: str)` + +Read the last sample of a specific measurement from the InfluxDB database. + +Retrieves the latest data point for the specified measurement field within +the given time range. + +##### Parameters + +- **measure** (*str*): The name of the measurement field to query (e.g., "temperature"). +- **measurement_name** (*str*) (optional): The measurement container name to search within. +Defaults to "arduino". +- **start_from** (*str*) (optional): The time range to search within. Supports relative +periods like "-1d" (1 day), "-2h" (2 hours), "-30m" (30 minutes) or +RFC3339 timestamps like "2024-01-01T00:00:00Z". Defaults to "-1d". + +##### Returns + +-: tuple | None: A tuple containing (field_name, timestamp_iso, value) where: +- field_name (str): The measurement field name +- timestamp_iso (str): ISO format timestamp string +- value (Any): The stored value +Returns None if no data is found in the specified time range. + +##### Raises + +- **TimeSeriesStoreError**: If the start_from value is invalid or if there is an error querying the InfluxDB database. + +#### `read_samples(measure: str, measurement_name: str, start_from: str, end_to: str, aggr_window: str, aggr_func: str, limit: int, order: str)` + +Read all samples of a specific measurement from the InfluxDB database. + +Retrieves multiple data points for the specified measurement field with support +for time range filtering, data aggregation, and result ordering. + +##### Parameters + +- **measure** (*str*): The name of the measurement field to query (e.g., "temperature"). +- **measurement_name** (*str*) (optional): The measurement container name to search within. +Defaults to "arduino". +- **start_from** (*str*) (optional): The start time for the query range. Supports relative +periods ("-7d", "-1h") or RFC3339 timestamps. Defaults to "-1d". +- **end_to** (*str*) (optional): The end time for the query range. Supports same formats +as start_from or "now()". Defaults to None (current time). +- **aggr_window** (*str*) (optional): Time window for data aggregation (e.g., "1h" for hourly, +"30m" for 30-minute intervals). Must be used with aggr_func. Defaults to None. +- **aggr_func** (*str*) (optional): Aggregation function to apply within each window. +Supported values: "mean", "max", "min", "sum". Must be used with aggr_window. +Defaults to None. 
+- **limit** (*int*) (optional): Maximum number of samples to return. Must be positive. +Defaults to 1000. +- **order** (*str*) (optional): Sort order for results by timestamp. Must be "asc" +(ascending, oldest first) or "desc" (descending, newest first). Defaults to "asc". + +##### Returns + +- (*list*): List of tuples, each containing (field_name, timestamp_iso, value) where: +- field_name (str): The measurement field name +- timestamp_iso (str): ISO format timestamp string +- value (Any): The stored or aggregated value +Empty list if no data found in the specified range. + +##### Raises + +- **TimeSeriesStoreError**: If any parameter is invalid, such as: +- Invalid time format in start_from or end_to +- Invalid order value (not "asc" or "desc") +- Invalid limit value (not positive integer) +- Invalid aggregation function +- Mismatched aggr_window and aggr_func (one specified without the other) +- Database query errors + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/image_classification/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/image_classification/API.md new file mode 100644 index 00000000..04f1b5a9 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/image_classification/API.md @@ -0,0 +1,71 @@ +# image_classification API Reference + +## Index + +- Class `ImageClassification` + +--- + +## `ImageClassification` class + +```python +class ImageClassification(confidence: float) +``` + +Module for image analysis and content classification using machine learning. + +This module processes an input image and returns: +- Corresponding class labels +- Confidence scores for each classification + +### Parameters + +- **confidence** (*float*) (optional): Minimum confidence threshold for +classification results. Defaults to 0.3. + +### Methods + +#### `classify_from_file(image_path: str, confidence: float)` + +Process a local image file to be classified. + +##### Parameters + +- **image_path** (*str*): Path to the image file on the local file system. +- **confidence** (*float*): Minimum confidence threshold for classification results. Default is None (use module defaults). + +##### Returns + +- (*dict*): Classification results containing class names and confidence, or None if an error occurs. + +#### `classify(image_bytes, image_type: str, confidence: float)` + +Process an in-memory image to be classified. + +##### Parameters + +- **image_bytes**: Can be raw bytes (e.g., from a file or stream) or a preloaded PIL image. +- **image_type** (*str*), default='jpg': The image format ('jpg', 'jpeg', or 'png'). Required if using raw bytes. Defaults to 'jpg'. +- **confidence** (*float*): Minimum confidence threshold for classification results. Default is None (use module defaults). + +##### Returns + +- (*dict*): Classification results containing class names and confidence, or None if an error occurs. + +#### `process(item)` + +Process an item to classify objects in an image. + +This method supports two input formats: +- A string path to a local image file. +- A dictionary containing raw image bytes under the 'image' key, and optionally an 'image_type' key (e.g., 'jpg', 'png'). + +##### Parameters + +- **item**: A file path (str) or a dictionary with the 'image' and 'image_type' keys (dict). +'image_type' is optional while 'image' contains image as bytes. 
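+
+A short sketch of both accepted input formats (the import path is an assumption based on this package layout; the image path is a placeholder):
+
+```python
+from arduino.app_bricks.image_classification import ImageClassification  # assumed import path
+
+clf = ImageClassification(confidence=0.5)
+print(clf.process("photo.jpg"))  # input as a file path
+with open("photo.jpg", "rb") as f:
+    print(clf.process({"image": f.read(), "image_type": "jpg"}))  # input as raw bytes
+```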
+ +##### Returns + +- (*dict*): Classification results or None if an error occurs. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/keyword_spotting/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/keyword_spotting/API.md new file mode 100644 index 00000000..a36fc0bd --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/keyword_spotting/API.md @@ -0,0 +1,63 @@ +# keyword_spotting API Reference + +## Index + +- Class `KeywordSpotting` + +--- + +## `KeywordSpotting` class + +```python +class KeywordSpotting(mic: Microphone, confidence: float, debounce_sec: float) +``` + +KeywordSpotting module for classifying audio data to detect keywords using a specified model. + +Processes continuous audio input to classify and detect specific keywords or phrases +using pre-trained models. Supports both framework-provided models and custom models +trained on Edge Impulse platform. + +### Parameters + +- **mic** (*Microphone*) (optional): Microphone instance for audio input. +If None, a default Microphone will be initialized. +- **confidence** (*float*) (optional): Confidence level for detection between 0.0 and 1.0. +Defaults to 0.8 (80%). Higher values reduce false positives. +- **debounce_sec** (*float*) (optional): Minimum seconds between repeated detections +of the same keyword. Defaults to 2.0 seconds. + +### Raises + +- **ValueError**: If the model information cannot be retrieved or if the model parameters are incomplete. + +### Methods + +#### `on_detect(keyword: str, callback: Callable[[], None])` + +Register a callback function to be invoked when a specific keyword is detected. + +##### Parameters + +- **keyword** (*str*): The keyword to check for in the classification results. +Must match the keyword as defined in the model. +- **callback** (*Callable[[], None]*): Callback function to run when the keyword is detected. +Must take no parameters and return None. + +##### Raises + +- **TypeError**: If callback is not callable. +- **ValueError**: If callback accepts any argument. + +#### `start()` + +Start the KeywordSpotter module and begin processing audio data. + +Begins continuous audio stream processing and keyword detection. + +#### `stop()` + +Stop the KeywordSpotter module and release resources. + +Stops audio processing and releases microphone and model resources. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/mood_detector/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/mood_detector/API.md new file mode 100644 index 00000000..f9e47fb2 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/mood_detector/API.md @@ -0,0 +1,36 @@ +# mood_detector API Reference + +## Index + +- Class `MoodDetector` + +--- + +## `MoodDetector` class + +```python +class MoodDetector() +``` + +A class to detect mood based on text sentiment analysis. It can classify text as **positive**, **negative**, or **neutral**. + +Notes: +- Case-insensitive; basic punctuation does not affect results. +- English-only. Non-English or mixed-language input may be treated as neutral. +- Empty or whitespace-only input typically returns neutral. +- Input must be plain text (str). 
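+
+### Example
+
+A minimal usage sketch of `get_sentiment` (described below). The import path is an assumption based on this package layout:
+
+```python
+from arduino.app_bricks.mood_detector import MoodDetector  # assumed import path
+
+detector = MoodDetector()
+for text in ("I love this board!", "This is terrible.", "The sky is blue."):
+    print(text, "->", detector.get_sentiment(text))  # positive / negative / neutral
+```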
+ +### Methods + +#### `get_sentiment(text: str)` + +Analyze the sentiment of the provided text and return the mood. + +##### Parameters + +- **text** (*str*): The input text to analyze. + +##### Returns + +- (*str*): The mood of the text — one of `positive`, `negative`, or `neutral`. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/motion_detection/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/motion_detection/API.md new file mode 100644 index 00000000..08c380f6 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/motion_detection/API.md @@ -0,0 +1,48 @@ +# motion_detection API Reference + +## Index + +- Class `MotionDetection` + +--- + +## `MotionDetection` class + +```python +class MotionDetection(confidence: float) +``` + +This Motion Detection module classifies motion patterns using accelerometer data. + +### Parameters + +- **confidence** (*float*): Confidence level for detection. Default is 0.4 (40%). + +### Methods + +#### `on_movement_detection(movement: str, callback: callable)` + +Register a callback function to be invoked when a specific motion pattern is detected. + +##### Parameters + +- **movement** (*str*): The motion pattern name to check for in the classification results. +- **callback** (*callable*): Function to call when the specified motion pattern is detected. + +#### `accumulate_samples(accelerometer_samples: Tuple[float, float, float])` + +Accumulate accelerometer samples for motion detection. + +##### Parameters + +- **accelerometer_samples** (*tuple*): A tuple containing x, y, z acceleration values. Typically, these values are +in m/s^2, but depends on the model configuration. + +#### `get_sensor_samples()` + +Get the current sensor samples. + +##### Returns + +- (*iterable*): An iterable containing the accumulated sensor data (x, y, z acceleration values). + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/mqtt/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/mqtt/API.md new file mode 100644 index 00000000..865abbb9 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/mqtt/API.md @@ -0,0 +1,76 @@ +# mqtt API Reference + +## Index + +- Class `MQTT` + +--- + +## `MQTT` class + +```python +class MQTT(broker_address: str, broker_port: int, username: Optional[str], password: Optional[str], topics: List[str], client_id: str) +``` + +MQTT class for publishing and subscribing to MQTT topics. + +### Parameters + +- **broker_address** (*str*): The address of the MQTT broker. +- **broker_port** (*int*): The port of the MQTT broker. +- **username** (*str*), default=None: The username for MQTT authentication. Defaults to None. +- **password** (*str*), default=None: The password for MQTT authentication. Defaults to None. +- **topics** (*List[str]*) (optional), default=None: List of topics to subscribe to upon connection. Defaults to None. +- **client_id** (*str*) (optional), default=None: A unique client ID for the MQTT client. If None or empty, a random ID will be generated. Defaults to None. + +### Methods + +#### `start()` + +Start the MQTT client and connect to the broker. + +#### `stop()` + +Stop the MQTT client and disconnect from the broker. 
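+
+A minimal publish/subscribe sketch combining the methods in this section (the import path and broker address are assumptions/placeholders):
+
+```python
+from arduino.app_bricks.mqtt import MQTT  # assumed import path
+
+mqtt_client = MQTT(broker_address="broker.local", broker_port=1883, topics=["sensors/temp"])
+
+def handle_temp(client, userdata, msg):
+    print(msg.topic, msg.payload.decode())
+
+mqtt_client.on_message("sensors/temp", handle_temp)
+mqtt_client.start()
+mqtt_client.publish("sensors/temp", {"value": 21.5})  # dict payloads are supported
+# ... later ...
+mqtt_client.stop()
+```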
+ +#### `publish(topic: str, message: str | dict)` + +Publish a message to the MQTT topic. + +##### Parameters + +- **topic** (*str*): The topic to publish the message to. +- **message** (*str|dict*): The message to publish. Can be a string or a dictionary. + +##### Raises + +- **ValueError**: If the topic is an empty string. +- **RuntimeError**: If the publish operation fails. + +#### `subscribe(topic: str)` + +Subscribe to a specified MQTT topic. + +##### Parameters + +- **topic** (*str*): The topic to subscribe to. + +##### Raises + +- **ValueError**: If the topic is an empty string. +- **RuntimeError**: If the subscription fails. + +#### `on_message(topic: str, fn: Callable[[mqtt.Client, object, mqtt.MQTTMessage], None])` + +Set the callback function for handling incoming messages on a specific topic. + +##### Parameters + +- **topic** (*str*): The topic to set the callback for. +- **fn** (*Callable[[mqtt.Client, object, mqtt.MQTTMessage], None]*): The callback function to handle incoming messages. + +##### Raises + +- **ValueError**: If the topic is an empty string or if fn is not callable. +- **RuntimeError**: If setting the callback fails. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/object_detection/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/object_detection/API.md new file mode 100644 index 00000000..bf58415c --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/object_detection/API.md @@ -0,0 +1,85 @@ +# object_detection API Reference + +## Index + +- Class `ObjectDetection` + +--- + +## `ObjectDetection` class + +```python +class ObjectDetection(confidence: float) +``` + +Module for object detection in images using a specified machine learning model. + +This module processes an input image and returns: +- Bounding boxes for detected objects +- Corresponding class labels +- Confidence scores for each detection + +### Parameters + +- **confidence** (*float*): Minimum confidence threshold for detections. Default is 0.3 (30%). + +### Raises + +- **ValueError**: If model information cannot be retrieved. + +### Methods + +#### `detect_from_file(image_path: str, confidence: float)` + +Process a local image file to detect and identify objects. + +##### Parameters + +- **image_path**: Path to the image file on the local file system. +- **confidence**: Minimum confidence threshold for detections. Default is None (use module defaults). + +##### Returns + +- (*dict*): Detection results containing class names, confidence, and bounding boxes. + +#### `detect(image_bytes, image_type: str, confidence: float)` + +Process an in-memory image to detect and identify objects. + +##### Parameters + +- **image_bytes**: Can be raw bytes (e.g., from a file or stream) or a preloaded PIL image. +- **image_type**, default='jpg': The image format ('jpg', 'jpeg', or 'png'). Required if using raw bytes. Defaults to 'jpg'. +- **confidence**: Minimum confidence threshold for detections. Default is None (use module defaults). + +##### Returns + +- (*dict*): Detection results containing class names, confidence, and bounding boxes. + +#### `draw_bounding_boxes(image: Image.Image | bytes, detections: dict)` + +Draw bounding boxes on an image enclosing detected objects using PIL. + +##### Parameters + +- **image**: The input image to annotate. Can be a PIL Image object or raw image bytes. 
+- **detections**: Detection results containing object labels and bounding boxes. + +##### Returns + +-: Image with bounding boxes and key points drawn. +None if input image or detections are invalid. + +#### `process(item)` + +Process an item to detect objects in an image. + +This method supports two input formats: +- A string path to a local image file. +- A dictionary containing raw image bytes under the 'image' key, and optionally an 'image_type' key (e.g., 'jpg', 'png'). + +##### Parameters + +- **item**: A file path (str) or a dictionary with the 'image' and 'image_type' keys (dict). +'image_type' is optional while 'image' contains image as bytes. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/streamlit_ui/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/streamlit_ui/API.md new file mode 100644 index 00000000..b5c7942e --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/streamlit_ui/API.md @@ -0,0 +1,38 @@ +# streamlit_ui API Reference + +## Index + +- Function `addons.arduino_header` + +--- + +## `addons.arduino_header` function + +```python +def arduino_header(title: str) +``` + +Arduino custom header. + +Render a minimal Arduino header: left-aligned title, right-aligned logo SVG, styled. SVG logo loaded by file. + +--- + +Streamlit UI Brick + +This module forwards the full [Streamlit](https://streamlit.io) API. + +For detailed usage of Streamlit components such as buttons, sliders, charts, and layouts, refer to the official Streamlit documentation: +https://docs.streamlit.io/develop/api-reference + +You can import this brick as: + + from arduino.app_bricks.streamlit_ui import st + +Then use it just like native Streamlit: + + st.title("My App") + st.button("Click me") + +Additionally, custom components like `st.arduino_header()` are provided to streamline Arduino integration. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/vibration_anomaly_detection/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/vibration_anomaly_detection/API.md new file mode 100644 index 00000000..1f6a7f40 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/vibration_anomaly_detection/API.md @@ -0,0 +1,104 @@ +# vibration_anomaly_detection API Reference + +## Index + +- Class `VibrationAnomalyDetection` + +--- + +## `VibrationAnomalyDetection` class + +```python +class VibrationAnomalyDetection(anomaly_detection_threshold: float) +``` + +Detect vibration anomalies from accelerometer time-series using a pre-trained + +Edge Impulse model. + +This Brick buffers incoming samples into a sliding window sized to the model’s +`input_features_count`, runs inference when a full window is available, extracts +the **anomaly score**, and (optionally) invokes a user-registered callback when +the score crosses a configurable threshold. + +Notes: + - Requires an active Edge Impulse runner; model info is fetched at init. + - The window size equals the model’s `input_features_count`; samples pushed + via `accumulate_samples()` are flattened before inference. + - The expected **units, axis order, and sampling rate** must match those + used during model training (e.g., m/s² vs g, [ax, ay, az], 100 Hz). 
+ - A single callback is supported at a time (thread-safe registration). + +### Parameters + +- **anomaly_detection_threshold** (*float*): Threshold applied to the model’s +anomaly score to decide whether to trigger the registered callback. +Typical starting point is 1.0; tune based on your dataset. + +### Raises + +- **ValueError**: If the Edge Impulse runner is unreachable, or if the model +info is missing/invalid (e.g., non-positive `frequency` or +`input_features_count`). + +### Methods + +#### `accumulate_samples(sensor_samples: Iterable[float])` + +Append one or more accelerometer samples to the sliding window buffer. + +##### Parameters + +- **sensor_samples** (*Iterable[float]*): A sequence of numeric values. This can +be a single 3-axis sample `(ax, ay, az)`, multiple concatenated +triples, or any iterable whose flattened length contributes toward +the model’s `input_features_count`. + +##### Raises + +- **ValueError**: If `sensor_samples` is empty or None. + +#### `on_anomaly(callback: callable)` + +Register a handler to be invoked when an anomaly is detected. + +The callback signature can be one of: + - `callback()` + - `callback(anomaly_score: float)` + - `callback(anomaly_score: float, classification: dict)` + +##### Parameters + +- **callback** (*callable*): Function to invoke when `anomaly_score >= threshold`. +If a signature with `classification` is used and the model returns +an auxiliary classification head, a dict with label scores is passed. + +#### `loop()` + +Non-blocking processing step; run this periodically. + +Behavior: + - Pulls a full window from the buffer (if available). + - Runs inference via `infer_from_features(...)`. + - Extracts the anomaly score and, if `>= threshold`, invokes the + registered callback (respecting its signature). + +##### Raises + +- **StopIteration**: Propagated if an internal shutdown condition is signaled. + +#### `start()` + +Prepare the detector for a new session. + +Notes: + - Flushes the internal buffer so the next window starts clean. + - Call before beginning to stream new samples. + +#### `stop()` + +Stop the detector and release transient resources. + +Notes: + - Clears the internal buffer; does not alter the registered callback. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/video_imageclassification/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/video_imageclassification/API.md new file mode 100644 index 00000000..b3c4c6d4 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/video_imageclassification/API.md @@ -0,0 +1,109 @@ +# video_imageclassification API Reference + +## Index + +- Class `VideoImageClassification` + +--- + +## `VideoImageClassification` class + +```python +class VideoImageClassification(confidence: float, debounce_sec: float) +``` + +Module for image classification on a **live video stream** using a specified machine learning model. + +Provides a way to react to detected classes over a video stream invoking registered actions in real-time. + +### Parameters + +- **confidence** (*float*): The minimum confidence level for a classification to be considered valid. Default is 0.3. +- **debounce_sec** (*float*): The minimum time in seconds between consecutive detections of the same object +to avoid multiple triggers. Default is 0 seconds. 
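+
+### Example
+
+A minimal sketch wiring callbacks and running the stream loop in a background thread, as the `execute` method below requires (import path assumed from this package layout):
+
+```python
+import threading
+
+from arduino.app_bricks.video_imageclassification import VideoImageClassification  # assumed import path
+
+vic = VideoImageClassification(confidence=0.5, debounce_sec=1.0)
+vic.on_detect("dog", lambda: print("dog seen"))    # no-argument callback
+vic.on_detect_all(lambda results: print(results))  # {"label": confidence, ...}
+
+vic.start()
+threading.Thread(target=vic.execute, daemon=True).start()
+# ... application runs ...
+vic.stop()
+```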
+ +### Raises + +- **RuntimeError**: If the host address could not be resolved. + +### Methods + +#### `on_detect_all(callback: Callable[[dict], None])` + +Register a callback invoked for **every classification event**. + +This callback is useful if you want to process all classified labels in a single +place, or be notified about any classification regardless of its type. + +##### Parameters + +- **callback** (*Callable[[dict], None]*): A function that accepts **exactly one argument**: a dictionary of +classifications above the confidence threshold, in the form +``{"label": confidence, ...}``. + +##### Raises + +- **TypeError**: If `callback` is not a function. +- **ValueError**: If `callback` does not accept exactly one argument. + +#### `on_detect(object: str, callback: Callable[[], None])` + +Register a callback invoked when a **specific label** is classified. + +The callback is triggered whenever the given label appears in the classification +results and passes the confidence and debounce filters. + +##### Parameters + +- **object** (*str*): The label to listen for (e.g., ``"dog"``). +- **callback** (*Callable[[], None]*): A function with **no parameters** that will be executed when the +label is detected. + +##### Raises + +- **TypeError**: If `callback` is not a function. +- **ValueError**: If `callback` accepts one or more parameters. + +#### `start()` + +Start the classification stream. + +This only sets the internal running flag. You must call +`execute` in a loop or a separate thread to actually begin receiving classification results. + +#### `stop()` + +Stop the classification stream and release resources. + +This clears the running flag. Any active `execute` loop +will exit gracefully at its next iteration. + +#### `execute()` + +Run the main classification loop. + +Behavior: + - Opens a WebSocket connection to the model runner. + - Receives classification messages in real time. + - Filters classifications below the confidence threshold. + - Applies debounce rules before invoking callbacks. + - Retries on transient connection errors until stopped. + +##### Raises + +- **ConnectionClosedOK**: Raised to exit when the server closes the connection cleanly. +- **ConnectionClosedError, TimeoutError, ConnectionRefusedError**: Logged and retried with backoff. + +#### `override_threshold(value: float)` + +Override the threshold for image classification model. + +##### Parameters + +- **value** (*float*): The new value for the threshold. + +##### Raises + +- **TypeError**: If the value is not a number. +- **RuntimeError**: If the model information is not available or does not support threshold override. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/video_objectdetection/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/video_objectdetection/API.md new file mode 100644 index 00000000..97d3f9fa --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/video_objectdetection/API.md @@ -0,0 +1,102 @@ +# video_objectdetection API Reference + +## Index + +- Class `VideoObjectDetection` + +--- + +## `VideoObjectDetection` class + +```python +class VideoObjectDetection(confidence: float, debounce_sec: float) +``` + +Module for object detection on a **live video stream** using a specified machine learning model. + +This brick: + - Connects to a model runner over WebSocket. 
+ - Parses incoming classification messages with bounding boxes. + - Filters detections by a configurable confidence threshold. + - Debounces repeated triggers of the same label. + - Invokes per-label callbacks and/or a catch-all callback. + +### Parameters + +- **confidence** (*float*): Confidence level for detection. Default is 0.3 (30%). +- **debounce_sec** (*float*): Minimum seconds between repeated detections of the same object. Default is 0 seconds. + +### Raises + +- **RuntimeError**: If the host address could not be resolved. + +### Methods + +#### `on_detect(object: str, callback: Callable[[], None])` + +Register a callback invoked when a **specific label** is detected. + +##### Parameters + +- **object** (*str*): The label of the object to check for in the classification results. +- **callback** (*Callable[[], None]*): A function with **no parameters**. + +##### Raises + +- **TypeError**: If `callback` is not a function. +- **ValueError**: If `callback` accepts any parameters. + +#### `on_detect_all(callback: Callable[[dict], None])` + +Register a callback invoked for **every detection event**. + +This is useful to receive a consolidated dictionary of detections for each frame. + +##### Parameters + +- **callback** (*Callable[[dict], None]*): A function that accepts **one dict argument** with +the shape `{label: confidence, ...}`. + +##### Raises + +- **TypeError**: If `callback` is not a function. +- **ValueError**: If `callback` does not accept exactly one argument. + +#### `start()` + +Start the video object detection process. + +#### `stop()` + +Stop the video object detection process. + +#### `execute()` + +Connect to the model runner and process messages until `stop` is called. + +Behavior: + - Establishes a WebSocket connection to the runner. + - Parses ``"hello"`` messages to capture model metadata and optionally + performs a threshold override to align the runner with the local setting. + - Parses ``"classification"`` messages, filters detections by confidence, + applies debounce, then invokes registered callbacks. + - Retries on transient WebSocket errors while running. + +##### Raises + +- **ConnectionClosedOK**: Propagated to exit cleanly when the server closes the connection. +- **ConnectionClosedError, TimeoutError, ConnectionRefusedError**: Logged and retried with a short backoff while running. + +#### `override_threshold(value: float)` + +Override the threshold for object detection model. + +##### Parameters + +- **value** (*float*): The new value for the threshold in the range [0.0, 1.0]. + +##### Raises + +- **TypeError**: If the value is not a number. +- **RuntimeError**: If the model information is not available or does not support threshold override. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/visual_anomaly_detection/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/visual_anomaly_detection/API.md new file mode 100644 index 00000000..f931093b --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/visual_anomaly_detection/API.md @@ -0,0 +1,98 @@ +# visual_anomaly_detection API Reference + +## Index + +- Class `VisualAnomalyDetection` + +--- + +## `VisualAnomalyDetection` class + +```python +class VisualAnomalyDetection() +``` + +Module for detecting **visual anomalies** in images using a specified model. 
+ +This module processes an input image and returns: +- Global anomaly metrics (`anomaly_max_score`, `anomaly_mean_score`), when available. +- A list of localized anomaly detections with label, score, and bounding boxes. + +Notes: + - Bounding boxes are returned as `[x_min, y_min, x_max, y_max]` (float). + - Methods return `None` when input is invalid or when the model output + does not contain expected anomaly fields. + +### Methods + +#### `detect_from_file(image_path: str)` + +Process a local image file to detect anomalies. + +##### Parameters + +- **image_path** (*str*): Path to the image file on the local file system. + +##### Returns + +-: dict | None: A dictionary with anomaly information, or `None` on error. + Example successful payload: + { + "anomaly_max_score": , # optional, if provided by model + "anomaly_mean_score": , # optional, if provided by model + "detection": [ + { + "class_name": , + "score": , # anomaly score for this region + "bounding_box_xyxy": [x1, y1, x2, y2] + }, + ... + ] + } + +- Returns `None` if `image_path` is falsy or if the inference result + does not include anomaly data. + +#### `detect(image_bytes, image_type: str)` + +Process an in-memory image to detect anomalies. + +##### Parameters + +- **image_bytes**: Raw image bytes (e.g., from a file or camera) or a PIL Image. +- **image_type** (*str*): Image format ('jpg', 'jpeg', 'png'). Required when passing raw bytes. +Defaults to 'jpg'. + +##### Returns + +-: dict | None: A dictionary with anomaly information, or `None` on error. + See `detect_from_file` for the response schema. + +- Returns `None` if `image_bytes` or `image_type` is missing/invalid. + +#### `process(item)` + +Process an item to detect anomalies (file path or in-memory image). + +This method supports two input formats: +- A string path to a local image file. +- A dictionary containing raw image bytes under the `'image'` key, and + optionally an `'image_type'` key (e.g., `'jpg'`, `'png'`). + +##### Parameters + +- **item** (*str | dict*): File path or a dict with `'image'` (bytes/PIL) and +optional `'image_type'` (str). + +##### Returns + +-: dict | None: Normalized anomaly payload or `None` if an error occurs or +the result lacks anomaly data. + +##### Examples + +```python +process("path/to/image.jpg") +# or +process({"image": image_bytes, "image_type": "png"}) +``` diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/wave_generator/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/wave_generator/API.md new file mode 100644 index 00000000..f8977727 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/wave_generator/API.md @@ -0,0 +1,151 @@ +# wave_generator API Reference + +## Index + +- Class `WaveGenerator` + +--- + +## `WaveGenerator` class + +```python +class WaveGenerator(sample_rate: int, wave_type: WaveType, block_duration: float, attack: float, release: float, glide: float, speaker: Speaker) +``` + +Continuous wave generator brick for audio synthesis. + +This brick generates continuous audio waveforms (sine, square, sawtooth, triangle) +and streams them to a USB speaker in real-time. It provides smooth transitions +between frequency and amplitude changes using configurable envelope parameters. + +The generator runs continuously in a background thread, producing audio blocks +at a steady rate with minimal latency. 
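+
+A minimal lifecycle sketch using the methods documented below (import path assumed from this package layout; requires a connected USB speaker):
+
+```python
+import time
+
+from arduino.app_bricks.wave_generator import WaveGenerator  # assumed import path
+
+gen = WaveGenerator(sample_rate=16000, wave_type="sine")
+gen.start()
+gen.set_frequency(440.0)   # glides to A4 over the configured glide time
+gen.set_amplitude(0.5)
+time.sleep(1.0)
+gen.set_wave_type("square")
+time.sleep(1.0)
+gen.stop()
+```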
+ +### Parameters + +- **sample_rate** (*int*): Audio sample rate in Hz (default: 16000). +- **wave_type** (*WaveType*): Initial waveform type (default: "sine"). +- **block_duration** (*float*): Duration of each audio block in seconds (default: 0.01). +- **attack** (*float*): Attack time for amplitude envelope in seconds (default: 0.01). +- **release** (*float*): Release time for amplitude envelope in seconds (default: 0.03). +- **glide** (*float*): Frequency glide time (portamento) in seconds (default: 0.02). +- **speaker** (*Speaker*) (optional): Pre-configured Speaker instance. If None, WaveGenerator +will create an internal Speaker optimized for real-time synthesis with: +- periodsize aligned to block_duration (eliminates buffer mismatch) +- queue_maxsize=8 (low latency: ~80ms max buffer) +- format=FLOAT_LE, channels=1 + +If providing an external Speaker, ensure: +- sample_rate matches WaveGenerator's sample_rate +- periodsize = int(sample_rate × block_duration) for optimal alignment +- Speaker is started/stopped manually (WaveGenerator won't manage its lifecycle) + +Example external Speaker configuration: + speaker = Speaker( + device="plughw:CARD=UH34", + sample_rate=16000, + format="FLOAT_LE", + periodsize=160, # 16000 × 0.01 = 160 frames + queue_maxsize=8 + ) + +### Raises + +- **SpeakerException**: If no USB speaker is found or device is busy. + +### Attributes + +- **sample_rate** (*int*): Audio sample rate in Hz (default: 16000). +- **wave_type** (*WaveType*): Type of waveform to generate. +- **frequency** (*float*): Current output frequency in Hz. +- **amplitude** (*float*): Current output amplitude (0.0-1.0). + +### Methods + +#### `start()` + +Start the wave generator and audio output. + +This starts the speaker device (if internally owned) and launches the producer thread +that continuously generates and streams audio blocks. + +#### `stop()` + +Stop the wave generator and audio output. + +This stops the producer thread and closes the speaker device (if internally owned). + +#### `set_frequency(frequency: float)` + +Set the target output frequency. + +The frequency will smoothly transition to the new value over the +configured glide time. + +##### Parameters + +- **frequency** (*float*): Target frequency in Hz (typically 20-8000 Hz). + +#### `set_amplitude(amplitude: float)` + +Set the target output amplitude. + +The amplitude will smoothly transition to the new value over the +configured attack/release time. + +##### Parameters + +- **amplitude** (*float*): Target amplitude in range [0.0, 1.0]. + +#### `set_wave_type(wave_type: WaveType)` + +Change the waveform type. + +##### Parameters + +- **wave_type** (*WaveType*): One of "sine", "square", "sawtooth", "triangle". + +##### Raises + +- **ValueError**: If wave_type is not valid. + +#### `set_volume(volume: int)` + +Set the speaker volume level. + +This is a wrapper that controls the hardware volume of the USB speaker device. + +##### Parameters + +- **volume** (*int*): Hardware volume level (0-100). + +##### Raises + +- **SpeakerException**: If the mixer is not available or if volume cannot be set. + +#### `get_volume()` + +Get the current speaker volume level. + +##### Returns + +- (*int*): Current hardware volume level (0-100). + +#### `set_envelope_params(attack: float, release: float, glide: float)` + +Update envelope parameters. + +##### Parameters + +- **attack** (*float*) (optional): Attack time in seconds. +- **release** (*float*) (optional): Release time in seconds. 
+- **glide** (*float*) (optional): Frequency glide time in seconds. + +#### `get_state()` + +Get current generator state. + +##### Returns + +- (*dict*): Dictionary containing current frequency, amplitude, wave type, etc. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/weather_forecast/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/weather_forecast/API.md new file mode 100644 index 00000000..0e6716a9 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/weather_forecast/API.md @@ -0,0 +1,121 @@ +# weather_forecast API Reference + +## Index + +- Class `WeatherData` +- Class `WeatherForecast` +- Class `CityLookupError` +- Class `WeatherForecastLookupError` + +--- + +## `WeatherData` class + +```python +class WeatherData() +``` + +Weather forecast data with standardized codes and categories. + +### Attributes + +- **code** (*int*): WMO weather code representing specific weather conditions. +- **description** (*str*): Human-readable weather description (e.g., "Partly cloudy", "Heavy rain"). +- **category** (*str*): Simplified weather category: "sunny", "cloudy", "rainy", "snowy", or "foggy". + + +--- + +## `WeatherForecast` class + +```python +class WeatherForecast() +``` + +Weather forecast service using the open-meteo.com API. + +Provides weather forecasts by city name or geographic coordinates with no API key required. +Returns structured weather data with WMO codes, descriptions, and simplified categories. + +### Methods + +#### `get_forecast_by_city(city: str, timezone: str, forecast_days: int)` + +Get weather forecast for a specified city. + +##### Parameters + +- **city** (*str*): City name (e.g., "London", "New York"). +- **timezone** (*str*), default="GMT": Timezone identifier. Defaults to "GMT". +- **forecast_days** (*int*), default=1: Number of days to forecast. Defaults to 1. + +##### Returns + +- (*WeatherData*): Weather forecast with code, description, and category. + +##### Raises + +- **RuntimeError**: If city lookup or weather data retrieval fails. + +#### `get_forecast_by_coords(latitude: str, longitude: str, timezone: str, forecast_days: int)` + +Get weather forecast for specific coordinates. + +##### Parameters + +- **latitude** (*str*): Latitude coordinate (e.g., "45.0703"). +- **longitude** (*str*): Longitude coordinate (e.g., "7.6869"). +- **timezone** (*str*), default="GMT": Timezone identifier. Defaults to "GMT". +- **forecast_days** (*int*), default=1: Number of days to forecast. Defaults to 1. + +##### Returns + +- (*WeatherData*): Weather forecast with code, description, and category. + +##### Raises + +- **RuntimeError**: If weather data retrieval fails. + +#### `process(item)` + +Process dictionary input to get weather forecast. + +This method checks if the item is a dictionary with latitude and longitude or city name. +If it is a dictionary with latitude and longitude, it retrieves the weather forecast by coordinates. +If it is a dictionary with city name, it retrieves the weather forecast by city. + +##### Parameters + +- **item** (*dict*): Dictionary with either "city" key or "latitude"/"longitude" keys. + +##### Returns + +-: WeatherData | dict: WeatherData object if valid input provided, empty dict if input format is invalid. + +##### Raises + +- **CityLookupError**: If the city is not found. 
+- **WeatherForecastLookupError**: If the weather forecast cannot be retrieved. + + +--- + +## `CityLookupError` class + +```python +class CityLookupError() +``` + +Exception raised when the city lookup (geocoding) fails. + + +--- + +## `WeatherForecastLookupError` class + +```python +class WeatherForecastLookupError() +``` + +Exception raised when the weather forecast lookup fails. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/web_ui/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/web_ui/API.md new file mode 100644 index 00000000..ec08afdb --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_bricks/web_ui/API.md @@ -0,0 +1,123 @@ +# web_ui API Reference + +## Index + +- Class `WebUI` + +--- + +## `WebUI` class + +```python +class WebUI(addr: str, port: int, ui_path_prefix: str, api_path_prefix: str, assets_dir_path: str, certs_dir_path: str, use_tls: bool, use_ssl: bool | None) +``` + +Module for deploying a web server that can host a web application and expose APIs to its clients. + +It uses FastAPI, Uvicorn, and Socket.IO to serve static files (e.g., HTML/CSS/JS), handle REST API endpoints, +and support real-time communication between the client and the server. + +### Parameters + +- **addr** (*str*) (optional), default="0.0.0.0" (all interfaces): Server bind address. Defaults to "0.0.0.0" (all interfaces). +- **port** (*int*) (optional), default=7000: Server port number. Defaults to 7000. +- **ui_path_prefix** (*str*) (optional), default="" (root): URL prefix for UI routes. Defaults to "" (root). +- **api_path_prefix** (*str*) (optional), default="" (root): URL prefix for API routes. Defaults to "" (root). +- **assets_dir_path** (*str*) (optional), default="/app/assets": Path to static assets directory. Defaults to "/app/assets". +- **certs_dir_path** (*str*) (optional), default="/app/certs": Path to TLS certificates directory. Defaults to "/app/certs". +- **use_tls** (*bool*) (optional), default=False: Enable TLS/HTTPS. Defaults to False. +- **use_ssl** (*bool*) (optional), default=None: Deprecated. Use use_tls instead. Defaults to None. + +### Methods + +#### `local_url()` + +Get the locally addressable URL of the web server. + +##### Returns + +- (*str*): The server's URL (including protocol, address, and port). + +#### `url()` + +Get the externally addressable URL of the web server. + +##### Returns + +- (*str*): The server's URL (including protocol, address, and port). + +#### `start()` + +Start the web server asynchronously. + +This sets up static file routing and WebSocket event handlers, configures TLS if enabled, and launches the server using Uvicorn. + +##### Raises + +- **RuntimeError**: If 'index.html' is missing in the static assets directory. +- **RuntimeError**: If TLS is enabled but certificates fail to generate. +- **RuntimeWarning**: If the server is already running. + +#### `stop()` + +Stop the web server gracefully. + +Waits up to 5 seconds for current requests to finish before terminating. + +#### `expose_api(method: str, path: str, function: Callable)` + +Register a route with the specified HTTP method and path. + +The path will be prefixed with the api_path_prefix configured during initialization. + +##### Parameters + +- **method** (*str*): HTTP method to use (e.g., "GET", "POST"). +- **path** (*str*): URL path for the API endpoint (without the prefix). 
+- **function** (*Callable*): Function to execute when the route is accessed. + +#### `on_connect(callback: Callable[[str], None])` + +Register a callback for WebSocket connection events. + +The callback should accept a single argument: the session ID (sid) of the connected client. + +##### Parameters + +- **callback** (*Callable[[str], None]*): Function to call when a client connects. Receives the session ID (sid) as its only argument. + +#### `on_disconnect(callback: Callable[[str], None])` + +Register a callback for WebSocket disconnection events. + +The callback should accept a single argument: the session ID (sid) of the disconnected client. + +##### Parameters + +- **callback** (*Callable[[str], None]*): Function to call when a client disconnects. Receives the session ID (sid) as its only argument. + +#### `on_message(message_type: str, callback: Callable[[str, Any], Any])` + +Register a callback function for a specific WebSocket message type received by clients. + +The client should send messages named as message_type for this callback to be triggered. + +If a response is returned by the callback, it will be sent back to the client +with a message type suffix "_response". + +##### Parameters + +- **message_type** (*str*): The message type name to listen for. +- **callback** (*Callable[[str, Any], Any]*): Function to handle the message. Receives two arguments: +the session ID (sid) and the incoming message data. + +#### `send_message(message_type: str, message: dict | str, room: str | None)` + +Send a message to connected WebSocket clients. + +##### Parameters + +- **message_type** (*str*): The name of the message event to emit. +- **message** (*dict | str*): The message payload to send (dict or str). +- **room** (*str*): The target Socket.IO room (defaults to all clients). + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_peripherals/microphone/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_peripherals/microphone/API.md new file mode 100644 index 00000000..fd9e8d96 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_peripherals/microphone/API.md @@ -0,0 +1,116 @@ +# microphone API Reference + +## Index + +- Class `MicrophoneException` +- Class `MicrophoneDisconnectedException` +- Class `Microphone` + +--- + +## `MicrophoneException` class + +```python +class MicrophoneException() +``` + +Custom exception for Microphone errors. + + +--- + +## `MicrophoneDisconnectedException` class + +```python +class MicrophoneDisconnectedException() +``` + +Raised when the microphone device is disconnected and max retries are exceeded. + + +--- + +## `Microphone` class + +```python +class Microphone(device: str, sample_rate: int, channels: int, format: str, periodsize: int, max_reconnect_attempts: int, reconnect_delay: float) +``` + +Microphone class for capturing audio using ALSA PCM interface. + +Handles automatic reconnection on device disconnection. + +### Parameters + +- **device** (*str*): ALSA device name or USB_MIC_1/2 macro. +- **sample_rate** (*int*): Sample rate in Hz (default: 16000). +- **channels** (*int*): Number of audio channels (default: 1). +- **format** (*str*): Audio format (default: "S16_LE"). +- **periodsize** (*int*): Period size in frames (default: 1024). +- **max_reconnect_attempts** (*int*): Maximum attempts to reconnect on disconnection (default: 30). 
+- **reconnect_delay** (*float*): Delay in seconds between reconnection attempts (default: 2.0). + +### Raises + +- **MicrophoneException**: If the microphone cannot be initialized or if the device is busy. + +### Methods + +#### `get_volume()` + +Get the current volume level of the microphone. + +##### Returns + +- (*int*): Volume level (0-100). If no mixer is available, returns -1. + +##### Raises + +- **MicrophoneException**: If the mixer is not available or if volume cannot be retrieved. + +#### `set_volume(volume: int)` + +Set the volume level of the microphone. + +##### Parameters + +- **volume** (*int*): Volume level (0-100). + +##### Raises + +- **MicrophoneException**: If the mixer is not available or if volume cannot be set. + +#### `start()` + +Start the microphone stream by opening the PCM device. + +#### `connect()` + +Try to connect the microphone device. + +#### `stream()` + +Yield audio chunks from the microphone. Each chunk has periodsize samples. + +- Handles automatic reconnection if the device is unplugged and replugged. +- Only one main loop, no nested loops. +- Thread safe and clean state management. +- When max reconnect attempts are reached, the generator returns (StopIteration for the caller). +- All PCM operations are protected by lock. + +##### Returns + +- (*np.ndarray*): Audio data as a numpy array of the correct dtype, depending on the format specified. + +#### `list_usb_devices()` + +Return a list of available USB microphone ALSA device names (plughw only). + +##### Returns + +- (*list*): List of USB microphone device names. + +#### `stop()` + +Close the PCM device if open. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_peripherals/speaker/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_peripherals/speaker/API.md new file mode 100644 index 00000000..94c64118 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_peripherals/speaker/API.md @@ -0,0 +1,100 @@ +# speaker API Reference + +## Index + +- Class `SpeakerException` +- Class `Speaker` + +--- + +## `SpeakerException` class + +```python +class SpeakerException() +``` + +Custom exception for Speaker errors. + + +--- + +## `Speaker` class + +```python +class Speaker(device: str, sample_rate: int, channels: int, format: str, periodsize: int, queue_maxsize: int) +``` + +Speaker class for reproducing audio using ALSA PCM interface. + +### Parameters + +- **device** (*str*): ALSA device name or USB_SPEAKER_1/2 macro. +- **sample_rate** (*int*): Sample rate in Hz (default: 16000). +- **channels** (*int*): Number of audio channels (default: 1). +- **format** (*str*): Audio format (default: "S16_LE"). +- **periodsize** (*int*): ALSA period size in frames (default: None = use hardware default). +For real-time synthesis, set to match generation block size. +For streaming/file playback, leave as None for hardware-optimal value. +- **queue_maxsize** (*int*): Maximum application queue depth in blocks (default: 100). +Lower values (5-20) reduce latency for interactive audio. +Higher values (50-200) provide stability for streaming. + +### Raises + +- **SpeakerException**: If the speaker cannot be initialized or if the device is busy. + +### Methods + +#### `list_usb_devices()` + +Return a list of available USB speaker ALSA device names (plughw only). + +##### Returns + +- (*list*): List of USB speaker device names. 
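+
+A minimal playback sketch using the methods in this section; the import path is assumed from this package layout and the ALSA device name is a placeholder (reused from the wave_generator notes earlier in these docs):
+
+```python
+import numpy as np
+
+from arduino.app_peripherals.speaker import Speaker  # assumed import path
+
+speaker = Speaker(device="plughw:CARD=UH34", sample_rate=16000, channels=1, format="S16_LE")
+speaker.start()
+
+# One second of a 440 Hz sine tone as 16-bit samples, matching the S16_LE format above.
+t = np.arange(16000) / 16000.0
+tone = (0.3 * np.sin(2 * np.pi * 440.0 * t) * 32767).astype(np.int16)
+speaker.play(tone, block_on_queue=True)
+
+speaker.stop()
+```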
+ +#### `get_volume()` + +Get the current volume level of the speaker. + +##### Returns + +- (*int*): Volume level (0-100). If no mixer is available, returns -1. + +##### Raises + +- **SpeakerException**: If the mixer is not available or if volume cannot be retrieved. + +#### `set_volume(volume: int)` + +Set the volume level of the speaker. + +##### Parameters + +- **volume** (*int*): Volume level (0-100). + +##### Raises + +- **SpeakerException**: If the mixer is not available or if volume cannot be set. + +#### `start()` + +Start the spaker stream by opening the PCM device. + +#### `stop()` + +Close the PCM device if open. + +#### `play(data: bytes | np.ndarray, block_on_queue: bool)` + +Play audio data through the speaker. + +##### Parameters + +- **data** (*bytes|np.ndarray*): Audio data to play as bytes or np.ndarray. +- **block_on_queue** (*bool*): If True, block until the queue has space for the data. + +##### Raises + +- **SpeakerException**: If the speaker is not started or if playback fails. + diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_peripherals/usb_camera/API.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_peripherals/usb_camera/API.md new file mode 100644 index 00000000..62b3537f --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/api-docs/arduino/app_peripherals/usb_camera/API.md @@ -0,0 +1,80 @@ +# usb_camera API Reference + +## Index + +- Class `CameraReadError` +- Class `CameraOpenError` +- Class `USBCamera` + +--- + +## `CameraReadError` class + +```python +class CameraReadError() +``` + +Exception raised when the specified camera cannot be found. + + +--- + +## `CameraOpenError` class + +```python +class CameraOpenError() +``` + +Exception raised when the camera cannot be opened. + + +--- + +## `USBCamera` class + +```python +class USBCamera(camera: int, resolution: tuple[int, int], fps: int, compression: bool, letterbox: bool) +``` + +Represents an input peripheral for capturing images from a USB camera device. + +This class uses OpenCV to interface with the camera and capture images. + +### Parameters + +- **camera** (*int*): Camera index (default is 0 - index is related to the first camera available from /dev/v4l/by-id devices). +- **resolution** (*tuple[int, int]*): Resolution as (width, height). If None, uses default resolution. +- **fps** (*int*): Frames per second for the camera. If None, uses default FPS. +- **compression** (*bool*): Whether to compress the captured images. If True, images are compressed to PNG format. +- **letterbox** (*bool*): Whether to apply letterboxing to the captured images. + +### Methods + +#### `capture()` + +Captures a frame from the camera, blocking to respect the configured FPS. + +##### Returns + +-: PIL.Image.Image | None: The captured frame as a PIL Image, or None if no frame is available. + +#### `capture_bytes()` + +Captures a frame from the camera and returns its raw bytes, blocking to respect the configured FPS. + +##### Returns + +-: bytes | None: The captured frame as a bytes array, or None if no frame is available. + +#### `start()` + +Starts the camera capture. + +#### `stop()` + +Stops the camera and releases its resources. + +#### `produce()` + +Alias for capture method. 
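+
+A minimal capture sketch using the methods above (import path assumed from this package layout; the output path is a placeholder):
+
+```python
+from arduino.app_peripherals.usb_camera import USBCamera  # assumed import path
+
+cam = USBCamera(camera=0, resolution=(640, 480), fps=15)
+cam.start()
+try:
+    frame = cam.capture()        # PIL.Image.Image or None
+    if frame is not None:
+        frame.save("frame.png")
+finally:
+    cam.stop()
+```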
+ diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/bricks-list.yaml b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/bricks-list.yaml new file mode 100644 index 00000000..eaf2186d --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/bricks-list.yaml @@ -0,0 +1,334 @@ +bricks: +- id: arduino:dbstorage_sqlstore + name: Database - SQL + description: Simplified database storage layer for Arduino sensor data using SQLite + local database. + require_container: false + require_model: false + mount_devices_into_container: false + ports: [] + category: storage +- id: arduino:object_detection + name: Object Detection + description: "Brick for object detection using a pre-trained model. It processes\ + \ images and returns the predicted class label, bounding-boxes and confidence\ + \ score.\nBrick is designed to work with pre-trained models provided by framework\ + \ or with custom object detection models trained on Edge Impulse platform. \n" + require_container: true + require_model: true + mount_devices_into_container: false + ports: [] + category: video + model_name: yolox-object-detection + variables: + - name: CUSTOM_MODEL_PATH + default_value: /home/arduino/.arduino-bricks/ei-models + description: path to the custom model directory + - name: EI_OBJ_DETECTION_MODEL + default_value: /models/ootb/ei/yolo-x-nano.eim + description: path to the model file +- id: arduino:mood_detector + name: Mood Detection + description: 'This brick analyzes text sentiment to detect the mood expressed. + + It classifies text as positive, negative, or neutral. + + ' + require_container: false + require_model: false + mount_devices_into_container: false + ports: [] + category: text +- id: arduino:camera_code_detection + name: Camera Code Detection + description: Scans a camera for barcodes and QR codes + require_container: false + require_model: false + mount_devices_into_container: false + ports: [] + category: video + required_devices: + - camera +- id: arduino:audio_classification + name: Audio Classification + description: 'Brick for audio classification using a pre-trained model. It processes + audio input to classify different sounds. + + Brick is designed to work with pre-trained models provided by framework or with + custom audio classification models trained on Edge Impulse platform. + + ' + require_container: true + require_model: true + mount_devices_into_container: false + ports: [] + category: audio + model_name: glass-breaking + variables: + - name: CUSTOM_MODEL_PATH + default_value: /home/arduino/.arduino-bricks/ei-models + description: path to the custom model directory + - name: EI_AUDIO_CLASSIFICATION_MODEL + default_value: /models/ootb/ei/glass-breaking.eim + description: path to the model file +- id: arduino:arduino_cloud + name: Arduino Cloud + description: Connects to Arduino Cloud + require_container: false + require_model: false + mount_devices_into_container: false + ports: [] + category: null + variables: + - name: ARDUINO_DEVICE_ID + description: Arduino Cloud Device ID + - name: ARDUINO_SECRET + description: Arduino Cloud Secret +- id: arduino:wave_generator + name: Wave Generator + description: Continuous wave generator for audio synthesis. Generates sine, square, + sawtooth, and triangle waveforms with smooth frequency and amplitude transitions. 
+ require_container: false + require_model: false + mount_devices_into_container: false + ports: [] + category: audio +- id: arduino:image_classification + name: Image Classification + description: "Brick for image classification using a pre-trained model. It processes\ + \ images and returns the predicted class label and confidence score.\nBrick is\ + \ designed to work with pre-trained models provided by framework or with custom\ + \ image classification models trained on Edge Impulse platform. \n" + require_container: true + require_model: true + mount_devices_into_container: false + ports: [] + category: video + model_name: mobilenet-image-classification + variables: + - name: CUSTOM_MODEL_PATH + default_value: /home/arduino/.arduino-bricks/ei-models + description: path to the custom model directory + - name: EI_CLASSIFICATION_MODEL + default_value: /models/ootb/ei/mobilenet-v2-224px.eim + description: path to the model file +- id: arduino:streamlit_ui + name: WebUI - Streamlit + description: A simplified user interface based on Streamlit and Python. + require_container: false + require_model: false + mount_devices_into_container: false + ports: + - 7000 + category: ui + requires_display: webview +- id: arduino:vibration_anomaly_detection + name: Vibration Anomaly detection + description: 'This Brick is designed for vibration anomaly detection and recognition, + leveraging pre-trained models. + + It takes input from sensors (accelerometer) to identify possible anomalies based + on vibration patterns. + + You can use it with pre-trained models provided by the framework or with your + own custom anomaly detections models trained on the Edge Impulse platform. + + ' + require_container: true + require_model: true + mount_devices_into_container: false + ports: [] + category: null + model_name: fan-anomaly-detection + variables: + - name: CUSTOM_MODEL_PATH + default_value: /home/arduino/.arduino-bricks/ei-models + description: path to the custom model directory + - name: CUSTOM_MODEL_PATH + default_value: /models/custom/ei/ + description: path to the custom model directory + - name: EI_VIBRATION_ANOMALY_DETECTION_MODEL + default_value: /models/ootb/ei/fan-anomaly-detection.eim +- id: arduino:web_ui + name: WebUI - HTML + description: A user interface based on HTML and JavaScript that can rely on additional + APIs and a WebSocket exposed by a web server. + require_container: false + require_model: false + mount_devices_into_container: false + ports: + - 7000 + category: ui + requires_display: webview +- id: arduino:keyword_spotting + name: Keyword Spotting + description: 'Brick for keyword spotting using a pre-trained model. It processes + audio input to detect specific keywords or phrases. + + Brick is designed to work with pre-trained models provided by framework or with + custom audio classification models trained on Edge Impulse platform. 
+ + ' + require_container: true + require_model: true + mount_devices_into_container: false + ports: [] + category: audio + model_name: keyword-spotting-hey-arduino + required_devices: + - microphone + variables: + - name: CUSTOM_MODEL_PATH + default_value: /home/arduino/.arduino-bricks/ei-models + description: path to the custom model directory + - name: EI_KEYWORD_SPOTTING_MODEL + default_value: /models/ootb/ei/keyword-spotting-hey-arduino.eim + description: path to the model file +- id: arduino:video_image_classification + name: Video Image Classification + description: 'This image classification brick utilizes a pre-trained model to analyze + video streams from a camera. + + It identifies objects, returning their predicted class labels and confidence scores. + + The output is a video stream featuring classification as overaly, with the added + capability to trigger actions based on these detections. + + It supports pre-trained models provided by the framework and custom object detection + models trained on the Edge Impulse platform. + + ' + require_container: true + require_model: true + mount_devices_into_container: true + ports: [] + category: video + model_name: mobilenet-image-classification + required_devices: + - camera + variables: + - name: CUSTOM_MODEL_PATH + default_value: /home/arduino/.arduino-bricks/ei-models/ + description: path to the custom model directory + - name: EI_CLASSIFICATION_MODEL + default_value: /models/ootb/ei/mobilenet-v2-224px.eim + description: path to the model file + - name: VIDEO_DEVICE + default_value: /dev/video1 +- id: arduino:weather_forecast + name: Weather Forecast + description: Online weather forecast module for Arduino using open-meteo.com geolocation + and weather APIs. Requires an internet connection. + require_container: false + require_model: false + mount_devices_into_container: false + ports: [] + category: miscellaneous +- id: arduino:motion_detection + name: Motion detection + description: 'This Brick is designed for motion detection and recognition, leveraging + pre-trained models. + + It takes input from accelerometer sensors to identify various motion patterns. + + You can use it with pre-trained models provided by the framework or with your + custom motion classification models trained on the Edge Impulse platform. + + ' + require_container: true + require_model: true + mount_devices_into_container: false + ports: [] + category: null + model_name: updown-wave-motion-detection + variables: + - name: CUSTOM_MODEL_PATH + default_value: /home/arduino/.arduino-bricks/ei-models + description: path to the custom model directory + - name: EI_MOTION_DETECTION_MODEL + default_value: /models/ootb/ei/updown-wave-motion-detection.eim + description: path to the model file +- id: arduino:dbstorage_tsstore + name: Database - Time Series + description: Simplified time series database storage layer for Arduino sensor samples + built on top of InfluxDB. + require_container: true + require_model: false + mount_devices_into_container: false + ports: [] + category: storage + variables: + - name: APP_HOME + default_value: . 
+ - name: DB_PASSWORD + default_value: Arduino15 + description: Database password + - name: DB_USERNAME + default_value: admin + description: Edge Impulse project API key + - name: INFLUXDB_ADMIN_TOKEN + default_value: 392edbf2-b8a2-481f-979d-3f188b2c05f0 + description: InfluxDB admin token +- id: arduino:visual_anomaly_detection + name: Visual Anomaly Detection + description: "Brick for visual anomaly detection using a pre-trained model. It processes\ + \ images to identify unusual patterns and returns detected anomalies with bounding\ + \ boxes. \nSupports pre-trained models provided by the framework or custom anomaly\ + \ detection models trained on the Edge Impulse platform. \n" + require_container: true + require_model: true + mount_devices_into_container: false + ports: [] + category: image + model_name: concrete-crack-anomaly-detection + variables: + - name: CUSTOM_MODEL_PATH + default_value: /home/arduino/.arduino-bricks/ei-models + description: path to the custom model directory + - name: EI_V_ANOMALY_DETECTION_MODEL + default_value: /models/ootb/ei/concrete-crack-anomaly-detection.eim + description: path to the model file +- id: arduino:video_object_detection + name: Video Object Detection + description: 'This object detection brick utilizes a pre-trained model to analyze + video streams from a camera. + + It identifies objects, returning their predicted class labels, bounding boxes, + and confidence scores. + + The output is a video stream featuring bounding boxes around detected objects, + with the added capability to trigger actions based on these detections. + + It supports pre-trained models provided by the framework and custom object detection + models trained on the Edge Impulse platform. + + ' + require_container: true + require_model: true + mount_devices_into_container: true + ports: [] + category: null + model_name: yolox-object-detection + required_devices: + - camera + variables: + - name: CUSTOM_MODEL_PATH + default_value: /home/arduino/.arduino-bricks/ei-models/ + description: path to the custom model directory + - name: EI_OBJ_DETECTION_MODEL + default_value: /models/ootb/ei/yolo-x-nano.eim + description: path to the model file + - name: VIDEO_DEVICE + default_value: /dev/video1 +- id: arduino:cloud_llm + name: Cloud LLM + description: Cloud LLM Brick enables seamless integration with cloud-based Large + Language Models (LLMs) for advanced AI capabilities in your Arduino projects. 
+ require_container: false + require_model: false + mount_devices_into_container: false + ports: [] + category: null + variables: + - name: API_KEY + description: API Key for the cloud-based LLM service diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/audio_classification/brick_compose.yaml b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/audio_classification/brick_compose.yaml new file mode 100644 index 00000000..0a710c5f --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/audio_classification/brick_compose.yaml @@ -0,0 +1,24 @@ +# EI_AUDIO_CLASSIFICATION_MODEL = path to the model file +# CUSTOM_MODEL_PATH = path to the custom model directory +services: + ei-audio-classifier-runner: + image: ${DOCKER_REGISTRY_BASE:-ghcr.io/arduino/}app-bricks/ei-models-runner:0.6.0 + logging: + driver: "json-file" + options: + max-size: "5m" + max-file: "2" + devices: + - "/dev/dri:/dev/dri" + group_add: + - "render" + ports: + - ${BIND_ADDRESS:-127.0.0.1}:1339:1337 + volumes: + - "${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models}:${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models}" + command: ["--model-file", "${EI_AUDIO_CLASSIFICATION_MODEL:-/models/ootb/ei/glass-breaking.eim}", "--run-http-server", "1337", "--dont-print-predictions"] + healthcheck: + test: [ "CMD-SHELL", "wget -q --spider http://localhost:1337/api/info || exit 1" ] + interval: 2s + timeout: 2s + retries: 25 diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/dbstorage_tsstore/brick_compose.yaml b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/dbstorage_tsstore/brick_compose.yaml new file mode 100644 index 00000000..b43f55c6 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/dbstorage_tsstore/brick_compose.yaml @@ -0,0 +1,29 @@ +# BIND_ADDRESS = external container address +# BIND_PORT = container port +# DB_USERNAME = Edge Impulse project API key +# DB_PASSWORD = Database password +# INFLUXDB_ADMIN_TOKEN = InfluxDB admin token +services: + dbstorage-influx: + image: influxdb:2.7 + logging: + driver: "json-file" + options: + max-size: "5m" + max-file: "2" + ports: + - "${BIND_ADDRESS:-127.0.0.1}:${BIND_PORT:-8086}:8086" + volumes: + - "${APP_HOME:-.}/data/influx-data:/var/lib/influxdb2" + environment: + DOCKER_INFLUXDB_INIT_MODE: setup + DOCKER_INFLUXDB_INIT_USERNAME: "${DB_USERNAME:-admin}" + DOCKER_INFLUXDB_INIT_PASSWORD: "${DB_PASSWORD:-Arduino15}" + DOCKER_INFLUXDB_INIT_ORG: arduino + DOCKER_INFLUXDB_INIT_BUCKET: arduinostorage + DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: "${INFLUXDB_ADMIN_TOKEN:-392edbf2-b8a2-481f-979d-3f188b2c05f0}" + healthcheck: + test: [ "CMD-SHELL", "curl -f http://localhost:8086/health || exit 1" ] + interval: 2s + timeout: 3s + retries: 10 diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/image_classification/brick_compose.yaml b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/image_classification/brick_compose.yaml new file mode 100644 index 00000000..fe20b37e --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/image_classification/brick_compose.yaml @@ -0,0 +1,24 @@ +# EI_CLASSIFICATION_MODEL = path to the 
model file +# CUSTOM_MODEL_PATH = path to the custom model directory +services: + ei-classification-runner: + image: ${DOCKER_REGISTRY_BASE:-ghcr.io/arduino/}app-bricks/ei-models-runner:0.6.0 + logging: + driver: "json-file" + options: + max-size: "5m" + max-file: "2" + devices: + - "/dev/dri:/dev/dri" + group_add: + - "render" + ports: + - ${BIND_ADDRESS:-127.0.0.1}:1338:1337 + volumes: + - "${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models}:${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models}" + command: ["--model-file", "${EI_CLASSIFICATION_MODEL:-/models/ootb/ei/mobilenet-v2-224px.eim}", "--run-http-server", "1337", "--dont-print-predictions"] + healthcheck: + test: [ "CMD-SHELL", "wget -q --spider http://localhost:1337/api/info || exit 1" ] + interval: 2s + timeout: 2s + retries: 25 diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/keyword_spotting/brick_compose.yaml b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/keyword_spotting/brick_compose.yaml new file mode 100644 index 00000000..4340871e --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/keyword_spotting/brick_compose.yaml @@ -0,0 +1,24 @@ +# EI_KEYWORD_SPOTTING_MODEL = path to the model file +# CUSTOM_MODEL_PATH = path to the custom model directory +services: + ei-keyword-spot-runner: + image: ${DOCKER_REGISTRY_BASE:-ghcr.io/arduino/}app-bricks/ei-models-runner:0.6.0 + logging: + driver: "json-file" + options: + max-size: "5m" + max-file: "2" + devices: + - "/dev/dri:/dev/dri" + group_add: + - "render" + ports: + - ${BIND_ADDRESS:-127.0.0.1}:1340:1337 + volumes: + - "${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models}:${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models}" + command: ["--model-file", "${EI_KEYWORD_SPOTTING_MODEL:-/models/ootb/ei/keyword-spotting-hey-arduino.eim}", "--run-http-server", "1337", "--dont-print-predictions"] + healthcheck: + test: [ "CMD-SHELL", "wget -q --spider http://localhost:1337/api/info || exit 1" ] + interval: 2s + timeout: 2s + retries: 25 diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/motion_detection/brick_compose.yaml b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/motion_detection/brick_compose.yaml new file mode 100644 index 00000000..abc10e77 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/motion_detection/brick_compose.yaml @@ -0,0 +1,24 @@ +# EI_MOTION_DETECTION_MODEL = path to the model file +# CUSTOM_MODEL_PATH = path to the custom model directory +services: + ei-motion-detection-runner: + image: ${DOCKER_REGISTRY_BASE:-ghcr.io/arduino/}app-bricks/ei-models-runner:0.6.0 + logging: + driver: "json-file" + options: + max-size: "5m" + max-file: "2" + devices: + - "/dev/dri:/dev/dri" + group_add: + - "render" + ports: + - ${BIND_ADDRESS:-127.0.0.1}:1341:1337 + volumes: + - "${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models}:${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models}" + command: ["--model-file", "${EI_MOTION_DETECTION_MODEL:-/models/ootb/ei/updown-wave-motion-detection.eim}", "--run-http-server", "1337", "--dont-print-predictions"] + healthcheck: + test: [ "CMD-SHELL", "wget -q --spider http://localhost:1337/api/info || exit 1" ] + interval: 2s + timeout: 2s + retries: 25 diff --git 
a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/object_detection/brick_compose.yaml b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/object_detection/brick_compose.yaml new file mode 100644 index 00000000..99871411 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/object_detection/brick_compose.yaml @@ -0,0 +1,24 @@ +# EI_OBJ_DETECTION_MODEL = path to the model file +# CUSTOM_MODEL_PATH = path to the custom model directory +services: + ei-obj-detection-runner: + image: ${DOCKER_REGISTRY_BASE:-ghcr.io/arduino/}app-bricks/ei-models-runner:0.6.0 + logging: + driver: "json-file" + options: + max-size: "5m" + max-file: "2" + devices: + - "/dev/dri:/dev/dri" + group_add: + - "render" + ports: + - ${BIND_ADDRESS:-127.0.0.1}:${BIND_PORT:-1337}:1337 + volumes: + - "${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models}:${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models}" + command: ["--model-file", "${EI_OBJ_DETECTION_MODEL:-/models/ootb/ei/yolo-x-nano.eim}", "--run-http-server", "1337", "--dont-print-predictions"] + healthcheck: + test: [ "CMD-SHELL", "wget -q --spider http://localhost:1337/api/info || exit 1" ] + interval: 2s + timeout: 2s + retries: 25 diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/vibration_anomaly_detection/brick_compose.yaml b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/vibration_anomaly_detection/brick_compose.yaml new file mode 100644 index 00000000..e07c2890 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/vibration_anomaly_detection/brick_compose.yaml @@ -0,0 +1,24 @@ +# EI_MOTION_DETECTION_MODEL = path to the model file +# CUSTOM_MODEL_PATH = path to the custom model directory +services: + ei-anomaly-detection-runner: + image: ${DOCKER_REGISTRY_BASE:-ghcr.io/arduino/}app-bricks/ei-models-runner:0.6.0 + logging: + driver: "json-file" + options: + max-size: "5m" + max-file: "2" + devices: + - "/dev/dri:/dev/dri" + group_add: + - "render" + ports: + - ${BIND_ADDRESS:-127.0.0.1}:1342:1337 + volumes: + - "${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models}:${CUSTOM_MODEL_PATH:-/models/custom/ei/}" + command: ["--model-file", "${EI_VIBRATION_ANOMALY_DETECTION_MODEL:-/models/ootb/ei/fan-anomaly-detection.eim}", "--run-http-server", "1337", "--dont-print-predictions"] + healthcheck: + test: [ "CMD-SHELL", "wget -q --spider http://localhost:1337/api/info || exit 1" ] + interval: 2s + timeout: 2s + retries: 25 diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/video_image_classification/brick_compose.yaml b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/video_image_classification/brick_compose.yaml new file mode 100644 index 00000000..3dd8139a --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/video_image_classification/brick_compose.yaml @@ -0,0 +1,21 @@ +# EI_CLASSIFICATION_MODEL = path to the model file +# CUSTOM_MODEL_PATH = path to the custom model directory +services: + ei-video-classification-runner: + image: ${DOCKER_REGISTRY_BASE:-ghcr.io/arduino/}app-bricks/ei-models-runner:0.6.0 + logging: + driver: "json-file" + options: + max-size: "5m" + max-file: 
"2" + ports: + - ${BIND_ADDRESS:-0.0.0.0}:4912:4912 + volumes: + - "${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models/}:${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models/}" + - "/run/udev:/run/udev" + command: ["--model-file", "${EI_CLASSIFICATION_MODEL:-/models/ootb/ei/mobilenet-v2-224px.eim}", "--dont-print-predictions", "--mode", "streaming", "--preview-original-resolution", "--camera", "${VIDEO_DEVICE:-/dev/video1}"] + healthcheck: + test: [ "CMD-SHELL", "wget -q --spider http://ei-video-classification-runner:4912 || exit 1" ] + interval: 2s + timeout: 2s + retries: 25 diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/video_object_detection/brick_compose.yaml b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/video_object_detection/brick_compose.yaml new file mode 100644 index 00000000..804bc638 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/video_object_detection/brick_compose.yaml @@ -0,0 +1,21 @@ +# EI_OBJ_DETECTION_MODEL = path to the model file +# CUSTOM_MODEL_PATH = path to the custom model directory +services: + ei-video-obj-detection-runner: + image: ${DOCKER_REGISTRY_BASE:-ghcr.io/arduino/}app-bricks/ei-models-runner:0.6.0 + logging: + driver: "json-file" + options: + max-size: "5m" + max-file: "2" + ports: + - ${BIND_ADDRESS:-0.0.0.0}:4912:4912 + volumes: + - "${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models/}:${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models/}" + - "/run/udev:/run/udev" + command: ["--model-file", "${EI_OBJ_DETECTION_MODEL:-/models/ootb/ei/yolo-x-nano.eim}", "--dont-print-predictions", "--mode", "streaming", "--preview-original-resolution", "--camera", "${VIDEO_DEVICE:-/dev/video1}"] + healthcheck: + test: [ "CMD-SHELL", "wget -q --spider http://ei-video-obj-detection-runner:4912 || exit 1" ] + interval: 2s + timeout: 2s + retries: 25 diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/visual_anomaly_detection/brick_compose.yaml b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/visual_anomaly_detection/brick_compose.yaml new file mode 100644 index 00000000..ced99fcb --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/compose/arduino/visual_anomaly_detection/brick_compose.yaml @@ -0,0 +1,24 @@ +# EI_V_ANOMALY_DETECTION_MODEL = path to the model file +# CUSTOM_MODEL_PATH = path to the custom model directory +services: + ei-obj-video-anomalies-det-runner: + image: ${DOCKER_REGISTRY_BASE:-ghcr.io/arduino/}app-bricks/ei-models-runner:0.6.0 + logging: + driver: "json-file" + options: + max-size: "5m" + max-file: "2" + devices: + - "/dev/dri:/dev/dri" + group_add: + - "render" + ports: + - ${BIND_ADDRESS:-127.0.0.1}:1343:1337 + volumes: + - "${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models}:${CUSTOM_MODEL_PATH:-/home/arduino/.arduino-bricks/ei-models}" + command: ["--model-file", "${EI_V_ANOMALY_DETECTION_MODEL:-/models/ootb/ei/concrete-crack-anomaly-detection.eim}", "--run-http-server", "1337", "--dont-print-predictions"] + healthcheck: + test: [ "CMD-SHELL", "wget -q --spider http://localhost:1337/api/info || exit 1" ] + interval: 2s + timeout: 2s + retries: 25 diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/arduino_cloud/README.md 
b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/arduino_cloud/README.md new file mode 100644 index 00000000..00e3ab98 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/arduino_cloud/README.md @@ -0,0 +1,44 @@ +# Arduino Cloud Brick + +This Brick provides integration with the Arduino Cloud platform, enabling IoT devices to communicate and synchronize data seamlessly. + +## Overview + +The Arduino Cloud Brick simplifies the process of connecting your Arduino device to the Arduino Cloud. It abstracts the complexities of device management, authentication, and data synchronization, allowing developers to focus on building applications and features. With this module, you can easily register devices, exchange data, and leverage cloud-based automation for your projects. + +## Features + +- Connects Arduino devices to the Arduino Cloud +- Supports device registration and authentication +- Enables data exchange between devices and the cloud +- Provides APIs for sending and receiving data + +## Prerequisites + +To use this Brick, we need to have an active Arduino Cloud account, and a **device** and **thing** setup. To obtain the credentials, please follow the instructions at this [link](https://docs.arduino.cc/arduino-cloud/features/manual-device/). This is also covered in the [Blinking LED with Arduino Cloud](/examples/cloud-blink). + +During the device configuration, we will obtain a `device_id` and `secret_key`, which is needed to use this Brick. Note that a Thing with the device associated is required, and that you will need to create variables / dashboard to send and receive data from the board. + +### Adding Credentials + +The `device_id` and `secret_key` can be added inside the Arduino Cloud brick, by clicking on the **Brick Configuration** button inside the Brick. + +Clicking the button will provide two fields where the `device_id` and `secret_key` can be added to the Brick. + +## Code Example and Usage + +```python +from arduino.app_bricks.arduino_cloud import ArduinoCloud +from arduino.app_utils import App, Bridge + +iot_cloud = ArduinoCloud() + +def led_callback(client: object, value: bool): + """Callback function to handle LED blink updates from cloud.""" + print(f"LED blink value updated from cloud: {value}") + Bridge.call("set_led_state", value) + +iot_cloud.register("led", value=False, on_write=led_callback) + +App.run() +``` \ No newline at end of file diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/audio_classification/README.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/audio_classification/README.md new file mode 100644 index 00000000..c79534b1 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/audio_classification/README.md @@ -0,0 +1,54 @@ +# Audio Classification Brick + +This Brick lets you perform audio classification using a pre-trained neural network model. + +## Overview + +The Audio Classification Brick allows you to: + +- Analyze live audio from a microphone and detect specific sounds. +- Classify audio from existing .wav files. +- Register custom callbacks that trigger when a given class is detected. +- Easily integrate sound recognition into your project using simple Python APIs. + +## Features + +- Real-time audio classification from microphone input. 
+- Classifies sounds from .wav files of different bit depths (8, 16, 24, 32-bit). +- Configurable confidence threshold for detections. +- Callback support for specific class detections. +- Simple start/stop control for audio processing. + +## Prerequisites + +- USB-C hub (with USB A or 3.5 mm audio port) +- Analog 3.5 mm or USB microphone for real-time classification +- WAV audio files with supported bit depths: 8, 16, 24 or 32-bit + +## Code example and usage + +```python +from arduino.app_bricks.audio_classifier import AudioClassifier +from arduino.app_utils import App + +classifier = AudioClassifier() +classifier.on_detect("Glass_Breaking", lambda: print(f"Glass breaking sound detected!")) + +App.run() +``` + +or using an existing audio file: + +```python +from arduino.app_bricks.audio_classifier import AudioClassifier + +classifier = AudioClassifier() +classification = classifier.classify_from_file("glass_breaking.wav") +print("Result:", classification) +``` + +## Audio Classification Working Principle + +Audio classification models take raw audio signals and extract numerical features representing the waveform. These features are processed by the model, which assigns one or more class labels to the input, representing the most likely sounds present. + +When running in real time, the classifier continuously processes incoming audio data from the microphone, returning detected classes above a configurable confidence threshold. For offline usage, audio can be read from .wav files, decoded into features, and passed through the same classification pipeline. \ No newline at end of file diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/camera_code_detection/README.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/camera_code_detection/README.md new file mode 100644 index 00000000..786da81d --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/camera_code_detection/README.md @@ -0,0 +1,45 @@ +# Camera Code Detection Brick + +This Brick enables real-time barcode and QR code scanning from a camera video stream. + +## Overview + +The Camera Code Detection Brick allows you to: + +- Capture frames from a USB camera. +- Configure camera settings (resolution and frame rate). +- Define the type of code to detect: barcodes and/or QR codes. +- Process detections with customizable callbacks. + +## Features + +- Supported Code Formats: + - **Linear**: EAN-13, EAN-8, UPC-A + - **2D**: QR Code +- Single-code detection mode for focused scanning +- Multi-code detection for simultaneous barcode and QR code scanning +- Provides detection coordinates for precise code location + +## Prerequisites + +To use this Brick you should have a USB camera connected to your board. + +**Tip**: Use a USB-C® Hub with USB-A connectors to support commercial web cameras. + +## Code example and usage + +```python +from arduino.app_bricks.camera_code_detection import CameraCodeDetection + +def render_frame(frame): + ... + +def handle_detected_code(frame, detection): + ... 
+ +# Select the camera you want to use, its resolution and the max fps +detection = CameraCodeDetection(camera=0, resolution=(640, 360), fps=10) +detection.on_frame(render_frame) +detection.on_detection(handle_detected_code) +detection.start() +``` diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/cloud_llm/README.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/cloud_llm/README.md new file mode 100644 index 00000000..add84514 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/cloud_llm/README.md @@ -0,0 +1,109 @@ +# Cloud LLM Brick + +The Cloud LLM Brick provides a seamless interface to interact with cloud-based Large Language Models (LLMs) such as OpenAI's GPT, Anthropic's Claude, and Google's Gemini. It abstracts the complexity of REST APIs, enabling you to send prompts, receive responses, and maintain conversational context within your Arduino projects. + +## Overview + +This Brick acts as a gateway to powerful AI models hosted in the cloud. It is designed to handle the nuances of network communication, authentication, and session management. Whether you need a simple one-off answer or a continuous conversation with memory, the Cloud LLM Brick provides a unified API for different providers. + +## Features + +- **Multi-Provider Support**: Compatible with major LLM providers including Anthropic (Claude), OpenAI (GPT), and Google (Gemini). +- **Conversational Memory**: Built-in support for windowed history, allowing the AI to remember context from previous exchanges. +- **Streaming Responses**: Receive text chunks in real-time as they are generated, ideal for responsive user interfaces. +- **Configurable Behavior**: Customize system prompts, temperature (creativity), and request timeouts. +- **Simple API**: Unified `chat` and `chat_stream` methods regardless of the underlying model provider. + +## Prerequisites + +- **Internet Connection**: The board must be connected to the internet to reach the LLM provider's API. +- **API Key**: A valid API key for the chosen service (e.g., OpenAI API Key, Anthropic API Key). +- **Python Dependencies**: The Brick relies on LangChain integration packages (`langchain-anthropic`, `langchain-openai`, `langchain-google-genai`). + +## Code Example and Usage + +### Basic Conversation + +This example initializes the Brick with an OpenAI model and performs a simple chat interaction. + +**Note:** The API key is not hardcoded. It is retrieved automatically from the **Brick Configuration** in App Lab. + +```python +import os +from arduino.app_bricks.cloud_llm import CloudLLM, CloudModel +from arduino.app_utils import App + +# Initialize the Brick (API key is loaded from configuration) +llm = CloudLLM( + model=CloudModel.OPENAI_GPT, + system_prompt="You are a helpful assistant for an IoT device." +) + +def simple_chat(): + # Send a prompt and print the response + response = llm.chat("What is the capital of Italy?") + print(f"AI: {response}") + +# Run the application +App.run(simple_chat) +``` + +### Streaming with Memory + +This example demonstrates how to enable conversational memory and process the response as a stream of tokens. 
+ +```python +from arduino.app_bricks.cloud_llm import CloudLLM, CloudModel +from arduino.app_utils import App + +# Initialize with memory enabled (keeps last 10 messages) +# API Key is retrieved automatically from Brick Configuration +llm = CloudLLM( + model=CloudModel.ANTHROPIC_CLAUDE +).with_memory(max_messages=10) + +def chat_loop(): + while True: + user_input = input("You: ") + if user_input.lower() in ["exit", "quit"]: + break + + print("AI: ", end="", flush=True) + + # Stream the response token by token + for token in llm.chat_stream(user_input): + print(token, end="", flush=True) + print() # Newline after response + +App.run(chat_loop) +``` + +## Configuration + +The Brick is initialized with the following parameters: + +| Parameter | Type | Default | Description | +| :-------------- | :-------------------- | :---------------------------- | :--------------------------------------------------------------------------------------------------------------------------------------- | +| `api_key` | `str` | `os.getenv("API_KEY")` | The authentication key for the LLM provider. **Recommended:** Set this via the **Brick Configuration** menu in App Lab instead of code. | +| `model` | `str` \| `CloudModel` | `CloudModel.ANTHROPIC_CLAUDE` | The specific model to use. Accepts a `CloudModel` enum or its string value. | +| `system_prompt` | `str` | `""` | A base instruction that defines the AI's behavior and persona. | +| `temperature` | `float` | `0.7` | Controls randomness. `0.0` is deterministic, `1.0` is creative. | +| `timeout` | `int` | `30` | Maximum time (in seconds) to wait for a response. | + +### Supported Models + +You can select a model using the `CloudModel` enum or by passing the corresponding raw string identifier. + +| Enum Constant | Raw String ID | Provider Documentation | +| :---------------------------- | :------------------------- | :-------------------------------------------------------------------------- | +| `CloudModel.ANTHROPIC_CLAUDE` | `claude-3-7-sonnet-latest` | [Anthropic Models](https://docs.anthropic.com/en/docs/about-claude/models) | +| `CloudModel.OPENAI_GPT` | `gpt-4o-mini` | [OpenAI Models](https://platform.openai.com/docs/models) | +| `CloudModel.GOOGLE_GEMINI` | `gemini-2.5-flash` | [Google Gemini Models](https://ai.google.dev/gemini-api/docs/models/gemini) | + +## Methods + +- **`chat(message)`**: Sends a message and returns the complete response string. Blocks until generation is finished. +- **`chat_stream(message)`**: Returns a generator yielding response tokens as they arrive. +- **`stop_stream()`**: Interrupts an active streaming generation. +- **`with_memory(max_messages)`**: Enables history tracking. `max_messages` defines the context window size. +- **`clear_memory()`**: Resets the conversation history. \ No newline at end of file diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/dbstorage_sqlstore/README.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/dbstorage_sqlstore/README.md new file mode 100644 index 00000000..53a837d0 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/dbstorage_sqlstore/README.md @@ -0,0 +1,68 @@ +# Database - SQL Brick + +This brick helps you manage SQLite databases easily by providing a simple interface for creating tables, inserting data, and handling database connections. 
+ +## Overview + +The Database - SQL brick allows you to: + +- Use a simple API for SQLite database operations +- Create tables with custom schemas +- Insert, update, and delete records +- Query data with flexible filters +- Manage connections automatically +- Handle errors for common database issues + +It provides thread-safe database operations using SQLite as the underlying database engine. It supports named access to columns for easy data handling. The brick automatically manages database file storage in a dedicated directory structure and handles the connection lifecycle. + +## Features + +- Thread-safe database operations for multi-threaded applications +- Automatic table creation with type inference from data +- Flexible data querying with `WHERE`, `ORDER BY`, and `LIMIT` clauses +- Schema management with column addition and removal capabilities +- Raw SQL execution for advanced operations +- Named column access using `sqlite3.Row` factory + +## Code example and usage + +Instantiate a new class to open (or create a new database): + +```python +from arduino.app_bricks.dbstorage_sqlstore import SQLStore + +db = SQLStore("example.db") +# ... Do work + +# Close database +db.stop() +``` + +To create a new table: + +```python +# Create a table +columns = { +    "id": "INTEGER PRIMARY KEY", +    "name": "TEXT", +    "age": "INTEGER" +} +db.create_table("users", columns) +``` + +Insert new data in a table: + +```python +# Insert data +data = { +    "name": "Alice", +    "age": 30 +} +db.store("users", data) +``` + +## Understanding Database Operations + +The SQLStore automatically creates a directory structure for database storage, placing files in `data/dbstorage_sqlstore/` within your application directory. The brick supports automatic type inference when creating tables, mapping Python types (*int*, *float*, *str*, *bytes*) to corresponding SQLite column types (*INTEGER*, *REAL*, *TEXT*, *BLOB*). + +The `store()` method can automatically create tables if they don't exist by analyzing the data types of the provided values. This makes it easy to get started without defining schemas upfront, while still allowing explicit table creation for more control over column definitions and constraints. diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/dbstorage_tsstore/README.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/dbstorage_tsstore/README.md new file mode 100644 index 00000000..c9209232 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/dbstorage_tsstore/README.md @@ -0,0 +1,54 @@ +# Database - Time Series Brick + +This brick helps you manage and store time series data efficiently using InfluxDB. + +## Overview + +The Database - Time series brick allows you to: + +- Efficiently store and retrieve time series data +- Use a simple API for writing and reading time series measurements +- Handle database connections automatically +- Integrate your projects easily with InfluxDB +- Use methods for querying and managing stored data +- Handle errors and manage resources robustly + +It provides a refined interface for working with time series data, automatically managing InfluxDB connections and providing flexible querying capabilities with time ranges, aggregation functions, and data retention policies. 
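As a quick illustration of the time-range and aggregation style of access described above, here is a minimal sketch. Only `TimeSeriesStore`, `start()`, `write_sample()` and `stop()` are calls shown elsewhere in this README; the commented-out `read_samples(...)` call and its `start`/`aggregation` arguments are hypothetical placeholders used purely to illustrate relative periods and aggregation, so check the dbstorage_tsstore API reference for the actual query methods.

```python
from arduino.app_bricks.dbstorage_tsstore import TimeSeriesStore

db = TimeSeriesStore()
db.start()

# Write a couple of samples into the "temp" measurement
db.write_sample("temp", 21.4)
db.write_sample("temp", 21.9)

# Hypothetical query call (method and argument names are placeholders):
# a relative period such as "-1h" or "-2d" selects the time range, and an
# aggregation function such as "mean" summarises the samples in that range.
# samples = db.read_samples("temp", start="-1h", aggregation="mean")

db.stop()
```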
+ +## Features + +- Automatic data retention management with configurable retention periods +- Flexible time range queries with relative periods (e.g., `-1d`, `-2h`) or absolute timestamps +- Data aggregation support with functions like *mean*, *max*, *min*, and *sum* +- Configurable measurement organization and field naming +- Thread-safe operations for concurrent access +- Built-in validation for time parameters and aggregation settings + +## Code example and usage + +Instantiate a new class to open a database connection: + +```python +import time +from arduino.app_bricks.dbstorage_tsstore import TimeSeriesStore + +db = TimeSeriesStore() +db.start() + +db.write_sample("temp", 21) +db.write_sample("hum", 45) +time.sleep(1) + +last_temp = db.read_last_sample("temp") +last_hum = db.read_last_sample("hum") +print(f"Last temperature: {last_temp}") +print(f"Last humidity: {last_hum}") + +db.stop() +``` + +## Understanding Time Series Operations + +The TimeSeriesStore organizes data using InfluxDB's measurement and field structure, where measurements work as containers for related metrics and fields represent individual sensor readings or data points. Each data point includes a timestamp, allowing for precise time-based queries and analysis. + +The brick supports flexible time range specifications using relative periods, such as `-1d` for the last day or `-2h` for the last two hours, as well as absolute timestamps in RFC 3339 format. Data retention is automatically managed based on the configured retention period, allowing for controlled storage usage while maintaining relevant historical data. \ No newline at end of file diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/image_classification/README.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/image_classification/README.md new file mode 100644 index 00000000..6341ed41 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/image_classification/README.md @@ -0,0 +1,43 @@ +# Image Classification Brick + +This Brick lets you perform image classification using a pre-trained neural network model. + +## Overview + +The Image Classification Brick allows you to: + +- Analyze images and categorize their contents using a machine learning model. +- Use locally stored image files or camera feeds as input. +- Easy integration with your project using simple Python APIs. 
+
+
+## Features
+
+- Detects multiple objects in a single image
+- Returns class names and confidence scores for detected objects
+- Supports input as bytes, file paths or PIL images
+- Configurable model parameters (e.g., image type, confidence threshold)
+
+## Code example and usage
+
+```python
+from arduino.app_bricks.image_classification import ImageClassification
+
+image_classification = ImageClassification()
+
+# Image frame can be passed as bytes or as a PIL image
+with open("path/to/your/image.jpg", "rb") as f:
+    frame = f.read()
+
+out = image_classification.classify(frame)
+# it is possible to customize image type and confidence level
+# out = image_classification.classify(frame, image_type = "png", confidence = 0.35)
+if out and "classification" in out:
+    for i, obj_det in enumerate(out["classification"]):
+        # For every object detected, get its details
+        detected_object = obj_det.get("class_name", None)
+        confidence = obj_det.get("confidence", None)
+```
+
+## Image Classification Working Principle
+
+Image classification models take an input image and assign one or more class labels to it, representing the most likely categories present in the image. These models analyze the image as a whole and do not localize objects within the frame. The result is a ranked list of predicted labels, each accompanied by a confidence score indicating how likely the model considers that label to be correct.
\ No newline at end of file
diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/keyword_spotting/README.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/keyword_spotting/README.md
new file mode 100644
index 00000000..d1b52304
--- /dev/null
+++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/keyword_spotting/README.md
@@ -0,0 +1,62 @@
+# Keyword Spotter Brick
+
+Brick for keyword spotting using a pre-trained model that processes a continuous audio stream to detect specific keywords or phrases.
+
+## Overview
+
+The Keyword Spotter brick allows you to:
+
+- Detect specific keywords in real-time audio streams
+- Use pre-trained models provided by the framework
+- Integrate custom audio classification models trained on the Edge Impulse platform
+- Configure detection confidence levels and debounce timing
+- Register callback functions for keyword detection events
+
+It processes audio input through a microphone to classify and detect targeted keywords or phrases. The brick supports both framework-provided models and custom models trained on the Edge Impulse platform, making it flexible for custom keyword detection applications.
+
+## Prerequisites
+
+Before using the Keyword Spotter brick, ensure you have the following components:
+
+- USB microphone
+
+Tips:
+- Use a USB-C® Hub with USB-A connectors to support commercial USB cameras with a microphone.
+- Microphones included in USB camera/webcams are generally supported + +## Features + +- Real-time audio processing with continuous stream analysis +- Configurable confidence thresholds for detection accuracy +- Debounce functionality to prevent repeated detections   +- Callback-based event handling for detected keywords +- Support for custom Edge Impulse trained models +- Default microphone initialization when no mic specified + +## Code example and usage + +Here is a basic example for detecting the 'hello world' keyword: + +```python +from arduino.app_bricks.keyword_spotter import KeywordSpotter +from arduino.app_utils import App + +spotter = KeywordSpotter() +spotter.on_detect("helloworld", lambda: print(f"Hello world detected!")) + +App.run() +``` + +You can customize the confidence level and debounce timing: + +```python +spotter = KeywordSpotter(confidence=0.9, debounce_sec=3.0) +``` + +## Understanding Detection Parameters + +The KeywordSpotter uses three key configuration parameters: + +- The `confidence` parameter sets the minimum confidence level required for a detection, with higher values reducing false positives but potentially missing valid detections. +- The `debounce_sec` parameter prevents repeated detection callbacks for the same keyword within the specified time window. +- The `mic` parameter allows you to specify a custom Microphone instance. Otherwise, it defaults to a standard microphone. \ No newline at end of file diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/mood_detector/README.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/mood_detector/README.md new file mode 100644 index 00000000..23ede757 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/mood_detector/README.md @@ -0,0 +1,38 @@ +# Mood Detector Brick + +This directory contains the implementation of the Mood Detector Brick, which classifies text sentiment as positive, negative, or neutral using the NLTK VADER analyzer bundled with the Brick assets (no external download required at runtime). + +## Overview + +The Mood Detector Brick analyzes a sentence and returns its overall mood: "positive", "negative", or "neutral". It is lightweight, runs locally, and requires no internet connection. + +Examples: +- "I love this board!" -> positive +- "The weather is awful" -> negative +- "I am sad today" -> negative +- "The temperature is 25" -> neutral + + +## Features + +- Classifies text as positive, negative, or neutral. +- Runs locally; no external services required. +- Case-insensitive; robust to basic punctuation. +- Sensible defaults for edge cases: empty/whitespace -> neutral; non-English text -> typically neutral. +- Simple API: `MoodDetector.get_sentiment(text) -> str`. 
+
+
+## Code example and usage
+
+```python
+from arduino.app_bricks.mood_detector import MoodDetector
+
+mood = MoodDetector()
+
+print(mood.get_sentiment("This is a wonderful and amazing product!"))  # positive
+print(mood.get_sentiment("I am feeling very sad and disappointed today."))  # negative
+print(mood.get_sentiment("The report will be ready by 5 PM."))  # neutral
+
+# Edge cases
+print(mood.get_sentiment(""))  # neutral (empty input)
+print(mood.get_sentiment("Questo è bello"))  # neutral (non-English)
+```
diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/motion_detection/README.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/motion_detection/README.md
new file mode 100644
index 00000000..405f0ecd
--- /dev/null
+++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/motion_detection/README.md
@@ -0,0 +1,113 @@
+# Motion Detection Brick
+
+Leveraging pre-trained AI models, this brick enables motion detection by processing accelerometer samples to identify specific movements.
+
+It can integrate with models provided by the framework or custom models trained via the Edge Impulse platform.
+
+## Overview
+
+The Motion Detection brick allows you to:
+
+- Process accelerometer data to identify specific motion patterns
+- Use pre-trained models provided by the framework
+- Integrate custom motion classification models trained on the Edge Impulse platform
+- Register callbacks for detected motion patterns
+- Configure detection confidence levels
+
+It analyzes accelerometer samples in real time to classify motion patterns such as gestures, activities or specific movements. The brick manages data buffering and model inference, and provides callback mechanisms for handling detected motions.
+
+## Features
+
+- Real-time motion pattern classification using accelerometer data
+- Configurable confidence thresholds for detection accuracy
+- Automatic data buffering with sliding window processing
+- Callback-based event handling for detected motion patterns
+- Support for custom Edge Impulse trained models
+- Thread-safe motion detection processing
+
+## Code example and usage
+
+```python
+from arduino.app_bricks.motion_detection import MotionDetection
+from arduino.app_utils import App, Bridge
+
+motion_detection = MotionDetection(confidence=0.4)
+
+# Register function to receive samples from sketch
+def record_sensor_movement(x: float, y: float, z: float):
+    # Acceleration from the sensor is in g, while the model expects m/s^2.
+    x = x * 9.81
+    y = y * 9.81
+    z = z * 9.81
+
+    # Append the values to the sensor buffer. These samples will be sent to the model.
+    global motion_detection
+    motion_detection.accumulate_samples((x, y, z))
+
+Bridge.provide("record_sensor_movement", record_sensor_movement)
+
+# Register action to take after successful detection
+def on_updown_movement_detected(classification: dict):
+    print("updown movement detected!")
+
+motion_detection.on_movement_detection('updown', on_updown_movement_detected)
+
+App.run()
+```
+
+Samples can be provided by an accelerometer connected to the microcontroller.
+
+Here is an example using a Modulino Movement accelerometer.
+ +```c++ +#include +#include + +// Create a ModulinoMovement object +ModulinoMovement movement; + +float x_accel, y_accel, z_accel; // Accelerometer values in g + +unsigned long previousMillis = 0; // Stores last time values were updated +const long interval = 16; // Interval at which to read (16ms) - sampling rate of 62.5Hz and should be adjusted based on model definition +int has_movement = 0; // Flag to indicate if movement data is available + +void setup() { + Bridge.begin(); + + // Initialize Modulino I2C communication + Modulino.begin(Wire1); + + // Detect and connect to movement sensor module + while (!movement.begin()) { + delay(1000); + } +} + +void loop() { + unsigned long currentMillis = millis(); // Get the current time + + if (currentMillis - previousMillis >= interval) { + // Save the last time you updated the values + previousMillis = currentMillis; + + // Read new movement data from the sensor + has_movement = movement.update(); + if(has_movement == 1) { + // Get acceleration values + x_accel = movement.getX(); + y_accel = movement.getY(); + z_accel = movement.getZ(); + + Bridge.notify("record_sensor_movement", x_accel, y_accel, z_accel); + } + + } +} +``` + +## Understanding Motion Detection + +The Motion Detection brick processes accelerometer data through a sliding window buffer system, analyzing patterns of movement to classify specific motions or gestures. The confidence parameter determines the minimum threshold required for a motion pattern to be considered detected. + +Motion patterns are identified using machine learning models that have been trained to recognize specific movement signatures. Each detected motion includes a confidence score indicating the model's certainty about the classification, allowing you to filter results based on your reliability requirements. \ No newline at end of file diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/object_detection/README.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/object_detection/README.md new file mode 100644 index 00000000..3234ca67 --- /dev/null +++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/object_detection/README.md @@ -0,0 +1,45 @@ +# Object Detection Brick + +This Brick provides a Python interface for **detecting objects** within a given image. + +## Overview + +The Object Detection Brick allows you to: + +- Detect objects in an image, either from a local file or directly from a camera feed. +- Locate detected objects in the image using bounding boxes. +- Get the detection confidence value of each object and its label. 
+
+
+## Features
+
+- Performs real-time object detection on static images
+- Outputs bounding boxes, class labels, and confidence scores for each detected object
+- Supports multiple image formats, including JPEG, JPG, and PNG (default: JPG)
+- Allows customization of detection confidence and non-maximum suppression (NMS) thresholds
+- Easily integrates with PIL images or raw image byte streams
+
+## Code example and usage
+
+```python
+from arduino.app_bricks.object_detection import ObjectDetection
+
+object_detection = ObjectDetection()
+
+# Image frame can be passed as bytes or as a PIL image
+with open("path/to/your/image.jpg", "rb") as f:
+    frame = f.read()
+
+out = object_detection.detect(frame)
+# it is possible to customize image type, confidence level and box overlap
+# out = object_detection.detect(frame, image_type = "png", confidence = 0.35, overlap = 0.5)
+if out and "detection" in out:
+    for i, obj_det in enumerate(out["detection"]):
+        # For every object detected, get its details
+        detected_object = obj_det.get("class_name", None)
+        bounding_box = obj_det.get("bounding_box_xyxy", None)
+        confidence = obj_det.get("confidence", None)
+
+# draw the bounding boxes on the image
+out_image = object_detection.draw_bounding_boxes(frame, out)
+```
+
diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/streamlit_ui/README.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/streamlit_ui/README.md
new file mode 100644
index 00000000..9d50a796
--- /dev/null
+++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/streamlit_ui/README.md
@@ -0,0 +1,34 @@
+# WebUI - Streamlit Brick
+
+This brick enables you to create and host interactive, Python-based web applications powered by the **Streamlit** framework.
+
+## Overview
+
+The WebUI - Streamlit Brick allows you to:
+
+- Build rich, interactive UIs using simple Python syntax
+- Display real-time data from sensors, devices, or external APIs
+- Trigger actions in other bricks or microcontrollers through buttons, sliders, or inputs
+
+When running, your application will be accessible via a web browser at `http://<board-ip>:7000`
+
+## Features
+
+- Enables Streamlit web server functionality on port 7000
+- Supports interactive UI components for data visualization and input
+- Easily integrates with other Python modules and Arduino bricks
+- Supports themes, layout customization, and Markdown/HTML rendering
+
+## Code example and usage
+
+```python
+from arduino.app_bricks.streamlit_ui import st
+
+st.title("Arduino Streamlit UI Example")
+st.write("Interact with your Arduino modules using this web interface.")
+
+if st.button("Send Command"):
+    st.success("Command sent to Arduino!")
+
+```
+
diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/vibration_anomaly_detection/README.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/vibration_anomaly_detection/README.md
new file mode 100644
index 00000000..520fecb1
--- /dev/null
+++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/vibration_anomaly_detection/README.md
@@ -0,0 +1,121 @@
+# Vibration Anomaly Detection Brick
+
+This Brick lets you detect vibration anomalies from accelerometer data using a pre-trained Edge Impulse model. It’s ideal for condition monitoring, predictive maintenance, and automation projects.
+
+## Overview
+
+The Vibration Anomaly Detection Brick allows you to:
+
+- Stream accelerometer samples and evaluate the anomaly score per window.
+- Trigger a callback automatically when the anomaly score crosses a threshold.
+- Integrate quickly via a simple Python API and Arduino Router Bridge.
+
+## Features
+
+- **Edge-Impulse powered**: runs your deployed model via `EdgeImpulseRunnerFacade`.
+- **Sliding window ingestion**: samples are buffered to the model’s exact input length.
+- **Threshold callbacks**: invoke your handler when `anomaly_score ≥ threshold`.
+- **Flexible callback signatures**:
+  - `callback()`
+  - `callback(anomaly_score: float)`
+  - `callback(anomaly_score: float, classification: dict)` (if your model returns a classification head alongside anomaly)
+
+## Code Example and Usage
+
+In the Python® part, use the following script that exposes the `record_sensor_movement` function and analyzes incoming accelerometer data:
+
+```python
+from arduino.app_bricks.vibration_anomaly_detection import VibrationAnomalyDetection
+from arduino.app_utils import *
+
+logger = Logger("Vibration Anomaly Example")
+
+# Create the Brick with a chosen anomaly threshold
+vibration = VibrationAnomalyDetection(anomaly_detection_threshold=1.0)
+
+# Callback to run when an anomaly is detected (registered below via on_anomaly)
+def on_detected_anomaly(anomaly_score: float, classification: dict = None):
+    print(f"[Anomaly] score={anomaly_score:.3f}")
+
+# Expose a function that Arduino can call via Router Bridge
+# Expecting accelerations in 'g' from the microcontroller
+def record_sensor_movement(x_g: float, y_g: float, z_g: float):
+    # Convert to m/s^2 if your model was trained in SI units
+    G_TO_MS2 = 9.80665
+    x = x_g * G_TO_MS2
+    y = y_g * G_TO_MS2
+    z = z_g * G_TO_MS2
+    # Push a triple (x, y, z) into the sliding window
+    vibration.accumulate_samples((x, y, z))
+
+vibration.on_anomaly(on_detected_anomaly)
+
+Bridge.provide("record_sensor_movement", record_sensor_movement)
+
+model_info = vibration.get_model_info()
+period = 1.0 / model_info.frequency if model_info and model_info.frequency > 0 else 0.02
+
+# Run the host app (handles Router Bridge and our processing loop)
+logger.info(f"Starting App... model_freq={getattr(model_info, 'frequency', 'unknown')}Hz period={period:.4f}s")
+
+App.run()
+```
+
+Any accelerometer can provide samples.
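+For instance, previously recorded samples can be replayed through the very same API. The sketch below is only an illustration: the CSV file name and its column layout (x, y, z, already in m/s²) are assumptions, and `App.run()` still drives the brick's processing loop as in the example above.
+
+```python
+import csv
+
+from arduino.app_bricks.vibration_anomaly_detection import VibrationAnomalyDetection
+from arduino.app_utils import App
+
+vibration = VibrationAnomalyDetection(anomaly_detection_threshold=1.0)
+vibration.on_anomaly(lambda anomaly_score: print(f"[Anomaly] score={anomaly_score:.3f}"))
+
+# Hypothetical recording: one "x,y,z" row per sample, captured at the model's sampling rate
+with open("recorded_vibration.csv") as f:
+    for row in csv.reader(f):
+        x, y, z = (float(v) for v in row)
+        vibration.accumulate_samples((x, y, z))
+
+App.run()
+```
+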
+Here is an example using **Modulino Movement** via Arduino Router Bridge:
+
+```c++
+#include <Modulino.h>
+#include <Arduino_RouterBridge.h> // Router Bridge API (header name assumed; use the Bridge header from your sketch template if it differs)
+
+// Create a ModulinoMovement object
+ModulinoMovement movement;
+
+float x_accel, y_accel, z_accel; // Accelerometer values in g
+
+unsigned long previousMillis = 0; // Stores last time values were updated
+const long interval = 10;         // Read interval (10 ms, i.e. a 100 Hz sampling rate); adjust to match the model's expected sampling rate
+int has_movement = 0;             // Flag to indicate if movement data is available
+
+void setup() {
+  Bridge.begin();
+
+  // Initialize Modulino I2C communication
+  Modulino.begin(Wire1);
+
+  // Detect and connect to movement sensor module
+  while (!movement.begin()) {
+    delay(1000);
+  }
+}
+
+void loop() {
+  unsigned long currentMillis = millis(); // Get the current time
+
+  if (currentMillis - previousMillis >= interval) {
+    // Save the last time you updated the values
+    previousMillis = currentMillis;
+
+    // Read new movement data from the sensor
+    has_movement = movement.update();
+    if (has_movement == 1) {
+      // Get acceleration values
+      x_accel = movement.getX();
+      y_accel = movement.getY();
+      z_accel = movement.getZ();
+
+      Bridge.notify("record_sensor_movement", x_accel, y_accel, z_accel);
+    }
+  }
+}
+```
+
+## Working Principle
+
+Vibration anomaly models learn normal accelerometer patterns over time. Each new time window is compared to that baseline and assigned an anomaly score; higher scores mean the vibration deviates unusually (e.g., new frequencies or amplitudes).
+
+- **Buffering:** Incoming samples are appended to a `SlidingWindowBuffer` sized to your model’s `input_features_count`.
+- **Inference:** When a full window is available, features are passed to `EdgeImpulseRunnerFacade.infer_from_features(...)`.
+- **Scoring:** The Brick extracts the anomaly score (and any optional classification output) from the inference result.
+- **Callback:** If `anomaly_score ≥ anomaly_detection_threshold`, your registered `on_anomaly(...)` callback is invoked.
\ No newline at end of file
diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/video_image_classification/README.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/video_image_classification/README.md
new file mode 100644
index 00000000..4aa523dc
--- /dev/null
+++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/video_image_classification/README.md
@@ -0,0 +1,52 @@
+# Video Image Classification Brick
+
+This Brick provides a Python® interface for **classifying video frames in real time** using a pre-trained machine learning model.
+
+## Overview
+
+The Video Image Classification Brick allows you to:
+
+- Continuously analyze frames from a live video stream.
+- Classify each frame's content into one or more categories.
+- Receive real-time callbacks when specific labels are detected.
+- React to *all* classifications through a consolidated callback.
+- Use pre-trained models bundled with the framework or custom models trained on the **Edge Impulse** platform.
+
+## Features
+
+- Performs live classification on video streams.
+- Outputs class labels with their associated confidence scores.
+- Supports custom callback functions for specific labels.
+- Provides a global callback to handle all classifications at once.
+- Configurable **confidence threshold** and **debounce interval** to reduce noise and avoid repeated triggers.
+- Easy integration with Python® applications.
+
+## Prerequisites
+
+To use this Brick, you should have a USB camera connected to your board.
+
+**Tip**: Use a USB-C® Hub with USB-A connectors to support commercial web cameras.
+
+## Code example and usage
+
+```python
+from arduino.app_utils import App
+from arduino.app_bricks.video_imageclassification import VideoImageClassification
+
+# Create a classification stream with default confidence threshold (0.3)
+classification_stream = VideoImageClassification()
+
+# Example: callback when "sunglasses" are detected
+def sunglass_detected():
+    print("Detected sunglasses!")
+
+classification_stream.on_detect("sunglasses", sunglass_detected)
+
+# Example: callback for all classifications
+def all_detected(results):
+    print("Classifications:", results)
+
+classification_stream.on_detect_all(all_detected)
+
+App.run()
+```
diff --git a/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/video_object_detection/README.md b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/video_object_detection/README.md
new file mode 100644
index 00000000..94f4de46
--- /dev/null
+++ b/debian/arduino-app-cli/home/arduino/.local/share/arduino-app-cli/assets/0.6.0/docs/arduino/video_object_detection/README.md
@@ -0,0 +1,64 @@
+# Video Object Detection Brick
+
+This Brick provides a Python interface for **detecting objects in real time from a USB camera video stream**.
+It connects to a model runner over WebSocket, continuously analyzes incoming frames, and produces detection events with predicted labels, bounding boxes, and confidence scores.
+
+Beyond visualization, it allows you to **register callbacks** that react to detections, either for specific objects or for all detections, enabling event-driven logic in your applications.
+It supports both **pre-trained models** provided by the framework and **custom models** trained with Edge Impulse.
+
+## Overview
+
+The Video Object Detection Brick allows you to:
+
+- Continuously detect objects from a live camera or video stream.
+- Get bounding boxes, labels, and confidence scores in real time.
+- Trigger custom Python functions when certain objects are detected.
+- Handle all detections in a single callback if desired.
+- Control confidence thresholds and debounce timing to avoid repeated triggers.
+- Override the detection threshold dynamically at runtime (if supported by the model).
+
+## Features
+
+- Real-time detection stream with continuous object recognition.
+- Outputs:
+  - **Class label** (e.g., "person", "bicycle")
+  - **Confidence score** for each detection
+  - **Bounding boxes** for localized detections
+- Two callback styles:
+  - `on_detect("