From 73c6ab4f7884fa0530a0bc36ff835f9d7caafb10 Mon Sep 17 00:00:00 2001 From: Jason Andrews Date: Tue, 8 Jul 2025 13:40:32 +0100 Subject: [PATCH] spelling and link fixes --- .wordlist.txt | 127 +++++++++++++++++- content/learning-paths/automotive/_index.md | 8 +- .../1-get-started.md | 2 +- .../7-sme2-matmul-intr.md | 4 +- .../multiplying-matrices-with-sme2/_index.md | 2 +- .../zenoh-multinode-ros2/3_zenoh-multinode.md | 2 +- .../4_zenoh-ex1-pubsub.md | 4 +- .../5_zenoh-ex2-storagequery.md | 6 +- .../6_zenoh-ex3-queryable.md | 4 +- .../zenoh-multinode-ros2/7_zenoh-querycomp.md | 6 +- .../zenoh-multinode-ros2/_index.md | 2 +- .../embedded-and-microcontrollers/_index.md | 20 +-- .../1-overview.md | 2 +- content/learning-paths/iot/_index.md | 8 +- .../laptops-and-desktops/_index.md | 2 +- .../mobile-graphics-and-gaming/_index.md | 1 + .../servers-and-cloud-computing/_index.md | 11 +- .../06_running_inference.md | 2 +- 18 files changed, 173 insertions(+), 40 deletions(-) diff --git a/.wordlist.txt b/.wordlist.txt index 6620632450..00a6d1f24e 100644 --- a/.wordlist.txt +++ b/.wordlist.txt @@ -4323,4 +4323,129 @@ taskset unicast wrk's yy -zenoh \ No newline at end of file +zenoh +AFM +AOR +AWSEC +Agrawal +Arcee +Atheros +ChenYing +Colima +Corellium +Corestone +Croci +DBAREMETAL +Denormal +Docker's +Dpls +ETHOSU +FVP's +Gopalakrishnan +Gorman +HVM +Higham +Huai +ICML +IIR +IIoT +ImageStreams +Joana +Kuo +LANs +Liang +Libmath +MACC +MACCs +MachineSets +MobileNet +OpenShift's +PMLR +PipelineRun +PreserveOriginal +Queryable +Reimport +SPRA +STRINGIFY +Sram +TMS +Tekton +VPs +VSCode +WANs +WR +Waheed +Weidmann +Wikitest +XQuartz +Xiu +ZGVnN +Zenon +afm +aot +arXiv +arcee +armpl +ath +bitbake +bootloaders +cntr +cosf +cpp's +datadir +dddd +denormal +denormalized +edgeimpulse +ethosu +expf +geo +gupta +iMac +instrinsics +ish +keypair +learnable +libcurl +lldb +mL +mM +mR +mlr +nbc +nbr +nodeAffinity +nordic +oc +oe +openshift +pcs +podTemplate +podTemplate's +postinstallation 
+prepending +prog +queryable +redhat +reimport +shadergraph +sheel +spra +stefanalfbo +subnormals +taskrun +tg +thisunrolling +tokio +topologies +umax +varg +vexp +vgetq +vres +wifi +wlan +wlp +wlx +xquartz +zenohd \ No newline at end of file diff --git a/content/learning-paths/automotive/_index.md b/content/learning-paths/automotive/_index.md index f25f1f9cce..8a002e10db 100644 --- a/content/learning-paths/automotive/_index.md +++ b/content/learning-paths/automotive/_index.md @@ -12,15 +12,17 @@ title: Automotive weight: 4 subjects_filter: - Containers and Virtualization: 3 -- Performance and Architecture: 1 +- Performance and Architecture: 2 operatingsystems_filter: - Baremetal: 1 -- Linux: 3 +- Linux: 4 - RTOS: 1 tools_software_languages_filter: - Automotive: 1 +- C: 1 - Docker: 2 - Python: 2 +- Raspberry Pi: 1 - ROS 2: 1 -- ROS2: 1 +- ROS2: 2 --- diff --git a/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/1-get-started.md b/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/1-get-started.md index b6d5978122..71356a0f3e 100644 --- a/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/1-get-started.md +++ b/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/1-get-started.md @@ -58,7 +58,7 @@ code-examples/learning-paths/cross-platform/multiplying-matrices-with-sme2/ └── sme2_check.c ``` -Amongst other files, it includes: +Among other files, it includes: - Code examples. - A `Makefile` to build the code. - `run-fvp.sh` to run the FVP model. 
diff --git a/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/7-sme2-matmul-intr.md b/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/7-sme2-matmul-intr.md index eba6850aaf..ee245df3ba 100644 --- a/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/7-sme2-matmul-intr.md +++ b/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/7-sme2-matmul-intr.md @@ -248,8 +248,8 @@ The core of the multiplication is done in 2 parts: Once again, intrinsics makes it easy to fully leverage SME2, provided you have a solid understanding of its available instructions. The compiler is automatically -handling many low-level aspects (saving / restoring of the difeerent contexts), -as well as not using registers that are reserved on specific plaforms (like +handling many low-level aspects (saving / restoring of the different contexts), +as well as not using registers that are reserved on specific platforms (like `x18`). Predicates handle corner cases elegantly, ensuring robust execution. Most importantly, the code adapts to different SVL values across various hardware implementations without requiring recompilation. 
This follows the key diff --git a/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/_index.md b/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/_index.md index 918fcc44f8..9edcbecd67 100644 --- a/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/_index.md +++ b/content/learning-paths/cross-platform/multiplying-matrices-with-sme2/_index.md @@ -18,7 +18,7 @@ prerequisites: - Intermediate proficiency with the C programming language and the Armv9-A assembly language - A computer running Linux, macOS, or Windows - Installations of Git and Docker for project setup and emulation - - A platform that supports SME2 (see the list of [devices with SME2 support](/learning-paths/cross-platform/multiplying-matrices-with-sme2/1-get-started/#devices-with-sme2-support)) or an emulator to run code with SME2 instructions + - A platform that supports SME2 (see the list of [devices with SME2 support](/learning-paths/cross-platform/multiplying-matrices-with-sme2/1-get-started/#devices)) or an emulator to run code with SME2 instructions - Compiler support for SME2 instructions (for example, LLVM 17+ with SME2 backend support) author: Arnaud de Grandmaison diff --git a/content/learning-paths/cross-platform/zenoh-multinode-ros2/3_zenoh-multinode.md b/content/learning-paths/cross-platform/zenoh-multinode-ros2/3_zenoh-multinode.md index 840dcc38d7..8d21f0bfc0 100644 --- a/content/learning-paths/cross-platform/zenoh-multinode-ros2/3_zenoh-multinode.md +++ b/content/learning-paths/cross-platform/zenoh-multinode-ros2/3_zenoh-multinode.md @@ -14,7 +14,7 @@ In this session, you’ll use Raspberry Pi boards to simulate a scalable, distri You’ll learn how to use Docker to deploy the environment on physical devices, and how to duplicate virtual instances using snapshot cloning on Arm Virtual Hardware. 
-This setup lets you simulate `real-world`, `cross-node communication`, making it ideal for validating Zenoh’s performance in robotics and industrial IoT use cases. +This setup lets you simulate `real-world`, `cross-node communication`, making it ideal for validating Zenoh's performance in robotics and industrial IoT use cases. ### Install Docker on Raspberry Pi diff --git a/content/learning-paths/cross-platform/zenoh-multinode-ros2/4_zenoh-ex1-pubsub.md b/content/learning-paths/cross-platform/zenoh-multinode-ros2/4_zenoh-ex1-pubsub.md index e1a223c562..fc0115618a 100644 --- a/content/learning-paths/cross-platform/zenoh-multinode-ros2/4_zenoh-ex1-pubsub.md +++ b/content/learning-paths/cross-platform/zenoh-multinode-ros2/4_zenoh-ex1-pubsub.md @@ -8,7 +8,7 @@ layout: learningpathall ## Example 1: Simple Pub/Sub -This first test demonstrates Zenoh’s real-time publish/subscribe model using two Raspberry Pi devices. +This first test demonstrates Zenoh's real-time publish/subscribe model using two Raspberry Pi devices. The following command is to initiate a subscriber for a key expression `demo/example/**`, i.e. a set of topics starting with the path `demo/example`. @@ -36,4 +36,4 @@ The result will look like: In the left-side window, I have logged into the device Pi4 and run the z_sub program. It receives values with the key `demo/example/zenoh-rs-pub` continuously published by z_pub running on Pi in the right-side window. -This basic example shows Zenoh’s zero-config discovery and low-latency pub/sub across physical nodes. +This basic example shows Zenoh's zero-config discovery and low-latency pub/sub across physical nodes. 
diff --git a/content/learning-paths/cross-platform/zenoh-multinode-ros2/5_zenoh-ex2-storagequery.md b/content/learning-paths/cross-platform/zenoh-multinode-ros2/5_zenoh-ex2-storagequery.md index d99dbcceb8..a0cdea6cef 100644 --- a/content/learning-paths/cross-platform/zenoh-multinode-ros2/5_zenoh-ex2-storagequery.md +++ b/content/learning-paths/cross-platform/zenoh-multinode-ros2/5_zenoh-ex2-storagequery.md @@ -8,13 +8,13 @@ layout: learningpathall ## Example 2: Storage and Query -The second example adds Zenoh’s data storage and querying capabilities—enabling nodes to retrieve historical values on demand. +The second example adds Zenoh's data storage and querying capabilities—enabling nodes to retrieve historical values on demand. Building on the previous Pub/Sub example, you’ll now explore how Zenoh supports `persistent data storage` and `on-demand querying` -- a powerful feature for robotics and IIoT applications. In a typical warehouse or factory scenario, autonomous robots may periodically publish sensor data (e.g., position, temperature, battery level), and a central system—or another robot—may later need to query the latest state of each unit. -Unlike Pub/Sub, which requires live, real-time message exchange, Zenoh’s storage and query model enables asynchronous access to data that was published earlier, even if the original publisher is no longer online. +Unlike Pub/Sub, which requires live, real-time message exchange, Zenoh's storage and query model enables asynchronous access to data that was published earlier, even if the original publisher is no longer online. In this example, you’ll run the zenohd daemon with in-memory storage and use z_put to publish data and z_get to retrieve it. @@ -70,5 +70,5 @@ The result will look like: If you have more than two Raspberry Pi devices, you can run the z_get command on a third RPi to validate that storage queries work seamlessly across a multi-node setup. 
{{% /notice %}} -This example shows how Zenoh’s Storage + Query model supports asynchronous data access and resilient state-sharing—critical capabilities in robotics and industrial IoT systems where network connectivity may be intermittent or system components loosely coupled. +This example shows how Zenoh's Storage + Query model supports asynchronous data access and resilient state-sharing—critical capabilities in robotics and industrial IoT systems where network connectivity may be intermittent or system components loosely coupled. diff --git a/content/learning-paths/cross-platform/zenoh-multinode-ros2/6_zenoh-ex3-queryable.md b/content/learning-paths/cross-platform/zenoh-multinode-ros2/6_zenoh-ex3-queryable.md index 16e0b35573..13f15e0020 100644 --- a/content/learning-paths/cross-platform/zenoh-multinode-ros2/6_zenoh-ex3-queryable.md +++ b/content/learning-paths/cross-platform/zenoh-multinode-ros2/6_zenoh-ex3-queryable.md @@ -8,7 +8,7 @@ layout: learningpathall ## Example 3: Computation on Query using Queryable -Next, you’ll explore Zenoh’s queryable capability, which lets a node dynamically respond to data queries by executing a custom computation or data generation function in this example. +Next, you’ll explore Zenoh's queryable capability, which lets a node dynamically respond to data queries by executing a custom computation or data generation function in this example. Unlike zenohd which simply returns stored data, a queryable node can register to handle a specific key expression and generate responses at runtime. This is ideal for distributed computing at the edge, where lightweight devices—such as Raspberry Pi nodes—can respond to requests with calculated values (e.g., sensor fusion, AI inference results, or diagnostics). 
@@ -16,7 +16,7 @@ Unlike zenohd which simply returns stored data, a queryable node can register to Imagine a robot fleet management system where the central planner queries each robot for its latest battery health score, which is not published continuously but calculated only when queried. -This saves bandwidth and enables edge compute optimization using Zenoh’s Queryable. +This saves bandwidth and enables edge compute optimization using Zenoh's Queryable. ### Step 1: Launch a Queryable Node diff --git a/content/learning-paths/cross-platform/zenoh-multinode-ros2/7_zenoh-querycomp.md b/content/learning-paths/cross-platform/zenoh-multinode-ros2/7_zenoh-querycomp.md index ecc463263f..a870fe5b54 100644 --- a/content/learning-paths/cross-platform/zenoh-multinode-ros2/7_zenoh-querycomp.md +++ b/content/learning-paths/cross-platform/zenoh-multinode-ros2/7_zenoh-querycomp.md @@ -10,7 +10,7 @@ layout: learningpathall Finally, you’ll combine pub/sub, storage, and queryable components to simulate a distributed computation flow—demonstrating how Zenoh enables intelligent, coordinated edge systems. -You’ll learn how to use Zenoh’s Queryable API in Rust to build a parameterized query system for estimating battery health at the edge. +You’ll learn how to use Zenoh's Queryable API in Rust to build a parameterized query system for estimating battery health at the edge. This extends a previous example by supporting runtime query parameters like battery level and temperature. @@ -93,7 +93,7 @@ async fn main() -> zenoh::Result<()> { } ``` -This edge node responds to real-time queries using Zenoh’s Queryable API. It listens for requests on the robot/battery/estimate key and returns a calculated battery health score based on provided input parameters. +This edge node responds to real-time queries using Zenoh's Queryable API. It listens for requests on the robot/battery/estimate key and returns a calculated battery health score based on provided input parameters. 
The program starts by establishing a Zenoh session using open(Config::default()). It then registers a queryable resource on the robot/battery/estimate key. Whenever this key is queried, a callback function is invoked asynchronously using tokio::spawn. @@ -160,7 +160,7 @@ The excepted output will be >> Received ('robot/battery/estimate': 'Estimated battery health: 85%') ``` -You’ve just built a responsive, parameterized edge compute service using Zenoh’s Queryable API in Rust — a lightweight but powerful pattern for real-time intelligence at the edge. +You’ve just built a responsive, parameterized edge compute service using Zenoh's Queryable API in Rust — a lightweight but powerful pattern for real-time intelligence at the edge. This approach not only minimizes network overhead but also enables each device to process and respond to context-aware queries on demand. It’s a strong foundation for building scalable, event-driven IoT systems that can adapt dynamically to operational needs. diff --git a/content/learning-paths/cross-platform/zenoh-multinode-ros2/_index.md b/content/learning-paths/cross-platform/zenoh-multinode-ros2/_index.md index 99794f7b96..48fa9c5db9 100644 --- a/content/learning-paths/cross-platform/zenoh-multinode-ros2/_index.md +++ b/content/learning-paths/cross-platform/zenoh-multinode-ros2/_index.md @@ -10,7 +10,7 @@ minutes_to_complete: 45 who_is_this_for: This learning path is designed for robotics developers, industrial automation engineers, and IoT system architects building distributed, scalable, and low-latency applications. Whether you are using Robot Operating System (ROS), developing autonomous systems, or designing multi-node communication frameworks, this guide will show you how to leverage the Eclipse Zenoh protocol on Arm-based platforms — both in the cloud (AVH or EC2) and on physical devices like Raspberry Pi. 
learning_objectives: - - Understand Zenoh’s architecture and its integration of pub/sub, storage, querying, and computation models. + - Understand Zenoh's architecture and its integration of pub/sub, storage, querying, and computation models. - Build and run Zenoh examples on both Arm servers and Raspberry Pi. - Set up and deploy a multi-node Zenoh system using Arm-based hardware or virtual environments. diff --git a/content/learning-paths/embedded-and-microcontrollers/_index.md b/content/learning-paths/embedded-and-microcontrollers/_index.md index 89d08161fc..8ee2672ec5 100644 --- a/content/learning-paths/embedded-and-microcontrollers/_index.md +++ b/content/learning-paths/embedded-and-microcontrollers/_index.md @@ -11,8 +11,8 @@ maintopic: true operatingsystems_filter: - Android: 1 - Baremetal: 30 -- Linux: 28 -- macOS: 6 +- Linux: 29 +- macOS: 7 - RTOS: 9 - Windows: 4 subjects_filter: @@ -20,7 +20,7 @@ subjects_filter: - Containers and Virtualization: 6 - Embedded Linux: 4 - Libraries: 3 -- ML: 14 +- ML: 15 - Performance and Architecture: 21 - RTOS Fundamentals: 4 - Security: 2 @@ -32,10 +32,10 @@ tools_software_languages_filter: - Arduino: 2 - Arm Compiler for Embedded: 7 - Arm Compiler for Linux: 1 -- Arm Compute Library: 1 +- Arm Compute Library: 2 - Arm Development Studio: 8 - Arm Fast Models: 4 -- Arm Virtual Hardware: 10 +- Arm Virtual Hardware: 11 - Assembly: 1 - AVH: 1 - C: 3 @@ -53,12 +53,12 @@ tools_software_languages_filter: - DSTREAM: 2 - Edge AI: 1 - Edge Impulse: 1 -- ExecuTorch: 2 -- Fixed Virtual Platform: 9 +- ExecuTorch: 3 +- Fixed Virtual Platform: 10 - FPGA: 1 - Fusion 360: 1 - FVP: 1 -- GCC: 8 +- GCC: 9 - GenAI: 2 - GitHub: 3 - GitLab: 1 @@ -79,8 +79,8 @@ tools_software_languages_filter: - NumPy: 1 - Paddle: 1 - Porcupine: 1 -- Python: 6 -- PyTorch: 2 +- Python: 7 +- PyTorch: 3 - QEMU: 1 - Raspberry Pi: 6 - Remote.It: 1 diff --git a/content/learning-paths/embedded-and-microcontrollers/visualizing-ethos-u-performance/1-overview.md 
b/content/learning-paths/embedded-and-microcontrollers/visualizing-ethos-u-performance/1-overview.md index cbfad5ce57..7345c0c727 100644 --- a/content/learning-paths/embedded-and-microcontrollers/visualizing-ethos-u-performance/1-overview.md +++ b/content/learning-paths/embedded-and-microcontrollers/visualizing-ethos-u-performance/1-overview.md @@ -18,7 +18,7 @@ For a learning path focused on creating and deploying your own TinyML models, pl ## Benefits and applications -New products, like Arm's [Ethos-U85](https://www.arm.com/products/silicon-ip-cpu/ethos/ethos-u85) NPU are available on FVPs earlier than on physical devices. FVPs also have a graphical user iterface (GUI), which is useful for for ML performance visualization due to: +New products, like Arm's [Ethos-U85](https://www.arm.com/products/silicon-ip-cpu/ethos/ethos-u85) NPU are available on FVPs earlier than on physical devices. FVPs also have a graphical user interface (GUI), which is useful for for ML performance visualization due to: - visual confirmation that your ML model is running on the desired device, - clearly indicated instruction counts, - confirmation of total execution time and diff --git a/content/learning-paths/iot/_index.md b/content/learning-paths/iot/_index.md index 221cec7ff4..90e093d397 100644 --- a/content/learning-paths/iot/_index.md +++ b/content/learning-paths/iot/_index.md @@ -13,10 +13,10 @@ subjects_filter: - Containers and Virtualization: 2 - Embedded Linux: 2 - ML: 2 -- Performance and Architecture: 2 +- Performance and Architecture: 3 operatingsystems_filter: - Baremetal: 4 -- Linux: 8 +- Linux: 9 - macOS: 2 - RTOS: 2 - Windows: 2 @@ -28,6 +28,7 @@ tools_software_languages_filter: - Azure: 1 - Balena Cloud: 1 - Balena OS: 1 +- C: 1 - Coding: 3 - Docker: 2 - Fixed Virtual Platform: 1 @@ -35,7 +36,8 @@ tools_software_languages_filter: - Matter: 1 - MCP: 1 - Python: 2 -- Raspberry Pi: 3 +- Raspberry Pi: 4 - Remote.It: 1 +- ROS2: 1 - VS Code: 1 --- diff --git 
a/content/learning-paths/laptops-and-desktops/_index.md b/content/learning-paths/laptops-and-desktops/_index.md index b3aa2da78f..47c7b9b83f 100644 --- a/content/learning-paths/laptops-and-desktops/_index.md +++ b/content/learning-paths/laptops-and-desktops/_index.md @@ -53,7 +53,7 @@ tools_software_languages_filter: - Kubernetes: 1 - Linux: 1 - LLM: 1 -- LLVM: 1 +- LLVM: 2 - llvm-mca: 1 - MSBuild: 1 - MTE: 1 diff --git a/content/learning-paths/mobile-graphics-and-gaming/_index.md b/content/learning-paths/mobile-graphics-and-gaming/_index.md index 59b948e122..0ea7407037 100644 --- a/content/learning-paths/mobile-graphics-and-gaming/_index.md +++ b/content/learning-paths/mobile-graphics-and-gaming/_index.md @@ -51,6 +51,7 @@ tools_software_languages_filter: - Kotlin: 7 - LiteRT: 1 - LLM: 1 +- LLVM: 1 - llvm-mca: 1 - MediaPipe: 2 - Memory Bug Report: 1 diff --git a/content/learning-paths/servers-and-cloud-computing/_index.md b/content/learning-paths/servers-and-cloud-computing/_index.md index b58279463c..37978e8ebc 100644 --- a/content/learning-paths/servers-and-cloud-computing/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/_index.md @@ -8,7 +8,7 @@ key_ip: maintopic: true operatingsystems_filter: - Android: 2 -- Linux: 152 +- Linux: 153 - macOS: 10 - Windows: 14 pinned_modules: @@ -22,7 +22,7 @@ subjects_filter: - Containers and Virtualization: 28 - Databases: 15 - Libraries: 9 -- ML: 27 +- ML: 28 - Performance and Architecture: 60 - Storage: 1 - Web: 10 @@ -34,6 +34,7 @@ tools_software_languages_filter: - 5G: 1 - ACL: 1 - AI: 1 +- Amazon Web Services: 1 - Android Studio: 1 - Ansible: 2 - Arm Compiler for Linux: 1 @@ -108,10 +109,12 @@ tools_software_languages_filter: - Keras: 1 - Kubernetes: 10 - Lambda: 1 +- Libamath: 1 - libbpf: 1 -- Libmath: 1 - Linaro Forge: 1 +- Linux: 1 - Litmus7: 1 +- Llama.cpp: 1 - LLM: 9 - llvm-mca: 1 - LSE: 1 @@ -136,7 +139,7 @@ tools_software_languages_filter: - perf: 5 - Perf: 1 - PostgreSQL: 4 -- Python: 27 +- 
Python: 28 - PyTorch: 9 - RAG: 1 - Redis: 3 diff --git a/content/learning-paths/servers-and-cloud-computing/arcee-foundation-model-on-aws/06_running_inference.md b/content/learning-paths/servers-and-cloud-computing/arcee-foundation-model-on-aws/06_running_inference.md index e9670cb603..b1c9aeb471 100644 --- a/content/learning-paths/servers-and-cloud-computing/arcee-foundation-model-on-aws/06_running_inference.md +++ b/content/learning-paths/servers-and-cloud-computing/arcee-foundation-model-on-aws/06_running_inference.md @@ -75,7 +75,7 @@ This command starts an non-interactive session with the model: - `-p` sets the prompt sent to the model - The tool will prompt you to enter text, and the model will generate a response -Here, you should see the model generating at about 40 tokens per second. This shows how a more aggressive quantization recipe helps deliver faster perfornmance. +Here, you should see the model generating at about 40 tokens per second. This shows how a more aggressive quantization recipe helps deliver faster performance. ## Using llama-server for API Access