diff --git a/.github/workflows/stats-pr.yml b/.github/workflows/stats-pr.yml
new file mode 100644
index 0000000000..0d867b4822
--- /dev/null
+++ b/.github/workflows/stats-pr.yml
@@ -0,0 +1,39 @@
+name: Open Pull Request with updated stats report
+on:
+  workflow_run:
+    workflows: ["Test Learning Path"]
+    types: [completed]
+permissions:
+  actions: read
+  contents: write
+  pull-requests: write
+jobs:
+  stats-pr:
+    runs-on: arm-linux-runner
+    steps:
+      - name: Checkout main branch
+        uses: actions/checkout@v4
+        with:
+          ref: main
+      - name: Download stats report as artifact
+        uses: actions/download-artifact@v4
+        with:
+          # Run ID of the workflow that uploaded the artifact
+          run-id: ${{ github.event.workflow_run.id }}
+          github-token: ${{ github.token }}
+      - name: Move stats file
+        # Unpack the artifact and move the stats file to the correct location
+        run: |
+          mv stats_current_test_info/stats_current_test_info.yml data/stats_current_test_info.yml
+          rm -rf stats_current_test_info
+      - name: Create Pull Request
+        uses: peter-evans/create-pull-request@v6
+        if: success()
+        with:
+          commit-message: Update stats_current_test_info.yml
+          title: Update stats_current_test_info.yml
+          body: |
+            Update test result file with recent run
+            Auto-generated by create-pull-request: https://github.com/peter-evans/create-pull-request
+          branch: update-stats-current-test-info
+          base: main
\ No newline at end of file
diff --git a/.github/workflows/test-lp.yml b/.github/workflows/test-lp.yml
new file mode 100644
index 0000000000..3bb6f2b35c
--- /dev/null
+++ b/.github/workflows/test-lp.yml
@@ -0,0 +1,54 @@
+name: Test Learning Path
+on: pull_request
+jobs:
+  Test-Pull-Request:
+    runs-on: arm-linux-runner
+    steps:
+      - name: Check out repository code
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+      - name: Get all changed markdown files
+        id: changed-markdown-files
+        uses: tj-actions/changed-files@v44
+        with:
+          files: |
+            **.md
+      - name: Install dependencies
+        if: steps.changed-markdown-files.outputs.any_changed == 'true'
+        run: pip install -r tools/requirements.txt
+      - name: Run test suite for all changed .md files
+        id: run-suite
+        if: steps.changed-markdown-files.outputs.any_changed == 'true'
+        # Run the test suite
+        run: |
+          set -o pipefail; ./tools/test_lp.sh ${{ steps.changed-markdown-files.outputs.all_changed_files }} 2>&1 | tee test-lp-output.txt
+      - name: Parse test suite errors
+        id: test-suite-state
+        if: success()
+        # Catch any missed errors if running multiple tests
+        run: |
+          cat test-lp-output.txt | grep -q 'Tests failed in test suite' && echo "TEST_SUITE_ERRORS=true" >> "$GITHUB_ENV" \
+          || echo "TEST_SUITE_ERRORS=false" >> "$GITHUB_ENV"
+      - name: Check for errors in test suite
+        if: env.TEST_SUITE_ERRORS == 'true' && success()
+        run: |
+          echo "Test failures detected in test suite, check the output in earlier steps"
+          exit 1
+      - name: Parse test maintenance off
+        id: maintenance-state
+        if: success()
+        # Check if maintenance is turned off
+        run: |
+          cat test-lp-output.txt | grep -q 'maintenance is turned off' && echo "MAINTENANCE=off" >> "$GITHUB_ENV" \
+          || echo "MAINTENANCE=on" >> "$GITHUB_ENV"
+      - name: Check if maintenance is turned off
+        if: env.MAINTENANCE == 'off' && success()
+        run: echo "Maintenance is turned off for one or more files"
+      # Only upload artifact if maintenance is on
+      - name: Upload stats artifact
+        uses: actions/upload-artifact@v4
+        if: success() && env.MAINTENANCE == 'on'
+        with:
+          name: stats_current_test_info
+          path: data/stats_current_test_info.yml
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 15d27cc968..79dcdce94f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@ resources/
 node_modules/
 package-lock.json
 .hugo_build.lock
+.vscode
 
 # macOS files
 *.DS_Store
diff --git a/content/install-guides/ambaviz.md b/content/install-guides/ambaviz.md
index be77d85c07..9e86cb2e89 100644
--- a/content/install-guides/ambaviz.md
+++ 
b/content/install-guides/ambaviz.md @@ -27,6 +27,10 @@ tool_install: true # Set to true to be listed in main selection pag multi_install: false # Set to true if first page of multi-page article, else false multitool_install_part: false # Set to true if a sub-page of a multi-page article, else false layout: installtoolsall # DO NOT MODIFY. Always true for tool install articles +test_maintenance: true +test_images: +- ubuntu:latest + --- [Arm AMBA Viz](https://www.arm.com/products/development-tools/embedded-and-software/amba-viz) is a tool to visualize AMBA events to accelerate SoC verification. This guide is for SoC verification and validation engineers to efficiently analyze hardware signals in their designs. @@ -49,23 +53,23 @@ AMBA Viz requires a Linux host machine with Java 11 or JavaFX. Extract the software from the bundle to the desired install location. For example: -```command +```console tar -xf ambaviz.tar.gz ``` Navigate to the newly-created `ambaviz-` folder, and run the following script to set up environment variables: #### sh/bash -```command +```console source sourceMe.sh ``` #### csh -```command +```console sourceMe.csh ``` The AMBA Viz Release Notes provide full installation instructions, located in the extracted directory at: -```command +```console docs/public/assets/pdfs/ambaviz-release-note.pdf ``` @@ -78,17 +82,17 @@ License set up instructions are available in the [Arm License install guide](/in ## Get started Typically, AMBA Viz is launched with a waveform file: -```command +```console ambaviz -f ``` A proprietary `AVDB` waveform format is recommended to improve the performance of AMBA Viz. 
To convert `VCD` or `FSDB` files to this format, use the `wave2avdb` script, for example: -```command +```console wave2avdb -d cmn600 -f waves.vcd -o waves.avdb ``` The User Guide provides full usage instructions, located in the extracted directory at: -```command +```console docs/public/assets/pdfs/ambaviz-user-guide.pdf ``` diff --git a/content/install-guides/ams.md b/content/install-guides/ams.md index e0487d8822..fd03755b88 100644 --- a/content/install-guides/ams.md +++ b/content/install-guides/ams.md @@ -28,6 +28,9 @@ tool_install: true # Set to true to be listed in main selection pag multi_install: false # Set to true if first page of multi-page article, else false multitool_install_part: false # Set to true if a sub-page of a multi-page article, else false layout: installtoolsall # DO NOT MODIFY. Always true for tool install articles +test_maintenance: true +test_images: + - ubuntu:latest --- [Arm Performance Studio](https://developer.arm.com/Tools%20and%20Software/Arm%20Performance%20Studio%20for%20Mobile) (formally known as `Arm Mobile Studio`) is a performance analysis tool suite for various application developers: @@ -57,7 +60,7 @@ Run the supplied `Arm_Performance_Studio__windows_x86-64.exe` installer ### Linux Unpack the supplied `Arm Performance Studio` bundle to the desired location. For example: -```command +```console tar -xf Arm_Performance_Studio_2024.3_linux_x86-64.tgz ``` ### macOS diff --git a/content/install-guides/anaconda.md b/content/install-guides/anaconda.md index 2610dd428e..aaf905a029 100644 --- a/content/install-guides/anaconda.md +++ b/content/install-guides/anaconda.md @@ -22,11 +22,11 @@ tool_install: true weight: 1 --- -[Anaconda Distribution](https://www.anaconda.com/products/distribution) is a popular open-source Python distribution. +[Anaconda Distribution](https://www.anaconda.com/products/distribution) is a popular open-source Python distribution. 
It includes access to a repository with over 8,000 open-source data science and machine learning packages. -The `conda` command can be used to quickly install and use Python packages. +The `conda` command can be used to quickly install and use Python packages. Follow the instructions below to install and use Anaconda Distribution on an Arm server. @@ -46,11 +46,12 @@ aarch64 If you see a different result, you are not using an Arm computer running 64-bit Linux. -The installer requires some desktop related libraries. The dependencies can be met by installing a desktop environment. +The installer requires some desktop related libraries. The dependencies can be met by installing a desktop environment. For Ubuntu/Debian, run the command: ```console +sudo apt update sudo apt install xfce4 -y ``` @@ -60,7 +61,7 @@ For Amazon Linux, run the command: sudo amazon-linux-extras install mate-desktop1.x ``` -## How do I download the latest Anaconda distribution? +## How do I download the latest Anaconda distribution? To download the latest Anaconda distribution, run: @@ -84,7 +85,7 @@ sh ./Anaconda3-2024.10-1-Linux-aarch64.sh -b The install takes a couple of minutes to complete. -The batch installation will not set up the shell. +The batch installation will not set up the shell. To set up the shell, run: @@ -133,7 +134,7 @@ Run the example code: python ./tf.py ``` -The expected output format is below. Your version may be slightly different. +The expected output format is below. Your version may be slightly different. ```output 2.12.0 @@ -180,6 +181,6 @@ tensor([[0.9287, 0.5931, 0.0239], ``` -You are ready to use Anaconda Distribution. +You are ready to use Anaconda Distribution. Explore the many machine learning articles and examples using TensorFlow and PyTorch. 
diff --git a/content/install-guides/ansible.md b/content/install-guides/ansible.md index 38885594d2..7afc03ab21 100644 --- a/content/install-guides/ansible.md +++ b/content/install-guides/ansible.md @@ -8,10 +8,10 @@ minutes_to_complete: 10 multi_install: false multitool_install_part: false official_docs: https://docs.ansible.com/ansible/latest/index.html +test_maintenance: true test_images: - ubuntu:latest test_link: null -test_maintenance: false title: Ansible tool_install: true weight: 1 @@ -19,7 +19,7 @@ weight: 1 Ansible is an open source, command-line automation used to configure systems and deploy software. -Ansible command-line tools can be installed on a variety of Linux distributions. +Ansible command-line tools can be installed on a variety of Linux distributions. [General installation information](https://docs.ansible.com/ansible/latest/installation_guide/installation_distros.html) is available which covers all supported operating systems, but it doesn't talk about Arm-based hosts. @@ -41,7 +41,7 @@ aarch64 If you see a different result, you are not using an Arm-based machine running 64-bit Linux. -## How do I download and install Ansible for Ubuntu on Arm? +## How do I download and install Ansible for Ubuntu on Arm? The easiest way to install the latest version of Ansible for Ubuntu on Arm is to use the PPA (Personal Package Archive). 
@@ -54,7 +54,7 @@ sudo add-apt-repository --yes --update ppa:ansible/ansible sudo apt install ansible -y ``` -Confirm the Ansible command line tools are installed by running: +Confirm the Ansible command line tools are installed by running: ```bash ansible-playbook --version diff --git a/content/install-guides/aperf.md b/content/install-guides/aperf.md index 974cbd17ad..0c037cb095 100644 --- a/content/install-guides/aperf.md +++ b/content/install-guides/aperf.md @@ -7,7 +7,7 @@ multitool_install_part: false official_docs: https://github.com/aws/aperf test_images: - ubuntu:latest -test_maintenance: false +test_maintenance: true title: AWS Perf (APerf) tool_install: true weight: 1 @@ -17,11 +17,11 @@ APerf (AWS Perf) is an open source command line performance analysis tool which APerf was created by AWS to help with Linux performance analysis. -In addition to the CLI, APerf includes an HTML view to visualize the collected data. +In addition to the CLI, APerf includes an HTML view to visualize the collected data. ## Before you begin -APerf works on Linux, and is available as a single binary. +APerf works on Linux, and is available as a single binary. APerf works best if `perf` is installed. Refer to the [Perf for Linux on Arm](/install-guides/perf) install guide for instructions. @@ -43,9 +43,9 @@ If you see a different result, you are not using an Arm computer running 64-bit ## Download and install APerf -The easiest way to install APerf is to download a release from GitHub, extract it, and setup your `PATH` environment variable or copy the executable to a directory already in your search path. +The easiest way to install APerf is to download a release from GitHub, extract it, and setup your `PATH` environment variable or copy the executable to a directory already in your search path. -Visit the [releases page](https://github.com/aws/aperf/releases/) to see a list of available releases. 
+Visit the [releases page](https://github.com/aws/aperf/releases/) to see a list of available releases. You can also download a release from the command line: @@ -59,7 +59,7 @@ Extract the release: tar xvfz aperf-v0.1.12-alpha-aarch64.tar.gz ``` -Add the path to `aperf` in your `.bashrc` file. +Add the path to `aperf` in your `.bashrc` file. ```console echo 'export PATH="$PATH:$HOME/aperf-v0.1.12-alpha-aarch64"' >> ~/.bashrc @@ -69,12 +69,12 @@ source ~/.bashrc Alternatively, you can copy the `aperf` executable to a directory already in your search path. ```bash { target="ubuntu:latest" } -sudo cp aperf-v0.1.12-alpha-aarch64/aperf /usr/local/bin +sudo cp aperf-v0.1.12-alpha-aarch64/aperf /usr/local/bin ``` Confirm `aperf` is installed by printing the version: -```bash { target="ubuntu:latest" } +```bash { target="ubuntu:latest" } aperf --version ``` @@ -86,33 +86,33 @@ aperf 0.1.0 (4b910d2) ## Verify APerf is working -### Create and view a report +### Create and view a report To confirm APerf is working, start it for 10 seconds and take a sample every 1 second. -```bash { target="ubuntu:latest" } -aperf record -i 1 -p 10 -r run1 --profile +```console +sudo aperf record -i 1 -p 10 -r run1 --profile ``` -After 10 seconds `aperf` completes and you see a directory named `run1` and a tar file named `run1.tar.gz`. +After 10 seconds `aperf` completes and you see a directory named `run1` and a tar file named `run1.tar.gz`. Next, generate a report from the recorded data: -```bash { target="ubuntu:latest" } -aperf report -r run1 -n report1 +```console +sudo aperf report -r run1 -n report1 ``` -The name of the report is `report1` and you will see a `report1` directory and a tar file named `report1.tar.gz`. +The name of the report is `report1` and you will see a `report1` directory and a tar file named `report1.tar.gz`. The tar files are useful if you want to copy them to another machine. Using a web browser, open the file `index.html` in the `report1/` directory. 
To open the file use `Ctrl+O` for Linux and Windows and use `⌘+O` for macOS. -The report is now visible in the browser. +The report is now visible in the browser. -There are a number of tabs on the left side showing the collected data. +There are a number of tabs on the left side showing the collected data. -You can browse the data and see what has been collected. +You can browse the data and see what has been collected. ![APerf #center](/install-guides/_images/aperf0.png) @@ -124,21 +124,21 @@ The Kernel Config and Sysctl Data tabs are blank unless you click No. To demonstrate comparing 2 runs, create a second run with `aperf record`: -```bash { target="ubuntu:latest" } -aperf record -i 1 -p 10 -r run2 --profile +```console +sudo aperf record -i 1 -p 10 -r run2 --profile ``` -After 10 seconds `aperf` completes and you see a directory named `run2` and a tar file named `run2.tar.gz`. +After 10 seconds `aperf` completes and you see a directory named `run2` and a tar file named `run2.tar.gz`. Generate a report with both the first and second runs included: -```bash { target="ubuntu:latest" } -aperf report -r run1 -r run2 -n compare +```console +sudo aperf report -r run1 -r run2 -n compare ``` -The name of the report is `compare` and you will see a `compare` directory and a tar file named `compare.tar.gz`. +The name of the report is `compare` and you will see a `compare` directory and a tar file named `compare.tar.gz`. -Open the `index.html` file in the `compare/` directory to see the 2 runs side by side. +Open the `index.html` file in the `compare/` directory to see the 2 runs side by side. 
A screenshot is shown below: diff --git a/content/install-guides/arduino-pico.md b/content/install-guides/arduino-pico.md index 287eca0214..72b0b52f4e 100644 --- a/content/install-guides/arduino-pico.md +++ b/content/install-guides/arduino-pico.md @@ -10,6 +10,7 @@ layout: installtoolsall minutes_to_complete: 15 official_docs: https://docs.aws.amazon.com/greengrass/v2/developerguide/quick-installation.html prerequisites: Arduino IDE +test_maintenance: true test_images: - ubuntu:latest tool_install: true @@ -19,7 +20,7 @@ multitool_install_part: false weight: 1 --- -You can install the Arduino IDE and Arduino core software for the Raspberry Pi Pico and the Raspberry Pi Pico W. +You can install the Arduino IDE and Arduino core software for the Raspberry Pi Pico and the Raspberry Pi Pico W. Arduino core is the software stack that powers Arduino devices and development boards. While the Raspberry Pi Pico isn't an Arduino board, it uses the same RP2040 SoC as the Arduino RP2040 Connect, and therefore can run the same Arduino core software. @@ -27,7 +28,7 @@ Arduino core is the software stack that powers Arduino devices and development b First, you need to install the Arduino IDE on your laptop or desktop. You can download it for your operating system from [the Arduino Software website](https://www.arduino.cc/en/software). Follow the provided instructions for installing the IDE. -Start the IDE by clicking the Arduino IDE icon. +Start the IDE by clicking the Arduino IDE icon. ## Install board support package @@ -43,9 +44,9 @@ When the `Boards Manager` opens search for `pico` and the `Arduino Mbed OS RP204 ### Raspberry Pi Pico W -The `Boards Manager` package for for `Arduino Mbed OS RP2040 Boards` does not include the Raspberry Pi Pico W. +The `Boards Manager` package for for `Arduino Mbed OS RP2040 Boards` does not include the Raspberry Pi Pico W. 
-If you want to use the Pico W go to `File -> Preferences` (or `Arduino IDE -> Settings` on macOS) and enter the URL below into the `Additional Boards Manager URLs` field: +If you want to use the Pico W go to `File -> Preferences` (or `Arduino IDE -> Settings` on macOS) and enter the URL below into the `Additional Boards Manager URLs` field: ```console https://github.com/earlephilhower/arduino-pico/releases/download/global/package_rp2040_index.json @@ -61,9 +62,9 @@ Once the support package is installed, you need to tell the Arduino IDE which su ## Upload to your board -Because the Raspberry Pi Pico doesn't come with the Arduino core software installed, the Arduino IDE won't recognize it. +Because the Raspberry Pi Pico doesn't come with the Arduino core software installed, the Arduino IDE won't recognize it. -To fix that, you must upload a sketch. A sketch is another name for an Arduino software application. +To fix that, you must upload a sketch. A sketch is another name for an Arduino software application. Go to `File -> Examples -> 01.Basics -> Blink` and load the sketch. @@ -75,4 +76,4 @@ You should see the LED on your Raspberry Pi Pico blink on and off every second. If you have trouble uploading a sketch, unplug the board, press and hold the `BOOTSEL` button on the board, plug it in, and then release the button. {{% /notice %}} -You are ready to start writing your own Arduino sketches for Raspberry Pi Pico. \ No newline at end of file +You are ready to start writing your own Arduino sketches for Raspberry Pi Pico. 
\ No newline at end of file diff --git a/content/install-guides/armclang.md b/content/install-guides/armclang.md index 8cc621c832..b0eb127200 100644 --- a/content/install-guides/armclang.md +++ b/content/install-guides/armclang.md @@ -13,12 +13,8 @@ multitool_install_part: false official_docs: https://developer.arm.com/documentation/100748 test_images: - ubuntu:latest -- fedora:latest test_link: null -test_maintenance: false -test_status: -- passed -- passed +test_maintenance: true title: Arm Compiler for Embedded tool_install: true weight: 1 @@ -54,7 +50,7 @@ All compiler versions can be used standalone or [integrated](#armds) into your A See also: [What should I do if I want to download a legacy release of Arm Compiler?](https://developer.arm.com/documentation/ka005184) -See [Arm Product Download Hub](/install-guides/pdh/) for additional information on usage. +See [Arm Product Download Hub](../pdh) for additional information on usage. ### Install compiler packages @@ -64,22 +60,17 @@ win-x86_64\setup.exe ``` To install on Linux hosts, `untar` the downloaded package and run the install script (note the exact filenames are version and host dependent). For example: -#### x86_64 -```console -mkdir tmp -mv ARMCompiler6.22_standalone_linux-x86_64.tar.gz tmp -cd tmp -tar xvfz ARMCompiler6.22_standalone_linux-x86_64.tar.gz -./install_x86_64.sh --i-agree-to-the-contained-eula --no-interactive -d /home/$USER/ArmCompilerforEmbedded6.22 -``` -#### aarch64 +#### Linux +The `uname -m` call is used to determine whether your machine is running `aarch64` or `x86_64`, and target the downloaded package accordingly. 
+ ```console mkdir tmp -mv ARMCompiler6.22_standalone_linux-aarch64.tar.gz tmp +mv ARMCompiler6.22_standalone_linux-`uname -m`.tar.gz tmp cd tmp -tar xvfz ARMCompiler6.22_standalone_linux-aarch64.tar.gz -./install_aarch64.sh --i-agree-to-the-contained-eula --no-interactive -d /home/$USER/ArmCompilerforEmbedded6.22 +tar xvfz ARMCompiler6.22_standalone_linux-`uname -m`.tar.gz +./install_`uname -m`.sh --i-agree-to-the-contained-eula --no-interactive -d /home/$USER/ArmCompilerforEmbedded6.22 ``` + Remove the install data when complete. ```console cd .. @@ -99,16 +90,17 @@ armclang --version ### Arm Tools Artifactory {#artifactory} -The Arm Compiler for Embedded, as well as other tools and utilities are available in the [Arm Tools Artifactory](https://www.keil.arm.com/artifacts/). The Keil Studio VS Code [Extensions](/install-guides/keilstudio_vs/) use the artifactory to fetch and install and the necessary components. +The Arm Compiler for Embedded, as well as other tools and utilities are available in the [Arm Tools Artifactory](https://www.keil.arm.com/artifacts/). The Keil Studio VS Code [Extensions](../keilstudio_vs) use the artifactory to fetch and install and the necessary components. Available packages can also be fetched directly from the artifactory. This is particularly useful for automated CI/CD flows. -```command +```bash wget https://artifacts.tools.arm.com/arm-compiler/6.22/45/standalone-linux-armv8l_64-rel.tar.gz ``` Note that the artifactory packages do not have their own installers. 
You should manually extract files and configure, for example: -```command + +```bash mkdir ArmCompilerforEmbedded6.22 tar xvzf ./standalone-linux-armv8l_64-rel.tar.gz -C ./ArmCompilerforEmbedded6.22 --strip-components=1 rm ./standalone-linux-armv8l_64-rel.tar.gz diff --git a/content/install-guides/armds.md b/content/install-guides/armds.md index 8b4c8b8c66..1c2392b8dc 100644 --- a/content/install-guides/armds.md +++ b/content/install-guides/armds.md @@ -14,6 +14,10 @@ additional_search_terms: ### Estimated completion time in minutes (please use integer multiple of 5) minutes_to_complete: 10 +test_maintenance: true +test_images: + - ubuntu:latest + author_primary: Ronan Synnott ### Link to official documentation @@ -36,7 +40,7 @@ Full host platform requirements are given in the [Getting Started Guide](https:/ ## Download installer packages -The installer will depend on the [edition](https://developer.arm.com/Tools%20and%20Software/Arm%20Development%20Studio#Editions) of Development Studio that you are entitled to. +The installer will depend on the [edition](https://developer.arm.com/Tools%20and%20Software/Arm%20Development%20Studio#Editions) of Development Studio that you are entitled to. The version is denoted by `year.index`, where `index` is a number (for example `2023.1`). You can also generate an Evaluation license from this installation (`Help` > `Arm License Manager`), with capabilities broadly similar to the UBL Gold Edition. 
diff --git a/content/install-guides/armie.md b/content/install-guides/armie.md index 09bf151f02..3ea15b3642 100644 --- a/content/install-guides/armie.md +++ b/content/install-guides/armie.md @@ -13,7 +13,7 @@ multitool_install_part: false official_docs: https://developer.arm.com/documentation/102190 test_images: - ubuntu:latest -test_maintenance: false +test_maintenance: true title: Arm Instruction Emulator (armie) tool_install: true weight: 1 @@ -21,7 +21,7 @@ weight: 1 [Arm Instruction Emulator](https://developer.arm.com/Tools%20and%20Software/Arm%20Instruction%20Emulator) is a software tool that runs on 64-bit Arm platforms and emulates [Scalable Vector Extension(SVE)](https://developer.arm.com/documentation/102476/latest/instructions). This tool allows you to run your compiled SVE application binaries on hardware that is not SVE-enabled. {{% notice SVE hardware %}} -AWS Graviton 3 and Graviton 4 processors are available and recommended for SVE application development. +AWS Graviton 3 and Graviton 4 processors are available and recommended for SVE application development. {{% /notice %}} ## Before you begin @@ -39,7 +39,7 @@ aarch64 ``` If you see a different result, you are not using an Arm computer running 64-bit Linux. -You must ensure that either [Environment Modules](https://modules.readthedocs.io/en/latest/index.html) or the [Lmod Environment Module System](https://lmod.readthedocs.io/en/latest/) are installed on your Linux machine. The GNU Compiler (GCC) is also required. +You must ensure that either [Environment Modules](https://modules.readthedocs.io/en/latest/index.html) or the [Lmod Environment Module System](https://lmod.readthedocs.io/en/latest/) are installed on your Linux machine. The GNU Compiler (GCC) is also required. For Ubuntu Linux install the required packages. 
@@ -48,9 +48,9 @@ sudo apt-get install build-essential -y sudo apt-get install environment-modules -y ``` -## Download +## Download -You can download the appropriate Arm Instruction Emulator package for your host Linux platform from [Product Downloads section](https://developer.arm.com/downloads/-/arm-instruction-emulator) of the Arm website. +You can download the appropriate Arm Instruction Emulator package for your host Linux platform from [Product Downloads section](https://developer.arm.com/downloads/-/arm-instruction-emulator) of the Arm website. For Ubuntu Linux download the installer package using `wget` @@ -60,19 +60,18 @@ wget https://developer.arm.com/-/media/Files/downloads/hpc/arm-instruction-emula ## Install -To install the Arm Instruction Emulator, extract the downloaded package and run the install script. +To install the Arm Instruction Emulator, extract the downloaded package and run the install script. -Extract the downloaded package. +Extract the downloaded package. ```bash -tar xf ARM-Instruction-Emulator_22.0_AArch64_Ubuntu_18.04.tar.gz -cd arm-instruction-emulator_22.0_Ubuntu-18.04 +tar -xf ARM-Instruction-Emulator_22.0_AArch64_Ubuntu_18.04.tar.gz ``` Run the install script. ```bash -sudo ./arm-instruction-emulator_22.0_Ubuntu-18.04.sh -a +sudo ./arm-instruction-emulator_22.0_Ubuntu-18.04/arm-instruction-emulator_22.0_Ubuntu-18.04.sh -a ``` Set up the environment for example in your .bashrc and add module files. @@ -85,25 +84,25 @@ source ~/.bashrc To list available modules: -```bash { env_source="~/.bashrc" } +```console module avail ``` To configure Arm Compiler for Linux: -```bash { env_source="~/.bashrc" } +```console module load armie22/22.0 ``` To confirm `armie` is installed, print the version. -```bash { env_source="~/.bashrc" } +```console armie --version ``` ## Setting up product license -Arm Instruction Emulator does not require a license. +Arm Instruction Emulator does not require a license. 
 ## Get started
diff --git a/content/install-guides/armpl.md b/content/install-guides/armpl.md
index 38d80d3794..c4d77c315c 100644
--- a/content/install-guides/armpl.md
+++ b/content/install-guides/armpl.md
@@ -14,6 +14,10 @@ additional_search_terms:
 ### Estimated completion time in minutes (please use integer multiple of 5)
 minutes_to_complete: 10
 
+test_maintenance: true
+test_images:
+  - ubuntu:latest
+
 ### Link to official documentation
 official_docs: https://developer.arm.com/documentation/101004
 author_primary: Pareena Verma
@@ -115,20 +119,21 @@ The instructions shown below are for deb based installers for GCC users.
 
 In a terminal, run the command shown below to download the debian package:
 
-```console
+```bash
 wget https://developer.arm.com/-/media/Files/downloads/hpc/arm-performance-libraries/24-10/linux/arm-performance-libraries_24.10_deb_gcc.tar
 ```
 
 Use `tar` to extract the file and then change directory:
-```console
+
+```bash
 tar -xf arm-performance-libraries_24.10_deb_gcc.tar
 cd arm-performance-libraries_24.10_deb/
 ```
 
 Run the installation script as a super user:
 
-```console
+```bash
 sudo ./arm-performance-libraries_24.10_deb.sh --accept
 ```
diff --git a/content/install-guides/aws-cli.md b/content/install-guides/aws-cli.md
index c37fd37736..763cf3b798 100644
--- a/content/install-guides/aws-cli.md
+++ b/content/install-guides/aws-cli.md
@@ -12,18 +12,15 @@ multitool_install_part: false
 official_docs: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
 test_images:
 - ubuntu:latest
-test_link: null
-test_maintenance: false
-test_status:
-- passed
+test_maintenance: true
 title: AWS CLI
 tool_install: true
 weight: 1
 ---
 
-[AWS CLI](https://docs.aws.amazon.com/cli/index.html) is a cross-platform command-line tool that can be installed on development computers. The AWS Command Line Interface (AWS CLI) is a unified tool that provides a consistent interface for interacting with all parts of Amazon Web Services. 
+[AWS CLI](https://docs.aws.amazon.com/cli/index.html) is a cross-platform command-line tool that can be installed on development computers. The AWS Command Line Interface (AWS CLI) is a unified tool that provides a consistent interface for interacting with all parts of Amazon Web Services. -It is available for a variety of operating systems and Linux distributions, supports the Arm architecture and has multiple ways to install it. +It is available for a variety of operating systems and Linux distributions, supports the Arm architecture and has multiple ways to install it. ## What should I do before installing AWS CLI? @@ -54,12 +51,12 @@ sudo apt update sudo apt install unzip -y ``` -Download the zip file with `curl`, extract the installer, and run it. +Download the zip file with `curl`, extract the installer, and run it. ```bash { target="ubuntu:latest" } curl "https://awscli.amazonaws.com/awscli-exe-linux-aarch64.zip" -o "awscliv2.zip" unzip awscliv2.zip -sudo ./aws/install +sudo ./aws/install --update ``` Confirm the CLI version 2 is available by invoking the `aws` command to print the version. @@ -68,6 +65,6 @@ Confirm the CLI version 2 is available by invoking the `aws` command to print th aws --version ``` -Review [AWS CLI v2 is now generally available](https://aws.amazon.com/blogs/developer/aws-cli-v2-is-now-generally-available/) to review the new features in version 2. +Review [AWS CLI v2 is now generally available](https://aws.amazon.com/blogs/developer/aws-cli-v2-is-now-generally-available/) to review the new features in version 2. You now have the latest version of the AWS CLI installed. Follow [this guide](/install-guides/aws_access_keys/) to generate and configure access keys needed to use the AWS CLI. 
diff --git a/content/install-guides/aws-copilot.md b/content/install-guides/aws-copilot.md index 0345bf04ef..8680f0ed03 100644 --- a/content/install-guides/aws-copilot.md +++ b/content/install-guides/aws-copilot.md @@ -12,10 +12,7 @@ multitool_install_part: false official_docs: https://aws.github.io/copilot-cli/ test_images: - ubuntu:latest -test_link: null -test_maintenance: false -test_status: -- passed +test_maintenance: true title: AWS Copilot CLI tool_install: true weight: 1 @@ -23,7 +20,7 @@ weight: 1 AWS Copilot CLI is an open source command line interface for running containers on AWS App Runner, Amazon Elastic Container Service (ECS), and AWS Fargate. -It is available for a variety of operating systems and Linux distributions and supports the Arm architecture. +It is available for a variety of operating systems and Linux distributions and supports the Arm architecture. ## Before you begin @@ -49,7 +46,7 @@ arm64 ## Download and install AWS Copilot CLI -Copilot requires Docker. Refer to the [Docker](/install-guides/docker/) install guide for installation instructions. +Copilot requires Docker. Refer to the [Docker](/install-guides/docker/) install guide for installation instructions. If you are using Docker on Linux you will need to install QEMU to build container images for both the `arm64` and the `amd64` architectures. 
diff --git a/content/install-guides/aws-greengrass-v2.md b/content/install-guides/aws-greengrass-v2.md index 370ea75604..2d49679759 100644 --- a/content/install-guides/aws-greengrass-v2.md +++ b/content/install-guides/aws-greengrass-v2.md @@ -10,6 +10,7 @@ layout: installtoolsall minutes_to_complete: 15 official_docs: https://docs.aws.amazon.com/greengrass/v2/developerguide/quick-installation.html prerequisites: AWS Account with IAM use role +test_maintenance: false test_images: - ubuntu:latest tool_install: true @@ -31,7 +32,7 @@ The instructions provide the fastest and simplest configuration for deploying AW Before installing AWS IoT Greengrass on your device you first need to create an AWS IAM role with sufficient permissions to create Greengrass Things, Groups, and Roles. -You will also create and save an access key and secret access key for AWS CLI access. +You will also create and save an access key and secret access key for AWS CLI access. ### Before you begin @@ -110,13 +111,13 @@ Log in to the AWS console, set the AWS region you want to use in upper right cor 5. Replace `account-id` on lines 16 and 17 with your AWS account ID -You can find your account ID by clicking on your user name in the top-right corner of the AWS console. +You can find your account ID by clicking on your user name in the top-right corner of the AWS console. ![Role Permissions Editor #center](/install-guides/_images/gg-role-permissions.png) 6. Name the new policy `GGDeploymentAccess` -7. Back on the group creation page, click the refresh button then search for and select `GGDeploymentAccess` +7. Back on the group creation page, click the refresh button then search for and select `GGDeploymentAccess` ![Group Policy Selection #center](/install-guides/_images/gg-group-policy.png) @@ -130,11 +131,11 @@ You can find your account ID by clicking on your user name in the top-right corn 12. 
Select `Command Line Interface (CLI)` for your key type, ignoring the warnings for now (you should delete they keys when you're done testing). -13. Copy your `Access key` and `Secret access key`. +13. Copy your `Access key` and `Secret access key`. ![Access Keys #center](/install-guides/_images/gg-access-keys.png) -You will use the credentials in the next section. +You will use the credentials in the next section. ## Download and install AWS IoT Greengrass @@ -160,7 +161,7 @@ export AWS_REGION="us-east-1" Replace `us-east-1` with the AWS region you want to use. {{% /notice %}} -Download the zip file with `curl`, extract the installer, and run it. +Download the zip file with `curl`, extract the installer, and run it. This will install the AWS IoT Greengrass v2 software on your device, and and register the device with the Greengrass service. @@ -204,12 +205,12 @@ systemctl status greengrass ## View your device in the AWS console -In your browser, go to the AWS console and navigate to the IoT Greengrass console. +In your browser, go to the AWS console and navigate to the IoT Greengrass console. You will see the new device listed in the Greengrass core devices. -Click on the device name to see more device details. +Click on the device name to see more device details. ![Greengrass Devices #center](/install-guides/_images/greengrass-devices.png) -You are now ready to use AWS IoT Greengrass v2 on your device. \ No newline at end of file +You are now ready to use AWS IoT Greengrass v2 on your device. 
\ No newline at end of file diff --git a/content/install-guides/azure-cli.md b/content/install-guides/azure-cli.md index 64ece50ec6..24bf7057c0 100644 --- a/content/install-guides/azure-cli.md +++ b/content/install-guides/azure-cli.md @@ -10,7 +10,6 @@ multitool_install_part: false official_docs: https://learn.microsoft.com/en-us/cli/azure test_images: - ubuntu:latest -test_link: null test_maintenance: true test_status: - passed @@ -19,9 +18,9 @@ tool_install: true weight: 1 --- -[Azure CLI](https://learn.microsoft.com/en-us/cli/azure/) is a cross-platform command-line tool that can be installed locally on development computers. Azure CLI is used to connect to Azure and execute administrative commands on Azure resources. +[Azure CLI](https://learn.microsoft.com/en-us/cli/azure/) is a cross-platform command-line tool that can be installed locally on development computers. Azure CLI is used to connect to Azure and execute administrative commands on Azure resources. -It is available for a variety of operating systems and Linux distributions and has multiple ways to install it. +It is available for a variety of operating systems and Linux distributions and has multiple ways to install it. ## Before you begin @@ -74,17 +73,20 @@ If you prefer installing the Azure CLI using Python3, follow the instructions be ## Download and Install using pip -Another way to install Azure CLI for Ubuntu on Arm is to use Python pip. +Another way to install Azure CLI for Ubuntu on Arm is to use Python pip. -Install Python pip. +Install Python pip and create a virtual environment. -```bash { target="ubuntu:latest" } -sudo apt install python3-pip python-is-python3 -y +```bash +sudo apt update +sudo apt install python3-pip python-is-python3 python3-venv -y +python -m venv azure-cli-venv ``` -Download and install Azure CLI. +Activate it and install Azure CLI. 
-```bash { target="ubuntu:latest" } +```bash +source azure-cli-venv/bin/activate pip install azure-cli ``` diff --git a/content/install-guides/bolt.md b/content/install-guides/bolt.md index 148df9bd2b..47acc51346 100644 --- a/content/install-guides/bolt.md +++ b/content/install-guides/bolt.md @@ -14,7 +14,7 @@ official_docs: https://github.com/llvm/llvm-project/tree/main/bolt test_images: - ubuntu:latest -test_maintenance: false +test_maintenance: true layout: installtoolsall tool_install: true @@ -33,7 +33,7 @@ This article provides quick instructions to download and install BOLT. The instr [Install Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) using the documentation for your operating system. -Many Linux distributions include Git so you may not need to install it. +Many Linux distributions include Git so you may not need to install it. 2. Install CMake @@ -96,26 +96,34 @@ Thread model: posix InstalledDir: /usr/bin ``` +5. Install xz-utils + +```bash +sudo apt-get install xz-utils -y +``` + ## Install BOLT -You can install BOLT in 2 different ways, by building the source code or by downloading a binary release from GitHub. +You can install BOLT in 2 different ways, by building the source code or by downloading a binary release from GitHub. ### Option 1: Download, build, and install BOLT from source code 1. Clone the repository -```console -cd $HOME +```bash git clone https://github.com/llvm/llvm-project.git ``` -2. Build BOLT +2. Build BOLT and run it. 
-```console +```bash cd llvm-project mkdir build cd build -cmake -G Ninja ../llvm -DLLVM_TARGETS_TO_BUILD="X86;AArch64" -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=ON -DLLVM_ENABLE_PROJECTS="bolt" +cmake -G Ninja ../llvm -DLLVM_TARGETS_TO_BUILD="X86;AArch64" -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=ON -DLLVM_ENABLE_PROJECTS="bolt;clang;lld" +``` + +```console ninja bolt ``` @@ -123,12 +131,12 @@ Build time depends on your machine configuration, and it may take several minute 3. Add the path to BOLT in your `.bashrc` file -```console +```bash echo 'export PATH="$PATH:$HOME/llvm-project/build/bin"' >> ~/.bashrc source ~/.bashrc ``` -You are now ready to [verify BOLT is installed](#verify). +You are now ready to [verify BOLT is installed](#verify). ### Option 2: Download and install BOLT using a binary release @@ -136,20 +144,19 @@ You are now ready to [verify BOLT is installed](#verify). For Arm Linux use the file with `aarch64` in the name: -```bash { target="ubuntu:latest" } -cd $HOME +```bash wget https://github.com/llvm/llvm-project/releases/download/llvmorg-17.0.5/clang+llvm-17.0.5-aarch64-linux-gnu.tar.xz ``` 2. Extract the downloaded file -```bash { target="ubuntu:latest" } -tar xvf clang+llvm-17.0.5-aarch64-linux-gnu.tar.xz +```bash +tar -xvf clang+llvm-17.0.5-aarch64-linux-gnu.tar.xz ``` 3. Add the path to BOLT in your `.bashrc` file -```bash { target="ubuntu:latest" } +```bash echo 'export PATH="$PATH:$HOME/clang+llvm-17.0.5-aarch64-linux-gnu/bin"' >> ~/.bashrc source ~/.bashrc ``` @@ -160,7 +167,7 @@ source ~/.bashrc Check the `perf2bolt` command: -```bash { target="ubuntu:latest" } +```console perf2bolt ``` @@ -173,7 +180,7 @@ Must specify at least 1 positional argument: See: perf2bolt --help Check the `llvm-bolt` command: -```bash { target="ubuntu:latest" } +```console llvm-bolt ``` @@ -186,7 +193,7 @@ Must specify at least 1 positional argument: See: llvm-bolt --help 2. 
Print the BOLT version -```bash { target="ubuntu:latest" } +```console llvm-bolt --version ``` @@ -208,6 +215,6 @@ BOLT revision 99c15eb49ba0b607314b3bd221f0760049130d97 x86-64 - 64-bit X86: EM64T and AMD64 ``` -You will see additional Registered Targets if you downloaded a binary release. +You will see additional Registered Targets if you downloaded a binary release. You are ready to use BOLT on your Linux machine. diff --git a/content/install-guides/gcloud.md b/content/install-guides/gcloud.md index 5e93110f0a..5ed6ae3c32 100644 --- a/content/install-guides/gcloud.md +++ b/content/install-guides/gcloud.md @@ -1,5 +1,5 @@ --- -additional_search_terms: +additional_search_terms: - cloud - google cloud - google @@ -14,19 +14,19 @@ multitool_install_part: false official_docs: https://cloud.google.com/sdk/docs/install-sdk test_images: - ubuntu:latest -test_maintenance: false -title: Google Cloud Platform (GCP) CLI +test_maintenance: true +title: Google Cloud Platform (GCP) CLI tool_install: true weight: 1 --- The Google Cloud CLI, `gcloud`, allows you to run commands in your Google Cloud account. -`gcloud` is available for Windows, macOS, Linux and supports the Arm architecture. +`gcloud` is available for Windows, macOS, Linux and supports the Arm architecture. ## What should I consider before installing gcloud? -Use the documentation link to find alternative installation options. +Use the documentation link to find alternative installation options. This article provides a quick solution to install `gcloud` for Ubuntu on Arm. @@ -54,7 +54,7 @@ Download and install the required software packages. sudo apt-get install -y curl apt-transport-https ca-certificates gnupg ``` -Install `gcloud` from the Google repository. +Install `gcloud` from the Google repository. 
```bash { target="ubuntu:latest" } echo "deb https://packages.cloud.google.com/apt cloud-sdk main" | sudo tee -a /etc/apt/sources.list.d/google-cloud-sdk.list diff --git a/content/install-guides/multipass.md b/content/install-guides/multipass.md index b336cf38a4..abdcfbc29b 100644 --- a/content/install-guides/multipass.md +++ b/content/install-guides/multipass.md @@ -21,6 +21,10 @@ author_primary: Jason Andrews ### Link to official documentation official_docs: https://multipass.run/docs +test_images: +- ubuntu:latest +test_maintenance: true + ### PAGE SETUP weight: 1 # Defines page ordering. Must be 1 for first (or only) page. tool_install: true # Set to true to be listed in main selection page, else false @@ -32,15 +36,15 @@ layout: installtoolsall # DO NOT MODIFY. Always true for tool install ar A computer running macOS with Apple Silicon or an Arm Linux computer with KVM enabled is required to complete the installation. {{% /notice %}} -[Multipass](https://multipass.run/) provides cloud style virtual machines (VMs). Multipass is popular among developers for efficient, local testing. When run on macOS with Apple Silicon or on Linux with a Raspberry Pi 5, Multipass provides a similar experience to cloud instances. A local, software compatible equivalent of an Arm cloud instance on your desk with good performance is an important option for developers. +[Multipass](https://multipass.run/) provides cloud style virtual machines (VMs). Multipass is popular among developers for efficient, local testing. When run on macOS with Apple Silicon or on Linux with a Raspberry Pi 5, Multipass provides a similar experience to cloud instances. A local, software compatible equivalent of an Arm cloud instance on your desk with good performance is an important option for developers. Multipass provides a clear CLI to easily start virtual machine instances, do development tasks, and clean the VMs from your computer. 
## Before you begin -Multipass runs on a variety of platforms and host operating systems. The information below covers running Multipass on macOS with Apple Silicon and Arm Linux with the goal of creating a compatible Ubuntu Linux environment for developers working on cloud instances. +Multipass runs on a variety of platforms and host operating systems. The information below covers running Multipass on macOS with Apple Silicon and Arm Linux with the goal of creating a compatible Ubuntu Linux environment for developers working on cloud instances. -Multipass uses the terms virtual machine and instance synonymously. +Multipass uses the terms virtual machine and instance synonymously. ## Installation on macOS @@ -60,15 +64,15 @@ Install the download using the package command. sudo installer -pkg multipass-1.14.1-rc1+mac.14+gf2381bfe9.mac-Darwin.pkg -target / ``` -The getting started instructions below use the command line interface. If you prefer to use the graphical interface start it from the macOS Launchpad, the initial screen is shown below. You can use the UI to create, start, and stop virtual machines. +The getting started instructions below use the command line interface. If you prefer to use the graphical interface start it from the macOS Launchpad, the initial screen is shown below. You can use the UI to create, start, and stop virtual machines. ![Connect #center](/install-guides/_images/multipass-gui.png) Multipass is now installed. Proceed to [Get Started with Multipass](#getstarted). -## Installation on Arm Linux +## Installation on Arm Linux -Multipass can be used on Arm Linux computers such as the Raspberry Pi 5. +Multipass can be used on Arm Linux computers such as the Raspberry Pi 5. Running Multipass on Linux requires the KVM hypervisor. KVM does not typically work on virtual machines, it requires bare metal. @@ -80,7 +84,7 @@ Install and run the `kvm-ok` command to confirm KVM is available. 
Install `kvm-ok` on Debian based Linux distributions using: -```console +```bash sudo apt install cpu-checker -y ``` @@ -106,11 +110,11 @@ INFO: For more detailed results, you should run this as root HINT: sudo /usr/sbin/kvm-ok ``` -If KVM is available, proceed with the install. +If KVM is available, proceed with the install. -### Install +### Install -You may need to install the Snap daemon, `snapd`, before installing Multipass. +You may need to install the Snap daemon, `snapd`, before installing Multipass. If you are not sure if it is running, execute the command: @@ -118,26 +122,26 @@ If you are not sure if it is running, execute the command: snap version ``` -If the command is found and version information is printed, then `snapd` is running. +If the command is found and version information is printed, then `snapd` is running. If you need to install `snapd` run: -```console +```bash sudo apt install snapd -y ``` LXD is also required for Multipass. -```console +```bash sudo snap install lxd ``` {{% notice Note %}} -You can select from three Multipass releases: stable, beta, or edge. The default version is stable. +You can select from three Multipass releases: stable, beta, or edge. The default version is stable. Add `--beta` or `--edge` to the install command below to select these more recent versions. {{% /notice %}} -```console +```bash sudo snap install multipass ``` @@ -147,13 +151,13 @@ Multipass is now installed. To confirm multipass is installed run the `version` command. -```console +```bash multipass version ``` If the `multipass` command is not found, you can add `/snap/bin` to the Bash search path using: -```console +```bash export PATH=$PATH:/snap/bin ``` @@ -161,8 +165,8 @@ Multipass runs Ubuntu images. The last three LTS (long-term support) versions ar To see the available images run the `find` command. Any of the listed images can be used to create a new instance. 
-```console -multipass find +```bash +sudo multipass find ``` The output from `find` will be similar to the below. @@ -185,9 +189,9 @@ ros2-humble 0.1 A development and ### Launching instances -The default values for launching instances allocate 1 CPU, create a small disk (5 Gb), and limited memory (1 Gb). By default, the name of the instance is automatically assigned. +The default values for launching instances allocate 1 CPU, create a small disk (5 Gb), and limited memory (1 Gb). By default, the name of the instance is automatically assigned. -Most developers are likely to want to modify the defaults. +Most developers are likely to want to modify the defaults. Use the command below to launch a virtual machine instance with non-default values. @@ -257,7 +261,7 @@ For example, to mount a host directory called `dev` and have it appear in the in multipass mount dev m1u:/home/ubuntu/dev ``` -There are also options to adjust the user and group IDs as needed to avoid permission problems. +There are also options to adjust the user and group IDs as needed to avoid permission problems. Use the `umount` command to unmount the directory. @@ -269,7 +273,7 @@ Directories can be dynamically mounted and unmounted without stopping the instan ### Stop and Start -Multipass instances can be stopped and started quickly. +Multipass instances can be stopped and started quickly. To stop the instance. @@ -287,9 +291,9 @@ multipass start m1u ### Cleanup -Multipass instances are easy to delete. There is one extra level of protection to recover deleted instances before they are fully deleted. +Multipass instances are easy to delete. There is one extra level of protection to recover deleted instances before they are fully deleted. -Use the `delete` command to delete. +Use the `delete` command to delete. ```console multipass delete m1u @@ -302,7 +306,7 @@ multipass recover m1u Use the `purge` command to permanently remove all deleted instances. 
```console -multipass purge +multipass purge ``` {{% notice Note %}} Purged instances are no longer recoverable. diff --git a/content/install-guides/papi.md b/content/install-guides/papi.md index 4ef68f94d2..b6b84385ed 100644 --- a/content/install-guides/papi.md +++ b/content/install-guides/papi.md @@ -8,6 +8,7 @@ additional_search_terms: test_images: - ubuntu:latest +test_maintenance: true ### FIXED, DO NOT MODIFY weight: 1 # Defines page ordering. Must be 1 for first (or only) page. @@ -19,7 +20,7 @@ layout: installtoolsall # DO NOT MODIFY. Always true for tool install ar Performance Application Programming Interface (PAPI) provides a consistent library of functions for accessing performance counters from an application. -You can use PAPI in your source code to access performance counters and profile specific sections of your application. +You can use PAPI in your source code to access performance counters and profile specific sections of your application. PAPI is available as source code on GitHub. @@ -43,7 +44,7 @@ If you see a different result, you are not using an Arm computer running 64-bit You need `gcc` and `make` to build PAPI. -Use the Linux package manager to install the required software packages on your Linux distribution. +Use the Linux package manager to install the required software packages on your Linux distribution. For Debian based distributions (including Ubuntu) run: @@ -58,24 +59,27 @@ sudo apt install -y make gcc ```bash { target="ubuntu:latest" } git clone https://github.com/icl-utk-edu/papi/ -cd papi/src ``` 2. Configure and compile the source code: ```bash { target="ubuntu:latest" } -./configure && make +cd papi/src +chmod +x configure +./configure +make ``` 3. Configure and compile the source code: ```bash { target="ubuntu:latest" } +cd papi/src sudo make install ``` 4. 
Copy the test program below and paste it into a text file named `papi-test.c``: -```C +```C { file_name="papi-test.c" } #include #include #include diff --git a/content/install-guides/perf.md b/content/install-guides/perf.md index 3e516e0e8d..d69ab029cd 100644 --- a/content/install-guides/perf.md +++ b/content/install-guides/perf.md @@ -19,6 +19,10 @@ author_primary: Jason Andrews ### Link to official documentation official_docs: https://perf.wiki.kernel.org/index.php/Main_Page +test_images: +- ubuntu:latest +test_maintenance: true + ### PAGE SETUP weight: 1 # Defines page ordering. Must be 1 for first (or only) page. tool_install: true # Set to true to be listed in main selection page, else false @@ -31,7 +35,7 @@ Linux Perf is a command line performance analysis tool. The source code is part Perf can be used on a wide variety of Arm Linux systems including laptops, desktops, cloud virtual machines, Windows on Arm with WSL (Windows Subsystem for Linux), and ChromeOS with Linux enabled. -Perf is best installed using a Linux package manager, but if a suitable package is not available you can build it from source code. Both situations are covered below. +Perf is best installed using a Linux package manager, but if a suitable package is not available you can build it from source code. Both situations are covered below. ## Before you begin @@ -39,7 +43,7 @@ Follow the instructions below to install Perf on an Arm Linux system. Confirm you are using an Arm machine by running: -```console +```bash uname -m ``` @@ -55,11 +59,11 @@ Perf is dependent on the version of the Linux kernel. To find your version run: -```console +```bash uname -r ``` -The output will be a string with the first two numbers providing the major and minor kernel version numbers. +The output will be a string with the first two numbers providing the major and minor kernel version numbers. For example: @@ -67,13 +71,13 @@ For example: 5.15.0-79-generic ``` -This indicates kernel version 5.15. 
+This indicates kernel version 5.15. ## Install Perf -The Perf source code is part of the Linux kernel source tree. +The Perf source code is part of the Linux kernel source tree. -There are two ways to install Perf on Arm Linux machines: +There are two ways to install Perf on Arm Linux machines: - Use a [Linux package manager](#packman) - Build the [source code](#source) @@ -85,7 +89,7 @@ Use the tabs below and copy the commands for your Linux package manager: {{< tabpane code=true >}} {{< tab header="Ubuntu" language="bash">}} -sudo apt update && sudo apt install linux-tools-generic linux-tools-$(uname -r) -y +sudo apt update && sudo apt install linux-tools-generic linux-tools-$(uname -r) -y {{< /tab >}} {{< tab header="Debian/Raspberry Pi OS" language="bash">}} sudo apt install linux-perf -y @@ -95,9 +99,9 @@ sudo dnf install perf -y {{< /tab >}} {{< /tabpane >}} -If the package manager completes successfully you can skip the next section and proceed to [test](#test) Perf. +If the package manager completes successfully you can skip the next section and proceed to [test](#test) Perf. -If the package manager does not complete successfully, it usually means there was no package available for your specific kernel version as shown by `uname -r`. +If the package manager does not complete successfully, it usually means there was no package available for your specific kernel version as shown by `uname -r`. There are hundreds of packages, and the package name must match the output of `uname -r` exactly. This is most common on Arm single board computers (SBCs) where the Linux kernel has been customized. @@ -105,13 +109,13 @@ If there is no match, you can install Perf using the source code as described in ### Build the source code {#source} -If there is no package available for your kernel version you can build Perf from source code. +If there is no package available for your kernel version you can build Perf from source code. 
-Building Perf from source requires `gcc`, `flex`, `bison`, `git`, and `make`. Install these on your system. +Building Perf from source requires `gcc`, `flex`, `bison`, `git`, and `make`. Install these on your system. For Debian and Ubuntu run: -```console +```bash sudo apt install gcc flex bison make git -y ``` @@ -119,13 +123,13 @@ Use `git` to get the source code. Use `--branch` to specify the Linux kernel sou For example, if your kernel version is 5.15, use: -```console +```bash git clone --depth=1 --branch v5.15 git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git ``` Change to the `linux` directory and build: -```console +```bash cd linux make -C tools/perf ``` @@ -144,7 +148,7 @@ sudo cp tools/perf/perf /usr/local/bin Regardless of how you installed Perf, run the `version` command: -```console +```bash perf version ``` @@ -156,7 +160,7 @@ perf version 5.15.116 You can also try the `list` command to confirm `perf` is working as expected: -```console +```bash perf list ``` @@ -190,7 +194,6 @@ branch: [Mispredicted or not predicted branch speculatively executed] br_pred [Predictable branch speculatively executed] - ``` Perf is not working correctly if you see output similar to the messages below. To fix the errors you need to [build Perf from source](#source). @@ -219,7 +222,7 @@ E: linux-perf-5.15 is not installed. ### Generate a test Perf report Generate a simple Perf report. For example: -```command +```console perf stat -a pwd ``` The `pwd` command output will be shown as well as the report: @@ -262,9 +265,9 @@ Typically the value must be 2 or less to collect Perf metrics. To set this until the next reboot, run the following command: -```console +```bash sudo sysctl -w kernel.perf_event_paranoid=2 -`````` +``` To permanently set the paranoid level, add the following line to the file `/etc/sysctl.conf` @@ -274,13 +277,13 @@ kernel.perf_event_paranoid=2 ### Additional Perf commands -There are five common commands used in performance analysis. 
+There are five common commands used in performance analysis. * **stat** provides performance counter statistics for the overall execution of a program * **record** samples the program and records the samples into a data file (perf.data by default) -* **report** generates a report of where the samples occurred +* **report** generates a report of where the samples occurred * **annotate** displays the annotated code showing the source and assembly code for the samples @@ -288,17 +291,17 @@ There are five common commands used in performance analysis. Arm systems use a kernel driver to expose PMU hardware counters. The driver needs to be enabled in the Linux kernel in order to collect the hardware events. -To check if the driver is running use the `dmesg` command: +To check if the driver is running use the `dmesg` command: ```bash -dmesg | grep "PMU driver" +sudo dmesg | grep "PMU driver" ``` {{% notice Note%}} Depending on your system, you might need to use `sudo` to run the `dmesg` command. {{% /notice %}} -If you see output similar to the message below, the Arm PMU driver is installed. +If you see output similar to the message below, the Arm PMU driver is installed. ```output [ 0.046063] hw perfevents: enabled with armv8_pmuv3_0 PMU driver, 3 counters available @@ -306,9 +309,9 @@ If you see output similar to the message below, the Arm PMU driver is installed. The number of counters available could be between 1 and 7 depending on processor types and virtualization. -If you see multiple instances of the PMU driver, it means the hardware is a [big.LITTLE](https://www.arm.com/en/technologies/big-little) system with different processors, each has it's own PMU. +If you see multiple instances of the PMU driver, it means the hardware is a [big.LITTLE](https://www.arm.com/en/technologies/big-little) system with different processors, each has its own PMU. 
-If the message is not in the kernel message log, check both the PMU driver device tree entry and the kernel configuration parameters listed above. +If the message is not in the kernel message log, check both the PMU driver device tree entry and the kernel configuration parameters listed above. The important kernel parameters are: @@ -320,4 +323,4 @@ CONFIG_ARM_PMU=y CONFIG_HW_PERF_EVENTS=y ``` -You are now ready to use Perf on your Arm Linux system. +You are now ready to use Perf on your Arm Linux system. diff --git a/content/install-guides/pulumi.md b/content/install-guides/pulumi.md index 55c4e5d4a0..1f2130053f 100644 --- a/content/install-guides/pulumi.md +++ b/content/install-guides/pulumi.md @@ -4,6 +4,7 @@ minutes_to_complete: 5 official_docs: https://www.pulumi.com/docs/ author_primary: Jason Andrews +test_maintenance: true test_images: - ubuntu:latest @@ -56,50 +57,56 @@ source $HOME/.bashrc Confirm `pulumi` is now in the search path: -```bash { env_source="~/.bashrc" output_lines = "2"} +```bash which pulumi -/home/ubuntu/.pulumi/bin/pulumi +``` + +```output +/usr/local/bin/pulumi ``` Print the version: -```bash { env_source="~/.bashrc" output_lines = "2"} +```bash pulumi version -v3.78.0 +``` + +```output +v3.135.1 ``` You are ready to use Pulumi on your Linux machine. ## Get started {#start} -Pulumi keeps your projects and state information in Pulumi Cloud, making it easy to access them from anywhere. If you want to use Pulumi Cloud visit [app.pulumi.com](https://app.pulumi.com/) and sign up. +Pulumi keeps your projects and state information in Pulumi Cloud, making it easy to access them from anywhere. If you want to use Pulumi Cloud visit [app.pulumi.com](https://app.pulumi.com/) and sign up. -It's not necessary to use Pulumi Cloud to get started, you can store project information on your local computer. +It's not necessary to use Pulumi Cloud to get started, you can store project information on your local computer. 
Below is a simple example to try out Pulumi. -The example demonstrates using Docker to pull a container image from Docker Hub to your local machine using Python. +The example demonstrates using Docker to pull a container image from Docker Hub to your local machine using Python. -To run the example, you need to install Docker. Refer to the [Docker install guide](/install-guides/docker/) for instructions. +To run the example, you need to install Docker. Refer to the [Docker install guide](/install-guides/docker/) for instructions. -You also need Python. Make sure you have `python` and `pip` installed. +You also need Python. Make sure you have `python` and `pip` installed. For `Ubuntu 22.04` on Arm you can run the commands below to install: -```bash { env_source="~/.bashrc" } +```bash sudo apt install python-is-python3 -y sudo apt install python3-pip -y ``` Create a new directory for the example: -```bash { env_source="~/.bashrc" } +```bash mkdir pulumi-test ; cd pulumi-test ``` -Log in to your local machine, a shortcut to use `~/.pulumi` to store project data. +Log in to your local machine, a shortcut to use `~/.pulumi` to store project data. -```bash { env_source="~/.bashrc" } +```bash pulumi login --local ``` @@ -110,14 +117,14 @@ For the example you need to create 3 files: Use a text editor to copy the code below to a file named `requirements.txt`. 
-```python +```output { file_name="requirements.txt" } pulumi>=3.0.0 pulumi-docker>=4.0.0 ``` Use a text editor to copy the lines below to a file named `Pulumi.yaml` -```yaml +```yaml { file_name="Pulumi.yaml" } name: alpine-pull runtime: python description: A pulumi application pull the alpine image @@ -125,7 +132,7 @@ description: A pulumi application pull the alpine image Use a text editor to copy the lines below to a file named `__main__.py` -```python +```python { file_name="__main__.py" } import pulumi import pulumi_docker as docker @@ -138,13 +145,13 @@ pulumi.export('digest', image.repo_digest) With the three files created, install the required Python packages: -```bash { env_source="~/.bashrc" } +```bash pip install -r requirements.txt ``` Run the Python script to pull the container image: -```bash { env_source="~/.bashrc" } +```console pulumi up ``` There are 4 prompts to respond to: 2. Enter a name for the stack. -3. When prompted, enter a passphrase for the stack (twice). +3. When prompted, enter a passphrase for the stack (twice). -4. Answer `yes` to the final question to create the stack. +4. Answer `yes` to the final question to create the stack. 
An example output for `pulumi up` is shown below: @@ -164,12 +171,12 @@ An example output for `pulumi up` is shown below: Please choose a stack, or create a new one: [Use arrows to move, type to filterPlease choose a stack, or create a new one: Please enter your desired stack name: test1 Created stack 'test1' -Enter your passphrase to protect config/secrets: -Re-enter your passphrase to confirm: +Enter your passphrase to protect config/secrets: +Re-enter your passphrase to confirm: Previewing update (test1): - Type Name Plan - + pulumi:pulumi:Stack alpine-pull-test1 create - + └─ docker:index:RemoteImage alpineImage create + Type Name Plan + + pulumi:pulumi:Stack alpine-pull-test1 create + + └─ docker:index:RemoteImage alpineImage create Outputs: @@ -180,9 +187,9 @@ Resources: Do you want to perform this update? yes Updating (test1): - Type Name Status - + pulumi:pulumi:Stack alpine-pull-test1 created (0.07s) - + └─ docker:index:RemoteImage alpineImage created (0.03s) + Type Name Status + + pulumi:pulumi:Stack alpine-pull-test1 created (0.07s) + + └─ docker:index:RemoteImage alpineImage created (0.03s) Outputs: @@ -197,7 +204,7 @@ Duration: 1s After the Python script runs you have the container on your machine. Confirm this using the `docker images` command: -```bash +```console docker images ``` @@ -208,3 +215,4 @@ REPOSITORY TAG IMAGE ID CREATED SIZE alpine latest f6648c04cd6c 2 days ago 7.66MB ``` + diff --git a/content/install-guides/pytorch.md b/content/install-guides/pytorch.md index 4510732cba..8d907d4e9a 100644 --- a/content/install-guides/pytorch.md +++ b/content/install-guides/pytorch.md @@ -45,11 +45,11 @@ aarch64 If you see a different result, then you are not using an Arm computer running 64-bit Linux. -PyTorch requires Python 3, and this can be installed with `pip`. +PyTorch requires Python 3 and can be installed with `pip`. 
For Ubuntu, run: -```console +```bash sudo apt install python-is-python3 python3-pip python3-venv -y ``` @@ -72,7 +72,7 @@ source venv/bin/activate In your active virtual environment, install PyTorch: ```bash -pip install torch torchvision torchaudio +pip install torch torchvision torchaudio ``` ## Get started @@ -81,7 +81,7 @@ Test PyTorch: Use a text editor to copy and paste the code below into a text file named `pytorch.py`: -```console +```python { file_name="pytorch.py" } import torch print(torch.__version__) x = torch.rand(5,3) @@ -91,8 +91,8 @@ exit() Run the example code: -```console -python ./pytorch.py +```bash +python pytorch.py ``` The expected output is similar to: @@ -108,7 +108,7 @@ tensor([[0.1334, 0.7932, 0.4396], To get more information about the build options for PyTorch, run: -```console +```bash python -c "import torch; print(*torch.__config__.show().split(\"\n\"), sep=\"\n\")" ``` @@ -123,13 +123,14 @@ PyTorch built with: - LAPACK is enabled (usually provided by MKL) - NNPACK is enabled - CPU capability usage: NO AVX + - Build settings: BLAS_INFO=open, BUILD_TYPE=Release, CXX_COMPILER=/opt/rh/devtoolset-10/root/usr/bin/c++, CXX_FLAGS=-ffunction-sections -fdata-sections -D_GLIBCXX_USE_CXX11_ABI=0 -fabi-version=11 -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOCUPTI -DLIBKINETO_NOROCTRACER -DLIBKINETO_NOXPUPTI=ON -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -O2 -fPIC -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Werror=bool-operation -Wnarrowing -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-unused-parameter -Wno-strict-overflow -Wno-strict-aliasing -Wno-stringop-overflow -Wsuggest-override -Wno-psabi -Wno-error=old-style-cast -Wno-missing-braces -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow,
LAPACK_INFO=open, TORCH_VERSION=2.5.1, USE_CUDA=OFF, USE_CUDNN=OFF, USE_CUSPARSELT=OFF, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_GLOO=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=OFF, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, USE_ROCM_KERNEL_ASSERT=OFF, ``` The configuration output is an advanced option to check the tools and structure used to build PyTorch. ## BFloat16 floating-point number format -Recent Arm processors support the BFloat16 (BF16) number format in PyTorch. For example, AWS Graviton3 processors support BFloat16. +Recent Arm processors support the BFloat16 (BF16) number format in PyTorch. For example, AWS Graviton3 processors support BFloat16. To check if your system includes BFloat16, use the `lscpu` command: @@ -157,11 +158,11 @@ export DNNL_DEFAULT_FPMATH_MODE=BF16 LRU cache capacity is used to avoid redundant primitive creation latency overhead. -This caching feature increases memory usage. If needed, you can lower the value to reduce memory usage. +This caching feature increases memory usage. If needed, you can lower the value to reduce memory usage. You should tune the capacity to an optimal value for your use case. -Use an environment variable to set the value. The recommended starting value is: +Use an environment variable to set the value. The recommended starting value is: ```console export LRU_CACHE_CAPACITY=1024 @@ -173,11 +174,11 @@ Transparent huge pages (THP) provide an alternative method of utilizing huge pag To check if THP is available on your system, run: -```console +```bash cat /sys/kernel/mm/transparent_hugepage/enabled ``` -The setting in brackets is your current setting. +The setting in brackets is your current setting. 
The most common output, `madvise`, is shown below: @@ -187,7 +188,7 @@ always [madvise] never If the setting is `never`, you can change to `madvise` by running: -```console +```bash echo madvise | sudo tee /sys/kernel/mm/transparent_hugepage/enabled ``` @@ -209,7 +210,7 @@ pip install transformers datasets Use a text editor to save the code below as `profile-vit.py`: -```python +```python { file_name="profile-vit.py" } import torch from transformers import ViTFeatureExtractor, ViTForImageClassification from datasets import load_dataset @@ -245,7 +246,7 @@ print(prof.key_averages().table(sort_by="self_cpu_time_total")) Run the example and check the performance information printed: -```console +```bash python ./profile-vit.py ``` @@ -297,7 +298,7 @@ Self CPU time total: 786.880ms Experiment with the two environment variables for BFloat16 and THP and observe the performance differences. -You can set each variable and run the test again and observe the new profile data and run time. +You can set each variable and run the test again and observe the new profile data and run time. ## Profiling example with dynamic quantization diff --git a/content/install-guides/rust.md b/content/install-guides/rust.md index b6c21ade2c..e92f5e617d 100644 --- a/content/install-guides/rust.md +++ b/content/install-guides/rust.md @@ -10,6 +10,7 @@ additional_search_terms: test_images: - ubuntu:latest +test_maintenance: true ### FIXED, DO NOT MODIFY weight: 1 # Defines page ordering. Must be 1 for first (or only) page. @@ -19,9 +20,9 @@ multitool_install_part: false # Set to true if a sub-page of a multi-page arti layout: installtoolsall # DO NOT MODIFY. Always true for tool install articles --- -[Rust](https://www.rust-lang.org/) is an open source programming language. +[Rust](https://www.rust-lang.org/) is an open source programming language. -This install guide is for Linux application developers wishing to use Rust. +This install guide is for Linux application developers wishing to use Rust.
If you wish to use Rust to build embedded applications for Arm, refer to [Rust for Embedded Applications](/install-guides/rust_embedded/) instead. @@ -62,7 +63,7 @@ These Linux distributions use `yum` as the package manager. Use the `yum` command to install the required software packages. If the machine has `sudo` you can use it. -```bash { target="fedora:latest" } +```console sudo yum update -y sudo yum install -y curl gcc ``` @@ -113,7 +114,7 @@ To configure your current shell, run: source "$HOME/.cargo/env" ``` -The latest version of Rust is now installed. +The latest version of Rust is now installed. The installer updates `$HOME/.bashrc` and `SHOME/.profile` to set up the environment. Start a new shell or run the following command to continue: diff --git a/content/install-guides/rust_embedded.md b/content/install-guides/rust_embedded.md index b36c43bb91..7d6c7e5004 100644 --- a/content/install-guides/rust_embedded.md +++ b/content/install-guides/rust_embedded.md @@ -7,6 +7,10 @@ additional_search_terms: - compiler - rust +test_images: +- ubuntu:latest +test_maintenance: true + ### FIXED, DO NOT MODIFY weight: 1 # Defines page ordering. Must be 1 for first (or only) page. tool_install: true # Set to true to be listed in main selection page, else false @@ -15,9 +19,9 @@ multitool_install_part: false # Set to true if a sub-page of a multi-page arti layout: installtoolsall # DO NOT MODIFY. Always true for tool install articles --- -[Rust](https://www.rust-lang.org/) is an open source programming language. +[Rust](https://www.rust-lang.org/) is an open source programming language. -This install guide is for developers using Rust for their embedded applications. +This install guide is for developers using Rust for their embedded applications. If you are using Rust to build Linux applications on an Arm Linux platform, refer to [Rust for Linux Applications](/install-guides/rust/) instead. 
@@ -32,7 +36,7 @@ For a thorough review of all options, refer to the official documentation. See [ Run the following command to download and install Rust: ```bash -curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y ``` Start a new shell or run the following command to continue: @@ -55,11 +59,11 @@ cargo 1.78.0 (54d8815d0 2024-03-26) ### Add Arm cross-compilation support Add cross compilation support for the required Arm Architectures. For example, to add support for Armv7-M architecture, you can use: -```command +```bash rustup target add thumbv7m-none-eabi ``` For a full list of supported architectures, use: -```command +```bash rustup target list ``` @@ -67,7 +71,7 @@ rustup target list To generate a project from a template, you need `cargo-generate`. To install and rebuild use: -```command +```bash sudo apt install -y libssl-dev pkg-config sudo apt install -y build-essential cargo install cargo-generate diff --git a/content/install-guides/ssh.md b/content/install-guides/ssh.md index 89830863f8..7f33ca74bc 100644 --- a/content/install-guides/ssh.md +++ b/content/install-guides/ssh.md @@ -17,6 +17,10 @@ author_primary: Jason Andrews ### Link to official documentation official_docs: https://www.openssh.com/manual.html +test_images: +- ubuntu:latest +test_maintenance: true + ### PAGE SETUP weight: 1 # Defines page ordering. Must be 1 for first (or only) page. tool_install: true # Set to true to be listed in main selection page, else false @@ -25,26 +29,26 @@ multitool_install_part: false # Set to true if a sub-page of a multi-page arti layout: installtoolsall # DO NOT MODIFY. Always true for tool install articles --- -Secure Shell (SSH) is the primary tool used to connect to remote Linux servers. It provides a secure shell on a remote machine, and is used frequently in cloud and server development. +Secure Shell (SSH) is the primary tool used to connect to remote Linux servers.
It provides a secure shell on a remote machine, and is used frequently in cloud and server development. This section provides answers to the most frequently asked SSH setup questions related to server and cloud development. -Feel free to seek out additional SSH tutorials or add more information to this page. +Feel free to seek out additional SSH tutorials or add more information to this page. -## SSH +## SSH SSH is a client server application. -An SSH server, also called the SSH daemon, runs on a remote machine. +An SSH server, also called the SSH daemon, runs on a remote machine. -An SSH client runs on the local machine (the one you are typing on) and connects to the remote daemon. +An SSH client runs on the local machine (the one you are typing on) and connects to the remote daemon. ### Decide if the SSH daemon is already running For SSH to work, the SSH daemon must be running on the remote machine. Many Linux distributions install and run the SSH daemon automatically. To find out if the SSH daemon is already running running use the `ps` command. -```console +```bash ps -ef | grep ssh ``` If the result includes a line with `sshd` the daemon is running. @@ -52,7 +56,7 @@ If the result includes a line with `sshd` the daemon is running. root 1113 1 0 18:48 ? 00:00:00 /usr/sbin/sshd -D ``` Another way to check if the SSH daemon is running is to query the SSH service. -```console +```bash sudo systemctl status sshd ``` If the output displays "running", then the SSH daemon is already running. @@ -65,34 +69,34 @@ If the SSH daemon is not running on the remote Linux machine, install it using t For Ubuntu/Debian distributions: ```bash -sudo apt-get install openssh-server +sudo apt-get install openssh-server ``` For Red Hat and Amazon Linux distributions. -```bash -sudo yum install openssh-server +```console +sudo yum install openssh-server ``` ### Start and stop the SSH daemon {#startstop} -The commands below are for any Linux distribution using `systemd`. 
This includes Debian, Ubuntu, and Amazon Linux. +The commands below are for any Linux distribution using `systemd`. This includes Debian, Ubuntu, and Amazon Linux. To start the SSH daemon: -```console -sudo systemctl start ssh +```bash +sudo systemctl start ssh ``` To stop the SSH daemon: -```console -sudo systemctl stop ssh +```bash +sudo systemctl stop ssh ``` To restart the SSH daemon: -```console -sudo systemctl restart ssh +```bash +sudo systemctl restart ssh ``` ### Use a password with SSH For security reasons, cloud instances don’t enable password logins and there is no password set for the user accounts (such as `ubuntu` or `ec2-user`). -Password access is useful to connect when the private key is not available. +Password access is useful to connect when the private key is not available. To enable passwords edit the file `/etc/sshd_config` and set `PasswordAuthentication` to `yes`. @@ -100,16 +104,16 @@ To enable it from the command line, run this command: ```console sudo sed -i '/PasswordAuthentication no/c\PasswordAuthentication yes' /etc/ssh/sshd_config ``` -Restart the SSH daemon using the commands [above](#startstop). +Restart the SSH daemon using the commands [above](#startstop). -To use a password for SSH a password must be created. +To use a password for SSH a password must be created. To create a password for the user ubuntu: ```console sudo passwd ubuntu ``` -For improved security, set the security group of the cloud instance to allow port 22 traffic (SSH) from a minimal set of IP addresses, not anywhere on the internet. Use password access with caution. +For improved security, set the security group of the cloud instance to allow port 22 traffic (SSH) from a minimal set of IP addresses, not anywhere on the internet. Use password access with caution. ### SSH keys @@ -119,7 +123,7 @@ If a new key pair is needed use the `ssh-keygen` command to generate a key pair: ```console ssh-keygen ``` -Answer the questions. 
Pressing enter to accept all defaults works fine. +Answer the questions. Pressing enter to accept all defaults works fine. By default, the keys are created in `~/.ssh/id_rsa.pub` (public key) and `~/.ssh/id_rsa` (private key) @@ -131,13 +135,13 @@ Accessing an AWS EC2 instance running Ubuntu using: ```console ssh -i ubuntu@ ``` -To use SSH without specifying `-i ` every time create an SSH configuration for the remote machine. +To use SSH without specifying `-i ` every time create an SSH configuration for the remote machine. -Edit the file `~/.ssh/config` on the local machine. +Edit the file `~/.ssh/config` on the local machine. Pick a name for the remote machine, such as `myserver`, add the public IP address or DNS name as the Hostname. -User is the username on the remote machine and IdentityFile is the path to the private key on the local machine. +User is the username on the remote machine and IdentityFile is the path to the private key on the local machine. ```output Host myserver @@ -154,13 +158,13 @@ ssh myserver ### Add a new key pair -If you want to give access to somebody else without enabling password access or sharing your private key, you can add another key pair to the remote machine. You may also want to change the key pair used when the remote machine was created. +If you want to give access to somebody else without enabling password access or sharing your private key, you can add another key pair to the remote machine. You may also want to change the key pair used when the remote machine was created. -To add or change the key pair edit the file `~/.ssh/authorized_keys` on the remote machine. +To add or change the key pair edit the file `~/.ssh/authorized_keys` on the remote machine. -Add a new public key to `authorized_keys`. You can also delete the current public key and just use the new one. +Add a new public key to `authorized_keys`. You can also delete the current public key and just use the new one. 
-If you ran `ssh-keygen` on your local machine, the public key is at `~/.ssh/id_rsa.pub` +If you ran `ssh-keygen` on your local machine, the public key is at `~/.ssh/id_rsa.pub` Use the new private key on the local machine to connect. If you have `~/.ssh/id_rsa` on your local machine it will be used automatically and you can SSH to the remote machine. @@ -169,7 +173,7 @@ Use the new private key on the local machine to connect. If you have `~/.ssh/id_ You can use port forwarding to access a port on a remote computer which is blocked by a firewall or security group. This is helpful when your application is running on a remote computer with SSH access, but no other ports are open. For example, if you are running a web application on a cloud instance and it uses port 3000 you can SSH to the cloud instance with port forwarding and access the application. ```console -ssh -i -L 3000:localhost:3000 ubuntu@ +ssh -i -L 3000:localhost:3000 ubuntu@ ``` Once you SSH, you can access `localhost:3000` and the traffic is forwarded to the remote computer. diff --git a/content/install-guides/streamline-cli.md b/content/install-guides/streamline-cli.md index faa863ac7b..20916f3fc7 100644 --- a/content/install-guides/streamline-cli.md +++ b/content/install-guides/streamline-cli.md @@ -19,6 +19,10 @@ author_primary: Julie Gaskin ### Link to official documentation official_docs: https://developer.arm.com/documentation/109847/latest/ +test_images: +- ubuntu:latest +test_maintenance: true + ### PAGE SETUP weight: 1 # Defines page ordering. Must be 1 for first (or only) page. tool_install: true # Set to true to be listed in main selection page, else false @@ -114,6 +118,7 @@ The script can also be used to download a specific version, or install to a user * To download, but not install, a specific version: + ```sh python3 get-streamline-cli.py download --tool-version ``` @@ -163,14 +168,14 @@ You might need to adapt them slightly to other Linux distributions. 
To apply the patch to the latest 6.7 kernel, you can use `git`: -```sh -git apply v6.7-combined.patch +```console +git apply patch/v6.7-combined.patch ``` or `patch`: -```sh -patch -p 1 -i v6.7-combined.patch +```console +patch -p 1 -i patch/v6.7-combined.patch ``` ### Manual application to an RPM-based distribution diff --git a/content/install-guides/sysbox.md b/content/install-guides/sysbox.md index ae2075f29f..412c76a4e6 100644 --- a/content/install-guides/sysbox.md +++ b/content/install-guides/sysbox.md @@ -21,6 +21,10 @@ author_primary: Jason Andrews ### Link to official documentation official_docs: https://github.com/nestybox/sysbox/blob/master/docs/user-guide/README.md +test_images: +- ubuntu:latest +test_maintenance: true + ### PAGE SETUP weight: 1 # Defines page ordering. Must be 1 for first (or only) page. tool_install: true # Set to true to be listed in main selection page, else false @@ -29,21 +33,21 @@ multitool_install_part: false # Set to true if a sub-page of a multi-page arti layout: installtoolsall # DO NOT MODIFY. Always true for tool install articles --- -[Sysbox](https://github.com/nestybox/sysbox/blob/master/README.md) enables you to use Docker containers for workloads that typically require virtual machines. Containers run with Sysbox are able to run software that relies on the [systemd System and Service Manager](https://systemd.io/) that is not usually present in containers, and it does this without the need for a full virtual machine and hardware emulation. +[Sysbox](https://github.com/nestybox/sysbox/blob/master/README.md) enables you to use Docker containers for workloads that typically require virtual machines. Containers run with Sysbox are able to run software that relies on the [systemd System and Service Manager](https://systemd.io/) that is not usually present in containers, and it does this without the need for a full virtual machine and hardware emulation. 
-Running Docker inside Docker, and Kubernetes inside Docker, are also Sysbox use cases. Without Sysbox, these are difficult because the Docker daemon requires systemd. +Running Docker inside Docker, and Kubernetes inside Docker, are also Sysbox use cases. Without Sysbox, these are difficult because the Docker daemon requires systemd. In summary, Sysbox is a powerful container runtime that provides many of the benefits of virtual machines without the overhead of running a full VM. It is good for workloads that require the ability to run system-level software. ## What do I need to run Sysbox? -Sysbox runs on Linux and supports Arm. +Sysbox runs on Linux and supports Arm. Sysbox has limited support for older versions of Linux, but recent Linux versions are easily compatible. If you are unsure about your Linux distribution and Linux kernel version, you can check [Sysbox Distro Compatibility](https://github.com/nestybox/sysbox/blob/master/docs/distro-compat.md) -Sysbox is a container runtime, and so Docker is required before installing Sysbox. +Sysbox is a container runtime, and so Docker is required before installing Sysbox. In most cases, you can install Docker on Arm Linux with the commands: @@ -52,15 +56,15 @@ curl -fsSL get.docker.com -o get-docker.sh && sh get-docker.sh sudo usermod -aG docker $USER ; newgrp docker ``` -Refer to the [Docker install guide](/install-guides/docker/docker-engine/) for more information. +Refer to the [Docker install guide](/install-guides/docker/docker-engine/) for more information. -You can use Sysbox on a virtual machine from a [cloud service provider](/learning-paths/servers-and-cloud-computing/intro/find-hardware/), a Raspberry Pi 5, or any other Arm Linux-based computer. +You can use Sysbox on a virtual machine from a [cloud service provider](/learning-paths/servers-and-cloud-computing/intro/find-hardware/), a Raspberry Pi 5, or any other Arm Linux-based computer. ## How do I install Sysbox? 
Download the Sysbox official package from [Sysbox Releases](https://github.com/nestybox/sysbox/releases/) -You can download the Debian package for Arm from the command line: +You can download the Debian package for Arm from the command line: ```bash wget https://downloads.nestybox.com/sysbox/releases/v0.6.5/sysbox-ce_0.6.5-0.linux_arm64.deb @@ -90,11 +94,11 @@ If Sysbox is running, you see the output: ## How can I get set up with Sysbox quickly? -You can try Sysbox by creating a container image that includes systemd and Docker. +You can try Sysbox by creating a container image that includes systemd and Docker. Use a text editor to copy the text below to a file named `Dockerfile`: -```console +```console {file_name="Dockerfile"} FROM ubuntu:24.04 RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections @@ -116,7 +120,7 @@ EXPOSE 22 ENTRYPOINT [ "/sbin/init", "--log-level=err" ] ``` -Notice that Docker and the SSH server are installed, and port 22 is open for SSH connections. +Notice that Docker and the SSH server are installed, and port 22 is open for SSH connections. Build a container image using `docker`: @@ -126,11 +130,11 @@ docker build -t sysbox-test -f Dockerfile . Use Sysbox as the container runtime to create a new container: -```bash -docker run --runtime=sysbox-runc -it -P --hostname=sbox sysbox-test +```console +docker run --runtime=sysbox-runc -P --hostname=sbox sysbox-test ``` -The animated output below shows the Linux init process running. You can log in with the password `ubuntu`, or change it in the Dockerfile above. +The animated output below shows the Linux init process running. You can log in with the password `ubuntu`, or change it in the Dockerfile above. You can use Docker inside the container and the SSH server operates as expected. Both are possible because systemd is running in the container. @@ -138,9 +142,9 @@ You can use Docker inside the container and the SSH server operates as expected. 
## How can I use SSH to connect to a Sysbox container? -To connect using SSH, you can identify the IP address of your Sysbox container in two alternative ways, from inside the container, or from outside the container. +To connect using SSH, you can identify the IP address of your Sysbox container in two alternative ways, from inside the container, or from outside the container. -To find the IP address from inside the container use the `ifconfig` command: +To find the IP address from inside the container use the `ifconfig` command: ```console ifconfig @@ -168,11 +172,11 @@ ssh ubuntu@172.20.0.2 Log in using the same `ubuntu` username and password. -You can also use the `docker` command to identify the IP address and port from outside the container. +You can also use the `docker` command to identify the IP address and port from outside the container. Run the command below from another shell outside of the Sysbox container: -```console +```bash docker ps ``` @@ -199,4 +203,4 @@ You can exit the Sysbox container using: sudo halt ``` -Sysbox behaves like a virtual machine and you can use it to run applications that require system services normally not available in containers. It is useful for testing and development tasks because the container changes are not saved, meaning that you can create a clean testing environment simply by restarting the Sysbox container. +Sysbox behaves like a virtual machine and you can use it to run applications that require system services normally not available in containers. It is useful for testing and development tasks because the container changes are not saved, meaning that you can create a clean testing environment simply by restarting the Sysbox container. 
diff --git a/content/install-guides/terraform.md b/content/install-guides/terraform.md index 429327f595..fe14076589 100644 --- a/content/install-guides/terraform.md +++ b/content/install-guides/terraform.md @@ -23,13 +23,13 @@ tool_install: true weight: 1 --- -[Terraform](https://www.terraform.io/) automates cloud infrastructure. It is an infrastructure as code tool. +[Terraform](https://www.terraform.io/) automates cloud infrastructure. It is an infrastructure as code tool. -Terraform is available for Windows, macOS, Linux and supports the Arm architecture. +Terraform is available for Windows, macOS, Linux and supports the Arm architecture. ## Before you begin -[General installation information](https://developer.hashicorp.com/terraform/downloads) is available which covers all supported operating systems. +[General installation information](https://developer.hashicorp.com/terraform/downloads) is available which covers all supported operating systems. This article provides a quick solution to install Terraform for Ubuntu on Arm. @@ -46,9 +46,9 @@ If you see a different result, you are not using an Arm computer running 64-bit ## Download and Install -The easiest way to install Terraform for Ubuntu on Arm is to use the zip file and copy the executable. +The easiest way to install Terraform for Ubuntu on Arm is to use the zip file and copy the executable. -The installation options with the Ubuntu package manager at time of writing do not work well, but please try them as they may improve. +The installation options with the Ubuntu package manager at time of writing do not work well, but please try them as they may improve. Make sure `unzip`, `curl`, and `wget` are available. 
diff --git a/content/install-guides/topdown-tool.md b/content/install-guides/topdown-tool.md index 0cfb11b489..3265736ebb 100644 --- a/content/install-guides/topdown-tool.md +++ b/content/install-guides/topdown-tool.md @@ -19,6 +19,10 @@ author_primary: Jason Andrews ### Link to official documentation official_docs: https://gitlab.arm.com/telemetry-solution/telemetry-solution +test_images: +- ubuntu:latest +test_maintenance: true + ### PAGE SETUP weight: 1 # Defines page ordering. Must be 1 for first (or only) page. tool_install: true # Set to true to be listed in main selection page, else false @@ -41,7 +45,7 @@ Follow the instructions below to install the Telemetry Solution on an Arm Linux 1. Confirm you are using an Arm machine by running: -```console +```bash uname -m ``` @@ -76,24 +80,23 @@ sudo apt install python3-pip python-is-python3 -y ```bash { target="ubuntu:latest" } git clone https://git.gitlab.arm.com/telemetry-solution/telemetry-solution.git -cd telemetry-solution/tools/topdown_tool ``` 2. Install the `topdown-tool` executable: Install `topdown-tool` in `/usr/local/bin` using: -```console +```bash +cd telemetry-solution/tools/topdown_tool sudo pip3 install -e . ``` {{% notice Note %}} If you are getting errors on the environment being externally managed, try creating a virtual environment. -``` +```bash sudo apt install python3-venv -y python3 -m venv topdown-venv source topdown-venv/bin/activate -pip3 install -e . ``` {{% /notice %}} diff --git a/content/install-guides/vnc.md b/content/install-guides/vnc.md index ef9503ba9e..c0cd7db61f 100644 --- a/content/install-guides/vnc.md +++ b/content/install-guides/vnc.md @@ -13,9 +13,14 @@ minutes_to_complete: 30 author_primary: Jason Andrews + ### Link to official documentation official_docs: https://tigervnc.org/ +test_images: +- ubuntu:latest +test_maintenance: true + ### PAGE SETUP weight: 1 # Defines page ordering. Must be 1 for first (or only) page. 
tool_install: true # Set to true to be listed in main selection page, else false @@ -28,11 +33,11 @@ Virtual Network Computing (VNC) is one of the common tools used to connect to a This section provides info about how to setup VNC on a remote Arm Linux machine. -Feel free to seek out additional VNC tutorials or add more information to this page. +Feel free to seek out additional VNC tutorials or add more information to this page. This installation only works on newer versions of Ubuntu and Debian. It was successfully tested on **Ubuntu 22.04** and is known to fail on **Ubuntu 20.04**. -## VNC +## VNC VNC is a client server application. A VNC server runs on a remote machine. A VNC client runs on the local machine and connects to the remote server. @@ -40,7 +45,7 @@ VNC is a client server application. A VNC server runs on a remote machine. A VNC To use VNC, a VNC server needs to be installed. There are multiple VNC servers which can be used. This recipe uses [TigerVNC](https://tigervnc.org/). -Desktop software is also needed. There are many options for this, but using [xfce4](https://www.xfce.org/) makes for a minimal install with good performance. +Desktop software is also needed. There are many options for this, but using [xfce4](https://www.xfce.org/) makes for a minimal install with good performance. Install the desktop software. @@ -58,11 +63,11 @@ sudo apt-get install tigervnc-standalone-server tigervnc-common -y Run the password command to set a password for VNC. This is not the password for the user account, just for the VNC client to connect to the VNC server. -```bash +```console vncpasswd ``` -Remember the password for later when the client is connected. +Remember the password for later when the client is connected. ### Configure the desktop startup @@ -75,7 +80,7 @@ unset DBUS_SESSION_BUS_ADDRESS exec startxfce4 ``` Make sure the `xstartup` file has executable permission. 
-```bash +```console chmod +x $HOME/.vnc/xstartup ``` @@ -119,14 +124,14 @@ sudo systemctl stop vncserver@1.service To restart the VNC service: ```console -sudo systemctl restart vncserver@1.service +sudo systemctl restart vncserver@1.service ``` ### Use port forwarding via SSH to connect -The default port for the first instance of VNC is `5901`. SSH port forwarding is the best solution for accessing the Linux desktop on a cloud machine. This way no additional ports need to be opened in the security group. +The default port for the first instance of VNC is `5901`. SSH port forwarding is the best solution for accessing the Linux desktop on a cloud machine. This way no additional ports need to be opened in the security group. -SSH to your remote Linux machine. Refer to [SSH](/install-guides/ssh/) for additional details. +SSH to your remote Linux machine. Refer to [SSH](/install-guides/ssh/) for additional details. Substitute your private key file and public IP address of the remote machine. @@ -142,6 +147,6 @@ localhost:5901 ``` You will be prompted for the password created earlier with `vncpasswd`. -A remote Linux Desktop should appear on your local computer. Make sure to close the VNC client first and then exit the SSH connection. +A remote Linux Desktop should appear on your local computer. Make sure to close the VNC client first and then exit the SSH connection. ![Linux desktop #center](/install-guides/_images/xfce4.png) diff --git a/content/learning-paths/cross-platform/_example-learning-path/appendix-3-test.md b/content/learning-paths/cross-platform/_example-learning-path/appendix-3-test.md index ca3b11bbfd..a95c3d1dcc 100644 --- a/content/learning-paths/cross-platform/_example-learning-path/appendix-3-test.md +++ b/content/learning-paths/cross-platform/_example-learning-path/appendix-3-test.md @@ -62,7 +62,8 @@ The framework will check the return code. If not 0, an error will be reported. 
If a specific return code is expected, it can be specified as follows: ```markdown - The file myfile.txt doesn't exist yet and this command returns 1: + The file myfile.txt doesn't exist yet + and this command should return 1: ```bash { ret_code="1" } test -f myfile.txt @@ -71,16 +72,21 @@ If a specific return code is expected, it can be specified as follows: #### Command output -When a command output is displayed in the instructions: +You can visualize the shell by specifying the `command_line` option. You can also test for expected output in the instructions by specifying the line(s) where the expected output should be displayed. This is done by adding the pipe symbol (`|`). Since the first line should contain the command itself, the indexing of the expected output lines starts at 2. You can specify a span (if the expected output is more than one line) with the dash symbol, for example `"| 2-10"`. ```markdown Let's check is this command return the expected output: - ```bash { command_line="root@localhost | 2 } + ```bash { command_line="root@localhost | 2" } echo "hello world" hello world ``` ``` +The code above renders to display the shell identity to the reader: +```bash { command_line="root@localhost | 2" } +echo "hello world" +hello world +``` The framework will check if the command returns the same output and report an error otherwise. @@ -115,7 +121,7 @@ It is important to note that the framework does run each code block as a separat This command will fail: - ```bash { ret_code="1" } + ```bash test -f myfile.txt ``` @@ -210,14 +216,14 @@ test_images: The `test_maintenance` field is a boolean that enables the framework. -The `test_images` field is a list of Docker container images the framework can pull to test the Learning Path instructions. Check [Docker Hub](https://hub.docker.com/) to explore available images. +The `test_images` field is a list of Docker container images the framework can pull to test the Learning Path instructions. 
Check [Docker Hub](https://hub.docker.com/) to explore available images. ## Run the framework From the project root folder, run: ```bash -./tools/maintenance.py -i content/learning-paths/servers-and-cloud-computing/mynewlearningpath +./tools/maintenance.py -i content/learning-paths/microcontrollers/my-new-learning-path ``` If the Learning Path contains sub-articles, the framework will run their instructions in order, depending on the sub-articles weight. @@ -228,48 +234,54 @@ Specify the `.md` file directly for single file tool install articles. ./tools/maintenance.py -i content/install-guides/mytool.md ``` -## Result summary +If the tests are successful, that will be communicated through the console. -The framework patches the metadata in the Learning Path's `_index.md` file or the .md file of the tool install to add a summary of the test status. - -```yaml -test_maintenance: true -test_images: -- ubuntu:latest -- fedora:latest -test_status: -- passed -- failed -``` +### Investigating failures -The field `test_status` is a list that indicated whether all tests passed for a corresponding Docker container image or if at least one test failed. +The framework will print information about any errors to the console. To display additional output, run with the `--debug` flag. -In the example above, the summary indicates that for this Learning Path all tests passed for the image `ubuntu:latest` but at least one test failed for the image `fedora:latest`. More information about the failures are stored in Junit XML files. - -## Visualize results +```bash +./tools/maintenance.py -i content/install-guides/mytool.md --debug +``` -Test results are stored in XML Junit files. One XML file is created by Learning Path sub-article. -It is possible to visualize the results in a web browser. The XML files can be converted with [xunit-viewer](https://www.npmjs.com/package/xunit-viewer). 
+### Saving the results -If not already installed, install [Node.js](https://nodejs.org/en/) and run: +If you want the results to be saved, add the `--stats-report` flag to the command. This will update a statistics file `stats_current_test_info.yml`, which publishes the result to the website. In order to do that, the Learning Path or Install Guide needs to exist as an entry in the statistics file. Find the category for your content. If it's an Install Guide, the name will be that of the .md file without the extension. If it's a Learning Path, you will use the name of the directory. For example: ``` -npm i -g xunit-viewer +install-guides: + mytool: + readable_title: My Tool + tests_and_status: + - ubuntu:latest: passed +microcontrollers: + my-new-learning-path: + readable_title: My new Learning Path + tests_and_status: + - ubuntu:latest: passed ``` -Then, launch the web server (e.g. on port 5050) on the folder where the XML Junit files have been created: - +```bash +./tools/maintenance.py -i content/install-guides/mytool.md --stats-report ``` -xunit-viewer -r content/learning-paths/servers-and-cloud-computing/mynewlearningpath/ -s -p 5050 + +```yaml +tests_and_status: + - ubuntu:latest: passed + - fedora:latest: failed ``` +The field `tests_and_status` is a list that indicated whether all tests passed for a corresponding Docker container image or if at least one test failed. + +In the example above, the summary indicates that for this Learning Path all tests passed for the image `ubuntu:latest` but at least one test failed for the image `fedora:latest`. More information about the failures can be found in the console. + ## Advanced usage for embedded development -#### Using the Corstone-300 FVP +### Using the Corstone-300 FVP -By default, the framework runs instructions on the Docker images specified by the [metadata](#edit-metadata). 
For embedded development, it is possible to build software in a container instance and then check its behaviour on the Corstone-300 FVP. +By default, the framework runs instructions on the Docker images specified by the [metadata](#edit-metadata). For embedded development, it is possible to build software in a container instance and then check its behaviour on the Corstone-300 FVP. -For this, all container instances used by the test framework mount a volume in `/shared`. This is where software for the target FVP can be stored. To check the execution, the FVP commands just need to be identified as a `fvp` section for the framework. +For this, all container instances used by the test framework mount a volume in `/shared`. This is where software for the target FVP can be stored. To check the execution, the FVP commands just need to be identified as a `fvp` section for the framework. For example: diff --git a/data/stats_current_test_info.yml b/data/stats_current_test_info.yml index bc8c31be7c..453526aeb8 100644 --- a/data/stats_current_test_info.yml +++ b/data/stats_current_test_info.yml @@ -29,19 +29,67 @@ sw_categories: tests_and_status: - ubuntu:latest: passed - fedora:latest: passed + ambaviz: + readable_title: Arm AMBA Viz + tests_and_status: + - ubuntu:latest: passed + ams: + readable_title: Arm Performance Studio + tests_and_status: + - ubuntu:latest: passed anaconda: readable_title: Anaconda tests_and_status: + - ubuntu:latest: failed + ansible: + readable_title: Ansible + tests_and_status: + - ubuntu:latest: passed + aperf: + readable_title: AWS Perf (APerf) + tests_and_status: + - ubuntu:latest: passed + arduino-pico: + readable_title: Arduino core for the Raspberry Pi Pico + tests_and_status: - ubuntu:latest: passed arm-gnu: readable_title: Arm GNU Toolchain tests_and_status: - ubuntu:latest: passed - fedora:latest: passed + armclang: + readable_title: Arm Compiler for Embedded + tests_and_status: + - ubuntu:latest: passed + armds: + readable_title: Arm 
Development Studio + tests_and_status: + - ubuntu:latest: passed + armie: + readable_title: Arm Instruction Emulator + tests_and_status: + - ubuntu:latest: passed + armpl: + readable_title: Arm Performance Libraries + tests_and_status: + - ubuntu:latest: passed + aws-cli: + readable_title: AWS CLI + tests_and_status: + - ubuntu:latest: passed + aws-copilot: + readable_title: AWS Copilot CLI + tests_and_status: + - ubuntu:latest: passed azure-cli: readable_title: Azure CLI tests_and_status: - ubuntu:latest: passed + bolt: + readable_title: BOLT + tests_and_status: + - ubuntu:latest: passed cross: readable_title: Cross-compiler tests_and_status: @@ -59,6 +107,10 @@ sw_categories: readable_title: Linaro Forge tests_and_status: - ubuntu:latest: passed + gcloud: + readable_title: Google Cloud Platform (GCP) CLI + tests_and_status: + - ubuntu:latest: passed gfortran: readable_title: GFortran tests_and_status: @@ -70,6 +122,10 @@ sw_categories: readable_title: Kubectl tests_and_status: - ubuntu:latest: passed + multipass: + readable_title: Multipass + tests_and_status: + - ubuntu:latest: passed native: readable_title: Native compiler tests_and_status: @@ -79,10 +135,42 @@ sw_categories: readable_title: Oracle Cloud Infrastructure (OCI) CLI tests_and_status: - ubuntu:latest: passed + papi: + readable_title: Performance API (PAPI) + tests_and_status: + - ubuntu:latest: passed + perf: + readable_title: Perf for Linux on Arm (LinuxPerf) + tests_and_status: + - ubuntu:latest: passed + pulumi: + readable_title: Pulumi + tests_and_status: + - ubuntu:latest: passed pytorch: readable_title: PyTorch tests_and_status: - ubuntu:latest: passed + rust: + readable_title: Rust for Linux Applications + tests_and_status: + - ubuntu:latest: passed + rust_embedded: + readable_title: Rust for Embedded Applications + tests_and_status: + - ubuntu:latest: passed + ssh: + readable_title: SSH + tests_and_status: + - ubuntu:latest: passed + streamline-cli: + readable_title: Streamline CLI Tools + 
tests_and_status: + - ubuntu:latest: passed + sysbox: + readable_title: Sysbox + tests_and_status: + - ubuntu:latest: passed swift: readable_title: Swift tests_and_status: [] @@ -90,6 +178,14 @@ sw_categories: readable_title: Terraform tests_and_status: - ubuntu:latest: passed + topdown-tool: + readable_title: Telemetry Solution (Topdown Methodology) + tests_and_status: + - ubuntu:latest: passed + vnc: + readable_title: VNC on Arm Linux + tests_and_status: + - ubuntu:latest: passed iot: {} laptops-and-desktops: {} mobile-graphics-and-gaming: {} diff --git a/tools/check.py b/tools/check.py index 0b61598ab9..767c3f170c 100644 --- a/tools/check.py +++ b/tools/check.py @@ -2,300 +2,306 @@ import logging import os +import shutil import subprocess import json -import yaml -from junit_xml import TestSuite, TestCase - - -''' -Parse header and patch file with test results -''' -def patch(article, results, lk): - with open(article, mode='r') as f: - content = f.read() - f.close() - header = [] - - for i in content: - start = content.find("---") + 3 - end = content.find("---", start) - - if end == start-3: - # No header - logging.debug("No header found in {}".format(article)) - return - else: - header = content[start:end] - markdown = content[end+3:] - data = yaml.safe_load(header, ) - - # Update status or create section - arr = [] - - # Check if this is a learning path - if isinstance(results, list) and data.get("test_images"): - for res in data["test_images"]: - failed = False - for el in (results): - if el[res] != 0: - logging.debug("Status on {}: FAILED".format(res)) - arr.append("failed") - failed = True - break - - if not failed: - logging.debug("Status on {}: passed".format(res)) - arr.append("passed") - elif data.get("test_images"): - for res in data["test_images"]: - if results[res] != 0: - logging.debug("Status on {}: FAILED".format(res)) - arr.append("failed") - else: - logging.debug("Status on {}: passed".format(res)) - arr.append("passed") - - data["test_status"] 
= arr - - data["test_link"] = lk - - # update markdown files with test results - with open(article, mode='w') as f: - f.write("---\n") - yaml.dump(data, f) - f.write("---") - f.close() - - # write the rest of the content - with open(article, mode='a') as f: - for i in markdown: - f.write(i) - f.close() - - -''' -Read json file and run commands in Docker -''' -def check(json_file, start, stop): +from junit_xml import TestCase +import alive_progress + +""" +Checks a dictionary for a given key. Helper function to avoid runtime errors. +""" +def dictionary_lookup(dictionary, key): + try: + # Try that the value exists, and that it is not None + value = dictionary[key] + assert value + return True + except Exception: + logging.debug(f"\"{key}\" was not found in dictionary {dictionary}.") + return False + + +""" +Initializes a Docker container and runs a few commands to set it up. + + - Install dependencies + - Set up user permissions + - Remove the default .bashrc on Ubuntu (since it returns when not interactive) + - Allow write permissions on shared folder + +If the passed image is not supported, an IOError is thrown. 
+"""
+def init_container(i_img, img):
+    # Launch
+    container_name = f"test_{i_img}"
+    logging.info(f"Initializing {container_name} -> {img}")
+    init_docker_cmd = [f"docker run --rm -t -d -v $PWD/shared:/shared --name test_{i_img} {img}"]
+    logging.debug(init_docker_cmd)
+    subprocess.run(init_docker_cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
+
+    package_manager = ""
+    user = ""
+    if img.startswith(("ubuntu", "mongo", "arm-tools")):
+        package_manager = "apt"
+        user = "sudo"
+    elif "fedora" in img:
+        package_manager = "yum"
+        user = "wheel"
+    else:
+        raise SystemExit(f"Image {img} not supported")
+
+    docker_cmd = [f"docker exec test_{i_img} {package_manager} update"]
+    logging.debug(docker_cmd)
+    subprocess.run(docker_cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
+
+    if "arm-tools" in img:
+        # These images already have a 'ubuntu' user account set up
+        pass
+
+    docker_cmd = [
+        f"docker exec {container_name} {package_manager} install -y sudo wget curl git",
+        f"docker exec {container_name} useradd user -m -G {user}",
+        f"docker exec {container_name} bash -c \"cat << EOF > /etc/sudoers.d/user\n user ALL=(ALL) NOPASSWD:ALL\nEOF\"",
+        f"docker exec {container_name} rm /home/user/.bashrc",
+        f"docker exec {container_name} chmod ugo+rw /shared"
+    ]
+    for cmd in docker_cmd:
+        logging.debug(cmd)
+        subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
+
+    return container_name
+
+"""
+Checks the test for a number of commands and writes them to the test command file.
+""" +def write_commands_to_file(test_cmd_filename, test): + # Write series of commands in this file + cmd = "" + f = open(test_cmd_filename, "w") + + # Check if: + # - A file needs to be sourced + # - Working directory is specified + # - An environment variable is specified + cmd_args = { + "env_source":"source", + "cwd":"cd", + "env":"export" + } + for cmd_arg in cmd_args.keys(): + if cmd_arg in test: + # Retrieve the command as string + cmd_arg_test = test[cmd_arg] if isinstance(test[cmd_arg], str) else test[cmd_arg][0] + cmd = cmd_args[cmd_arg] + " " + cmd_arg_test + logging.debug(f"FINAL COMMAND: {cmd}") + write_cmd_to_file(f, test_cmd_filename, cmd) + + # Check if commands need to be run before the test + if "pre_cmd" in test: + pre_cmd = test["pre_cmd"] + cmd = pre_cmd + write_cmd_to_file(f, test_cmd_filename, cmd) + + # Check if the test has multiple lines + if test.get("ncmd"): + for cmd_line in range(0, test["ncmd"]): + if "expected" in test.keys(): + # Do not run output commands + if cmd_line in test["expected"]: + continue + cmd = test[f"{cmd_line}"] + write_cmd_to_file(f, test_cmd_filename, cmd) + + f.close() + return cmd + +""" +Write a command to a file and log it for debugging. +""" +def write_cmd_to_file(f, test_cmd_filename, cmd): + logging.debug(f"Command argument written to {test_cmd_filename}: {cmd}") + cmd_str = f"{cmd}\n" + f.write(cmd_str) + logging.info(cmd_str) + +""" +Parse JSON file with commands from the Markdown article, +run commands in Docker and log the result in the console. 
+""" +def check(json_file, start, stop, md_article): with open(json_file) as jf: data = json.load(jf) - # Start instances for all images - if start and data.get("image"): - for i, img in enumerate(data["image"]): - # Launch - logging.info("Container instance test_{} is {}".format(i, img)) - cmd = ["docker run --rm -t -d -v $PWD/shared:/shared --name test_{} {}".format(i, img)] - logging.debug(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) - - # Create user and configure - if "arm-tools" in img: - # These images already have a 'ubunutu' user account set up. - cmd = ["docker exec test_{} apt update".format(i)] - logging.debug(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) - elif "ubuntu" in img or "mongo" in img: - cmd = ["docker exec test_{} apt update".format(i)] - logging.debug(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) - cmd = ["docker exec test_{} apt install -y sudo wget curl git".format(i)] - logging.debug(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) - cmd = ["docker exec test_{} useradd user -m -G sudo".format(i)] - logging.debug(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) - cmd = ["docker exec test_{} bash -c \"cat << EOF > /etc/sudoers.d/user\n user ALL=(ALL) NOPASSWD:ALL\nEOF\"".format(i)] - logging.debug(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) - # The default .bashrc on Ubuntu returns when not interactive so removing it - cmd = ["docker exec test_{} rm /home/user/.bashrc".format(i)] - logging.debug(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) - # Allow write permissions on shared folder - cmd = ["docker exec test_{} chmod ugo+rw /shared".format(i)] - logging.debug(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, 
stderr=subprocess.STDOUT) - elif "fedora" in img: - cmd = ["docker exec test_{} yum update".format(i)] - logging.debug(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) - cmd = ["docker exec test_{} yum install -y sudo wget curl git".format(i)] - logging.debug(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) - cmd = ["docker exec test_{} useradd user -m -G wheel".format(i)] - logging.debug(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) - cmd = ["docker exec test_{} bash -c \"cat << EOF > /etc/sudoers.d/user\n user ALL=(ALL) NOPASSWD:ALL\nEOF\"".format(i)] - logging.debug(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) - # Allow write permissions on shared folder - cmd = ["docker exec test_{} chmod ugo+rw /shared".format(i)] - logging.debug(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) - - logging.info("Container(s) initialization completed") + if dictionary_lookup(data, "test_images"): + test_images = data["test_images"] else: - logging.debug("Skip container(s) launch") + logging.info(f"No test_images could be parsed from {md_article}, skipping") + return {} - if data.get("image"): - # Create 1 test suite for each image - test_cases= [[] for img in data["image"]] - # Create array to store test result - results = {img:0 for img in data["image"]} - else: - test_cases = [] - results = {} + # Create one test suite for each image + test_cases= [[] for img in test_images] + # Create array to store test result + results = {img:0 for img in test_images} - # Check if there are tests - if not "ntests" in data.keys(): + # Check if there are tests / code blocks + if not dictionary_lookup(data, "ntests"): + logging.info(f"No tests were parsed from {md_article}, skipping") return results - # Run bash commands - print(data["ntests"]) - for i in range(0, 
data["ntests"]): - if not data.get("{}".format(i)): - continue - print(i) - t = data["{}".format(i)] - print(t) - - # Check if file name is specified - if "file_name" in t: - fn = t["file_name"] - else: - fn = ".tmpcmd" - - # Write series of commands in this file - c = "" - f = open(fn, "w") - # Check if a file needs to be sourced - if "env_source" in t: - env_source = t["env_source"] - c = "source " + env_source - logging.debug("Copying command to file to file {}: {}".format(fn, c)) - f.write("{}\n".format(c)) - # Check if env var are specified - if "env" in t: - env = t["env"] - for el in env: - c = "export " + el - logging.debug("Copying command to file to file {}: {}".format(fn, c)) - f.write("{}\n".format(c)) - # Check if commands need to be run beforehand - if "pre_cmd" in t: - pre_cmd = t["pre_cmd"] - c = pre_cmd - logging.debug("Copying command to file to file {}: {}".format(fn, c)) - f.write("{}\n".format(c)) - # Check if cwd is specified - if "cwd" in t: - c = "cd " + t["cwd"] - logging.debug("Copying command to file {}: {}".format(fn, c)) - f.write("{}\n".format(c)) - if t.get("ncmd"): - for j in range(0, t["ncmd"]): - if "expected" in t.keys(): - # Do not run output commands - if j == (int(eval(t["expected"]))-1): - break - c = t["{}".format(j)] - logging.debug("Copying command to file {}: {}".format(fn, c)) - f.write("{}\n".format(c)) - f.close() - - # Check if a target is specified - if "target" in t: - # get element index of instance - idx = data["image"].index(t["target"]) - inst = range(idx, idx+1) - else: - inst = range(0, len(data["image"])) - - username = "ubuntu" if "arm-tools" in data["image"][0] else "user" - for k in inst: - # Copy over the file with commands - cmd = ["docker cp {} test_{}:/home/{}/".format(fn, k, username)] - subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - logging.debug(cmd) - - # Check type - if t["type"] == "fvp": - # Only allow single line commands - if t["fvp_name"] == 
"FVP_Corstone_SSE-300_Ethos-U65": - cmd = t["0"].replace("FVP_Corstone_SSE-300_Ethos-U65", "docker run --rm -ti -v $PWD/shared:/shared -w {} -e ETHOS_U65=1 -e NON_INTERACTIVE=1 --name test_fvp flebeau/arm-corstone-300-fvp".format(t["cwd"])) + # Run code blocks + test_images = data["test_images"] + for n_image, test_image in zip(range(0, len(test_images)), test_images): + logging.info(f"--- Testing on {test_image} ---") + with alive_progress.alive_bar(data["ntests"], title=test_image, stats=False) as bar: + for n_test in range(0, data["ntests"]): + if dictionary_lookup(data, f"{n_test}"): + test = data[f"{n_test}"] + else: + logging.info(f"Error getting test from JSON file, skipping") + continue + + test_target = test.get("target") + if test_target and test_target != test_image: + pass + elif not test_target: + pass + elif test_target: + pass else: - cmd = t["0"].replace("FVP_Corstone_SSE-300_Ethos-U55", "docker run --rm -ti -v $PWD/shared:/shared -w {} -e NON_INTERACTIVE=1 --name test_fvp flebeau/arm-corstone-300-fvp".format(t["cwd"])) - elif t["type"] == "bash": - cmd = ["docker exec -u {} -w /home/{} test_{} bash {}".format(username, username, k, fn)] - else: - logging.debug("Omitting type: {}".format(t["type"])) - cmd = [] + bar(skipped=True) + continue + + if "file_name" in test: + test_cmd_filename = test["file_name"] + else: + test_cmd_filename = ".tmpcmd" + + cmd = write_commands_to_file(test_cmd_filename, test) + + username = "ubuntu" if "arm-tools" in test_images[0] else "user" + + test_type = test["type"] + # Check type + if test_type == "bash": + # chmod cmd file + run_command = [f"chmod +x {test_cmd_filename}"] + subprocess.run(run_command, shell=True, capture_output=True) + logging.debug(run_command) + # execute file as is with bash + run_command = [f"bash ./{test_cmd_filename}"] + elif test_type == "fvp": + # Start instance for image + if start: + container_name = init_container(i_img=n_image, img=test_image) + logging.info(f"{container_name} 
initialized") + else: + logging.debug("Parameter start is false, skipping container(s) initialization") + + # copy files to docker + docker_cmd = [f"docker cp {test_cmd_filename} test_{n_image}:/home/{username}/"] + subprocess.run(docker_cmd, shell=True, capture_output=True) + logging.debug(docker_cmd) + + + ethos_u65 = "" + fvp_name = test["fvp_name"] + if fvp_name == "FVP_Corstone_SSE-300_Ethos-U65": + ethos_u65 = "ETHOS_U65=1 -e" + test_cwd = test["cwd"] + # Only allow single line commands + run_command = test["0"].replace(f"{fvp_name}", + f"docker run --rm -ti -v $PWD/shared:/shared -w {test_cwd} -e \ + {ethos_u65} NON_INTERACTIVE=1 --name test_fvp flebeau/arm-corstone-300-fvp" + ) + else: + logging.info(f"Type '{test_type}' not supported for testing. Contact the maintainers if you think this is a mistake.") + bar(skipped=True) + continue + - if cmd != []: - logging.debug(cmd) - p = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - # create test case - test_cases[k].append(TestCase("{}_{}_test-{}".format(json_file.replace("_cmd.json",""), data["image"][k], i), c, 0, p.stdout.rstrip().decode("utf-8"), '')) + logging.debug(run_command) + process = subprocess.run(run_command, shell=True, capture_output=True) + process_output = process.stdout.rstrip().decode("utf-8") + process_error = process.stderr.rstrip().decode("utf-8") - ret_code = 0 - if "ret_code" in t.keys(): - ret_code = int(t["ret_code"]) + # Remove the file storing the command since we now ran it + os.remove(test_cmd_filename) + # Create test case + test_case_name = json_file.replace("_cmd.json","") + test_case = TestCase(f"{test_case_name}_{test_images[n_image]}_test-{n_image}", + cmd, 0, process_output, '') + test_cases[n_image].append(test_case) + test_ret_code = int(test["ret_code"]) if test.get("ret_code") else 0 + + test_passed = False # if success - if p.returncode == ret_code: + if process.returncode == test_ret_code: # check with expected result if any - if 
"expected" in t.keys(): - t_expected = t.get("{}".format(int(eval(t["expected"]))-1)) - if t_expected: - exp = t[t_expected] - # strip out '\n' and decode byte to string - if exp == p.stdout.rstrip().decode("utf-8"): - msg = "Test passed" + if "expected" in test.keys(): + for line in test["expected"]: + exp = test[str(line)] + if exp == process_output: + test_passed = True + msg = "PASSED" else: - msg = "ERROR (unexpected output. Expected {} but got {})".format(exp, p.stdout.rstrip().decode("utf-8")) - test_cases[k][-1].add_failure_info(msg) - results[data["image"][k]] = results[data["image"][k]]+1 + msg = f"ERROR. Expected '{exp}'" + test_cases[n_image][-1].add_failure_info(msg) + results[test_images[n_image]] = results[test_images[n_image]]+1 else: - msg = "Test passed" + test_passed = True + msg = "PASSED" else: - msg = "ERROR (command failed. Return code is {} but expected {})".format(p.returncode, ret_code) - test_cases[k][-1].add_failure_info(msg) - results[data["image"][k]] = results[data["image"][k]]+1 - - logging.debug("Test {}: {}".format(i, msg)) - logging.info("{:.0f}% of all tests completed on instance test_{}".format(i/data["ntests"]*100, k)) - - # Remove file with list of commands - os.remove(fn) - - logging.info("100% of all tests completed") - - # add to test suite and write junit results - ts = [] - for k in range(0, len(data["image"])): - ts.append(TestSuite("{} {}".format(json_file,data["image"][k]), test_cases[k])) - - with open(json_file.replace(".json", ".xml"), mode='w') as lFile: - TestSuite.to_file(lFile, ts, prettyprint=True) - lFile.close() - logging.info("Results written in {}".format(json_file.replace(".json", ".xml"))) + msg = f"ERROR. 
Expected return code {test_ret_code} but got {process.returncode}" + test_cases[n_image][-1].add_failure_info(msg) + results[test_images[n_image]] = results[test_images[n_image]]+1 + bar() + if not test_passed and process_error: + logging.info(f"{process_error}") + elif not test_passed and process_output: + logging.info(f"{process_output}") + else: + logging.debug(f"{process_output}") + logging.info(f"{msg}") + logging.info("---------") + result = "failed" if results[test_images[n_image]] else "passed" + logging.info(f"Tests {result} on {test_image}") + + # Remove command file if no tests existed + if os.path.exists(test_cmd_filename): + os.remove(test_cmd_filename) + + # Remove files that were generated from the tests, if any + untracked_files_process = subprocess.run("git ls-files --others --exclude-standard", shell=True, capture_output=True) + untracked_files = untracked_files_process.stdout.decode("utf-8").splitlines() + paths_to_remove = [ file_name for file_name in untracked_files \ + if "_cmd.json" not in file_name \ + and "test-lp-output.txt" not in file_name ] + + if paths_to_remove: + logging.info(f"Removing files that were created during testing from repository") + for path in paths_to_remove: + if os.path.isfile(path) or os.path.islink(path): + os.remove(path) + + elif os.path.isdir(path): + shutil.rmtree(path) + logging.debug(f"Removed {path}") # Stop instance if stop: - logging.info("Terminating container(s)...") - for i, img in enumerate(data["image"]): - cmd = ["docker stop test_{}".format(i)] - logging.debug(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) - - logging.info("Removing shared directory...") - cmd = ["sudo rm -rf shared"] - logging.debug(cmd) - subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) + logging.debug("Terminating container(s)") + for i_img, img in enumerate(test_images): + cleanup_cmd = [f"docker stop test_{i_img}"] + logging.debug(cleanup_cmd) + 
subprocess.run(cleanup_cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) + + logging.debug("Removing shared directory") + cleanup_cmd = ["rm -rf shared"] + logging.debug(cleanup_cmd) + subprocess.run(cleanup_cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) else: - logging.debug("Skip container(s) termination...") - - return results + logging.debug("Parameter stop is false, skipping container(s) termination") + return results \ No newline at end of file diff --git a/tools/maintenance.py b/tools/maintenance.py index cd76cc931d..c625d18163 100755 --- a/tools/maintenance.py +++ b/tools/maintenance.py @@ -3,9 +3,11 @@ import argparse import logging import os +import sys # Local import import report import parse +import patch import check import filter_checker @@ -16,9 +18,9 @@ level = { 10: "DEBUG", 20: "INFO", 30: "WARNING", 40: "ERROR" } -''' +""" Test Learning Path -''' +""" def check_lp(lp_path, link, debug): if not os.path.isdir(lp_path): lp_path = os.path.dirname(lp_path) @@ -27,8 +29,8 @@ def check_lp(lp_path, link, debug): if os.path.exists(lp_path+"/_index.md"): # check _index.md for maintenance options - idx_header = parse.header(lp_path+"/_index.md") - if idx_header["maintain"]: + idx_header = parse.header(lp_path+"/_index.md") + if idx_header["test_maintenance"]: # Parse all articles in folder to check them for k in os.listdir(lp_path): # Don't parse _index, _next-steps or _review @@ -37,17 +39,17 @@ def check_lp(lp_path, link, debug): logging.info("Parsing " + _k) cmd = parse.parse(_k) # Generate _cmd.json file with instructions - parse.save(_k, cmd, idx_header["maintain"], idx_header["img"]) - - + parse.save_commands_to_json(_k, cmd, idx_header["test_maintenance"], idx_header["test_images"]) + logging.info("Checking Learning Path " + lp_path) # Look for _cmd.json l = [i for i in os.listdir(lp_path) if i.endswith("_cmd.json")] # Build dict with weight value for each article - d = { i: parse.header(lp_path + "/" + 
i.replace("_cmd.json",""))["wght"] for i in l } + article_per_weight = { i: parse.header(lp_path + "/" + i.replace("_cmd.json",""))["weight"] for i in l } # Sort dict by value - res = [] - for idx, i in enumerate(sorted(d.items(), key=lambda item: item[1])): + test_image_results_list = [] + # sort LP by weight value to have it run sequentially + for idx, i in enumerate(sorted(article_per_weight.items(), key=lambda item: item[1])): logging.info("Checking " + i[0].replace("_cmd.json","")) # We want all the articles from the learning path to run in the same container # Launch the instance at the beginning, and terminate it at the end @@ -55,26 +57,26 @@ def check_lp(lp_path, link, debug): terminate = True if i[1] != -1 and idx != 0: launch = False - if i[1] != -1 and idx != len(d.keys())-1: + if i[1] != -1 and idx != len(article_per_weight.keys())-1: terminate = False - res.append(check.check(lp_path + "/" + i[0], start=launch, stop=terminate)) - - logging.info("Patching " + lp_path + "/_index.md with test results") - check.patch(lp_path + "/_index.md", res, link) + test_image_results = check.check(lp_path + "/" + i[0], start=launch, stop=terminate, md_article=lp_path) + test_image_results_list.append(test_image_results) if not debug: for i in os.listdir(lp_path): if i.endswith("_cmd.json"): os.remove(lp_path+"/"+i) else: - logging.warning("Learning Path {} maintenance is turned off. Add or set \"test_maintenance: true\" otherwise.".format(lp_path)) + logging.warning(f"Learning Path {lp_path} maintenance is turned off. Add or set \"test_maintenance: true\" otherwise.") + exit(0) else: logging.warning("No _index.md found in Learning Path") + return test_image_results -''' +""" Main function -''' +""" def main(): global verbosity, level @@ -84,6 +86,7 @@ def main(): arg_parser.add_argument('-l', '--link', metavar='URL', action='store', type=str, help='Specify URL to github actions report. 
Added when patching sources files with --instructions') arg_parser.add_argument('-p', '--patch', action='store_true', help='Patch categories _index.md with results when using --filter-checker') arg_parser.add_argument('-t', '--type', metavar='REPORT', action='store', default='all', type=str, help='Specify report type detailing the closed filter status when using --filter-checker. Can be either \'all\', \'subjects\', \'softwares\', \'oses\', \'tools\'') + arg_parser.add_argument('-sr', '--stats-report', action='store_true', help='Added when patching statistics file with --instructions') arg_group = arg_parser.add_mutually_exclusive_group() arg_group.add_argument('-f', '--filter-checker', action='store_true', help='Validates the correct closed schema filters are being used, reports any errors, and optionally updates _index.md files for each learning path category to reflect the currently supported filters.') @@ -101,6 +104,9 @@ def main(): logging.debug("Verbosity level is set to " + level[verbosity]) if args.instructions: + if not os.path.exists(args.instructions): + raise SystemExit(f"No such file or directory: {args.instructions}") + results_dict = {} # check if article is a csv file corresponding to a file list if args.instructions.endswith(".csv"): logging.info("Parsing CSV " + args.instructions) @@ -110,48 +116,53 @@ def main(): fn = line.split(",")[0] # Check if this article is a learning path if "/learning-paths/" in os.path.abspath(fn): - check_lp(fn, args.link, args.debug) + results_dict = check_lp(fn, args.link, args.debug) elif fn.endswith(".md"): logging.info("Parsing " + fn) # check if maintenance if enabled - if parse.header(fn)["maintain"]: + if parse.header(fn)["test_maintenance"]: cmd = parse.parse(fn) - parse.save(fn, cmd) + parse.save_commands_to_json(fn, cmd) logging.info("Checking " + fn) - res = check.check(fn+"_cmd.json", start=True, stop=True) - logging.info("Patching " + fn + " with test results") - check.patch(fn, res, args.link) + 
results_dict = check.check(fn+"_cmd.json", start=True, stop=True, md_article=fn) if not args.debug: os.remove(fn+"_cmd.json") else: - logging.warning("{} maintenance is turned off. Add or set \"test_maintenance: true\" otherwise.".format(fn)) + logging.warning(f"{fn} maintenance is turned off. Add or set \"test_maintenance: true\" otherwise.") + sys.exit(0) else: logging.error("Unknown type " + fn) elif args.instructions.endswith(".md"): # Check if this article is a learning path if "/learning-paths/" in os.path.abspath(args.instructions): - check_lp(args.instructions, args.link, args.debug) + results_dict = check_lp(args.instructions, args.link, args.debug) else: logging.info("Parsing " + args.instructions) # check if maintenance if enabled - if parse.header(args.instructions)["maintain"]: + if parse.header(args.instructions)["test_maintenance"]: cmd = parse.parse(args.instructions) - parse.save(args.instructions, cmd) - res = check.check(args.instructions+"_cmd.json", start=True, stop=True) - logging.info("Patching " + args.instructions + " with test results") - check.patch(args.instructions, res, args.link) + parse.save_commands_to_json(args.instructions, cmd) + results_dict = check.check(args.instructions+"_cmd.json", start=True, stop=True, md_article=args.instructions) if not args.debug: os.remove(args.instructions+"_cmd.json") else: - logging.warning("{} maintenance is turned off. Add or set \"test_maintenance: true\" otherwise.".format(args.instructions)) + logging.warning(f"{args.instructions} maintenance is turned off. 
Add or set \"test_maintenance: true\" otherwise.") + sys.exit(0) elif os.path.isdir(args.instructions) and "/learning-paths/" in os.path.abspath(args.instructions): - check_lp(args.instructions, args.link, args.debug) + results_dict = check_lp(args.instructions, args.link, args.debug) else: logging.error("-i/--instructions expects a .md file, a CSV with a list of files or a Learning Path directory") + if args.stats_report: + # If all test results are zero, all tests have passed + patch.patch(args.instructions, results_dict, args.link) + if all(results_dict.get(k) for k in results_dict): + # Errors exist + logging.info("Tests failed in test suite") + sys.exit(1) elif args.spelling: - logging.info("Checking spelling of {}".format(args.spelling)) + logging.info(f"Checking spelling of {args.spelling}") output = parse.spelling(args.spelling) - logging.info("Highlighing mispelling in {}".format(args.spelling)) + logging.info(f"Highlighing mispelling in {args.spelling}") f = open(args.spelling, "w") f.write(output) f.close() @@ -159,11 +170,11 @@ def main(): logging.info("Filter-check") filter_checker.checker(args.type, args.patch) elif args.query: - logging.info("Querying data and generating stats...") + logging.info("Querying data and generating stats") report.stats() logging.info("Stats updated in content/stats/data.json") elif args.report: - logging.info("Creating report of articles older than {} days".format(args.report)) + logging.info(f"Creating report of articles older than {args.report} days") report.report(args.report) diff --git a/tools/parse.py b/tools/parse.py index 02d9459ad9..c9361d7bfd 100644 --- a/tools/parse.py +++ b/tools/parse.py @@ -9,30 +9,29 @@ from inclusivewriting.suggestions import detect_and_get_suggestions from spellchecker import SpellChecker - -''' +""" Parse commands in markdown article and return list of commands -''' +""" def parse(article): with open(article) as file: content = file.read() file.close() - cmd = [] + cmds_list = [] for i in 
content: start = content.find("```") + 3 end = content.find("```", start) if start == -1 or end == -1: # No code section left - return cmd + return cmds_list else: - cmd.append(content[start:end]) + cmds_list.append(content[start:end]) content = content[end+3:] -''' +""" Parse file for spelling mistakes in text -''' +""" def spelling(article): language = "en" @@ -75,7 +74,7 @@ def spelling(article): for word in txt_list: # get rid of punctuation and make lower case word_clean = word.translate(str.maketrans('', '', string.punctuation.replace("-",""))) - if not word_clean == '': + if not word_clean == '': word_list.append(word_clean) new_text = txt @@ -91,7 +90,7 @@ def spelling(article): replacement_list = replacement_list + "\"" new_text, nsub = re.subn(" {} ".format(word), " {{{{< highlight green {} {} >}}}} ".format(word,replacement_list), new_text) icount += nsub - + for u in unknown_list: new_text, nsub = re.subn(" {} ".format(u), " {{{{< highlight yellow {} {} >}}}} ".format(u,spell.correction(u)), new_text) rcount += nsub @@ -102,20 +101,20 @@ def spelling(article): content = content[end+3:] # No code section left - logging.info("{} inclusive language issue(s) found.".format(icount)) - logging.info("{} spelling mistake(s) found.".format(rcount)) + logging.info(f"{icount} inclusive language issue(s) found.") + logging.info(f"{rcount} spelling mistake(s) found.") return output -''' +""" Parse header to check file or not Returns dict with the following elements: test_maintenance: bool value to check the article test_images: list of targets supported weight: int value with weight of article when in a learning path -''' +""" def header(article): - dict = {"maintain": False, "img": None, "weight": -1} + dict = {"test_maintenance": False, "test_images": None, "weight": -1} with open(article) as file: content = file.read() file.close() @@ -125,107 +124,104 @@ def header(article): end = content.find("---", start) if end == start-3: # No header - logging.debug("No 
header found in {}".format(article)) + logging.debug(f"No header found in {article}") return dict else: header = content[start:end] data = yaml.safe_load(header, ) if "test_maintenance" in data.keys(): - dict.update(maintain=data["test_maintenance"]) + dict.update(test_maintenance=data["test_maintenance"]) if "test_images" in data.keys(): - dict.update(img= data["test_images"]) + dict.update(test_images=data["test_images"]) if "weight" in data.keys(): - dict.update(wght=data["weight"]) - + dict.update(weight=data["weight"]) + return dict +""" +Extract the argument value and return in a dict with the argument key. +""" +def get_arg_to_key_dict(cmd, key): + value = cmd[0].split(f"{key}\"")[1].split("\"")[0] + return { key : value } + +""" +Parse all code blocks in a Markdown article and write to a JSON file. +""" +def save_commands_to_json(md_article, cmds_list, learningpath=False, img=None): -''' -Save list of command in json file -''' -def save(article, cmd, learningpath=False, img=None): - # Parse file header - hdr = header(article) + article_header = header(md_article) - if not hdr["maintain"] and not learningpath: - logging.info("File {} settings don't enable parsing.".format(article)) + if not article_header["test_maintenance"] and not learningpath: + logging.info(f"File {md_article} settings doesn't enable parsing") return if not img: - img = hdr["img"] + img = article_header["test_images"] - content = { "image": img, "weight": hdr["weight"]} + content = {"test_images": img, "weight": article_header["weight"]} logging.debug(content) - for i_idx,i in enumerate(cmd): - l = list(filter(None, i.split("\n"))) - # if fvp type, check for arguments - if not l: + for cmd_idx, cmd_str in enumerate(cmds_list): + cmd_lines = list(filter(None, cmd_str.split("\n"))) + if not cmd_lines: continue - elif "fvp" in l[0]: - content[i_idx] = {"type": "fvp"} + + cmd_lines_header = cmd_lines[0] + # if fvp type, check for arguments + if "fvp" in cmd_lines_header: + content[cmd_idx] = 
{"type": "fvp"} # check if current directory is specified - if "cwd" in l[0]: - cwd = l[0].split("cwd=\"")[1].split("\"")[0] - content[i_idx].update({"cwd": cwd }) - if "fvp_name" in l[0]: - model = l[0].split("fvp_name=\"")[1].split("\"")[0] - content[i_idx].update({"fvp_name": model }) + if "cwd" in cmd_lines_header: + cwd = cmd_lines_header.split("cwd=\"")[1].split("\"")[0] + content[cmd_idx].update({"cwd": cwd}) + if "fvp_name" in cmd_lines_header: + model = cmd_lines_header.split("fvp_name=\"")[1].split("\"")[0] + content[cmd_idx].update({"fvp_name": model }) else: - content[i_idx].update({"fvp_name": "FVP_Corstone_SSE-300_Ethos-U55" }) + content[cmd_idx].update({"fvp_name": "FVP_Corstone_SSE-300_Ethos-U55" }) # if bash type, check for arguments - elif "bash" in l[0]: - content[i_idx] = {"type": "bash"} - # check if return code is specified - if "ret_code" in l[0]: - ret = l[0].split("ret_code=\"")[1].split("\"")[0] - content[i_idx].update({"ret_code": ret }) - else: - content[i_idx].update({"ret_code": "0" }) - # check if a file needs to be sourced - if "env_source" in l[0]: - env = l[0].split("env_source=\"")[1].split("\"")[0] - content[i_idx].update({"env_source": env }) - # check if env var are specified - if "env=" in l[0]: - env = l[0].split("env=\"")[1].split("\"")[0] - env = env.split(";") - content[i_idx].update({"env": env }) - # check if commands need to be run beforehand - if "pre_cmd" in l[0]: - env = l[0].split("pre_cmd=\"")[1].split("\"")[0] - content[i_idx].update({"pre_cmd": env }) - # check if current directory is specified - if "cwd" in l[0]: - cwd = l[0].split("cwd=\"")[1].split("\"")[0] - content[i_idx].update({"cwd": cwd }) - # check target - if "target" in l[0]: - tgt = l[0].split("target=\"")[1].split("\"")[0] - content[i_idx].update({"target": tgt }) - # check if any expected result - if "|" in l[0]: - expected_result = l[0].split("| ")[1].split("\"")[0] - content[i_idx].update({"expected": expected_result }) + elif "bash" in 
cmd_lines_header: + # Equal sign on env so that it's not picked up by env_source + arg_list = ["ret_code", "env_source", "env=", "pre_cmd", "cwd", "target"] + content[cmd_idx] = {"type": "bash"} + for arg in arg_list: + if arg in cmd_lines_header: + arg_str = cmd_str.split(arg)[1].split("\"")[1] + content[cmd_idx].update({arg:arg_str}) + if "|" in cmd_lines_header: + expected_result = cmd_str.split("| ")[1].split("}")[0].split("-") + if len(expected_result) > 1: + expected_lines = list(range(*[int(x)-1 for x in expected_result])) + elif len(expected_result) == 1 and expected_result[0]: + expected_lines = [int(expected_result[0])-1] + else: + raise SystemExit( + """The expected output line(s) should be specified as one of two options: + A single number: | 2 + A range: | 2-10 + The code block's indexing starts at 1""") + content[cmd_idx].update({"expected": expected_lines }) # for other types, we're assuming source code # check if a file name is specified else: - content[i_idx] = {"type": l[0]} + content[cmd_idx] = {"type": cmd_lines_header} # check file name - if "file_name" in l[0]: - fn = l[0].split("file_name=\"")[1].split("\"")[0] - content[i_idx].update({"file_name": fn }) + if "file_name" in cmd_lines_header: + fn = cmd_lines_header.split("file_name=\"")[1].split("\"")[0] + content[cmd_idx].update({"file_name": fn }) - for j_idx,j in enumerate(l[1:]): - content[i_idx].update({j_idx: j}) - content[i_idx].update({ "ncmd": j_idx+1 }) - content.update({ "ntests": i_idx+1 }) + # Parse all the lines in the code block + for cmd_line_idx, cmd_line in enumerate(cmd_lines[1:]): + content[cmd_idx].update({cmd_line_idx: cmd_line}) + content[cmd_idx].update({ "ncmd": cmd_line_idx+1 }) + content.update({ "ntests": cmd_idx+1 }) - logging.debug(content[i_idx]) + logging.debug(content[cmd_idx]) - fn = article + "_cmd.json" + fn = md_article + "_cmd.json" logging.debug("Saving commands to " + fn) with open(fn, 'w') as f: diff --git a/tools/patch.py b/tools/patch.py new file 
mode 100644 index 0000000000..36513cf7c1 --- /dev/null +++ b/tools/patch.py @@ -0,0 +1,44 @@ +import yaml +from collections import defaultdict +from pathlib import PurePath +import re + +""" +Parse results and patch stats file with test results +""" +def patch(article_path: str, results: dict, link: str): + stats_file = "data/stats_current_test_info.yml" + + with open(stats_file, mode='r') as f: + data = yaml.safe_load(f) + f.close() + + article_path_pure = PurePath(re.sub(r"^.*?content/", "", article_path)) + article_path_parts = list(article_path_pure.parts) + if "learning-paths" in article_path_parts: + content_type, sw_category, content_title = article_path_parts + article_path = PurePath(article_path, "_index.md") + elif "install-guides" in article_path_parts: + content_type, content_title = article_path_parts + content_title = content_title.strip(".md") + sw_category = content_type + else: + raise SystemExit("Unknown content path, pass learning paths or install guides only") + + test_images = results.keys() + results_values = defaultdict(lambda: "failed") + results_values[0] = "passed" + + for image, i in zip(test_images, range(len(test_images))): + if content_title not in data["sw_categories"][sw_category]: + raise SystemExit(f"{content_title} does not exist in {stats_file}. Add it to update the stats report.") + + data["sw_categories"][sw_category][content_title]["tests_and_status"][i][image] = results_values[results[image]] + + if link: + data["sw_categories"][sw_category][content_title]["test_link"] = link + + + with open(stats_file, mode='w') as f: + yaml.dump(data, f) + f.close() \ No newline at end of file diff --git a/tools/report.py b/tools/report.py index 8b280c6682..f3120d54e3 100644 --- a/tools/report.py +++ b/tools/report.py @@ -21,22 +21,22 @@ -''' +""" Returns the date (yyyy-mm-dd) which a file in the given directory was last updated. If Learning Path, changes in any file in the directory will count. 
-''' +""" def get_latest_updated(directory, is_lp, item): article_path = directory if is_lp else f"{directory}/{item}" date = subprocess.run(["git", "log", "-1" ,"--format=%cs", str(article_path)], stdout=subprocess.PIPE) return date -''' +""" Recursive content search in a given directory. Returns: - list of articles older than a given period found - count of articles found - list of primary authors found -''' +""" def content_parser(directory, period): count = 0 art_list = {} @@ -72,9 +72,9 @@ def content_parser(directory, period): # check if article is older than the period if date < datetime.now() - timedelta(days = period): if is_lp: - art_list[directory + "/"] = "{} days ago".format((datetime.now() - date).days) + art_list[directory + "/"] = f"{(datetime.now() - date).days} days ago" else: - art_list[directory + "/" + item] = "{} days ago".format((datetime.now() - date).days) + art_list[directory + "/" + item] = f"{(datetime.now() - date).days} days ago" if "learning-paths" in directory: # no need to iterate further @@ -92,12 +92,12 @@ def content_parser(directory, period): return [art_list, count, auth_list] -''' +""" Initialize Plotly data structure for stats 1 graph on the left with data for install tool guides 1 graph on the right with data for learning paths Input: title for the graph -''' +""" def init_graph(title): data = { "data": [ @@ -190,9 +190,9 @@ def init_graph(title): return data -''' +""" Generate JSON data for stats page -''' +""" def stats(): global dname @@ -266,10 +266,10 @@ def stats(): f_contrib.close() -''' +""" List pages older than a period in days and save result as CSV Generate JSON file with data -''' +""" def report(period): global dname @@ -303,5 +303,4 @@ def report(period): writer.writeheader() for key in result.keys(): csvfile.write("%s, %s\n" % (key, result[key])) - logging.info(f"Results written to {function_start_directory}/{outdated_files_csv}") - + logging.info(f"Results written to 
{function_start_directory}/{outdated_files_csv}") \ No newline at end of file diff --git a/tools/requirements.txt b/tools/requirements.txt index 43764fce73..844fe2aa45 100644 --- a/tools/requirements.txt +++ b/tools/requirements.txt @@ -2,4 +2,7 @@ junit-xml pyyaml inclusivewriting pyspellchecker +better-profanity setuptools +alive-progress + diff --git a/tools/test_lp.sh b/tools/test_lp.sh new file mode 100755 index 0000000000..3becf4e28e --- /dev/null +++ b/tools/test_lp.sh @@ -0,0 +1,24 @@ +# Run by test-lp.yml GitHub Action +ALL_CHANGED_FILES=$@ + +# Install dependencies and run tests, +# if we found tests to run +echo "All changed content paths: ${ALL_CHANGED_FILES}" +# Keep full paths for install guides, +# get parent directory for learning paths +CONTENT_PATHS=() +for file in ${ALL_CHANGED_FILES[*]}; do + parentdir="$(dirname "$file")" + if echo "$parentdir" | grep -q "install-guides"; then + CONTENT_PATHS+=("$file") + else + CONTENT_PATHS+=("$parentdir") + fi +done +# Make sure each learning path is only tested once +CONTENT_PATHS_UNIQUE=($(printf "%s\n" "${CONTENT_PATHS[@]}" | sort -u)) +echo "Unique content paths: ${CONTENT_PATHS_UNIQUE[*]}" +# Run the tests +for file in ${CONTENT_PATHS_UNIQUE[*]}; do + tools/maintenance.py -i ${file} --stats-report +done