From 8cd2d0232f568ab4dadae0367db13470014b7f65 Mon Sep 17 00:00:00 2001 From: GitHub Actions Stats Bot <> Date: Mon, 23 Jun 2025 02:59:30 +0000 Subject: [PATCH 01/19] automatic update of stats files --- data/stats_current_test_info.yml | 5 +- data/stats_weekly_data.yml | 110 +++++++++++++++++++++++++++++++ 2 files changed, 112 insertions(+), 3 deletions(-) diff --git a/data/stats_current_test_info.yml b/data/stats_current_test_info.yml index 8caea4bc62..dcdd3fd9a1 100644 --- a/data/stats_current_test_info.yml +++ b/data/stats_current_test_info.yml @@ -1,5 +1,5 @@ summary: - content_total: 379 + content_total: 380 content_with_all_tests_passing: 0 content_with_tests_enabled: 61 sw_categories: @@ -63,8 +63,7 @@ sw_categories: tests_and_status: [] aws-q-cli: readable_title: Amazon Q Developer CLI - tests_and_status: - - ubuntu:latest: passed + tests_and_status: [] azure-cli: readable_title: Azure CLI tests_and_status: [] diff --git a/data/stats_weekly_data.yml b/data/stats_weekly_data.yml index 16a2337ba6..6bc46cd4f4 100644 --- a/data/stats_weekly_data.yml +++ b/data/stats_weekly_data.yml @@ -6337,3 +6337,113 @@ avg_close_time_hrs: 0 num_issues: 17 percent_closed_vs_total: 0.0 +- a_date: '2025-06-23' + content: + automotive: 2 + cross-platform: 33 + embedded-and-microcontrollers: 41 + install-guides: 102 + iot: 6 + laptops-and-desktops: 38 + mobile-graphics-and-gaming: 34 + servers-and-cloud-computing: 124 + total: 380 + contributions: + external: 97 + internal: 505 + github_engagement: + num_forks: 30 + num_prs: 6 + individual_authors: + adnan-alsinan: 2 + alaaeddine-chakroun: 2 + albin-bernhardsson: 1 + alex-su: 1 + alexandros-lamprineas: 1 + andrew-choi: 2 + andrew-kilroy: 1 + annie-tallund: 4 + arm: 3 + arnaud-de-grandmaison: 4 + arnaud-de-grandmaison.: 1 + aude-vuilliomenet: 1 + avin-zarlez: 1 + barbara-corriero: 1 + basma-el-gaabouri: 1 + ben-clark: 1 + bolt-liu: 2 + brenda-strech: 1 + chaodong-gong: 1 + chen-zhang: 1 + christophe-favergeon: 1 + 
christopher-seidl: 7 + cyril-rohr: 1 + daniel-gubay: 1 + daniel-nguyen: 2 + david-spickett: 2 + dawid-borycki: 33 + diego-russo: 2 + dominica-abena-o.-amanfo: 1 + elham-harirpoush: 2 + florent-lebeau: 5 + "fr\xE9d\xE9ric--lefred--descamps": 2 + gabriel-peterson: 5 + gayathri-narayana-yegna-narayanan: 1 + georgios-mermigkis: 1 + geremy-cohen: 2 + gian-marco-iodice: 1 + graham-woodward: 1 + han-yin: 1 + iago-calvo-lista: 1 + james-whitaker: 1 + jason-andrews: 103 + joe-stech: 6 + johanna-skinnider: 2 + jonathan-davies: 2 + jose-emilio-munoz-lopez: 1 + julie-gaskin: 5 + julio-suarez: 6 + jun-he: 1 + kasper-mecklenburg: 1 + kieran-hejmadi: 12 + koki-mitsunami: 2 + konstantinos-margaritis: 8 + kristof-beyls: 1 + leandro-nunes: 1 + liliya-wu: 1 + mark-thurman: 1 + masoud-koleini: 1 + mathias-brossard: 1 + michael-hall: 5 + na-li: 1 + nader-zouaoui: 2 + nikhil-gupta: 1 + nina-drozd: 1 + nobel-chowdary-mandepudi: 6 + odin-shen: 7 + owen-wu: 2 + pareena-verma: 46 + paul-howard: 3 + peter-harris: 1 + pranay-bakre: 5 + preema-merlin-dsouza: 1 + przemyslaw-wirkus: 2 + rin-dobrescu: 1 + roberto-lopez-mendez: 2 + ronan-synnott: 45 + shuheng-deng: 1 + thirdai: 1 + tianyu-li: 2 + tom-pilar: 1 + uma-ramalingam: 1 + varun-chari: 2 + visualsilicon: 1 + willen-yang: 1 + ying-yu: 2 + yiyang-fan: 1 + zach-lasiuk: 2 + zhengjun-xing: 2 + issues: + avg_close_time_hrs: 0 + num_issues: 14 + percent_closed_vs_total: 0.0 From 2c12f3cd03dd591c2cea766cd188c6fca1c64443 Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Tue, 24 Jun 2025 15:00:45 +0000 Subject: [PATCH 02/19] Updates --- .../go-benchmarking-with-sweet/_index.md | 16 ++++++---------- .../go-benchmarking-with-sweet/overview.md | 14 +++++++------- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md index 820701deea..9c96143bf8 100644 --- 
a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md @@ -1,21 +1,17 @@ --- -title: Go Benchmarks with Sweet and Benchstat - -draft: true -cascade: - draft: true +title: Benchmark Go performance with Sweet and Benchstat minutes_to_complete: 60 -who_is_this_for: This is an introductory topic for developers who are interested in measuring the performance of Go-based applications on Arm-based servers. +who_is_this_for: This introductory topic is for developers who want to measure and compare the performance of Go applications on Arm-based servers. learning_objectives: - - Learn how to start up Arm64 and x64 instances of GCP VMs - - Install Go, benchmarks, benchstat, and sweet on the two VMs - - Use sweet and benchstat to compare the performance of Go applications on the two VMs + - Launch Arm64 and x86_64 instances of GCP VMs + - Install Go, Sweet, and Benchstat on each VM + - Use Sweet and Benchstat to compare the performance of Go applications on the two VMs prerequisites: - - A [Google Cloud account](https://console.cloud.google.com/). This learning path can be run on on-prem or on any cloud provider instance, but specifically documents the process for running on Google Axion. + - A [Google Cloud account](https://console.cloud.google.com/). This learning path can be run on any cloud provider or on-premise, but it focuses on Google Cloud’s Axion Arm64-based instances. - A local machine with [Google Cloud CLI](/install-guides/gcloud/) installed. 
author: Geremy Cohen diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md index 4a5995608f..77accb24b9 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md @@ -6,9 +6,9 @@ weight: 10 layout: learningpathall --- -# Go Benchmarking Overview +## Overview of Go benchmarking tools -In this section, you will learn how to measure, collect, and compare Go performance data across different CPU architectures. This knowledge is essential for developers and system architects who need to make informed decisions about infrastructure choices for their Go applications. +This section introduces how to measure, collect, and compare Go performance data across different CPU architectures. This knowledge is essential for developers and system architects who need to make informed decisions about infrastructure choices for their Go applications. You'll gain hands-on experience with: @@ -19,15 +19,15 @@ You'll gain hands-on experience with: - **Benchstat**, a statistical comparison tool that analyzes benchmark results to identify meaningful performance differences between systems. 
Benchmarking is critical for modern software development because it allows you to: -- Quantify the performance impact of code changes -- Compare different hardware platforms objectively -- Make data-driven decisions about infrastructure investments -- Identify optimization opportunities in your applications +- Quantify the impact of code changes +- Compare performance across hardware platforms +- Make data-driven decisions about infrastructure decisions +- Identify optimization opportunities in your application code You'll use Intel c4-standard-8 and Arm-based c4a-standard-4 (both four-core) instances running on GCP to run and compare benchmarks using these tools. {{% notice Note %}} -Arm-based c4a-standard-4 instances and Intel-based c4-standard-8 instances both utilize four cores. Both instances are categorized by GCP as members of the **consistently high performing** series; the main difference between the two is that the c4a has 16 GB of RAM, while the c4 has 30 GB of RAM. We've chosen to keep CPU cores equivalent across the two instances of the same series to keep the comparison as close as possible. +Arm-based c4a-standard-4 instances and Intel-based c4-standard-8 instances both utilize four cores. Both instances are categorized by GCP as members of the **consistently high performing** series; the main difference between the two is that the c4a has 16 GB of RAM, while the c4 has 30 GB of RAM. This Learning Path keeps the CPU cores equivalent across the two instances of the same series to keep the comparison as close as possible. 
{{% /notice %}} From 788d32a01ae39d7cb291de35cb218ceb7159bdef Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Wed, 25 Jun 2025 14:49:55 +0000 Subject: [PATCH 03/19] Starting content dev --- .../go-benchmarking-with-sweet/_index.md | 2 +- .../go-benchmarking-with-sweet/add_c4a_vm.md | 2 +- .../installing_go_and_sweet.md | 8 +++++--- .../manual_run_benchmark.md | 8 ++++---- .../go-benchmarking-with-sweet/overview.md | 14 +++++++------- .../rexec_sweet_install.md | 14 ++++++++------ 6 files changed, 26 insertions(+), 22 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md index 9c96143bf8..d57b4348fa 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md @@ -11,7 +11,7 @@ learning_objectives: - Use Sweet and Benchstat to compare the performance of Go applications on the two VMs prerequisites: - - A [Google Cloud account](https://console.cloud.google.com/). This learning path can be run on any cloud provider or on-premise, but it focuses on Google Cloud’s Axion Arm64-based instances. + - A [Google Cloud account](https://console.cloud.google.com/). This Learning Path can be run on any cloud provider or on-premises, but it focuses on Google Cloud’s Axion Arm64-based instances. - A local machine with [Google Cloud CLI](/install-guides/gcloud/) installed. 
author: Geremy Cohen diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md index 106352dc7c..b2cd477bba 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md @@ -1,5 +1,5 @@ --- -title: Launching a Google Axion Instance +title: Launching a Google Axion instance weight: 20 ### FIXED, DO NOT MODIFY diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md index 9f8552fbea..235e59e006 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md @@ -14,7 +14,7 @@ Sweet is a Go benchmarking tool that provides a standardized way to run performa {{% notice Note %}} -Subsequent steps in the learning path assume you are running this script (installing) from your home directory (`$HOME`), resulting in the creation of a `$HOME/benchmarks/sweet` final install path. If you decide to install elsewhere, you will need to adjust the path accordingly when prompted to run the benchmark logic later in the learning path. +Subsequent steps in the learning path assume you are running this script (installing) from your home directory (`$HOME`), resulting in the creation of a `$HOME/benchmarks/sweet` final install path. If you install elsewhere, you need to adjust the path accordingly when prompted to run the benchmark logic later in the Learning Path. {{% /notice %}} @@ -25,7 +25,7 @@ Start by copying and pasting the script below on **both** of your GCP VMs. 
This ```bash #!/usr/bin/env bash -# Write the script to filesystem using a HEREDOC +# Write the install script to filesystem using a HEREDOC cat <<'EOF' > install_go_and_sweet.sh sudo apt-get -y update @@ -90,7 +90,9 @@ chmod 755 install_go_and_sweet.sh ``` -The end of the output should look like: +## Expected output from `sweet get` + +When sweet get completes successfully, you’ll see output similar to: ```output Sweet v0.3.0: Go Benchmarking Suite diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchmark.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchmark.md index 281d6bc3a8..d1c6a3b1ac 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchmark.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchmark.md @@ -8,8 +8,8 @@ layout: learningpathall In this section, you'll download the results of the benchmark you ran manually in the previous sections from each VM. You will use these results to understand how `sweet` and `benchstat` work together. -## Download Benchmark Results from each VM -Lets walk through the steps to manually download the sweet benchmark results from your initial run on each VM. +## Download benchmark results from each VM +Let's walk through the steps to manually download the sweet benchmark results from your initial run on each VM. 1. **Locate results:** Change directory to the `results/markdown` directory and list the files to see the `arm-benchmarks.result` file: @@ -21,11 +21,11 @@ Lets walk through the steps to manually download the sweet benchmark results fro 2. **Copy result path:** Copy the absolute pathname of `arm-benchmarks.result`. -3. **Download results:** Click `DOWNLOAD FILE`, and paste the **ABSOLUTE PATHNAME** you just copied for the filename, and then click `Download`. 
This will download the benchmark results to your local machine. +3. **Download results:** Click `DOWNLOAD FILE`, and paste the **ABSOLUTE PATHNAME** you just copied for the filename, and then click `Download`. This downloads the benchmark results to your local machine. ![](images/run_manually/6.png) -4. **Rename the file:** Once downloaded, on your local machine, rename this file to `c4a.result` so you can distinguish it from the x86 results you'll download later. This naming convention will help you clearly identify which results came from which architecture. You'll know the file downloaded successfully if you see the file in your Downloads directory with the name `c4a.result`, as well as the confirmation dialog in your browser: +4. **Rename the file:** Once downloaded, on your local machine, rename this file to `c4a.result` so you can distinguish it from the x86 results you'll download later. This naming convention helps you clearly identify which results came from which architecture. You'll know the file downloaded successfully if you see the file in your Downloads directory with the name `c4a.result`, as well as the confirmation dialog in your browser: ![](images/run_manually/7.png) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md index 77accb24b9..d1aac7fe40 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md @@ -6,28 +6,28 @@ weight: 10 layout: learningpathall --- -## Overview of Go benchmarking tools +## Overview of Go benchmarking tools -This section introduces how to measure, collect, and compare Go performance data across different CPU architectures. 
This knowledge is essential for developers and system architects who need to make informed decisions about infrastructure choices for their Go applications. +This section shows you how to measure, collect, and compare Go performance data across different CPU architectures. These techniques help developers and system architects make informed infrastructure decisions for their Go applications. You'll gain hands-on experience with: -- **Go Benchmarks**, a collection of pre-written benchmark definitions that standardizes performance tests for popular Go applications, leveraging Go's built-in benchmark support. +- **Go Benchmarks**, standardized definitions for popular Go applications, using Go’s built-in testing framework. -- **Sweet**, a benchmark runner that automates running Go benchmarks across multiple environments, collecting and formatting results for comparison. +- **Sweet**, a benchmark runner that automates execution and formats results for comparison across multiple environments. - **Benchstat**, a statistical comparison tool that analyzes benchmark results to identify meaningful performance differences between systems. Benchmarking is critical for modern software development because it allows you to: - Quantify the impact of code changes - Compare performance across hardware platforms -- Make data-driven decisions about infrastructure decisions +- Make data-driven decisions about infrastructure - Identify optimization opportunities in your application code -You'll use Intel c4-standard-8 and Arm-based c4a-standard-4 (both four-core) instances running on GCP to run and compare benchmarks using these tools. +In this learning path, you'll compare performance using two four-core GCP instance types: the Intel-based c4-standard-8 and the Arm-based c4a-standard-4. {{% notice Note %}} -Arm-based c4a-standard-4 instances and Intel-based c4-standard-8 instances both utilize four cores. 
Both instances are categorized by GCP as members of the **consistently high performing** series; the main difference between the two is that the c4a has 16 GB of RAM, while the c4 has 30 GB of RAM. This Learning Path keeps the CPU cores equivalent across the two instances of the same series to keep the comparison as close as possible. +Arm-based c4a-standard-4 instances and Intel-based c4-standard-8 instances both utilize four cores. Both instances are categorized by GCP as members of the **consistently high-performing** series; the main difference between the two is that c4a has 16 GB of RAM, while c4 has 30 GB of RAM. This Learning Path uses equivalent core counts to ensure a fair performance comparison. {{% /notice %}} diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md index 86882fcfd8..4a8ee53a27 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md @@ -1,5 +1,5 @@ --- -title: Installing the Automated Benchmark and Benchstat Runner +title: Installing the automated benchmark and Benchstat runner weight: 53 ### FIXED, DO NOT MODIFY @@ -10,13 +10,15 @@ In the last section, you learned how to run benchmarks and benchstat manually. N ## Introducing rexec_sweet.py -The `rexec_sweet.py` script is a powerful automation tool that simplifies the benchmarking workflow. This tool connects to your GCP instances, runs the benchmarks, collects the results, and generates comprehensive reports—all in one seamless operation. It provides several key benefits: +The `rexec_sweet.py` script is a powerful automation tool that simplifies the benchmarking workflow. 
This tool connects to your GCP instances, runs the benchmarks, collects the results, and generates comprehensive reports - all in one seamless operation. + +It provides several key benefits: - **Automation**: Runs benchmarks on multiple VMs without manual SSH connections - **Consistency**: Ensures benchmarks are executed with identical parameters - **Visualization**: Generates HTML reports with interactive charts for easier analysis -The only dependency you are responsible for satisfying before the script runs is completion of the "Installing Go and Sweet" sections of this learning path. Additional dependencies are dynamically loaded at install time by the install script. +The only dependency you are responsible for satisfying before the script runs is completion of the "Installing Go and Sweet" sections of this Learning Path. Additional dependencies are dynamically loaded at install time by the install script. ## Setting up rexec_sweet @@ -40,19 +42,19 @@ The only dependency you are responsible for satisfying before the script runs is ./install.sh ``` - If the install.sh script detects that you already have dependencies installed, it may ask you if you wish to reinstall them with the following prompt as shown: + If the install.sh script detects that you already have dependencies installed, it might ask you if you want to reinstall them: ```output pyenv: /Users/gercoh01/.pyenv/versions/3.9.22 already exists continue with installation? (y/N) ``` - If you see this prompt, enter `N` (not `Y`!) to continue with the installation without modifying the existing installed dependencies. + If you see this prompt, enter `N` to continue with the installation without modifying the existing installed dependencies. 4. **Verify VM status:** Make sure the GCP VM instances you created in the previous section are running. If not, start them now, and give them a few minutes to come up. 
{{% notice Note %}} -The install script will prompt you to authenticate with Google Cloud Platform (GCP) using the gcloud command-line tool at the end of install. If after installing you have issues running the script and/or get GCP authentication errors, you can manually authenticate with GCP by running the following command: `gcloud auth login` +The install script prompts you to authenticate with Google Cloud Platform (GCP) using the gcloud command-line tool at the end of install. If after installing you have issues running the script and/or get GCP authentication errors, you can manually authenticate with GCP by running the following command: `gcloud auth login` {{% /notice %}} From 2768a14df623b01809d5802c28f848ecf38ca183 Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Thu, 26 Jun 2025 08:45:49 +0000 Subject: [PATCH 04/19] Content improvements --- .../go-benchmarking-with-sweet/_index.md | 8 ++--- .../go-benchmarking-with-sweet/add_c4a_vm.md | 32 +++++++++---------- .../installing_go_and_sweet.md | 8 ++--- .../go-benchmarking-with-sweet/overview.md | 10 +++--- 4 files changed, 29 insertions(+), 29 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md index d57b4348fa..f043602728 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md @@ -5,10 +5,10 @@ minutes_to_complete: 60 who_is_this_for: This introductory topic is for developers who want to measure and compare the performance of Go applications on Arm-based servers. 
-learning_objectives: - - Launch Arm64 and x86_64 instances of GCP VMs - - Install Go, Sweet, and Benchstat on each VM - - Use Sweet and Benchstat to compare the performance of Go applications on the two VMs +learning_objectives: + - Provision Arm64 and x86_64 VM instances on Google Cloud + - Install Go, Sweet, and Benchstat on each VM instance + - Run benchmarks and use Benchstat to compare Go application performance across architectures prerequisites: - A [Google Cloud account](https://console.cloud.google.com/). This Learning Path can be run on any cloud provider or on-premises, but it focuses on Google Cloud’s Axion Arm64-based instances. diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md index b2cd477bba..3fb06becaa 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md @@ -6,28 +6,28 @@ weight: 20 layout: learningpathall --- -## Overview -In this section, you'll learn how to spin up the first of two different VMs used for benchmarking Go tests, an Arm-based Google Axion c4a-standard-4 (c4a for short). +## Launch an Arm-based c4a-standard-4 instance +In this section, you'll launch the first of two VMs used for benchmarking Go applications: the Arm-based c4a-standard-4 instance on Google Cloud, also referred to as c4a. -## Creating the c4a-standard-4 Instance +## Creating the c4a-standard-4 instance -1. **Access Google Cloud Console:** Navigate to [https://console.cloud.google.com/welcome](https://console.cloud.google.com/welcome) +1. **Access Google Cloud console:** Navigate to [https://console.cloud.google.com/welcome](https://console.cloud.google.com/welcome) -2. **Search for VM instances:** Click into the Search field. +2. 
**Search for VM instances:** Select the search field. -3. **Find VM Instances:** Start typing `vm` until the UI auto-completes `VM Instances`, then click it. +3. **Find VM Instances:** Start typing `vm` until the UI auto-completes `VM Instances`, then select it. ![](images/launch_c4a/3.png) The VM Instances page appears. -4. **Create a new instance:** Click `Create instance` +4. **Create a new instance:** Select **Create instance** ![](images/launch_c4a/4.png) The Machine configuration page appears. -5. **Name your instance:** Click the `Name` field, and enter "c4a" for the `Name`. +5. **Name your instance:** Select the **Name** field, and enter "c4a". ![](images/launch_c4a/5.png) @@ -35,33 +35,33 @@ The Machine configuration page appears. ![](images/launch_c4a/7.png) -7. **View machine types:** Scroll down to the Machine type dropdown, and click it to show all available options. +7. **View machine types:** Scroll down to the Machine type dropdown, and select it to show all available options. ![](images/launch_c4a/8.png) -8. **Choose machine size:** Select "c4a-standard-4" under the Standard tab. +8. **Choose machine size:** Select **c4a-standard-4** under the Standard tab. ![](images/launch_c4a/9.png) -9. **Configure storage:** Click the "OS and Storage" tab. +9. **Configure storage:** select the **OS and Storage** tab. ![](images/launch_c4a/10.png) -10. **Modify storage settings:** Click "Change" +10. **Modify storage settings:** select **Change** ![](images/launch_c4a/11.png) -11. **Set disk size:** Double-click the "Size (GB)" field, then enter "1000" for the value. +11. **Set disk size:** Select the **Size (GB)** field, then enter "1000" for the value. ![](images/launch_c4a/16.png) -12. **Confirm storage settings:** Click "Select" to continue. +12. **Confirm storage settings:** Select **Select** to continue. ![](images/launch_c4a/18.png) -13. **Launch the instance:** Click "Create" to bring up the instance. +13. 
**Launch the instance:** select **Create** to bring up the instance. ![](images/launch_c4a/19.png) -After a few seconds, your c4a instance starts up, and you are ready to continue to the next section. In the next step, you will launch the second VM, an Intel-based Emerald Rapids c4-standard-8 (c4 for short), which will serve as the comparison system for our benchmarking tests. +After a few seconds, your c4a instance starts up, and you are ready to continue to the next section. In the next section, you will launch the second VM, an Intel-based Emerald Rapids c4-standard-8 (abbreviated to c4), which serves as the comparison system for our benchmarking tests. diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md index 235e59e006..7e16dab4bd 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md @@ -1,5 +1,5 @@ --- -title: Installing Go and Sweet +title: Install Go, Sweet, and Benchstat weight: 40 ### FIXED, DO NOT MODIFY @@ -10,15 +10,15 @@ In this section, you'll install Go, Sweet, and the Benchstat comparison tool on ## Installation Script -Sweet is a Go benchmarking tool that provides a standardized way to run performance tests across different systems. Benchstat is a companion tool that analyzes and compares benchmark results, helping you understand performance differences between systems. Together, these tools will allow you to accurately measure and compare Go performance on Arm and x86 architectures. +Sweet is a Go benchmarking tool that provides a standardized way to run performance tests across systems. Benchstat is a companion tool that compares benchmark results to highlight meaningful performance differences. 
Together, these tools help you evaluate Go performance on both Arm and x86 architectures. {{% notice Note %}} -Subsequent steps in the learning path assume you are running this script (installing) from your home directory (`$HOME`), resulting in the creation of a `$HOME/benchmarks/sweet` final install path. If you install elsewhere, you need to adjust the path accordingly when prompted to run the benchmark logic later in the Learning Path. +Subsequent steps in the learning path assume you are running this script (installing) from your home directory (`$HOME`), resulting in the creation of a `$HOME/benchmarks/sweet` final install path. If you install to a different directory, update the paths in later steps to match your custom location. {{% /notice %}} -Start by copying and pasting the script below on **both** of your GCP VMs. This script checks the architecture of your running VM, installs the required Go package on your VM. It then installs sweet, benchmarks, and the benchstat tools. +Start by copying and pasting the script below on both of your GCP VMs. This script automatically detects your system architecture, installs the appropriate Go version, and sets up Sweet, Benchstat, and the Go benchmark suite. 
**You don't need to run it after pasting**, just paste it into your home directory and press enter to install all needed dependencies: diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md index d1aac7fe40..f2e8bc0dfc 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md @@ -12,22 +12,22 @@ This section shows you how to measure, collect, and compare Go performance data You'll gain hands-on experience with: -- **Go Benchmarks**, standardized definitions for popular Go applications, using Go’s built-in testing framework. +- **Go Benchmarks** - standardized definitions for popular Go applications, using Go’s built-in testing framework. -- **Sweet**, a benchmark runner that automates execution and formats results for comparison across multiple environments. +- **Sweet** - a benchmark runner that automates execution and formats results for comparison across multiple environments. -- **Benchstat**, a statistical comparison tool that analyzes benchmark results to identify meaningful performance differences between systems. +- **Benchstat** - a statistical comparison tool that analyzes benchmark results to identify meaningful performance differences between systems. Benchmarking is critical for modern software development because it allows you to: - Quantify the impact of code changes -- Compare performance across hardware platforms +- Compare performance across hardware architectures - Make data-driven decisions about infrastructure - Identify optimization opportunities in your application code In this learning path, you'll compare performance using two four-core GCP instance types: the Intel-based c4-standard-8 and the Arm-based c4a-standard-4. 
{{% notice Note %}} -Arm-based c4a-standard-4 instances and Intel-based c4-standard-8 instances both utilize four cores. Both instances are categorized by GCP as members of the **consistently high-performing** series; the main difference between the two is that c4a has 16 GB of RAM, while c4 has 30 GB of RAM. This Learning Path uses equivalent core counts to ensure a fair performance comparison. +Arm-based c4a-standard-4 instances and Intel-based c4-standard-8 instances both utilize four cores. Both instances are categorized by GCP as members of a series that demonstrates consistent high performance; the main difference between the two is that c4a has 16 GB of RAM, while c4 has 30 GB of RAM. This Learning Path uses equivalent core counts to ensure a fair performance comparison. {{% /notice %}} From e96e32f5b71b09321e93a49faa5391ac14083cd5 Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Thu, 26 Jun 2025 09:27:19 +0000 Subject: [PATCH 05/19] Updates --- .../installing_go_and_sweet.md | 16 +++++----- .../running_benchmarks.md | 31 ++++++++++--------- 2 files changed, 23 insertions(+), 24 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md index 7e16dab4bd..a0b0a6189d 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md @@ -6,21 +6,19 @@ weight: 40 layout: learningpathall --- -In this section, you'll install Go, Sweet, and the Benchstat comparison tool on both VMs. - -## Installation Script +In this section, you'll install Go, Sweet, and Benchstat on both virtual machines. Sweet is a Go benchmarking tool that provides a standardized way to run performance tests across systems. 
Benchstat is a companion tool that compares benchmark results to highlight meaningful performance differences. Together, these tools help you evaluate Go performance on both Arm and x86 architectures. - {{% notice Note %}} Subsequent steps in the learning path assume you are running this script (installing) from your home directory (`$HOME`), resulting in the creation of a `$HOME/benchmarks/sweet` final install path. If you install to a different directory, update the paths in later steps to match your custom location. {{% /notice %}} +## Installation script Start by copying and pasting the script below on both of your GCP VMs. This script automatically detects your system architecture, installs the appropriate Go version, and sets up Sweet, Benchstat, and the Go benchmark suite. -**You don't need to run it after pasting**, just paste it into your home directory and press enter to install all needed dependencies: +Paste the full block into your terminal. This creates and runs an installer script directly from your home directory: ```bash #!/usr/bin/env bash @@ -32,7 +30,7 @@ sudo apt-get -y update sudo apt-get -y install git build-essential # Detect architecture - this allows the same script to work on both -# our Arm (c4a) and x86 (c4) VMs without modification +# Arm (c4a) and x86 (c4) VMs without modification ARCH=$(uname -m) case "$ARCH" in arm64|aarch64) @@ -90,9 +88,9 @@ chmod 755 install_go_and_sweet.sh ``` -## Expected output from `sweet get` +## Expected output from sweet get -When sweet get completes successfully, you’ll see output similar to: +When `sweet get` completes successfully, you’ll see output similar to: ```output Sweet v0.3.0: Go Benchmarking Suite @@ -111,7 +109,7 @@ Usage: sweet get [flags] ``` -## Verify Installation +## Verify installation To test that everything is installed correctly, set the environment variables shown below on each VM: diff --git 
a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/running_benchmarks.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/running_benchmarks.md index 8ddf05bec3..3730750135 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/running_benchmarks.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/running_benchmarks.md @@ -1,14 +1,14 @@ --- -title: Benchmark Types and Metrics +title: Benchmark types and metrics weight: 50 ### FIXED, DO NOT MODIFY layout: learningpathall --- -With setup complete, you can now run and analyze the benchmarks. Before you do, it's good to understand all the different pieces in more detail. +Now that setup is complete, it's important to understand the available benchmarks and the performance metrics you'll be analyzing in order to compare system performance effectively. -## Choosing a Benchmark to Run +## Available benchmarks Whether running manually or automatically, the benchmarking process consists of two main steps: @@ -18,7 +18,8 @@ Whether running manually or automatically, the benchmarking process consists of Sweet comes ready to run with the following benchmarks: -| Benchmark | Description | Command | + +| Benchmark | Description | Example command | |-----------------|-------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| | **biogo-igor** | Processes pairwise alignment data using the biogo library, grouping repeat feature families and outputting results in JSON format. | `sweet run -count 10 -run="biogo-igor" config.toml` | | **biogo-krishna** | Pure-Go implementation of the PALS algorithm for pairwise sequence alignment, measuring alignment runtime performance. 
| `sweet run -count 10 -run="biogo-krishna" config.toml` | @@ -31,18 +32,18 @@ Sweet comes ready to run with the following benchmarks: | **markdown** | Parses and renders Markdown documents to HTML using a Go-based markdown library to evaluate parsing and rendering throughput. | `sweet run -count 10 -run="markdown" config.toml` | | **tile38** | Stress-tests a Tile38 geospatial database with WITHIN, INTERSECTS, and NEARBY queries to measure spatial query performance. | `sweet run -count 10 -run="tile38" config.toml` | -## Metrics Summary +## Performance metrics When running benchmarks, several key metrics are collected to evaluate performance. The following summarizes the most common metrics and their significance: -### Seconds per Operation - Lower is better +### Seconds per operation - lower is better This metric measures the time taken to complete a single operation, indicating the raw speed of execution. It directly reflects the performance efficiency of a system for a specific task, making it one of the most fundamental benchmarking metrics. A system with lower seconds per operation completes tasks faster. This metric primarily reflects CPU performance but can also be influenced by memory access speeds and I/O operations. If seconds per operation is the only metric showing significant difference while memory metrics are similar, the performance difference is likely CPU-bound. -### Operations per Second - Higher is better +### Operations per second (higher is better) This metric provides a clear measure of system performance capacity, making it essential for understanding raw processing power and scalability potential. A system performing more operations per second has greater processing capacity. This metric reflects overall system performance including CPU speed, memory access efficiency, and I/O capabilities. 
@@ -51,7 +52,7 @@ If operations per second is substantially higher while memory usage remains prop This metric is essentially the inverse of "seconds per operation" and provides a more intuitive way to understand throughput capacity. -### Average RSS Bytes - Lower is better +### Average RSS bytes - lower is better Resident Set Size (RSS) represents the portion of memory occupied by a process that is held in RAM (not swapped out). It shows the typical memory footprint during operation, indicating memory efficiency and potential for scalability. @@ -59,7 +60,7 @@ Lower average RSS indicates more efficient memory usage. A system with lower ave If one VM has significantly higher seconds per operation but lower RSS, it may be trading speed for memory efficiency. Systems with similar CPU performance but different RSS values indicate different memory optimization approaches; lower RSS with similar CPU performance suggests better memory management, which is a critical indicator of performance in memory-constrained environments. -### Peak RSS Bytes - Lower is better +### Peak RSS bytes (lower is better) Peak RSS bytes is the maximum Resident Set Size reached during execution, representing the worst-case memory usage scenario. The peak RSS metric helps to understand memory requirements and potential for memory-related bottlenecks during intensive operations. @@ -67,7 +68,7 @@ Lower peak RSS indicates better handling of memory-intensive operations. High pe Large differences between average and peak RSS suggest memory usage volatility. A system with lower peak RSS but similar performance is better suited for memory-constrained environments. -### Peak VM Bytes - Lower is better +### Peak VM bytes (lower is better) Peak VM Bytes is the maximum Virtual Memory size used, including both RAM and swap space allocated to the process. @@ -75,7 +76,7 @@ Lower peak VM indicates more efficient use of the total memory address space. 
Hi If peak VM is much higher than peak RSS, the system is relying heavily on virtual memory management. Systems with similar performance but different VM usage patterns may have different memory allocation strategies. High VM with performance degradation suggests potential memory-bound operations due to excessive paging. -## Summary of Efficiency Indicators +## Summary of efficiency indicators When comparing metrics across two systems, keep the following in mind: @@ -84,16 +85,16 @@ A system is likely CPU-bound if seconds per operation differs significantly whil A system is likely memory-bound if performance degrades as memory metrics increase, especially when peak RSS approaches available physical memory. -### Efficiency Indicators +### Efficiency indicators The ideal system shows lower values across all metrics - faster execution with smaller memory footprint. Systems with similar seconds per operation but significantly different memory metrics indicate different optimization priorities. -### Scalability Potential +### Scalability potential Lower memory metrics (especially peak values) suggest better scalability for concurrent workloads. Systems with lower seconds per operation but higher memory usage may perform well for single tasks but scale poorly. -### Optimization Targets +### Optimization targets Large gaps between average and peak memory usage suggest opportunities for memory optimization. High seconds per operation with low memory usage suggests CPU optimization potential. 
-## Best Practices when benchmarking across different instance types +## Best practices when benchmarking across different instance types Here are some general tips to keep in mind as you explore benchmarking across different apps and instance types: From 87d67b6e526ea18a450d4c1494a0ea2c54550f0a Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Thu, 26 Jun 2025 09:57:47 +0000 Subject: [PATCH 06/19] Updates --- .../rexec_sweet_install.md | 8 +++---- .../rexec_sweet_run.md | 21 +++++++++---------- .../running_benchmarks.md | 4 ++-- 3 files changed, 16 insertions(+), 17 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md index 4a8ee53a27..ebcf131c94 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md @@ -1,5 +1,5 @@ --- -title: Installing the automated benchmark and Benchstat runner +title: Install the automated benchmark and benchstat runner weight: 53 ### FIXED, DO NOT MODIFY @@ -10,7 +10,7 @@ In the last section, you learned how to run benchmarks and benchstat manually. N ## Introducing rexec_sweet.py -The `rexec_sweet.py` script is a powerful automation tool that simplifies the benchmarking workflow. This tool connects to your GCP instances, runs the benchmarks, collects the results, and generates comprehensive reports - all in one seamless operation. +`rexec_sweet.py` is a script that automates the benchmarking workflow: it connects to your GCP instances, runs benchmarks, collects results, and generates HTML reports - all in one step. It provides several key benefits: @@ -29,7 +29,7 @@ The only dependency you are responsible for satisfying before the script runs is cd rexec_sweet ``` -2. 
**Clone the repository inside the directory:** Get the `rexec_sweet.py` script from the GitHub repository: +2. **Clone the repository:** Get the `rexec_sweet.py` script from the GitHub repository: ```bash git clone https://github.com/geremyCohen/go_benchmarks.git @@ -51,7 +51,7 @@ The only dependency you are responsible for satisfying before the script runs is If you see this prompt, enter `N` to continue with the installation without modifying the existing installed dependencies. -4. **Verify VM status:** Make sure the GCP VM instances you created in the previous section are running. If not, start them now, and give them a few minutes to come up. +4. **Verify VM status:** Make sure the GCP VM instances you created in the previous section are running. If not, start them now, and wait a few minutes for them to finish booting. {{% notice Note %}} The install script prompts you to authenticate with Google Cloud Platform (GCP) using the gcloud command-line tool at the end of install. If after installing you have issues running the script and/or get GCP authentication errors, you can manually authenticate with GCP by running the following command: `gcloud auth login` diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md index b2cfbf4ba5..96a7075637 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md @@ -1,14 +1,13 @@ --- -title: Running the Automated Benchmark and Benchstat Runner +title: Run the automated benchmark and benchstat runner weight: 54 ### FIXED, DO NOT MODIFY layout: learningpathall --- +With `rexec_sweet` installed, your benchmarking instances running, and your local machine authenticated with GCP, you're ready to run automated benchmarks across 
your configured environments. -With `rexec_sweet` installed, your benchmarking instances running, and your localhost authenticated with GCP, you'll now see how to run benchmarks in an automated fashion. - -## Run an Automated Benchmark and Analysis +## Run an automated benchmark and analysis 1. **Run the script:** Execute the `rexec_sweet` script from your local terminal: @@ -16,7 +15,7 @@ With `rexec_sweet` installed, your benchmarking instances running, and your loca rexec_sweet ``` -2. **Select a benchmark:** The script will prompt you for the name of the benchmark you want to run. Press enter to run the default benchmark, which is `markdown` (this is the recommended benchmark to run the first time.) +2. **Select a benchmark:** When prompted, press **Enter** to run the default benchmark (markdown), recommended for your first run. ```bash Available benchmarks: @@ -45,11 +44,11 @@ Do you want to run the first two instances found with default install directorie 4. **Choose your configuration:** You have two options: - - **Use default settings:** If you want to run benchmarks on the instances labeled with "will be used as nth instance", and you installed Go and Sweet into the default directories as noted in the tutorial, you can press Enter to accept the defaults. + - **Use default settings:** If you want to run benchmarks on the instances labeled with "will be used as nth instance", and you installed Go and Sweet into the default path (~/benchmarks/sweet), you can press **Enter** to accept the defaults. - - **Custom configuration:** If you are running more than two instances, and the script doesn't suggest the correct two to autorun, or you installed Go and Sweet to non-default folders, select "n" and press Enter. The script will then prompt you to select the instances and runtime paths. 
+ - **Custom configuration:** If you are running more than two instances, and the script doesn't suggest the correct two to autorun, or you installed Go and Sweet to non-default folders, select "n" and press **Enter**. The script will then prompt you to select the instances and runtime paths. -In this example, we'll manually select the instances and paths as shown below: +In this example, you'll manually select the instances and paths as shown below: ```output Available instances: @@ -73,8 +72,8 @@ Output directory: /private/tmp/a/go_benchmarks/results/c4-c4a-markdown-20250610T ... ``` -Upon entering instance names and paths for the VMs, the script will automatically: - - Run the benchmark on both VMs +After selecting instances and paths, the script will: + - Run the selected benchmark on both VMs - Run `benchstat` to compare the results - Push the results to your local machine @@ -90,7 +89,7 @@ Report generated in results/c4-c4a-markdown-20250610T190407 5. **View the report:** Once on your local machine, `rexec_sweet` will generate an HTML report that will open automatically in your web browser. - If you close the tab or browser, you can always reopen the report by navigating to the `results` subdirectory of the current working directory of the `rexec_sweet.py` script, and opening `report.html`. + If you close the report, you can reopen it by navigating to the results subdirectory and opening report.html in your browser. 
![](images/run_auto/2.png) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/running_benchmarks.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/running_benchmarks.md index 3730750135..68c8d0ac4b 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/running_benchmarks.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/running_benchmarks.md @@ -36,7 +36,7 @@ Sweet comes ready to run with the following benchmarks: When running benchmarks, several key metrics are collected to evaluate performance. The following summarizes the most common metrics and their significance: -### Seconds per operation - lower is better +### Seconds per operation (lower is better) This metric measures the time taken to complete a single operation, indicating the raw speed of execution. It directly reflects the performance efficiency of a system for a specific task, making it one of the most fundamental benchmarking metrics. @@ -52,7 +52,7 @@ If operations per second is substantially higher while memory usage remains prop This metric is essentially the inverse of "seconds per operation" and provides a more intuitive way to understand throughput capacity. -### Average RSS bytes - lower is better +### Average RSS bytes (lower is better) Resident Set Size (RSS) represents the portion of memory occupied by a process that is held in RAM (not swapped out). It shows the typical memory footprint during operation, indicating memory efficiency and potential for scalability. 
From a3f0447b7e79069abd2e781b6d8783bdfad06fc1 Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Thu, 26 Jun 2025 10:17:22 +0000 Subject: [PATCH 07/19] Updates --- .../go-benchmarking-with-sweet/add_c4a_vm.md | 4 +++- .../go-benchmarking-with-sweet/overview.md | 2 +- .../go-benchmarking-with-sweet/rexec_sweet_install.md | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md index 3fb06becaa..8e27d7a9d4 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md @@ -63,5 +63,7 @@ The Machine configuration page appears. ![](images/launch_c4a/19.png) -After a few seconds, your c4a instance starts up, and you are ready to continue to the next section. In the next section, you will launch the second VM, an Intel-based Emerald Rapids c4-standard-8 (abbreviated to c4), which serves as the comparison system for our benchmarking tests. +After a few seconds, your c4a instance starts up, and you are ready to continue to the next section. + +In the next section, you will launch the second VM, an Intel-based Emerald Rapids c4-standard-8 (abbreviated to c4), which serves as the comparison system for our benchmarking tests. 
diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md index f2e8bc0dfc..f974104362 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md @@ -24,7 +24,7 @@ Benchmarking is critical for modern software development because it allows you t - Make data-driven decisions about infrastructure - Identify optimization opportunities in your application code -In this learning path, you'll compare performance using two four-core GCP instance types: the Intel-based c4-standard-8 and the Arm-based c4a-standard-4. +In this Learning Path, you'll compare performance using two four-core GCP instance types: the Intel-based c4-standard-8 and the Arm-based c4a-standard-4. {{% notice Note %}} Arm-based c4a-standard-4 instances and Intel-based c4-standard-8 instances both utilize four cores. Both instances are categorized by GCP as members of a series that demonstrates consistent high performance; the main difference between the two is that c4a has 16 GB of RAM, while c4 has 30 GB of RAM. This Learning Path uses equivalent core counts to ensure a fair performance comparison. 
diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md index ebcf131c94..1c48346da4 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md @@ -18,7 +18,7 @@ It provides several key benefits: - **Consistency**: Ensures benchmarks are executed with identical parameters - **Visualization**: Generates HTML reports with interactive charts for easier analysis -The only dependency you are responsible for satisfying before the script runs is completion of the "Installing Go and Sweet" sections of this Learning Path. Additional dependencies are dynamically loaded at install time by the install script. +Before running the script, ensure you've completed the "Install Go, Sweet, and Benchstat" step. All other dependencies are installed automatically by the setup script. 
## Setting up rexec_sweet From 3f3c9142978efe86486f5cca4756e5ead6a7da98 Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Mon, 30 Jun 2025 09:55:06 +0000 Subject: [PATCH 08/19] Updates --- .../go-benchmarking-with-sweet/add_c4_vm.md | 2 +- .../manual_run_benchstat.md | 22 ++++++++++++------- .../rexec_sweet_install.md | 2 +- .../rexec_sweet_run.md | 2 +- 4 files changed, 17 insertions(+), 11 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md index 70b37ecf2e..7815895716 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md @@ -1,5 +1,5 @@ --- -title: Launching a Intel Emerald Rapids Instance +title: Launching an Intel Emerald Rapids Instance weight: 30 ### FIXED, DO NOT MODIFY diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md index 66ff075f26..871477b6fc 100755 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md @@ -1,32 +1,38 @@ --- -title: Manually running benchstat +title: Manually run Benchstat weight: 52 ### FIXED, DO NOT MODIFY layout: learningpathall --- -You've successfully run and downloaded the benchmark results from both your Arm-based and x86-based VMs. In this section, you'll compare them to each other using the benchstat tool. +You've successfully run and downloaded the benchmark results from both your Arm-based and x86-based VMs. In this section, you'll use Benchstat to compare performance between the two instances. 
-## Inspecting the Results Files +## Inspecting the results files -With the results files downloaded to your local machine, if you're curious to what they look like, you can inspect them to understand better what `benchstat` is analyzing. +If you're curious about the format of the results files, you can open them to better understand what `benchstat` is analyzing. -1. **View raw results:** Open the `c4a.result` file in a text editor, and you'll see something like this: +1. View raw results + +Open the `c4a.result` file in a text editor, and you'll see something like this: ![](images/run_manually/11.png) The file contains the results of the `markdown` benchmark run on the Arm-based c4a VM, showing time and memory stats taken for each iteration. If you open the `c4.result` file, you'll see similar results for the x86-based c4 VM. -2. **Close the editor:** Close the text editor when done. +2. Close the editor + +Close the text editor when done. -## Running Benchstat to Compare Results +## Run Benchstat to compare results To compare the results, you'll use `benchstat` to analyze the two result files you downloaded. Since all the prerequisites are already installed on the `c4` and `c4a` instances, benchstat will be run from one of those instances. -1. **Create working directory:** Make a temporary benchstat directory to hold the results files on either the c4a or c4 instance, and change directory into it: +1. 
Create working directory + +Make a temporary benchstat directory to hold the results files on either the c4a or c4 instance, and change directory into it: ```bash mkdir benchstat_results diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md index 1c48346da4..7030f3f759 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md @@ -1,5 +1,5 @@ --- -title: Install the automated benchmark and benchstat runner +title: Install the automated benchmark and Benchstat runner weight: 53 ### FIXED, DO NOT MODIFY diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md index 96a7075637..c38f3f3d23 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md @@ -1,5 +1,5 @@ --- -title: Run the automated benchmark and benchstat runner +title: Run the automated benchmark and Benchstat runner weight: 54 ### FIXED, DO NOT MODIFY From b8f7fe2c399e4448d77e2e5ae506446683e25353 Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Mon, 30 Jun 2025 10:50:12 +0000 Subject: [PATCH 09/19] Updates --- .../go-benchmarking-with-sweet/_index.md | 2 +- .../go-benchmarking-with-sweet/add_c4_vm.md | 2 +- .../go-benchmarking-with-sweet/add_c4a_vm.md | 32 +++++++++---------- .../installing_go_and_sweet.md | 9 ++++-- .../manual_run_benchmark.md | 2 +- .../manual_run_benchstat.md | 20 +++++++++--- .../go-benchmarking-with-sweet/overview.md | 10 ++++-- 7 files changed, 47 
insertions(+), 30 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md index f043602728..4b2e191e5e 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/_index.md @@ -12,7 +12,7 @@ learning_objectives: prerequisites: - A [Google Cloud account](https://console.cloud.google.com/). This Learning Path can be run on any cloud provider or on-premises, but it focuses on Google Cloud’s Axion Arm64-based instances. - - A local machine with [Google Cloud CLI](/install-guides/gcloud/) installed. + - A local machine with [Google Cloud CLI](/install-guides/gcloud/) installed author: Geremy Cohen diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md index 7815895716..06a2062352 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md @@ -1,5 +1,5 @@ --- -title: Launching an Intel Emerald Rapids Instance +title: Launch an Intel Emerald Rapids Instance weight: 30 ### FIXED, DO NOT MODIFY diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md index 8e27d7a9d4..7dae54c562 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md @@ -1,5 +1,5 @@ --- -title: Launching a Google Axion instance +title: Launch a Google Axion instance weight: 20 
### FIXED, DO NOT MODIFY @@ -7,59 +7,59 @@ layout: learningpathall --- ## Launch an Arm-based c4a-standard-4 instance -In this section, you'll launch the first of two VMs used for benchmarking Go applications: the Arm-based c4a-standard-4 instance on Google Cloud, also referred to as c4a. +In this section, you'll launch the first of the two VMs used for benchmarking Go applications: the Arm-based c4a-standard-4 instance on Google Cloud, also referred to as c4a. -## Creating the c4a-standard-4 instance +## Create the c4a-standard-4 instance -1. **Access Google Cloud console:** Navigate to [https://console.cloud.google.com/welcome](https://console.cloud.google.com/welcome) +To access the Google Cloud console, navigate to [https://console.cloud.google.com/welcome](https://console.cloud.google.com/welcome). -2. **Search for VM instances:** Select the search field. +Now search for VM instances, by using the search field. -3. **Find VM Instances:** Start typing `vm` until the UI auto-completes `VM Instances`, then select it. +To find VM instances, start typing "vm" until the UI auto-completes `VM Instances`, then select it. ![](images/launch_c4a/3.png) The VM Instances page appears. -4. **Create a new instance:** Select **Create instance** +Now create a new instance, by selecting **Create instance**. ![](images/launch_c4a/4.png) The Machine configuration page appears. -5. **Name your instance:** Select the **Name** field, and enter "c4a". +To name your instance, select the **Name** field, and enter "c4a". ![](images/launch_c4a/5.png) -6. **Select machine series:** Scroll down to the Machine series section, and select the C4A radio button. +Now select machine series by scrolling down to the Machine series section, and selecting the C4A radio button. ![](images/launch_c4a/7.png) -7. **View machine types:** Scroll down to the Machine type dropdown, and select it to show all available options. 
+To view machine types, scroll down to the Machine type dropdown, and select it to show all available options. ![](images/launch_c4a/8.png) -8. **Choose machine size:** Select **c4a-standard-4** under the Standard tab. +Now choose machine size by selecting **c4a-standard-4** under the Standard tab. ![](images/launch_c4a/9.png) -9. **Configure storage:** select the **OS and Storage** tab. +To configure storage, select the **OS and Storage** tab. ![](images/launch_c4a/10.png) -10. **Modify storage settings:** select **Change** +To modify storage settings, select **Change**. ![](images/launch_c4a/11.png) -11. **Set disk size:** Select the **Size (GB)** field, then enter "1000" for the value. +To set disk size, select the **Size (GB)** field, then enter "1000" for the value. ![](images/launch_c4a/16.png) -12. **Confirm storage settings:** Select **Select** to continue. +Now confirm storage settings by selecting **Select** to continue. ![](images/launch_c4a/18.png) -13. **Launch the instance:** select **Create** to bring up the instance. +To launch the instance, select **Create** to bring up the instance. ![](images/launch_c4a/19.png) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md index a0b0a6189d..c747426dae 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/installing_go_and_sweet.md @@ -6,12 +6,15 @@ weight: 40 layout: learningpathall --- -In this section, you'll install Go, Sweet, and Benchstat on both virtual machines. +In this section, you'll install Go, Sweet, and Benchstat on both virtual machines: -Sweet is a Go benchmarking tool that provides a standardized way to run performance tests across systems. 
Benchstat is a companion tool that compares benchmark results to highlight meaningful performance differences. Together, these tools help you evaluate Go performance on both Arm and x86 architectures. +* Sweet is a Go benchmarking tool that provides a standardized way to run performance tests across systems. +* Benchstat is a companion tool that compares benchmark results to highlight meaningful performance differences. + +Together, these tools help you evaluate Go performance on both Arm and x86 architectures. {{% notice Note %}} -Subsequent steps in the learning path assume you are running this script (installing) from your home directory (`$HOME`), resulting in the creation of a `$HOME/benchmarks/sweet` final install path. If you install to a different directory, update the paths in later steps to match your custom location. +Subsequent steps in this Learning Path assume you are running this script (installing) from your home directory (`$HOME`), resulting in the creation of a `$HOME/benchmarks/sweet` final install path. If you install to a different directory, update the paths in later steps to match your custom location. 
{{% /notice %}} ## Installation script diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchmark.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchmark.md index d1c6a3b1ac..22c3d9fadc 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchmark.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchmark.md @@ -1,5 +1,5 @@ --- -title: Manually running benchmarks +title: Manually run benchmarks weight: 51 ### FIXED, DO NOT MODIFY diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md index 871477b6fc..41b7931480 100755 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md @@ -39,15 +39,21 @@ Make a temporary benchstat directory to hold the results files on either the c4a cd benchstat_results ``` -2. **Upload result files:** Click the `UPLOAD FILE` button in the GCP console, and upload the `c4a.results` AND `c4.results` files you downloaded earlier. (This uploads them to your home directory, not to the current directory.) +2. Upload result files + +Click the `UPLOAD FILE` button in the GCP console, and upload the `c4a.results` AND `c4.results` files you downloaded earlier. (This uploads them to your home directory, not to the current directory.) ![](images/run_manually/16.png) -3. **Verify upload:** You'll know it worked correctly via the confirmation dialog in your terminal: +3. Verify upload + +You'll know it worked correctly via the confirmation dialog in your terminal: ![](images/run_manually/17.png) -4. 
**Move files to working directory:** Move the results files to the `benchstat_results` directory, and confirm their presence: +4. Move files to working directory + +Move the results files to the `benchstat_results` directory, and confirm their presence: ```bash mv ~/c4a.results ~/c4.results . @@ -60,7 +66,9 @@ Make a temporary benchstat directory to hold the results files on either the c4a c4.results c4a.results ``` -5. **Run benchstat:** Now you can run `benchstat` to compare the two results files: +5. Run benchstat + +Now you can run `benchstat` to compare the two results files: ```bash export GOPATH=$HOME/go @@ -69,7 +77,9 @@ Make a temporary benchstat directory to hold the results files on either the c4a benchstat c4a.results c4.results > c4a_vs_c4.txt ``` -6. **View comparison results:** Run the `cat` command to view the results: +6. View comparison results + +Run the `cat` command to view the results: ```bash cat c4a_vs_c4.txt diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md index f974104362..e4327d9c18 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/overview.md @@ -18,16 +18,20 @@ You'll gain hands-on experience with: - **Benchstat** - a statistical comparison tool that analyzes benchmark results to identify meaningful performance differences between systems. 
-Benchmarking is critical for modern software development because it allows you to: +Benchmarking is critical for modern software development because it allows you to do the following: - Quantify the impact of code changes - Compare performance across hardware architectures - Make data-driven decisions about infrastructure - Identify optimization opportunities in your application code -In this Learning Path, you'll compare performance using two four-core GCP instance types: the Intel-based c4-standard-8 and the Arm-based c4a-standard-4. +In this Learning Path, you'll compare performance using two four-core GCP instance types: + +* The Arm-based c4a-standard-4 +* The Intel-based c4-standard-8 {{% notice Note %}} -Arm-based c4a-standard-4 instances and Intel-based c4-standard-8 instances both utilize four cores. Both instances are categorized by GCP as members a series that demonstrates consistent high performance; the main difference between the two is that c4a has 16 GB of RAM, while c4 has 30 GB of RAM. This Learning Path uses equivalent core counts to ensure a fair performance comparison. +Arm-based c4a-standard-4 instances and Intel-based c4-standard-8 instances both utilize four cores. Both instances are categorized by GCP as members of a series that demonstrates consistent high performance. +The main difference between the two is that c4a has 16 GB of RAM, while c4 has 30 GB of RAM. This Learning Path uses equivalent core counts to ensure a fair performance comparison. 
{{% /notice %}} From 387f8871cdd08dc7da6b0603f7003d841fa0d5ef Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Mon, 30 Jun 2025 11:17:33 +0000 Subject: [PATCH 10/19] Updates --- .../go-benchmarking-with-sweet/add_c4_vm.md | 2 +- .../go-benchmarking-with-sweet/add_c4a_vm.md | 26 +++++++------------ 2 files changed, 11 insertions(+), 17 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md index 06a2062352..a08936becd 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md @@ -6,7 +6,7 @@ weight: 30 layout: learningpathall --- -## Section Overview +## Section overview In this section, you will set up the second benchmarking system, an Intel Emerald Rapids `c4-standard-8` instance. ## Creating the Instance diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md index 7dae54c562..b3aaea8594 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md @@ -7,39 +7,33 @@ layout: learningpathall --- ## Launch an Arm-based c4a-standard-4 instance -In this section, you'll launch the first of the two VMs used for benchmarking Go applications: the Arm-based c4a-standard-4 instance on Google Cloud, also referred to as c4a. +In this section, you'll launch the first of two VMs used for benchmarking Go applications: the Arm-based c4a-standard-4 instance on Google Cloud, (referred to as "c4a"). 
## Create the c4a-standard-4 instance To access the Google Cloud console, navigate to [https://console.cloud.google.com/welcome](https://console.cloud.google.com/welcome). -Now search for VM instances, by using the search field. - -To find VM instances, start typing "vm" until the UI auto-completes `VM Instances`, then select it. +In the search bar at the top, start typing `vm`, then select **VM Instances** when it appears. ![](images/launch_c4a/3.png) -The VM Instances page appears. - -Now create a new instance, by selecting **Create instance**. + On the **VM Instances** page, click **Create instance**. ![](images/launch_c4a/4.png) -The Machine configuration page appears. - -To name your instance, select the **Name** field, and enter "c4a". + In the **Name** field, enter `c4a`. ![](images/launch_c4a/5.png) -Now select machine series by scrolling down to the Machine series section, and selecting the C4A radio button. +Now select machine series by scrolling down to the Machine series section, and selecting the **C4A** radio button. ![](images/launch_c4a/7.png) -To view machine types, scroll down to the Machine type dropdown, and select it to show all available options. +To view machine types, scroll down to the **Machine type** dropdown, and select it to show all available options. ![](images/launch_c4a/8.png) -Now choose machine size by selecting **c4a-standard-4** under the Standard tab. +Now choose machine size by selecting **c4a-standard-4** under the **Standard** tab. ![](images/launch_c4a/9.png) @@ -51,7 +45,7 @@ To modify storage settings, select **Change**. ![](images/launch_c4a/11.png) -To set disk size, select the **Size (GB)** field, then enter "1000" for the value. +To set disk size, select the **Size (GB)** field and enter "1000" for the value. ![](images/launch_c4a/16.png) @@ -63,7 +57,7 @@ To launch the instance, select **Create** to bring up the instance. 
![](images/launch_c4a/19.png) -After a few seconds, your c4a instance starts up, and you are ready to continue to the next section. +After a few seconds, your c4a instance is up and running, and you are ready to continue to the next section. -In the next section, you will launch the second VM, an Intel-based Emerald Rapids c4-standard-8 (abbreviated to c4), which serves as the comparison system for our benchmarking tests. +In the next section, you'll launch the second VM, an Intel-based Emerald Rapids c4-standard-8 (referred to as "c4"), which serves as the comparison system for benchmarking. From 94398226f242474ddd410e1a35e050bed12028a2 Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Mon, 30 Jun 2025 11:33:19 +0000 Subject: [PATCH 11/19] Further updates --- .../go-benchmarking-with-sweet/add_c4_vm.md | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md index a08936becd..d0b472411d 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md @@ -1,32 +1,34 @@ --- -title: Launch an Intel Emerald Rapids Instance +title: Launch an Intel Emerald Rapids instance weight: 30 ### FIXED, DO NOT MODIFY layout: learningpathall --- -## Section overview +## Launch an x86-based `c4-standard-8` instance In this section, you will set up the second benchmarking system, an Intel Emerald Rapids `c4-standard-8` instance. ## Creating the Instance -To create the second system, follow the previous lesson's c4a install instructions, but make the following changes: +Follow the same steps from the previous section where you launched the c4a instance, but make the following changes for the Intel-based c4-standard-8: -1. 
**Name your instance:** For the `Name` field, enter "c4". +* Where you name the instance, in the **Name** field, enter "c4". -2. **Select machine series:** Scroll down to the Machine series section, and select the C4 radio button. +* Where you select the machine series, scroll down and select the C4 radio button. ![](images/launch_c4/3.png) -3. **View machine types:** Scroll down to the Machine type dropdown, and click it to show all available options. +For the machine types section, scroll down to the **Machine type** dropdown, and click it to show all available options. ![](images/launch_c4/4.png) -4. **Choose machine size:** Select "c4-standard-8" under the Standard tab. +Where you choose machine size, select "c4-standard-8" under the **Standard** tab. ![](images/launch_c4/5.png) -{{% notice Note %}} Don't forget to set the disk size for this c4 to 1000GB under the "OS and Storage" tab like you did for the c4a.{{% /notice %}} +{{% notice Note %}} +Be sure to set the disk size to **1000 GB** in the **OS and Storage** tab, just as you did for the `c4a` instance. +{{% /notice %}} After the c4 instance starts up, you are ready to continue to the next section, where you'll install the benchmarking software. 
From 136b42740c10fc9502c2e045aa4b3e088658eb27 Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Mon, 30 Jun 2025 13:13:48 +0000 Subject: [PATCH 12/19] Marking up images --- .../go-benchmarking-with-sweet/add_c4_vm.md | 23 ++++++++--------- .../go-benchmarking-with-sweet/add_c4a_vm.md | 25 +++++++++---------- 2 files changed, 22 insertions(+), 26 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md index d0b472411d..f7b9ad3a57 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4_vm.md @@ -1,31 +1,28 @@ --- -title: Launch an Intel Emerald Rapids instance +title: Launch an Intel Emerald Rapids c4-standard-8 instance weight: 30 ### FIXED, DO NOT MODIFY layout: learningpathall --- -## Launch an x86-based `c4-standard-8` instance -In this section, you will set up the second benchmarking system, an Intel Emerald Rapids `c4-standard-8` instance. +In this section, you'll set up the second benchmarking system: an Intel-based Emerald Rapids `c4-standard-8` instance on Google Cloud (referred to as **c4**). -## Creating the Instance +## Create the c4-standard-8 instance Follow the same steps from the previous section where you launched the c4a instance, but make the following changes for the Intel-based c4-standard-8: -* Where you name the instance, in the **Name** field, enter "c4". +* In the **Name** field, enter "c4". +* In the **Machine types for common workloads** section, select the **c4** radio button. +![alt-text#center](images/launch_c4/3.png "Select the c4 radio button") -* Where you select the machine series, scroll down and select the C4 radio button. +* In the **Machine configuration** section, open the dropdown select `c4-standard-8`. 
-![](images/launch_c4/3.png) +![alt-text#center](images/launch_c4/4.png "Open the dropdown and select `c4-standard-8`") -For the machine types section, scroll down to the **Machine type** dropdown, and click it to show all available options. +* In the **Machine type** section, open the dropdown and select `c4-standard-8` under the **Standard** tab. -![](images/launch_c4/4.png) - -Where you choose machine size, select "c4-standard-8" under the **Standard** tab. - -![](images/launch_c4/5.png) +![alt-text#center](images/launch_c4/5.png "Select `c4-standard-8`") {{% notice Note %}} Be sure to set the disk size to **1000 GB** in the **OS and Storage** tab, just as you did for the `c4a` instance. diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md index b3aaea8594..e0ebbb6883 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md @@ -1,12 +1,11 @@ --- -title: Launch a Google Axion instance +title: Launch an Arm-based c4a-standard-4 instance weight: 20 ### FIXED, DO NOT MODIFY layout: learningpathall --- -## Launch an Arm-based c4a-standard-4 instance In this section, you'll launch the first of two VMs used for benchmarking Go applications: the Arm-based c4a-standard-4 instance on Google Cloud, (referred to as "c4a"). ## Create the c4a-standard-4 instance @@ -15,47 +14,47 @@ To access the Google Cloud console, navigate to [https://console.cloud.google.co In the search bar at the top, start typing `vm`, then select **VM Instances** when it appears. -![](images/launch_c4a/3.png) +![alt-text#center](images/launch_c4a/3.png) On the **VM Instances** page, click **Create instance**. 
-![](images/launch_c4a/4.png) +![alt-text#center](images/launch_c4a/4.png) In the **Name** field, enter `c4a`. -![](images/launch_c4a/5.png) +![alt-text#center](images/launch_c4a/5.png) Now select machine series by scrolling down to the Machine series section, and selecting the **C4A** radio button. -![](images/launch_c4a/7.png) +![alt-text#center](images/launch_c4a/7.png) To view machine types, scroll down to the **Machine type** dropdown, and select it to show all available options. -![](images/launch_c4a/8.png) +![alt-text#center](images/launch_c4a/8.png) Now choose machine size by selecting **c4a-standard-4** under the **Standard** tab. -![](images/launch_c4a/9.png) +![alt-text#center](images/launch_c4a/9.png) To configure storage, select the **OS and Storage** tab. -![](images/launch_c4a/10.png) +![alt-text#center](images/launch_c4a/10.png) To modify storage settings, select **Change**. -![](images/launch_c4a/11.png) +![alt-text#center](images/launch_c4a/11.png) To set disk size, select the **Size (GB)** field and enter "1000" for the value. -![](images/launch_c4a/16.png) +![alt-text#center](images/launch_c4a/16.png) Now confirm storage settings by selecting **Select** to continue. -![](images/launch_c4a/18.png) +![alt-text#center](images/launch_c4a/18.png) To launch the instance, select **Create** to bring up the instance. -![](images/launch_c4a/19.png) +![alt-text#center](images/launch_c4a/19.png) After a few seconds, your c4a instance is up and running, and you are ready to continue to the next section. 
From 1bac686b4e387ccc40e402e5c85ee5d07e242084 Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Mon, 30 Jun 2025 14:08:03 +0000 Subject: [PATCH 13/19] Updates --- .../go-benchmarking-with-sweet/add_c4a_vm.md | 10 +++++----- .../manual_run_benchstat.md | 12 +++++------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md index e0ebbb6883..5428f22691 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md @@ -10,17 +10,17 @@ In this section, you'll launch the first of two VMs used for benchmarking Go app ## Create the c4a-standard-4 instance -To access the Google Cloud console, navigate to [https://console.cloud.google.com/welcome](https://console.cloud.google.com/welcome). +Go to the Google Cloud console: [https://console.cloud.google.com/welcome](https://console.cloud.google.com/welcome). -In the search bar at the top, start typing `vm`, then select **VM Instances** when it appears. +In the search bar at the top, start typing `vm`, then select **VM instances** when it appears. -![alt-text#center](images/launch_c4a/3.png) +![alt-text#center](images/launch_c4a/3.png "Select VM instances") - On the **VM Instances** page, click **Create instance**. + On the **VM instances** page, click **Create instance**. ![alt-text#center](images/launch_c4a/4.png) - In the **Name** field, enter `c4a`. + In the **Name** field, enter the name of the instance - here it should be `c4a`. 
![alt-text#center](images/launch_c4a/5.png) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md index 41b7931480..2d9bf2e7d9 100755 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md @@ -11,13 +11,11 @@ You've successfully run and downloaded the benchmark results from both your Arm- ## Inspecting the results files -If you're curious about the format of the results files, you can open them to better understand what `benchstat` is analyzing. +You can open the result files to see the raw benchmark output that Benchstat analyzes. -1. View raw results +Open the `c4a.result` file in a text editor. You should see something like this: -Open the `c4a.result` file in a text editor, and you'll see something like this: - - ![](images/run_manually/11.png) + ![alt-text#center](images/run_manually/11.png) The file contains the results of the `markdown` benchmark run on the Arm-based c4a VM, showing time and memory stats taken for each iteration. If you open the `c4.result` file, you'll see similar results for the x86-based c4 VM. @@ -43,13 +41,13 @@ Make a temporary benchstat directory to hold the results files on either the c4a Click the `UPLOAD FILE` button in the GCP console, and upload the `c4a.results` AND `c4.results` files you downloaded earlier. (This uploads them to your home directory, not to the current directory.) - ![](images/run_manually/16.png) + ![alt-text#center](images/run_manually/16.png) 3. Verify upload You'll know it worked correctly via the confirmation dialog in your terminal: - ![](images/run_manually/17.png) + ![alt-text#center](images/run_manually/17.png) 4. 
Move files to working directory From 40c6ff8b50a95c1162596998604e4ed11ba78301 Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Mon, 30 Jun 2025 16:09:28 +0000 Subject: [PATCH 14/19] Updates --- .../go-benchmarking-with-sweet/add_c4a_vm.md | 8 +++--- .../rexec_sweet_run.md | 26 +++++++++---------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md index 5428f22691..86f1972d3e 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md @@ -24,7 +24,7 @@ In the search bar at the top, start typing `vm`, then select **VM instances** wh ![alt-text#center](images/launch_c4a/5.png) -Now select machine series by scrolling down to the Machine series section, and selecting the **C4A** radio button. +Now select the machine series by scrolling down to the Machine series section, and selecting the **C4A** radio button. ![alt-text#center](images/launch_c4a/7.png) @@ -46,15 +46,15 @@ To modify storage settings, select **Change**. To set disk size, select the **Size (GB)** field and enter "1000" for the value. -![alt-text#center](images/launch_c4a/16.png) +![alt-text#center](images/launch_c4a/16.png "Enter value in the Size (GB) field") Now confirm storage settings by selecting **Select** to continue. -![alt-text#center](images/launch_c4a/18.png) +![alt-text#center](images/launch_c4a/18.png "Confirm the selection of settings with the Select button") To launch the instance, select **Create** to bring up the instance. 
-![alt-text#center](images/launch_c4a/19.png) +![alt-text#center](images/launch_c4a/19.png "Select the Create button to launch the instance") After a few seconds, your c4a instance is up and running, and you are ready to continue to the next section. diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md index c38f3f3d23..f0c1b0ac56 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md @@ -7,15 +7,16 @@ layout: learningpathall --- With `rexec_sweet` installed, your benchmarking instances running, and your local machine authenticated with GCP, you're ready to run automated benchmarks across your configured environments. -## Run an automated benchmark and analysis +## Run an automated benchmark and generate results -1. **Run the script:** Execute the `rexec_sweet` script from your local terminal: +To begin, open a terminal on your local machine and run: ```bash rexec_sweet ``` +The script will prompt you to choose a benchmark. -2. **Select a benchmark:** When prompted, press **Enter** to run the default benchmark (markdown), recommended for your first run. +Press **Enter** to run the default benchmark, markdown, which is a good starting point for your first run. ```bash Available benchmarks: @@ -32,7 +33,7 @@ Available benchmarks: Enter number (1-10) [default: markdown]: ``` -3. **Select instances:** The script will proceed and call into GCP to detect all running VMs. You should see the script output: +The script then detects your running GCP instances and displays them. You’ll be asked whether you want to use the first two instances it finds and the default install paths. 
```output Available instances: @@ -41,12 +42,9 @@ Available instances: Do you want to run the first two instances found with default install directories? [Y/n]: ``` +You can accept the defaults by pressing **Enter**, which uses the instances listed and assumes Go and Sweet were installed to ~/benchmarks/sweet. -4. **Choose your configuration:** You have two options: - - - **Use default settings:** If you want to run benchmarks on the instances labeled with "will be used as nth instance", and you installed Go and Sweet into the default path (~/benchmarks/sweet), you can press **Enter** to accept the defaults. - - - **Custom configuration:** If you are running more than two instances, and the script doesn't suggest the correct two to autorun, or you installed Go and Sweet to non-default folders, select "n" and press **Enter**. The script will then prompt you to select the instances and runtime paths. +If you're running more than two instances or installed Go and Sweet to a non-default location, enter n and follow the prompts to manually select instances and specify custom install paths. In this example, you'll manually select the instances and paths as shown below: @@ -87,15 +85,17 @@ Running benchmarks on the selected instances... Report generated in results/c4-c4a-markdown-20250610T190407 ``` -5. **View the report:** Once on your local machine, `rexec_sweet` will generate an HTML report that will open automatically in your web browser. +### View the report + +Once on your local machine, `rexec_sweet` will generate an HTML report that will open automatically in your web browser. - If you close the report, you can reopen it by navigating to the results subdirectory and opening report.html in your browser. + If you close the report, you can reopen it by navigating to the `results` subdirectory and opening report.html in your browser. 
-![](images/run_auto/2.png) +![alt-text#center](images/run_auto/2.png "Sample HTML report") {{% notice Note %}} If you see output messages from `rexec_sweet.py` similar to "geomeans may not be comparable" or "Dn: ratios must be >0 to compute geomean", this is expected and can be ignored. These messages indicate that the benchmark sets differ between the two VMs, which is common when running benchmarks on different hardware or configurations. {{% /notice %}} -6. **Analyze results:** Upon completion, the script will generate a report in the `results` subdirectory of the current working directory of the `rexec_sweet.py` script, which opens automatically in your web browser to view the benchmark results and comparisons. +Upon completion, the script generates a report in the `results` subdirectory of the current working directory of the `rexec_sweet.py` script, which opens automatically in your web browser to view the benchmark results and comparisons. From e18c61ad4e05d36c118f3aeb1640ac6b73d141ef Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Mon, 30 Jun 2025 16:24:39 +0000 Subject: [PATCH 15/19] Updates --- .../go-benchmarking-with-sweet/add_c4a_vm.md | 6 ++--- .../rexec_sweet_install.md | 22 +++++++++++++------ .../rexec_sweet_run.md | 6 ++--- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md index 86f1972d3e..a0777b35ba 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md @@ -18,15 +18,15 @@ In the search bar at the top, start typing `vm`, then select **VM instances** wh On the **VM instances** page, click **Create instance**. 
-![alt-text#center](images/launch_c4a/4.png) +![alt-text#center](images/launch_c4a/4.png "Select Create instance") In the **Name** field, enter the name of the instance - here it should be `c4a`. -![alt-text#center](images/launch_c4a/5.png) +![alt-text#center](images/launch_c4a/5.png "Enter name of the instance") Now select the machine series by scrolling down to the Machine series section, and selecting the **C4A** radio button. -![alt-text#center](images/launch_c4a/7.png) +![alt-text#center](images/launch_c4a/7.png "Select C4A radio button") To view machine types, scroll down to the **Machine type** dropdown, and select it to show all available options. diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md index 7030f3f759..c3092b629d 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md @@ -6,9 +6,9 @@ weight: 53 layout: learningpathall --- -In the last section, you learned how to run benchmarks and benchstat manually. Now you'll learn how to run them automatically, with enhanced visualization of the results. +In the last section, you learned how to run benchmarks and Benchstat manually. Now you'll automate that process and generate visual reports using a script called `rexec_sweet.py`. -## Introducing rexec_sweet.py +## What is rexec_sweet.py? `rexec_sweet.py` is a script that automates the benchmarking workflow: it connects to your GCP instances, runs benchmarks, collects results, and generates HTML reports - all in one step. @@ -20,23 +20,29 @@ It provides several key benefits: Before running the script, ensure you've completed the "Install Go, Sweet, and Benchstat" step. 
All other dependencies are installed automatically by the setup script. -## Setting up rexec_sweet +## Set up rexec_sweet -1. **Create a working directory:** On your local machine, open a terminal, then create and change into a directory to store the `rexec_sweet.py` script and related files: +### Create a working directory + +On your local machine, open a terminal, then create and change into a directory to store the `rexec_sweet.py` script and related files: ```bash mkdir rexec_sweet cd rexec_sweet ``` -2. **Clone the repository:** Get the `rexec_sweet.py` script from the GitHub repository: +### Clone the repository + +Get the `rexec_sweet.py` script from the GitHub repository: ```bash git clone https://github.com/geremyCohen/go_benchmarks.git cd go_benchmarks ``` -3. **Run the installer:** Copy and paste this command into your terminal to run the installer: +### Run the installer + +Copy and paste this command into your terminal to run the installer: ```bash ./install.sh @@ -51,7 +57,9 @@ Before running the script, ensure you've completed the "Install Go, Sweet, and B If you see this prompt, enter `N` to continue with the installation without modifying the existing installed dependencies. -4. **Verify VM status:** Make sure the GCP VM instances you created in the previous section are running. If not, start them now, and wait a few minutes for them to finish booting. +### Verify VM status + +Make sure the GCP VM instances you created in the previous section are running. If not, start them now, and wait a few minutes for them to finish booting. {{% notice Note %}} The install script prompts you to authenticate with Google Cloud Platform (GCP) using the gcloud command-line tool at the end of install. 
If after installing you have issues running the script and/or get GCP authentication errors, you can manually authenticate with GCP by running the following command: `gcloud auth login` diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md index f0c1b0ac56..67be9e6393 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_run.md @@ -72,7 +72,7 @@ Output directory: /private/tmp/a/go_benchmarks/results/c4-c4a-markdown-20250610T After selecting instances and paths, the script will: - Run the selected benchmark on both VMs - - Run `benchstat` to compare the results + - Use `benchstat` to compare the results - Push the results to your local machine ```output @@ -87,7 +87,7 @@ Report generated in results/c4-c4a-markdown-20250610T190407 ### View the report -Once on your local machine, `rexec_sweet` will generate an HTML report that will open automatically in your web browser. +Once on your local machine, `rexec_sweet` will generate an HTML report that opens automatically in your web browser. If you close the report, you can reopen it by navigating to the `results` subdirectory and opening report.html in your browser. @@ -95,7 +95,7 @@ Once on your local machine, `rexec_sweet` will generate an HTML report that will {{% notice Note %}} -If you see output messages from `rexec_sweet.py` similar to "geomeans may not be comparable" or "Dn: ratios must be >0 to compute geomean", this is expected and can be ignored. These messages indicate that the benchmark sets differ between the two VMs, which is common when running benchmarks on different hardware or configurations. 
+If you see output messages from `rexec_sweet.py` similar to "geomeans may not be comparable" or "Dn: ratios must be >0 to compute geomean", this is expected and can be ignored. These warnings typically appear when benchmark sets differ slightly between the two VMs. {{% /notice %}} Upon completion, the script generates a report in the `results` subdirectory of the current working directory of the `rexec_sweet.py` script, which opens automatically in your web browser to view the benchmark results and comparisons. From 2a12f5a9e537e167b45e35d01241f3ba58ef4856 Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Mon, 30 Jun 2025 16:28:20 +0000 Subject: [PATCH 16/19] Image callouts --- .../go-benchmarking-with-sweet/add_c4a_vm.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md index a0777b35ba..e05e7222e9 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/add_c4a_vm.md @@ -30,19 +30,19 @@ Now select the machine series by scrolling down to the Machine series section, a To view machine types, scroll down to the **Machine type** dropdown, and select it to show all available options. -![alt-text#center](images/launch_c4a/8.png) +![alt-text#center](images/launch_c4a/8.png "Select Machine type dropdown") Now choose machine size by selecting **c4a-standard-4** under the **Standard** tab. -![alt-text#center](images/launch_c4a/9.png) +![alt-text#center](images/launch_c4a/9.png "Select machine size") To configure storage, select the **OS and Storage** tab. -![alt-text#center](images/launch_c4a/10.png) +![alt-text#center](images/launch_c4a/10.png "Configure storage") To modify storage settings, select **Change**. 
-![alt-text#center](images/launch_c4a/11.png) +![alt-text#center](images/launch_c4a/11.png "Modify storage settings") To set disk size, select the **Size (GB)** field and enter "1000" for the value. From 33536bce2b0eabeb523e759acc39d8beb04e8ddb Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Mon, 30 Jun 2025 17:55:55 +0000 Subject: [PATCH 17/19] Updates --- .../manual_run_benchmark.md | 30 ++++++++++++------- .../running_benchmarks.md | 10 +++---- 2 files changed, 25 insertions(+), 15 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchmark.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchmark.md index 22c3d9fadc..c98ac9273c 100644 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchmark.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchmark.md @@ -6,29 +6,39 @@ weight: 51 layout: learningpathall --- -In this section, you'll download the results of the benchmark you ran manually in the previous sections from each VM. You will use these results to understand how `sweet` and `benchstat` work together. +In this section, you'll download the benchmark results you ran manually in previous steps from each VM. You will use these results to understand how `sweet` and `benchstat` work together. ## Download benchmark results from each VM -Let's walk through the steps to manually download the sweet benchmark results from your initial run on each VM. +Start by retrieving the results generated by Sweet from your earlier benchmark runs. -1. **Locate results:** Change directory to the `results/markdown` directory and list the files to see the `arm-benchmarks.result` file: +### Locate results + +Change directory to the `results/markdown` directory and list the files to see the `arm-benchmarks.result` file: ```bash cd results/markdown ls -d $PWD/* ``` -2. 
**Copy result path:** Copy the absolute pathname of `arm-benchmarks.result`. +### Copy result path + +Copy the absolute pathname of `arm-benchmarks.result`. You'll need this to initiate the download. + +### Download results + +Select `DOWNLOAD FILE` in your GCP terminal interface. Paste the absolute pathname you copied into the dialog and confirm the download. This downloads the benchmark results to your local machine. + + ![alt-text#center](images/run_manually/6.png "Download the results") -3. **Download results:** Click `DOWNLOAD FILE`, and paste the **ABSOLUTE PATHNAME** you just copied for the filename, and then click `Download`. This downloads the benchmark results to your local machine. +### Rename the file - ![](images/run_manually/6.png) +After downloading the file to your local machine, rename it to `c4a.result` to distinguish it from the x86 results you'll download next. This naming convention helps you clearly identify which architecture each result came from. You'll know the download was successful if you see the file named `c4a.result` in your Downloads folder and receive a confirmation in your browser. -4. **Rename the file:** Once downloaded, on your local machine, rename this file to `c4a.result` so you can distinguish it from the x86 results you'll download later. This naming convention helps you clearly identify which results came from which architecture. You'll know the file downloaded successfully if you see the file in your Downloads directory with the name `c4a.result`, as well as the confirmation dialog in your browser: + ![alt-text#center](images/run_manually/7.png "A successful download") - ![](images/run_manually/7.png) +### Repeat for the second VM -5. **Repeat for c4 instance:** Repeat steps 2-8 with your `c4` (x86) instance. Do everything the same, except after downloading the c4's `arm-benchmarks.result` file, rename it to `c4.result`. +Repeat the same process with your c4 (x86) VM. 
Use the same `results/markdown` directory and download the `arm-benchmarks.result` file. This time, rename it to `c4.result` after downloading.
 
-Now that you have the results from both VMs, in the next section, you'll learn how to use benchstat to analyze these results and understand the performance differences between the two architectures.
+Now that you have the results from both VMs, in the next section, you'll learn how to use Benchstat to analyze these results and understand the performance differences between the two architectures.
diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/running_benchmarks.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/running_benchmarks.md
index 68c8d0ac4b..8687181e66 100644
--- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/running_benchmarks.md
+++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/running_benchmarks.md
@@ -6,13 +6,13 @@ weight: 50
 layout: learningpathall
 ---
 
-Now that setup is complete, it's important to understand the available benchmarks and the performance metrics you'll be analyzing in order to compare system performance effectively.
+Now that setup is complete, it's important to understand the benchmarks you'll run and the performance metrics you'll use to evaluate results across systems.
 
 ## Available benchmarks
 
 Whether running manually or automatically, the benchmarking process consists of two main steps:
 
-1. **Running benchmarks with Sweet**: `sweet` executes the benchmarks on each VM, generating raw performance data
+1. **Running benchmarks with Sweet**: `sweet` executes the benchmarks on each VM, generating raw performance data.
 2. **Analyzing results with Benchstat**: `benchstat` compares the results from different VMs to identify performance differences. Benchstat can output results in text format (default) or CSV format.
The text format provides a human-readable tabular view, while CSV allows for further processing with other tools. @@ -98,11 +98,11 @@ Large gaps between average and peak memory usage suggest opportunities for memor Here are some general tips to keep in mind as you explore benchmarking across different apps and instance types: -- Unlike Intel and AMD processors that use hyper-threading, Arm processors provide single-threaded cores without hyper-threading. A four-core Arm processor has four independent cores running four threads, while an four-core Intel processor provides eight logical cores through hyper-threading. This means each Arm vCPU represents a full physical core, while each Intel/AMD vCPU represents half a physical core. For fair comparison, this learning path uses a 4-vCPU Arm instance against an 8-vCPU Intel instance. When scaling up instance sizes during benchmarking, make sure to keep a 2:1 Intel/AMD:Arm vCPU ratio if you wish to keep parity on CPU resources. +- Unlike Intel and AMD processors that use hyper-threading, Arm processors provide single-threaded cores without hyper-threading. A four-core Arm processor has four independent cores running four threads, while a four-core Intel processor provides eight logical cores through hyper-threading. This means that each Arm vCPU represents a full physical core, while each Intel/AMD vCPU represents half a physical core. For fair comparison, this Learning Path uses a 4-vCPU Arm instance against an 8-vCPU Intel instance. When scaling up instance sizes during benchmarking, make sure to keep a 2:1 Intel/AMD:Arm vCPU ratio if you wish to keep parity on CPU resources. -- It's suggested to run each benchmark at least 10 times (specified via the `count` parameter) to handle outlier/errant runs and ensure statistical significance. +- Run each benchmark at least 10 times (-count 10) to account for outliers and produce statistically meaningful results. -- Results may be bound by CPU, memory, or I/O performance. 
If you see significant differences in one metric but not others, it may indicate a bottleneck in that area; running the same benchmark with different configurations (e.g., more CPU cores, more memory) can help identify the bottleneck. +- Results can be bound by CPU, memory, or I/O performance. If you see significant differences in one metric but not others, it might indicate a bottleneck in that area; running the same benchmark with different configurations (for example, using more CPU cores or more memory) can help identify the bottleneck. From 833b83054bb5e6ab40f4ced3c122780622a6f269 Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Mon, 30 Jun 2025 18:28:06 +0000 Subject: [PATCH 18/19] Updates --- .../manual_run_benchstat.md | 20 +++++++++---------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md index 2d9bf2e7d9..2028fd866f 100755 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md @@ -9,9 +9,9 @@ layout: learningpathall You've successfully run and downloaded the benchmark results from both your Arm-based and x86-based VMs. In this section, you'll use Benchstat to compare performance between the two instances. -## Inspecting the results files +## Inspect the results files -You can open the result files to see the raw benchmark output that Benchstat analyzes. +To understand what Benchstat analyzes, open the result files to view the raw benchmark output. Open the `c4a.result` file in a text editor. You should see something like this: @@ -19,16 +19,14 @@ Open the `c4a.result` file in a text editor. 
You should see something like this: The file contains the results of the `markdown` benchmark run on the Arm-based c4a VM, showing time and memory stats taken for each iteration. If you open the `c4.result` file, you'll see similar results for the x86-based c4 VM. -2. Close the editor - Close the text editor when done. ## Run Benchstat to compare results -To compare the results, you'll use `benchstat` to analyze the two result files you downloaded. Since all the prerequisites are already installed on the `c4` and `c4a` instances, benchstat will be run from one of those instances. +To compare the results, you'll now use Benchstat to analyze the two result files you downloaded. Since all the prerequisites are already installed on the `c4` and `c4a` instances, Benchstat will be run from one of those instances. -1. Create working directory +### Create working directory Make a temporary benchstat directory to hold the results files on either the c4a or c4 instance, and change directory into it: @@ -37,7 +35,7 @@ Make a temporary benchstat directory to hold the results files on either the c4a cd benchstat_results ``` -2. Upload result files +### Upload result files Click the `UPLOAD FILE` button in the GCP console, and upload the `c4a.results` AND `c4.results` files you downloaded earlier. (This uploads them to your home directory, not to the current directory.) @@ -49,7 +47,7 @@ You'll know it worked correctly via the confirmation dialog in your terminal: ![alt-text#center](images/run_manually/17.png) -4. Move files to working directory +### Move files to working directory Move the results files to the `benchstat_results` directory, and confirm their presence: @@ -64,7 +62,7 @@ Move the results files to the `benchstat_results` directory, and confirm their p c4.results c4a.results ``` -5. 
Run benchstat +### Run benchstat Now you can run `benchstat` to compare the two results files: @@ -75,7 +73,7 @@ Now you can run `benchstat` to compare the two results files: benchstat c4a.results c4.results > c4a_vs_c4.txt ``` -6. View comparison results +### View comparison results Run the `cat` command to view the results: @@ -128,7 +126,7 @@ Run the `cat` command to view the results: In this example, you can see that the c4a (Arm) instance completed the markdown benchmark in 143.9m seconds, while the c4 (x86) instance took 158.3m seconds, indicating better performance on the Arm system for this particular workload. - If you wanted the results in CSV format, you could run the `benchstat` command with the `-format csv` option instead. + If you want the results in CSV format, you can run the `benchstat` command with the `-format csv` option instead. At this point, you can download the `c4a_vs_c4.txt` for further analysis or reporting. You can also run the same or different benchmarks with the same, or different combinations of VMs, and continue comparing results using `benchstat`. 
From 9656b5753b64094ea248682304975220beae0a04 Mon Sep 17 00:00:00 2001 From: Maddy Underwood Date: Mon, 30 Jun 2025 18:35:34 +0000 Subject: [PATCH 19/19] Final --- .../manual_run_benchstat.md | 12 ++++++------ .../rexec_sweet_install.md | 2 ++ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md index 2028fd866f..d3949feeb3 100755 --- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md +++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/manual_run_benchstat.md @@ -11,11 +11,11 @@ You've successfully run and downloaded the benchmark results from both your Arm- ## Inspect the results files -To understand what Benchstat analyzes, open the result files to view the raw benchmark output. +To understand what Benchstat analyzes, open the results files to view the raw benchmark output. Open the `c4a.result` file in a text editor. You should see something like this: - ![alt-text#center](images/run_manually/11.png) + ![alt-text#center](images/run_manually/11.png "A results file") The file contains the results of the `markdown` benchmark run on the Arm-based c4a VM, showing time and memory stats taken for each iteration. If you open the `c4.result` file, you'll see similar results for the x86-based c4 VM. @@ -35,17 +35,17 @@ Make a temporary benchstat directory to hold the results files on either the c4a cd benchstat_results ``` -### Upload result files +### Upload results files Click the `UPLOAD FILE` button in the GCP console, and upload the `c4a.results` AND `c4.results` files you downloaded earlier. (This uploads them to your home directory, not to the current directory.) 
- ![alt-text#center](images/run_manually/16.png)
+ ![alt-text#center](images/run_manually/16.png "Upload results file")
 
-3. Verify upload
+### Verify upload
 
 You'll know it worked correctly via the confirmation dialog in your terminal:
 
- ![alt-text#center](images/run_manually/17.png)
+ ![alt-text#center](images/run_manually/17.png "Confirmation dialog in terminal")
 
 ### Move files to working directory
 
diff --git a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md
index c3092b629d..62302bea62 100644
--- a/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md
+++ b/content/learning-paths/servers-and-cloud-computing/go-benchmarking-with-sweet/rexec_sweet_install.md
@@ -22,6 +22,8 @@ Before running the script, ensure you've completed the "Install Go, Sweet, and B
 
 ## Set up rexec_sweet
 
+Follow the steps below to set up `rexec_sweet.py`.
+
 ### Create a working directory
 
 On your local machine, open a terminal, then create and change into a directory to store the `rexec_sweet.py` script and related files: