diff --git a/.gitignore b/.gitignore
index 03157b502..2e3e6bd9e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,6 +39,7 @@ fpga_utils.c
*.a
*.ko
fio
+.coverage
!SDAccel/aws_platform/xilinx_aws-vu9p-f1_1ddr-xpr-2pr_4_0/sw/lib/x86_64/libxilinxopencl.so
!SDAccel/aws_platform/xilinx_aws-vu9p-f1_4ddr-xpr-2pr_4_0/sw/lib/x86_64/libxilinxopencl.so
@@ -139,3 +140,8 @@ slurm*.out
# RTD Builds
docs-rtd/build/*
+
+# HLx files
+*.tmp
+**/example_projects/
+*.pb
diff --git a/.gitmodules b/.gitmodules
index 3c052d8f8..567d43422 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -6,4 +6,7 @@
path = hdk/common/ip
url = https://github.com/aws/aws-fpga-resources.git
ignore = dirty
- branch = Vivado_2025.1-hdk/common/ip
+[submodule "hdk/common/shell_stable/hlx"]
+ path = hdk/common/shell_stable/hlx
+ url = https://github.com/aws/aws-fpga-resources.git
+ ignore = dirty
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index e88e65ea8..834a74686 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -1,9 +1,13 @@
version: 2
build:
- os: ubuntu-20.04
+ os: ubuntu-24.04
tools:
- python: "3.10"
+ python: "3.12"
+ nodejs: "22"
+ rust: "1.82"
+ golang: "1.23"
+
python:
install:
@@ -11,3 +15,26 @@ python:
sphinx:
configuration: docs-rtd/source/conf.py
+ builder: html
+ fail_on_warning: true
+
+search:
+ ranking:
+ index.html: 10
+ all-links.html: 7
+ User-Guide-AWS-EC2-FPGA-Development-Kit.html: 9
+ hdk/README.html: 9
+ hdk/cl/CHECKLIST-BEFORE-BUILDING-CL.html: 8
+ hdk/cl/examples/cl-dram-hbm-dma/README.html: 8
+ hdk/cl/examples/cl-mem-perf/README.html: 8
+ hdk/cl/examples/cl-sde/README.html: 8
+ hdk/cl/examples/CL-TEMPLATE/README.html: 8
+ hdk/docs/AWS-CLI-FPGA-Commands.html: 8
+ hdk/docs/AWS-Shell-Interface-Specification.html: 8
+ hdk/docs/List-AFI-on-Marketplace.html: 8
+ sdk/README.html: 9
+ sdk/apps/msix-interrupts/README.html: 8
+ sdk/apps/virtual-ethernet/README.html: 8
+ sdk/docs/F2-Software-Performance-Optimization-Guide.html: 8
+ vitis/README.html: 9
+ developer-resources/Amazon-DCV-Setup-Guide.html: 8
diff --git a/ERRATA.md b/ERRATA.md
index 00da20a9a..2a79171a1 100644
--- a/ERRATA.md
+++ b/ERRATA.md
@@ -41,6 +41,10 @@ Shell errata is [documented here](./hdk/docs/AWS_Shell_ERRATA.md)
9. Vivado 2025.1 introduces a `set_property DONT_TOUCH` to the HBM model that makes meeting
timing difficult in the implementation stage. AMD has responded to this issue on their AR, stating that it will be fixed in a future version of Vivado. [See here for more details](https://adaptivesupport.amd.com/s/article/000038502?language=en_US&t=1754923887312). All HDK CL examples have been updated to address this issue. Customers should follow this AR when creating their own designs.
+## HLx
+
+1. When executing the `aws::make_ipi` command in Vivado to set up the HLx IPI environment, the AWS IP instance may default to the name `f1_inst`. This is a known Vivado behavior and can be safely ignored. Users can rename this instance according to their preference.
+
## SDK
1. The following fpga_mgmt flags are not supported for F2:
@@ -74,3 +78,7 @@ timing difficult in the implementation stage. AMD has responded to this issue on
2. Support for Vitis 2024.1 and 2024.2 accelerator binary creation and AFI creation is not supported, but will be released at a later time.
3. Support for Vitis software emulation has been deprecated by AMD, therefore, no longer supported.
+
+## Amazon DCV
+
+1. Amazon DCV does not support Rocky Linux 8.10 at this time.
diff --git a/Jenkinsfile b/Jenkinsfile
deleted file mode 100644
index 46198bfaa..000000000
--- a/Jenkinsfile
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env groovy
-
-// basic declarative pipeline
-
-pipeline {
- agent {
- label "f1"
- }
- stages {
- stage('build') {
- steps {
- sh 'shared/tests/jenkins.sh'
- }
- }
- }
- post {
- always {
- archiveArtifacts artifacts: 'logs/*', fingerprint: true
- }
- }
-}
\ No newline at end of file
diff --git a/README.md b/README.md
index 635c67676..d2555e7d6 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-
+
# AWS F2
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 4d380c2c0..a4df175fd 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -1,5 +1,15 @@
# F2 Developer Kit Release Notes
+## v2.2.1
+
+* [Release of FPGA Developer AMI 1.18.0 (Rocky Linux 8.10)](http://aws.amazon.com/marketplace/pp/prodview-7mukkbz7l2uvu) with Vivado/Vitis 2025.1 tools pre-installed
+* [Release of Vivado HLx flow](./User_Guide_AWS_EC2_FPGA_Development_Kit.md#development-environments)
+* Fixed TCL glob expression to properly read both .sv and .v files. Credit to @pyz-creeper and @dsw for this update!
+* Updated error codes in create-fpga-image for unsupported design logic
+* Updated the Virtual Ethernet Application to write the DMA buffer descriptors using the byte alignment required by the CL_SDE example, preventing data alignment errors on Rocky Linux
+* [Added Amazon FPGA Image (AFI) creation Python script](./hdk/README.md#step-6-submit-generated-dcp-for-afi-creation)
+* Updated XRT version which includes stability fixes for Vitis
+
## v2.2.0
* Release of Vivado/Vitis 2025.1 Tools on [FPGA Developer AMI 1.18.0 (Ubuntu)](http://aws.amazon.com/marketplace/pp/prodview-tcl7sjgreh6bq)
diff --git a/User_Guide_AWS_EC2_FPGA_Development_Kit.md b/User_Guide_AWS_EC2_FPGA_Development_Kit.md
index a71dd0a05..5603767b8 100644
--- a/User_Guide_AWS_EC2_FPGA_Development_Kit.md
+++ b/User_Guide_AWS_EC2_FPGA_Development_Kit.md
@@ -25,15 +25,15 @@ This documentation is relevant to F2 only. Therefore, it applies to all branches
### Instance Types
-
+
### 2nd Generation On-Cloud FPGA Accelerator Card
-
+
### Comparison to F1
-
+
## AWS EC2 F2 FPGA Development Kit
@@ -43,10 +43,11 @@ This table lists the F2 development flows currently enabled and supported in the
| Development Environment | Description | Accelerator Language | Hardware Interface | Debug Options | Typical Developer |
| ------------------------|-------------|----------------------|--------------------|---------------|-------------------|
-| Hardware accelerator development using Vivado (HDK) | This environment supports the Hardware Development Kit (HDK) design flow, which empowers FPGA developers to create accelerator designs from scratch, using HDL source code and IPs.
The AMD Vivado tool synthesizes, implements, and generates the Design Check Point (DCP) file used in F2 AFI creation. AWS FPGA developers benefit from the suite of scripts supplied in the HDK that help to automate different design steps. This allows for flexibility in architecting, implementing, and optimizing accelerator designs while using the HDK.| Verilog/SystemVerilog/VHDL | User-implemented DMA engine or Streaming Data Engine (SDE) | Simulation | Hardware developers with advanced FPGA experience |
+| Hardware accelerator development using Vivado (HDK) | This environment supports the Hardware Development Kit (HDK) design flow, which empowers FPGA developers to create accelerator designs from scratch, using HDL source code and IPs.
The AMD Vivado tool synthesizes, implements, and generates the Design Check Point (DCP) file used in F2 AFI creation. AWS FPGA developers benefit from the suite of scripts supplied in the HDK that help to automate different design steps. This allows for flexibility in architecting, implementing, and optimizing accelerator designs while using the HDK.| Verilog/SystemVerilog/VHDL | User-implemented DMA engine or Streaming Data Engine (SDE) | Simulation, Virtual JTAG | Hardware developers with advanced FPGA experience |
| Hardware accelerator development using Vitis | This environment supports the Vitis design flow, which enables software developers to write C++ code, which may then be compiled into RTL and used in cycle-accurate hardware simulation. After it may then be built into an accelerator design. This step is not necessary, but is encouraged. Vitis may also be used to implement accelerator designs from scratch, using HDL and IPs directly, similar to Vivado. Vitis offers additional analysis tools to aid in the refinement of designs. | Verilog/System Verilog/VHDL | XDMA Engine (coming soon) | Hardware Emulation | Advanced software developers or hardware developers with intermediate to advanced FPGA experience |
+| Hardware accelerator development using Vivado IP Integrator (IPI) and High Level Design (HLx) | This environment supports the Vivado high-level design flow using IP integrator in the GUI. | Block Design in IP Integrator | AWS IP for HLx | Simulation, Virtual JTAG | Hardware developers with intermediate FPGA experience |
-On-premise environment: Customers can set up a on-premise development environment. See the [supported AMD tool versions here.](#hardware-development-kit-hdk) Refer to this guide [here](./hdk/docs/on_premise_licensing_help.md) for licensing requirements.
+On-premise environment: Customers can set up an [on-premise development environment (with licensing requirements listed)](./hdk/docs/on_premise_licensing_help.md) for the [supported AMD tool versions](#hardware-development-kit-hdk).
### Quick Start Links
@@ -116,12 +117,48 @@ On-premise environment: Customers can set up a on-premise development environmen
Design Source |
- | Testbench |
+ Testbench |
| Runtime Software |
-
+
+ | HLx |
+ hello_world_hlx |
+ Demonstrates simple register peek and poke using GPIO and VLED |
+ Vivado IPI Setup Guide |
+ Design Spec |
+
+
+ | Testbench |
+
+
+ | Runtime Software |
+
+
+ | hello_world_mb_hlx |
+ Demonstrates integrating MicroBlaze soft processor in HLx design |
+ |
+ Design Spec |
+
+
+ | Testbench |
+
+
+ | Runtime Software |
+
+
+ | cl_ipi_cdma_test_hlx |
+ Demonstrates direct memory access to the DDR and HBM in AWS IP |
+ |
+ Design Spec |
+
+
+ | Testbench |
+
+
+ | Runtime Software |
+
### AWS Shells
@@ -178,8 +215,9 @@ To get started, please see the [README for a hello world accelerator example](./
A free-to-use FPGA developer AMI is available for on-cloud F2 development with AMD tools pre-installed on a variety of AWS EC2 instance types. Customers can use this AMI to design, simulate, and build their designs. The table below lists the FPGA Developer AMI(s) currently released to customers:
-| FPGA Developer AMI Version | FPGA Developer AMI ID | Vivado/Vitis Version Supported | Operating System Version |
-|----------------------------|-----------------------|--------------------------------|-----------------------------|
+| FPGA Developer AMI Version | FPGA Developer AMI ID (us-east-1) | Vivado/Vitis Version Supported | Operating System Version |
+|----------------------------|-----------------------------------|--------------------------------|-----------------------------|
+| 1.18.0 | [ami-04b57de2833b499b1](http://aws.amazon.com/marketplace/pp/prodview-7mukkbz7l2uvu) | 2025.1 | Rocky Linux 8.10 (4.18.0-553.36.1.el8_10.x86_64)|
| 1.18.0 | [ami-098b2ed4c92602975](http://aws.amazon.com/marketplace/pp/prodview-tcl7sjgreh6bq) | 2025.1 | Ubuntu 24.04 (kernel 6.8.0-1021-aws)|
| 1.16.1 | [ami-092fc5deb8f3c0f7d](https://aws.amazon.com/marketplace/pp/prodview-f5kjsenkfkz5u) | 2024.1 | Ubuntu 20.04.6 (kernel 5.15)|
@@ -193,13 +231,15 @@ If you have never used AWS before, we recommend you start with [AWS getting star
## Next Steps
-Before you create your own AWS FPGA design, we recommend that you go through the [step-by-step quickstart guide for customer hardware development](./hdk/README.md).
+Once developers are familiar with the F2 development kit and the HDK development environment, we recommend exploring all the design features and examples offered in the AWS EC2 FPGA Development Kit:
+
+1. **Get Started**: Follow the [step-by-step quickstart guide for customer hardware development](./hdk/README.md) before creating your own AWS FPGA design
+2. **Learn by Example**: Explore [CL examples](./hdk/README.md#cl-examples) to understand shell-to-CL connectivity, memory interfaces (DDR & HBM), and the [CL clock generation block](./hdk/docs/AWS_CLK_GEN_spec.md)
+
+ - [Run RTL simulations](./hdk/docs/RTL_Simulation_Guide_for_HDK_Design_Flow.md) for design verification of existing CL examples.
+ - Review the [AWS F2 Shell-CL interfaces](./hdk/docs/AWS_Shell_Interface_Specification.md), e.g. [the HBM monitor interface](./hdk/docs/AWS_Shell_Interface_Specification.md#hbm-monitor-interface)
-Once developers are familiar with the F2 development kit and the HDK development environment, we recommend exploring the following contents to master all the design features and examples offered in the AWS EC2 FPGA Development Kit:
+3. **Create Your Design**: Use the [CL_TEMPLATE](./hdk/cl/examples/CL_TEMPLATE/README.md) example as a starting point
-- [Run RTL simulations](./hdk/docs/RTL_Simulation_Guide_for_HDK_Design_Flow.md) provided in CL examples to learn the design verification setup in the HDK development environment.
-- Familiarize with the [AWS F2 Shell-CL interfaces](./hdk/docs/AWS_Shell_Interface_Specification.md), e.g. [the HBM monitor interface](./hdk/docs/AWS_Shell_Interface_Specification.md/#hbm-monitor-interface)
-- Familiarize with the [shell floorplan](./hdk/docs/shell_floorplan.md) and locations of major shell interfaces.
-- Deep dive into [CL examples](./hdk/README.md#cl-examples) to explore shell-to-CL connectivity, CL resources e.g. DDR and HBM, and features e.g. [CL clock generation block](./hdk/docs/AWS_CLK_GEN_spec.md).
-- Create a custom CL design using the [CL_TEMPLATE](./hdk/cl/examples/CL_TEMPLATE/README.md) example.
-- Connect to a custom CL design in FPGA through [Virtual JTAG](./hdk/docs/Virtual_JTAG_XVC.md) to run hardware debug.
+ - Review the [shell floorplan](./hdk/docs/shell_floorplan.md) and locations of major shell interfaces.
+ - Connect to debug cores within a custom FPGA CL design through the [Virtual JTAG](./hdk/docs/Virtual_JTAG_XVC.md) interface to debug hardware issues.
diff --git a/developer_resources/Amazon_DCV_Setup_Guide.md b/developer_resources/Amazon_DCV_Setup_Guide.md
index cf1921849..91af36210 100644
--- a/developer_resources/Amazon_DCV_Setup_Guide.md
+++ b/developer_resources/Amazon_DCV_Setup_Guide.md
@@ -32,7 +32,7 @@ graphical user interface (GUI) to visualize FPGA development in the cloud.
### Prerequisites
1. [Instance and IAM Configuration for DCV Licensing](https://docs.aws.amazon.com/dcv/latest/adminguide/setting-up-license.html#dcv-lic-req)
-2. [Depenency Installation](https://docs.aws.amazon.com/dcv/latest/adminguide/setting-up-installing-linux-prereq.html#linux-prereq-gui)
+2. [Dependency Installation](https://docs.aws.amazon.com/dcv/latest/adminguide/setting-up-installing-linux-prereq.html#linux-prereq-gui)
- :warning: DO NOT PERFORM STEP 3! Upgrading may impact the stability of development kit software!
3. [Protocol Setup](https://docs.aws.amazon.com/dcv/latest/adminguide/setting-up-installing-linux-prereq.html#linux-prereq-wayland)
4. [Driver Installation and Setting Virtual Display Resolution](https://docs.aws.amazon.com/dcv/latest/adminguide/setting-up-installing-linux-prereq.html#linux-prereq-nongpu)
diff --git a/docs-rtd/Makefile b/docs-rtd/Makefile
index ae305be65..6f28d6f85 100644
--- a/docs-rtd/Makefile
+++ b/docs-rtd/Makefile
@@ -154,7 +154,16 @@ doctest:
"results in $(BUILDDIR)/doctest/output.txt."
spelling:
- $(SPHINXBUILD) -b spelling source/ build/
+ $(SPHINXBUILD) -b spelling source/ build/ && \
+ echo "Spelling check complete" >&2 && \
+ if [ -n "$$(find build -name "*.spelling" 2>/dev/null)" ]; then \
+ find build -name "*.spelling" -exec cat {} \; | \
+ sed 's/^\(.*\.rst\):\([0-9]*\): (\([^)]*\)).*/- \3 (in \1, line \2)/' | \
+ sort | uniq >&2 && \
+ echo "Spelling errors found. Total misspelled words: $$(find build -name "*.spelling" -exec cat {} \; | wc -l)" >&2; \
+ else \
+ echo "No spelling errors found." >&2; \
+ fi
links:
make clean && make html && python3 ../shared/bin/check_doc_links.py
diff --git a/docs-rtd/requirements.txt b/docs-rtd/requirements.txt
index 6409c0f12..118cd4cbc 100644
--- a/docs-rtd/requirements.txt
+++ b/docs-rtd/requirements.txt
@@ -1,13 +1,92 @@
-sphinx>=6.1.0,<9.0.0
-sphinx_book_theme>=1.1.3,<2.0.0
-sphinx-sitemap==2.6.0
-
-sphinxcontrib-spelling==7.7.0
+#
+# This file is autogenerated by pip-compile with Python 3.12
+# by the following command:
+#
+# pip-compile --output-file=requirements.txt requirements.in
+#
+accessible-pygments==0.0.5
+ # via pydata-sphinx-theme
+alabaster==0.7.16
+ # via sphinx
+babel==2.17.0
+ # via
+ # pydata-sphinx-theme
+ # sphinx
+beautifulsoup4==4.13.5
+ # via pydata-sphinx-theme
+certifi==2025.8.3
+ # via requests
+charset-normalizer==3.4.3
+ # via requests
+docutils==0.19
+ # via
+ # -r requirements.in
+ # pydata-sphinx-theme
+ # sphinx
+idna==3.10
+ # via requests
+imagesize==1.4.1
+ # via sphinx
+jinja2==3.1.6
+ # via sphinx
+markupsafe==3.0.2
+ # via jinja2
+packaging==25.0
+ # via
+ # pydata-sphinx-theme
+ # sphinx
+pydata-sphinx-theme==0.15.4
+ # via sphinx-book-theme
pyenchant==3.2.2
-docutils>=0.18,<0.21
-
-sphinx-copybutton>=0.5.2
-
-termcolor==2.5.0
-requests<3.0.0
-
+ # via
+ # -r requirements.in
+ # sphinxcontrib-spelling
+pygments==2.19.2
+ # via
+ # accessible-pygments
+ # pydata-sphinx-theme
+ # sphinx
+requests==2.32.5
+ # via
+ # sphinx
+ # sphinxcontrib-spelling
+snowballstemmer==3.0.1
+ # via sphinx
+soupsieve==2.8
+ # via beautifulsoup4
+sphinx==5.3.0
+ # via
+ # -r requirements.in
+ # pydata-sphinx-theme
+ # sphinx-book-theme
+ # sphinx-copybutton
+ # sphinx-last-updated-by-git
+ # sphinxcontrib-spelling
+sphinx-book-theme==1.1.3
+ # via -r requirements.in
+sphinx-copybutton==0.5.2
+ # via -r requirements.in
+sphinx-last-updated-by-git==0.3.8
+ # via sphinx-sitemap
+sphinx-sitemap==2.8.0
+ # via -r requirements.in
+sphinxcontrib-applehelp==2.0.0
+ # via sphinx
+sphinxcontrib-devhelp==2.0.0
+ # via sphinx
+sphinxcontrib-htmlhelp==2.1.0
+ # via sphinx
+sphinxcontrib-jsmath==1.0.1
+ # via sphinx
+sphinxcontrib-qthelp==2.0.0
+ # via sphinx
+sphinxcontrib-serializinghtml==2.0.0
+ # via sphinx
+sphinxcontrib-spelling==8.0.1
+ # via -r requirements.in
+typing-extensions==4.15.0
+ # via
+ # beautifulsoup4
+ # pydata-sphinx-theme
+urllib3==2.5.0
+ # via requests
diff --git a/docs-rtd/source/ERRATA.rst b/docs-rtd/source/ERRATA.rst
index d58f2959c..8c9ecdb1f 100644
--- a/docs-rtd/source/ERRATA.rst
+++ b/docs-rtd/source/ERRATA.rst
@@ -115,4 +115,10 @@ Software defined Accelerator Development (Vitis)
3. Support for Vitis software emulation has been deprecated by AMD, therefore, no longer supported.
+Amazon DCV
+----------
+
+1. Amazon DCV does not support Rocky Linux 8.10 at this time.
+
+
`Back to Home <./index.html>`__
diff --git a/docs-rtd/source/RELEASE-NOTES.rst b/docs-rtd/source/RELEASE-NOTES.rst
index 1c2e92e39..ee19d4a41 100644
--- a/docs-rtd/source/RELEASE-NOTES.rst
+++ b/docs-rtd/source/RELEASE-NOTES.rst
@@ -1,6 +1,17 @@
F2 Developer Kit Release Notes
==============================
+v2.2.1
+------
+
+- `Release of FPGA Developer AMI 1.18.0 (Rocky Linux 8.10) `__ with Vivado/Vitis 2025.1 tools pre-installed
+- `Release of Vivado HLx flow <./User-Guide-AWS-EC2-FPGA-Development-Kit.html#development-environments>`__
+- Fixed TCL glob expression to properly read both .sv and .v files. Credit to @pyz-creeper and @dsw for this update!
+- Updated error codes in create-fpga-image for unsupported design logic
+- Updated the Virtual Ethernet Application to write the DMA buffer descriptors using the byte alignment required by the CL_SDE example, preventing data alignment errors on Rocky Linux
+- `Added Amazon FPGA Image (AFI) creation Python script <./hdk/README.html#step-6-submit-generated-dcp-for-afi-creation>`__
+- Updated XRT version which includes stability fixes for Vitis
+
.. _v220:
v2.2.0
diff --git a/docs-rtd/source/User-Guide-AWS-EC2-FPGA-Development-Kit.rst b/docs-rtd/source/User-Guide-AWS-EC2-FPGA-Development-Kit.rst
index 79aa9cb80..728b4eda6 100644
--- a/docs-rtd/source/User-Guide-AWS-EC2-FPGA-Development-Kit.rst
+++ b/docs-rtd/source/User-Guide-AWS-EC2-FPGA-Development-Kit.rst
@@ -64,7 +64,7 @@ Instance Types
Second-Generation On-Cloud FPGA Accelerator Card
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-|image1|
+|accel_card_specs|
.. _comparison-to-f1:
@@ -76,7 +76,7 @@ Comparison to F1
AWS EC2 F2 FPGA Development Kit
-------------------------------
-.. _development-environments-user-guide:
+.. _development-environments:
Development Environments
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -106,7 +106,7 @@ supported in the development kit.
using the HDK.
- Verilog/System Verilog/VHDL
- User-implemented DMA engine or Streaming Data Engine (SDE)
- - Simulation
+ - Simulation and Virtual JTAG
- Hardware developers with advanced FPGA experience
* - Hardware accelerator development using Vitis
- This environment supports the Vitis design flow,
@@ -124,6 +124,14 @@ supported in the development kit.
- Hardware Emulation
- Advanced software developers or hardware developers
with intermediate to advanced FPGA experiences
+ * - Hardware accelerator development using Vivado IP Integrator (IPI) and
+ High Level Design (HLx)
+ - This environment supports the Vivado high-level design flow using IP
+ integrator in the GUI.
+ - Block Design in IP Integrator
+ - AWS IP for HLx
+ - Simulation and Virtual JTAG
+ - Hardware developers with intermediate FPGA experience
On-premise environment: Customers can set up a on-premise development
environment. See the `supported AMD tool versions here. <#hardware-development-kit-hdk>`__ Refer to
@@ -218,13 +226,57 @@ Quick Start Links
-
-
-
- - `Testbench `__
+ - `Testbench `__
* -
-
-
-
- `Runtime Software `__
-
+ * - HLx
+ - `hello_world_hlx `__
+ - Demonstrates simple register peek and poke using GPIO and VLED
+ - `Vivado IPI Setup Guide <./hdk/docs/IPI-GUI-Vivado-Setup.html>`__
+ - `Design Spec <./hdk/cl/examples/hello-world-hlx/README.html>`__
+ * -
+ -
+ -
+ -
+ - `Testbench `__
+ * -
+ -
+ -
+ -
+ - `Runtime Software `__
+ * -
+ - `hello_world_mb_hlx `__
+ - Demonstrates integrating MicroBlaze soft processor in HLx design
+ -
+ - `Design Spec <./hdk/cl/examples/hello-world-mb-hlx/README.html>`__
+ * -
+ -
+ -
+ -
+ - `Testbench `__
+ * -
+ -
+ -
+ -
+ - `Runtime Software `__
+ * -
+ - `cl_ipi_cdma_test_hlx `__
+ - Demonstrates direct memory access to the DDR and HBM in AWS IP
+ -
+ - `Design Spec <./hdk/cl/examples/cl-ipi-cdma-test-hlx/README.html>`__
+ * -
+ -
+ -
+ -
+ - `Testbench `__
+ * -
+ -
+ -
+ -
+ - `Runtime Software `__
.. _aws-shells:
@@ -357,6 +409,10 @@ currently released to customers:
- FPGA Developer AMI ID
- Vivado/Vitis Version Supported
- Operating System Version
+ * - 1.18.0
+ - `ami-04b57de2833b499b1 `__
+ - 2025.1
+ - Rocky Linux 8.10 (4.18.0-553.36.1.el8_10.x86_64)
* - 1.18.0
- `ami-098b2ed4c92602975 `__
- 2025.1
@@ -369,10 +425,10 @@ currently released to customers:
Given the large size of the FPGA used for F2, AMD tools work best with
at least 4 vCPU’s and 32GiB Memory. We recommend `Compute Optimized and
Memory Optimized instance
-types `__ to successfully
+types `__ to successfully
run the synthesis of acceleration code. Developers may start coding and
run simulations on low-cost `General Purpose instances
-types `__.
+types `__.
Note that the tools used by the HDK are only supported on x86-based EC2
instances (Graviton-based instances are not compatible with the tools).
@@ -422,8 +478,8 @@ FPGA Development Kit:
- Connect to a custom CL design in FPGA through `Virtual
JTAG <./hdk/docs/Virtual-JTAG-XVC.html>`__ to run hardware debug.
-.. |f2_instances| image:: ./_static/instance_sizes_20250110.png
-.. |image1| image:: ./_static/accel_card_specs_20250110.png
-.. |f2_f1_comp| image:: ./_static/f2_f1_comp_20250110.png
+.. |f2_instances| image:: ./_static/instance_sizes.png
+.. |accel_card_specs| image:: ./_static/accel_card_specs.png
+.. |f2_f1_comp| image:: ./_static/f2_f1_comp.png
`Back to Home <./index.html>`__
diff --git a/docs-rtd/source/_static/accel_card_specs.png b/docs-rtd/source/_static/accel_card_specs.png
new file mode 100644
index 000000000..6ec7cd570
Binary files /dev/null and b/docs-rtd/source/_static/accel_card_specs.png differ
diff --git a/docs-rtd/source/_static/cl_ipi_cdma_test_hlx_images/cl_ipi_cdma_test_hlx.png b/docs-rtd/source/_static/cl_ipi_cdma_test_hlx_images/cl_ipi_cdma_test_hlx.png
new file mode 100644
index 000000000..88c295c6d
Binary files /dev/null and b/docs-rtd/source/_static/cl_ipi_cdma_test_hlx_images/cl_ipi_cdma_test_hlx.png differ
diff --git a/docs-rtd/source/_static/hello_world_hlx_images/hello_world_hlx.png b/docs-rtd/source/_static/hello_world_hlx_images/hello_world_hlx.png
new file mode 100644
index 000000000..43c951bc8
Binary files /dev/null and b/docs-rtd/source/_static/hello_world_hlx_images/hello_world_hlx.png differ
diff --git a/docs-rtd/source/_static/hello_world_mb_hlx_images/hello_world_mb_hlx.png b/docs-rtd/source/_static/hello_world_mb_hlx_images/hello_world_mb_hlx.png
new file mode 100644
index 000000000..85e725786
Binary files /dev/null and b/docs-rtd/source/_static/hello_world_mb_hlx_images/hello_world_mb_hlx.png differ
diff --git a/docs-rtd/source/_static/hlx_images/aws_ip_clocks.png b/docs-rtd/source/_static/hlx_images/aws_ip_clocks.png
new file mode 100644
index 000000000..fbe33045d
Binary files /dev/null and b/docs-rtd/source/_static/hlx_images/aws_ip_clocks.png differ
diff --git a/docs-rtd/source/_static/hlx_images/aws_ip_ids.png b/docs-rtd/source/_static/hlx_images/aws_ip_ids.png
new file mode 100644
index 000000000..cd05cbb84
Binary files /dev/null and b/docs-rtd/source/_static/hlx_images/aws_ip_ids.png differ
diff --git a/docs-rtd/source/_static/hlx_images/aws_ip_interfaces.png b/docs-rtd/source/_static/hlx_images/aws_ip_interfaces.png
new file mode 100644
index 000000000..d77fafea7
Binary files /dev/null and b/docs-rtd/source/_static/hlx_images/aws_ip_interfaces.png differ
diff --git a/docs-rtd/source/_static/hlx_images/ipi_mod_ref.png b/docs-rtd/source/_static/hlx_images/ipi_mod_ref.png
new file mode 100644
index 000000000..43c951bc8
Binary files /dev/null and b/docs-rtd/source/_static/hlx_images/ipi_mod_ref.png differ
diff --git a/docs-rtd/source/_static/hlx_images/vivado_gui.png b/docs-rtd/source/_static/hlx_images/vivado_gui.png
new file mode 100644
index 000000000..2e55cec0d
Binary files /dev/null and b/docs-rtd/source/_static/hlx_images/vivado_gui.png differ
diff --git a/docs-rtd/source/all-links.rst b/docs-rtd/source/all-links.rst
index 4d4f9ae62..03c8fe2a5 100644
--- a/docs-rtd/source/all-links.rst
+++ b/docs-rtd/source/all-links.rst
@@ -1,3 +1,5 @@
+:orphan:
+
All Documents by Section
========================
diff --git a/docs-rtd/source/conf.py b/docs-rtd/source/conf.py
index e8b899f04..868d0d1bb 100644
--- a/docs-rtd/source/conf.py
+++ b/docs-rtd/source/conf.py
@@ -28,48 +28,54 @@
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
- 'sphinx_sitemap',
- 'sphinx.ext.autodoc',
- 'sphinx.ext.doctest',
- 'sphinx.ext.intersphinx',
- 'sphinx.ext.todo',
- 'sphinx.ext.coverage',
- 'sphinx.ext.mathjax',
- 'sphinx.ext.ifconfig',
- 'sphinx.ext.viewcode',
- 'sphinx.ext.imgmath',
- 'sphinx.ext.extlinks',
- 'sphinxcontrib.spelling',
- 'sphinx_copybutton',
- 'sphinx_book_theme',
+ "sphinx_sitemap",
+ "sphinx.ext.autodoc",
+ "sphinx.ext.doctest",
+ "sphinx.ext.intersphinx",
+ "sphinx.ext.todo",
+ "sphinx.ext.coverage",
+ "sphinx.ext.mathjax",
+ "sphinx.ext.ifconfig",
+ "sphinx.ext.viewcode",
+ "sphinx.ext.imgmath",
+ "sphinx.ext.extlinks",
+ "sphinxcontrib.spelling",
+ "sphinx_copybutton",
+ "sphinx_book_theme",
]
# Makes the spelling filters visible.
-sys.path.insert(0, os.path.abspath('./spelling_filters'))
+sys.path.insert(0, os.path.abspath("./spelling_filters"))
from hex_filter import HexFilter
from ordinal_filter import OrdinalFilter
from rtl_hex_filter import RTLHexFilter
+from username_filter import UsernameFilter
spelling_ignore_acronyms = True
-spelling_filters = ['hex_filter.HexFilter', 'rtl_hex_filter.RTLHexFilter', 'ordinal_filter.OrdinalFilter']
+spelling_filters = [
+ "hex_filter.HexFilter",
+ "rtl_hex_filter.RTLHexFilter",
+ "ordinal_filter.OrdinalFilter",
+ "username_filter.UsernameFilter",
+]
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
# The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
-master_doc = 'index'
+master_doc = "index"
# General information about the project.
-author = 'the AWS F2 Team'
-project = 'AWS F2'
-copyright = '2024-2025, Amazon, Inc'
+author = "the AWS F2 Team"
+project = "AWS F2"
+copyright = "2024-2025, Amazon, Inc"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -112,7 +118,7 @@
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
@@ -124,30 +130,30 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = 'sphinx_book_theme'
+html_theme = "sphinx_book_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
-html_baseurl = 'https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/'
+html_baseurl = "https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/"
-sitemap_filename = 'sitemap.xml'
+sitemap_filename = "sitemap.xml"
html_context = {
# ...
- 'default_mode': 'light',
- 'author': 'the AWS F2 Team',
+ "default_mode": "light",
+ "author": "the AWS F2 Team",
}
html_theme_options = {
- 'repository_url': 'https://github.com/aws/aws-fpga',
- 'use_issues_button': True,
- 'use_repository_button': True,
- 'use_download_button': True,
- 'use_fullscreen_button': True,
- 'use_edit_page_button': True,
- 'repository_branch': 'f2',
+ "repository_url": "https://github.com/aws/aws-fpga",
+ "use_issues_button": True,
+ "use_repository_button": True,
+ "use_download_button": True,
+ "use_fullscreen_button": True,
+ "use_edit_page_button": True,
+ "repository_branch": "f2",
# "navbar_persistent": [],
}
@@ -158,7 +164,7 @@
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
# NOTE: This guy controls the little title header on the left side of the page with the search bar, etc.
-html_title = 'AWS F2 Documentation'
+html_title = "AWS F2 Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
@@ -175,8 +181,8 @@
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-html_css_files = ['css/custom.css']
+html_static_path = ["_static"]
+html_css_files = ["css/custom.css"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
@@ -219,7 +225,7 @@
# html_file_suffix = None
# Output file base name for HTML help builder.
-htmlhelp_basename = 'F2doc'
+htmlhelp_basename = "F2doc"
# Don't copy prompts in code blocks
copybutton_prompt_text = "$ "
diff --git a/docs-rtd/source/developer-resources/Amazon-DCV-Setup-Guide.rst b/docs-rtd/source/developer-resources/Amazon-DCV-Setup-Guide.rst
index 2c899501e..576bcd9eb 100644
--- a/docs-rtd/source/developer-resources/Amazon-DCV-Setup-Guide.rst
+++ b/docs-rtd/source/developer-resources/Amazon-DCV-Setup-Guide.rst
@@ -50,8 +50,7 @@ Prerequisites
~~~~~~~~~~~~~
1. `Instance and IAM Configuration for DCV Licensing `__
-2. `Depenency
- Installation `__
+2. `Dependency Installation `__
- ⚠️ DO NOT PERFORM STEP 3! Upgrading may impact the stability of
development kit software!
diff --git a/docs-rtd/source/hdk/README.rst b/docs-rtd/source/hdk/README.rst
index 77730980c..dc832688e 100644
--- a/docs-rtd/source/hdk/README.rst
+++ b/docs-rtd/source/hdk/README.rst
@@ -11,6 +11,8 @@ Table of Contents
- `HDK Overview <#hdk-overview>`__
- `Getting Started <#getting-started-hdk>`__
+ - `Quick Start Example: Host-to-FPGA Communication via the OCL Interface <#quick-start-example-host-to-fpga-communication-ocl>`__
+
- `Build Accelerator AFI using HDK Design
Flow <#build-accelerator-afi-using-hdk-design-flow>`__
@@ -70,6 +72,27 @@ Logic Region (SLR) of the FPGA to developers.
Getting Started
---------------
+.. _quick-start-example-host-to-fpga-communication-ocl:
+
+Quick Start HW/SW Example: Host-to-FPGA Communication via the OCL Interface
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+The `test_aws_clk_gen.c software runtime example `__
+utilizes the `OCL AXI interface <./docs/AWS-Shell-Interface-Specification.html>`__ to program
+the `AWS Clock Generation IP <./docs/AWS-CLK-GEN-spec.html>`__ within the CL_MEM_PERF AFI.
+
+The example can be run by following the steps in the following documentation references:
+
+1. Build and ingest the `CL_MEM_PERF <./cl/examples/cl-mem-perf/README.html>`__ example by following the `Build Accelerator AFI using HDK Design Flow <#build-accelerator-afi-using-hdk-design-flow>`__ section below
+
+2. `Load the AGFI <#step-7-load-accelerator-afi-on-f2-instance>`__ generated by the ``create-fpga-image`` command
+
+3. Follow the `CL_MEM_PERF software runtime compilation instructions <./cl/examples/cl-mem-perf/README.html#cl-mem-perf-software>`__ and execute ``./test_aws_clk_gen``
+
+.. _build-accelerator-afi-using-hdk-design-flow:
+
Build Accelerator AFI using HDK Design Flow
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -288,11 +311,12 @@ The output of this command includes two identifiers for your AFI:
export DCP_TARBALL_NAME=$(basename ${DCP_TARBALL_TO_INGEST})
export CL_DESIGN_NAME=''
- export CL_DESIGN_DESCRIPTION='Description of ${CL_DESIGN_NAME}'
+ export CL_DESIGN_DESCRIPTION="Description of ${CL_DESIGN_NAME}"
# Call AWS CLI ingestion command
aws ec2 create-fpga-image --name ${CL_DESIGN_NAME} --description "${CL_DESIGN_DESCRIPTION}" --input-storage-location Bucket=${DCP_BUCKET_NAME},Key=${DCP_FOLDER_NAME}/${DCP_TARBALL_NAME} --logs-storage-location Bucket=${LOGS_BUCKET_NAME},Key=${LOGS_FOLDER_NAME}/ --region ${REGION}
+ # expected response format:
{
"FpgaImageId": "afi-09953582f46c45b17",
"FpgaImageGlobalId": "agfi-0925b211f5a81b071"
@@ -345,8 +369,8 @@ Step 7. Load Accelerator AFI on F2 Instance
Now that your AFI is available, it can be tested on an F2 instance. The
instance can be launched using any preferred AMI, private or public,
-from the AWS Marketplace catalog. AWS recommends using AMIs with Ubuntu
-20.04 and kernel version 5.15.
+from the AWS EC2 AMI Catalog. AWS recommends using AMIs with `similar
+OS and kernel versions <../User-Guide-AWS-EC2-FPGA-Development-Kit.html#fpga-developer-ami>`__ to those of our developer AMIs.
Now you need to install the FPGA Management tools by sourcing the
``sdk_setup.sh`` script:
@@ -814,5 +838,9 @@ Additional HDK Documentation
docs/Supported-DDR-Modes
docs/Virtual-JTAG-XVC
docs/XDMA-Install
+ docs/IPI-GUI-Vivado-Setup
+ docs/IPI-GUI-AWS-IP
+ docs/IPI-GUI-Examples
+ docs/IPI-GUI-Flows
`Back to Home <../index.html>`__
diff --git a/docs-rtd/source/hdk/cl/CHECKLIST-BEFORE-BUILDING-CL.rst b/docs-rtd/source/hdk/cl/CHECKLIST-BEFORE-BUILDING-CL.rst
index c8b0ae416..dfcf4bc54 100644
--- a/docs-rtd/source/hdk/cl/CHECKLIST-BEFORE-BUILDING-CL.rst
+++ b/docs-rtd/source/hdk/cl/CHECKLIST-BEFORE-BUILDING-CL.rst
@@ -17,4 +17,4 @@ building a CL Design Check Point (DCP) file for AFI generation.
4. Update the timing and placement constraints under
``$CL_DIR/build/constraints`` for your design specific changes.
-`Back to HDK README <../index.html>`__
+`Back to HDK README <../README.html>`__
diff --git a/docs-rtd/source/hdk/cl/examples/cl-ipi-cdma-test-hlx/README.rst b/docs-rtd/source/hdk/cl/examples/cl-ipi-cdma-test-hlx/README.rst
new file mode 100644
index 000000000..da1f6452e
--- /dev/null
+++ b/docs-rtd/source/hdk/cl/examples/cl-ipi-cdma-test-hlx/README.rst
@@ -0,0 +1,36 @@
+HLx Flow for CDMA Test IP Integrator Example
+============================================
+
+Table of Contents
+-----------------
+
+- `HLx Flow for CDMA Test IP Integrator
+ Example <#hlx-flow-for-cdma-test-ip-integrator-example>`__
+
+ - `Table of Contents <#table-of-contents>`__
+ - `Overview <#overview>`__
+ - `Building and Testing Example <#building-and-testing-example>`__
+
+Overview
+--------
+
+This example design exercises the following data interfaces:
+
+- AXIL_OCL: Polls the AXI GPIO to which the DDR and HBM calibration done
+ signals are connected
+- AXI_PCIS: Writes 1K data pattern to DDR source buffer
+- AXIL_OCL: Configures AXI CDMA for 1K DMA transfer from DDR to HBM and polls
+ AXI CDMA status register to determine transfer completion
+- AXI_PCIS: Reads 1K from HBM destination buffer and compares against
+ original data pattern
+
+|block-diagram|
+
+Building and Testing Example
+----------------------------
+
+Follow the common design steps specified in the `IPI example design flow
+document <./../../../docs/IPI-GUI-Flows.html>`__ to build and test this
+example on F2 instances.
+
+.. |block-diagram| image:: ../../../../_static/cl_ipi_cdma_test_hlx_images/cl_ipi_cdma_test_hlx.png
diff --git a/docs-rtd/source/hdk/cl/examples/cl-mem-perf/README.rst b/docs-rtd/source/hdk/cl/examples/cl-mem-perf/README.rst
index a9b3c7bb5..855756c0b 100644
--- a/docs-rtd/source/hdk/cl/examples/cl-mem-perf/README.rst
+++ b/docs-rtd/source/hdk/cl/examples/cl-mem-perf/README.rst
@@ -635,6 +635,8 @@ Simulations
Please see more details on running simulations in this
`README <./verif/README.html>`__
+.. _cl-mem-perf-software:
+
Software
--------
diff --git a/docs-rtd/source/hdk/cl/examples/cl-sde/software/src/README.rst b/docs-rtd/source/hdk/cl/examples/cl-sde/software/src/README.rst
index 4d1918414..52423de02 100644
--- a/docs-rtd/source/hdk/cl/examples/cl-sde/software/src/README.rst
+++ b/docs-rtd/source/hdk/cl/examples/cl-sde/software/src/README.rst
@@ -358,14 +358,12 @@ Data Flow Models
Card-to-Host (C2H) Data Flow
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Follow the recommended `Card-to-Host data flow
-model <../../../../../../sdk/apps/virtual-ethernet/doc/SDE-HW-Guide.html#c2h>`__
+Follow the recommended `Card-to-Host data flow model <../../../../../../sdk/apps/virtual-ethernet/doc/SDE-HW-Guide.html#c2h-sde>`__
Host-to-Card (H2C) Data Flow
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Follow the recommended `Host-to-Card data flow
-model <../../../../../../sdk/apps/virtual-ethernet/doc/SDE-HW-Guide.html#h2c>`__
+Follow the recommended `Host-to-Card data flow model <../../../../../../sdk/apps/virtual-ethernet/doc/SDE-HW-Guide.html#h2c-sde>`__
Support
-------
diff --git a/docs-rtd/source/hdk/cl/examples/hello-world-hlx/README.rst b/docs-rtd/source/hdk/cl/examples/hello-world-hlx/README.rst
new file mode 100644
index 000000000..8eee494fa
--- /dev/null
+++ b/docs-rtd/source/hdk/cl/examples/hello-world-hlx/README.rst
@@ -0,0 +1,39 @@
+HLx Flow for Hello World IP Integrator Example
+==============================================
+
+Table of Contents
+-----------------
+
+- `HLx Flow for Hello World IP Integrator
+ Example <#hlx-flow-for-hello-world-ip-integrator-example>`__
+
+ - `Table of Contents <#table-of-contents>`__
+ - `Overview <#overview>`__
+ - `Building and Testing Example <#building-and-testing-example>`__
+
+Overview
+--------
+
+This IP Integrator design includes the AWS IP configured with an OCL interface
+(AXI4-Lite Master) that controls the VLED through AXI GPIO and a PCIS interface
+(AXI4 Master) that performs read and write operations to the AXI BRAM in the CL.
+
+The VLED is set based upon writing ``0xAAAA`` into the AXI GPIO (0x0)
+slave register to drive the VLED. The value is read using the Verilog task
+``tb.get_virtual_led`` in simulation or ``fpga-get-virtual-led`` on F2
+instance.
+
+The PCIS Interface writes ASCII data into the AXI BRAM memory space and
+reads back from these addresses to print out “Hello World!” in simulation
+or on a F2 instance.
+
+|block-diagram|
+
+Building and Testing Example
+----------------------------
+
+Follow the common design steps specified in the `IPI example design flow
+document <./../../../docs/IPI-GUI-Flows.html>`__ to build and test this
+example on F2 instances.
+
+.. |block-diagram| image:: ../../../../_static/hello_world_hlx_images/hello_world_hlx.png
diff --git a/docs-rtd/source/hdk/cl/examples/hello-world-mb-hlx/README.rst b/docs-rtd/source/hdk/cl/examples/hello-world-mb-hlx/README.rst
new file mode 100644
index 000000000..3fd532c98
--- /dev/null
+++ b/docs-rtd/source/hdk/cl/examples/hello-world-mb-hlx/README.rst
@@ -0,0 +1,85 @@
+HLx Flow for Hello World MicroBlaze IP Integrator Example
+=========================================================
+
+Table of Contents
+-----------------
+
+ - `Overview <#mb-hlx-overview>`__
+ - `Building and Testing Example <#mb-hlx-building-and-testing-example>`__
+
+  - `MicroBlaze Debug Module (MDM) <#mb-hlx-microblaze-debug-module-mcm>`__
+ - `BRAM Access through VJTAG and MDM <#mb-hlx-bram-access-through-vjtag-and-mdm>`__
+
+.. _mb-hlx-overview:
+
+Overview
+--------
+
+This design shares the same basic structure as the
+`hello_world example <./../hello-world-hlx/README.html>`__.
+
+In addition, the design includes a MicroBlaze (MB) processor with LMB memory
+connections and a MicroBlaze Debug Module (MDM) for debugging purposes.
+The MicroBlaze uses its Data Port (DP) Master to access the AXI BRAM,
+which is also accessible by the PCIS Master. Through BSCAN, the host's XSDB
+program connects to the MDM, allowing it to issue commands to the MicroBlaze
+for reading and writing to the AXI BRAM.
+
+The example program executes in the following sequence:
+
+- After reset, MicroBlaze (MB) begins executing from an ELF file that is loaded
+ into LMB Memory.
+- MicroBlaze writes into the shared memory and writes into bit 0 of the
+ GPIO. MicroBlaze polls for bit 1 and bit 0 to be asserted.
+- The host polls GPIO bit 0 for assertion. It writes a pattern into
+ the shared memory (``0xBEEF_DEAD``) and writes into bit 1 of the
+ GPIO.
+- Once MicroBlaze polls GPIO bit 1 and bit 0 assertion, it verifies the
+ write pattern (``0xBEEF_DEAD``) from the host and writes to GPIO bit
+ 2.
+- The host polls GPIO bit 2, bit 1 and bit 0 assertion. After that, the
+  application completes successfully.
+
+|block-diagram-mb-hlx|
+
+.. _mb-hlx-building-and-testing-example:
+
+Building and Testing Example
+----------------------------
+
+Follow the common design steps specified in the `IPI example design flow
+document <./../../../docs/IPI-GUI-Flows.html>`__ to build and test this
+example on F2 instances.
+
+.. _mb-hlx-microblaze-debug-module-mcm:
+
+MicroBlaze Debug Module (MDM)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Before design implementation, Enable the BSCAN ports in CL by defining
+ the ``BSCAN_EN`` macro. **NOTE: This is required to use the MicroBlaze
+ Debug Module (MDM) in the design.**
+
+ .. code:: text
+
+ set_property verilog_define BSCAN_EN=1 [current_fileset]
+
+.. _mb-hlx-bram-access-through-vjtag-and-mdm:
+
+BRAM Access through VJTAG and MDM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code:: text
+
+ xsdb% connect
+ tcfchan#0
+ xsdb% targets
+ 1 debug_bridge
+ 2 00000000
+ 3 00000000
+ 4 MicroBlaze Debug Module at USER1.2.2
+ 5 MicroBlaze #0 (Running)
+ xsdb% target 5 # <------- Change the target to MicroBlaze
+ xsdb% mwr 0xC0000100 0xDEADBEEF # <------- Test a memory write to the BRAM's start address
+ xsdb% mrd 0xC0000100 # <------- Read to verify the test data has been stored in the BRAM successfully
+ C0000100: DEADBEEF
+
+.. |block-diagram-mb-hlx| image:: ../../../../_static/hello_world_mb_hlx_images/hello_world_mb_hlx.png
diff --git a/docs-rtd/source/hdk/docs/AWS-CLI-FPGA-Commands.rst b/docs-rtd/source/hdk/docs/AWS-CLI-FPGA-Commands.rst
index e08f43f0e..4adb563c5 100644
--- a/docs-rtd/source/hdk/docs/AWS-CLI-FPGA-Commands.rst
+++ b/docs-rtd/source/hdk/docs/AWS-CLI-FPGA-Commands.rst
@@ -237,19 +237,32 @@ Error Codes
See AWS FPGA HDK documentation for valid input format. We recommend
using the scripts provided with AWS FPGA HDK*
-- ``UNKNOWN_BITSTREAM_GENERATE_ERROR`` *An error occurred generating the
- FPGA image bitstream. If an S3 LogsStorageLocation was provided in the
- CreateFpgaImage request, review the captured bitstream generation logs
- saved to S3 under the FpgaImageId for this AFI.*
-
- - **Note:** This is a catch-all error and could be caused due to a
- variety of issues, for eg:
-
- - We found a combinatorial loop in the CL design. Bitstream
- generation logs might show errors like \`ERROR: [DRC LUTLP-1]
- Combinatorial Loop Alert: 2 LUT cells form a combinatorial loop.
- Combinatorial loops are not allowed in CL designs and AFI's are
- not generated in such a case.
+- ``UNSUPPORTED_DESIGN_LOGIC`` *The FPGA image bitstream generation
+ failed during design rule validation. If an S3 LogsStorageLocation was
+ provided in the CreateFpgaImage request, review the captured bitstream
+ generation logs saved to S3 under the FpgaImageId for this AFI.
+ Examples of failures include:*
+
+ *1. The design validation detected unsupported primitives in the
+ customer logic. Certain FPGA primitives are restricted to maintain
+ platform stability and ensure reliable operation of customer workloads.
+ The following primitives are not supported: DNA_PORT, FRAME_ECC, MCAP,
+ ICAP_TOP, ICAP_BOT, MASTER_JTAG, DCIRESET, EFUSE_USR, USR_ACCESS,
+ STARTUP, BSCAN1, BSCAN2, BSCAN3, BSCAN4, SYSMON.* *NOTE: This
+ implementation follows the*
+ `design advisory issued by AMD `__.
+ *Refer to it for detailed information.*
+
+ *2. We found a combinatorial loop in the CL design. Bitstream
+ generation logs might show errors like ERROR: [DRC LUTLP-1]
+ Combinatorial Loop Alert: 2 LUT cells form a combinatorial loop.
+ Combinatorial loops are not allowed in CL designs and AFIs are not
+ generated in such cases.*
+
+- ``UNKNOWN_BITSTREAM_GENERATE_ERROR`` *An unclassified error occurred
+ generating the FPGA image bitstream. If an S3 LogsStorageLocation was
+ provided in the CreateFpgaImage request, review the captured bitstream
+ generation logs saved to S3 under the FpgaImageId for this AFI.*
``delete-fpga-image``
---------------------
diff --git a/docs-rtd/source/hdk/docs/AWS-Fpga-Pcie-Memory-Map.rst b/docs-rtd/source/hdk/docs/AWS-Fpga-Pcie-Memory-Map.rst
index f1a6cd205..ecadf3efb 100644
--- a/docs-rtd/source/hdk/docs/AWS-Fpga-Pcie-Memory-Map.rst
+++ b/docs-rtd/source/hdk/docs/AWS-Fpga-Pcie-Memory-Map.rst
@@ -78,9 +78,8 @@ this flexibility to F2 customers by advertising all the BARs in the
shell as prefetchable. Customer applications must access BARs in a way
supported by the customer logic (CL). For example, enabling
write-combining on a prefetchable BAR requires a custom kernel driver or
-application to map and mark the target memory space as `write-combining
-(WC)
-memory `__.
+application to map and mark the target memory space as
+`write-combining (WC) memory `__.
Additionally, applications enabling prefetching should avoid caching
data from a memory space that contains any clear-on-read registers or
FIFOs.
diff --git a/docs-rtd/source/hdk/docs/IPI-GUI-AWS-IP.rst b/docs-rtd/source/hdk/docs/IPI-GUI-AWS-IP.rst
new file mode 100644
index 000000000..a065f7b78
--- /dev/null
+++ b/docs-rtd/source/hdk/docs/IPI-GUI-AWS-IP.rst
@@ -0,0 +1,79 @@
+AWS FPGA IP for IP Integrator Overview
+======================================
+
+Table of Contents
+-----------------
+
+- `AWS FPGA IP for IP Integrator
+ Overview <#aws-fpga-ip-for-ip-integrator-overview>`__
+
+ - `Table of Contents <#table-of-contents>`__
+ - `AWS IP Overview <#aws-ip-overview>`__
+ - `Enable IP Interfaces <#enable-ip-interfaces>`__
+ - `Clock Signals <#clock-signals>`__
+ - `CL Partition ID <#cl-partition-id>`__
+ - `Advanced <#advanced>`__
+
+AWS IP Overview
+---------------
+
+The AWS IP serves as a central component in the IP Integrator (IPI)
+designs, providing essential AXI interfaces (OCL, PCIS and PCIM) for
+Host-FPGA communication, configurable clock management through
+predefined recipes, and auxiliary signal ports like VLED/VDIP. It
+enables seamless integration between CL designs and the F2 Shell.
+
+To configure the AWS IP, double-click the AWS IP block in the 'Block
+Diagram'. The 'Re-customize IP' GUI displays four configuration
+categories.
+
+Enable IP Interfaces
+--------------------
+
+Select the box to enable desired interfaces. The block diagram updates
+automatically to show enabled interfaces, ports, and clocks.
+
+For details about the shell interface, see `AWS Shell Interface
+Specification <./AWS-Shell-Interface-Specification.html>`__.
+
+|aws-ip-interfaces|
+
+Clock Signals
+-------------
+
+Review the `Clock Recipes User
+Guide <./Clock-Recipes-User-Guide.html>`__ to determine
+the number of clocks needed for Groups A, B, and C, and select
+appropriate clock recipes for all CL clocks.
+
+|aws_ip_clocks|
+
+**NOTE**: ``clk_main_a0_out`` is a required clock and cannot be
+disabled.
+
+**NOTE**: You must select 'Enable Ports for HBM in CL' in the 'Enable IP
+Interfaces' tab to see HBM AXI clock recipe options.
+
+CL Partition ID
+---------------
+
+The PCIe Vendor ID, Device ID, Subsystem Vendor ID and Subsystem ID can
+be configured. For now, these default values typically match the AWS
+examples and shouldn't be modified at this time.
+
+|aws_ip_ids|
+
+Advanced
+--------
+
+Pipeline stages configuration:
+
+- Range: 1-4 pipeline stages
+- Applies to the ``sh_cl_ddr_stat_`` interface for DDR in the CL
+- Selection depends on design size and complexity
+
+.. |aws-ip-interfaces| image:: ./../../_static/hlx_images/aws_ip_interfaces.png
+.. |aws_ip_clocks| image:: ./../../_static/hlx_images/aws_ip_clocks.png
+.. |aws_ip_ids| image:: ./../../_static/hlx_images/aws_ip_ids.png
+
+`Back to Vivado IPI GUI Setup Guide <./IPI-GUI-Vivado-Setup.html>`__
diff --git a/docs-rtd/source/hdk/docs/IPI-GUI-Examples.rst b/docs-rtd/source/hdk/docs/IPI-GUI-Examples.rst
new file mode 100644
index 000000000..9fa7e82a4
--- /dev/null
+++ b/docs-rtd/source/hdk/docs/IPI-GUI-Examples.rst
@@ -0,0 +1,266 @@
+AWS GUI Workflow with Vivado IP Integrator Quick Start Examples
+===============================================================
+
+Table of Contents
+-----------------
+
+- `AWS GUI Workflow with Vivado IP Integrator Quick Start
+ Examples <#aws-gui-workflow-with-vivado-ip-integrator-quick-start-examples>`__
+
+ - `Table of Contents <#table-of-contents>`__
+ - `Overview <#overview>`__
+ - `HLx Examples Using IP Integrator
+ Flow <#hlx-examples-using-ip-integrator-flow>`__
+ - `Tutorial on how to create HLx IPI hello_world example with AXI GPIO and AXI BRAM <#tutorial-on-how-to-create-hlx-ipi-hello-world-example-with-axi-gpio-and-axi-bram>`__
+
+ - `Create Directory Structure and Vivado
+ Project <#create-directory-structure-and-vivado-project>`__
+ - `Configure the Block Diagram <#configure-the-block-diagram>`__
+
+ - `Configure AWS IP <#configure-aws-ip>`__
+ - `Add and Configure AXI GPIO <#add-and-configure-axi-gpio>`__
+ - `Add and Configure AXI BRAM <#add-and-configure-axi-bram>`__
+ - `Connect the Design <#connect-the-design>`__
+ - `Address Editor Tab <#address-editor-tab>`__
+ - `Save and Validate the Design <#save-and-validate-the-design>`__
+
+ - `Add Simulation Sources from Example
+ Design <#add-simulation-sources-from-example-design>`__
+
+ - `Run Simulation <#run-simulation>`__
+
+ - `Add Design Constraints <#add-design-constraints>`__
+ - `Implement the Design Tarball
+ File <#implement-the-design-tarball-file>`__
+ - `CL Example Software <#cl-example-software>`__
+
+Overview
+--------
+
+This document provides an overview of IP Integrator (IPI) examples in
+the HLx environment. Before starting, complete the `Vivado Setup
+Instructions <./IPI-GUI-Vivado-Setup.html>`__ to familiarize yourself with
+the Vivado GUI and IP Integrator.
+
+All examples in this document have been integrated into an automated
+flow that directly generates Vivado projects.
+
+.. _hlx-examples-using-ip-integrator-flow:
+
+HLx Examples Using IP Integrator Flow
+-------------------------------------
+
+This section provides example designs to help you become familiar with
+the automated project generation flow and IP Integrator functionality.
+
+Available examples are:
+
+- `hello_world <./../cl/examples/hello-world-hlx/README.html>`__
+- `hello_world_mb <./../cl/examples/hello-world-mb-hlx/README.html>`__
+- `cl_ipi_cdma_test <./../cl/examples/cl-ipi-cdma-test-hlx/README.html>`__
+
+Click any example link above for detailed design information and getting
+started instructions.
+
+.. _tutorial-on-how-to-create-hlx-ipi-hello-world-example-with-axi-gpio-and-axi-bram:
+
+Tutorial on how to create HLx IPI hello_world example with AXI GPIO and AXI BRAM
+--------------------------------------------------------------------------------
+
+This tutorial demonstrates how to configure AWS IP with the OCL
+interface (AXI4-Lite Master) and the PCIS interface (AXI4 Master),
+similar to the ones in the
+`hello_world <./../cl/examples/hello-world-hlx/README.html>`__
+example.
+
+The AXI GPIO IP controls the virtual LEDs (VLEDs). Writing ``0xAAAA`` to
+the AXI GPIO (0x0) slave register drives VLEDs. The VLED value can be
+read using the verilog task ``tb.get_virtual_led`` in simulation or
+``fpga-get-virtual-led`` on an F2 instance.
+
+The PCIS interface accesses the AXI BRAM, where the ASCII string 'Hello
+World!' can be written to a BRAM location and read back for display in
+either the simulation environment or on an F2 instance.
+
+Create Directory Structure and Vivado Project
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Change directories to ``hdk/cl/examples``
+
+Create a directory in examples like ``hello_world_hlx_ipi``
+
+Change directories into ``hello_world_hlx_ipi/``
+
+Start Vivado by typing ``vivado`` in the bash console.
+
+Create a project for any device by typing the following command in Vivado's
+TCL Tab.
+
+.. code:: Tcl
+
+ create_project -name hello_world
+
+Enter the following Tcl command to configure AWS project settings and
+create a block diagram with AWS IP:
+
+.. code:: Tcl
+
+ aws::make_ipi
+
+Configure the Block Diagram
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Configure AWS IP
+^^^^^^^^^^^^^^^^
+
+Configure the AWS IP block by double-clicking it and selecting three
+interfaces under 'IP Interfaces': 'Use OCL Register Interface
+(M_AXI_OCL)', 'Use PCI Slave-access Interface (M_AXI_PCIS)', and 'Use
+Auxiliary (non-AXI) Signal Ports'. For clock configuration, use Group-A
+Clock with the default clock recipe to set a 250 MHz frequency.
+
+Add and Configure AXI GPIO
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Right-click in the canvas and select 'Add IP...', then search for and
+double-click 'AXI GPIO'. Once added, double-click the ``axi_gpio_0`` block
+in the canvas. In the 'Re-customize IP' dialog box, select 'All Outputs'
+under the GPIO section and set GPIO Width to 16, then click 'OK'.
+
+Add and Configure AXI BRAM
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Right-click in the canvas and select 'Add IP...', then search for and
+double-click 'AXI BRAM Controller'. Once added, double-click the
+``axi_bram_ctrl_0`` block in the canvas and set the Data Width to 512 to
+match the PCIS AXI4 Master Interface's data width, then click 'OK'.
+
+Connect the Design
+^^^^^^^^^^^^^^^^^^
+
+Click 'Run Connection Automation' at the top of the Block Diagram.
+Configure the AXI BRAM controller by setting both
+``axi_bram_ctrl_0/BRAM_PORTA`` and ``BRAM_PORTB`` to 'Auto', then set
+``axi_bram_ctrl_0/S_AXI`` Master to ``f2_inst/M_AXI_PCIS`` with
+remaining options as 'Auto'. For the AXI GPIO, set ``axi_gpio_0/S_AXI``
+Master to ``f2_inst/M_AXI_OCL`` with other options as 'Auto', then click
+'OK'.
+
+After completing the automation, expand ``axi_gpio_0/GPIO`` by clicking
+the + symbol. Connect ``gpio_io_o[15:0]`` from the ``f2_inst`` block to
+``status_vled[15:0]``, then run 'Connection Automation'.
+
+Address Editor Tab
+^^^^^^^^^^^^^^^^^^
+
+In the 'Address Editor' tab above the block diagram, you can view the
+address configurations: the AXI BRAM instance has a default 64K address
+space starting at ``0xC0000000`` (adjustable by modifying the Range
+value), while the AXI GPIO instance uses a 4K address space with
+M_AXI_OCL starting at ``0x00000000``.
+
+Save and Validate the Design
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Save the block diagram, then select 'Tools' -> 'Validate Design' and
+click 'OK' when validation completes successfully.
+
+Add Simulation Sources from Example Design
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To add simulation sources, navigate to 'Project Manager' in the 'Flow
+Navigator' and select 'Add Sources' -> 'Add or create simulation
+sources' -> 'Select Add Files'. Add the test file
+``test_cl.sv`` from the ``hdk/common/shell_stable/hlx/hlx_examples/build/IPI//verif``
+directory, and ensure you deselect the option to scan and add RTL include files.
+
+Configure the following simulation settings to import source files from external
+directories instead of copying them to the Vivado project:
+
+1. Source file options:
+
+ - Deselect 'Copy sources into project' (creates links instead)
+ - Select 'Add sources from subdirectories'
+ - Enable 'Include all design sources for simulation'
+ - Click 'Finish'
+
+2. Simulation settings:
+
+ - Right-click 'SIMULATION' in Project Manager
+ - Select 'Simulation Settings'
+ - In Verilog options, click the '...' box
+ - Verify/update the following:
+
+ - CL_NAME=cl_top
+ - TEST_NAME=test_cl
+
+ - Click 'OK'
+ - Click 'Apply'
+ - Click 'OK' to return to Vivado project
+
+Run Simulation
+^^^^^^^^^^^^^^
+
+From the 'Flow Navigator' tab, select 'Simulation' -> 'Run Simulation'
+-> 'Run Behavioral Simulation', then add your required simulation
+signals. In the Tcl console, enter the following command.
+
+.. code:: Tcl
+
+ run -all
+
+Note: If critical warnings appear, click 'OK' and run the command twice
+(this is a known issue that will be addressed in future versions).
+
+Add Design Constraints
+~~~~~~~~~~~~~~~~~~~~~~
+
+No additional constraints are needed for this design.
+
+Implement the Design Tarball File
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To implement the design, launch implementation:
+
+- Right-click 'impl_1'
+- Select 'Launch Runs...'
+- Click 'OK'
+- Click 'OK' on the 'Missing Synthesis Results' dialog
+
+This process will run both synthesis and implementation.
+
+The completed tarball file is located in:
+
+.. code:: bash
+
+ /example_projects/.runs/faas_1/build/checkpoints/to_aws/.Developer_CL.tar
+
+For instructions on creating an F2 AFI from the design tarball, see
+`Submit Generated DCP for AFI
+Creation <./../README.html#step-6-submit-generated-dcp-for-afi-creation>`__
+in the HDK quick start guide.
+
+CL Example Software
+~~~~~~~~~~~~~~~~~~~
+
+Compile the runtime software required for F2 instance execution by
+copying the software directory to your target location and running these
+commands:
+
+.. code:: bash
+
+ cp -r $HDK_COMMON_DIR/shell_stable/hlx/hlx_examples/build/IPI/hello_world/software .
+ cd software
+ make all
+ sudo ./test_cl
+
+HLx IPI CL Examples
+-------------------
+.. toctree::
+ :maxdepth: 1
+
+ ./../cl/examples/hello-world-hlx/README
+ ./../cl/examples/hello-world-mb-hlx/README
+ ./../cl/examples/cl-ipi-cdma-test-hlx/README
+
+`Back to Vivado IPI GUI Setup Guide <./IPI-GUI-Vivado-Setup.html>`__
diff --git a/docs-rtd/source/hdk/docs/IPI-GUI-Flows.rst b/docs-rtd/source/hdk/docs/IPI-GUI-Flows.rst
new file mode 100644
index 000000000..355723150
--- /dev/null
+++ b/docs-rtd/source/hdk/docs/IPI-GUI-Flows.rst
@@ -0,0 +1,116 @@
+HLx GUI Flows with Vivado IP Integrator
+=======================================
+
+Table of Contents
+-----------------
+
+- `HLx GUI Flows with Vivado IP
+ Integrator <#hlx-gui-flows-with-vivado-ip-integrator>`__
+
+ - `Table of Contents <#table-of-contents>`__
+ - `Overview <#overview>`__
+ - `Create IP Integrator Project with Example
+ Design <#create-ip-integrator-project-with-example-design>`__
+
+ - `Create Design <#create-design>`__
+ - `Run Simulation <#run-simulation>`__
+ - `Run Implementation <#run-implementation>`__
+ - `AFI Creation <#afi-creation>`__
+ - `Runtime Example <#runtime-example>`__
+
+Overview
+--------
+
+This document covers top level steps for using the HLx GUI flows.
+
+Create IP Integrator Project with Example Design
+------------------------------------------------
+
+This section specifies the end-to-end flow for creating a pre-defined
+IPI example design and executing it on an F2 instance.
+
+Create Design
+~~~~~~~~~~~~~
+
+- To launch Vivado GUI
+
+ - Change to the ``hdk/cl/examples/`` directory, e.g.
+ ``hdk/cl/examples/hello_world_hlx``
+
+ - Invoke Vivado by typing ``vivado`` in the console
+
+ - In the Vivado Tcl console type in the following to create the HLx
+ example.
+
+ .. code:: Tcl
+
+ aws::make_ipi -examples
+
+    **NOTE**: To see what examples are possible, type
+    ``aws::make_ipi -examples`` into the Tcl console.
+ **NOTE**: IPI example design names do not include ``_hlx``,
+ which differs from the CL name ````.
+
+ - The example will be generated in
+ ``cl/examples//example_projects``. The Vivado
+    project is ``example_projects/.xpr``.
+
+ - Once the Block diagram is opened, review the different IP blocks
+ especially the settings in the AWS IP
+
+Run Simulation
+~~~~~~~~~~~~~~
+
+The simulation settings are already configured.
+
+- To launch simulation from within the Vivado GUI
+
+ - Click on 'SIMULATION' -> 'Run Simulation' -> 'Run Behavioral
+ Simulation'
+ - Add signals needed in the simulation
+ - Type ``run -all`` in the Tcl console
+
+Run Implementation
+~~~~~~~~~~~~~~~~~~
+
+- To run implementation from within the GUI, in the Design
+ Runs tab:
+
+ - Right click on 'impl_1' in the Design Runs tab and select Launch
+ Runs…
+ - Click 'OK' in the Launch Runs Dialog Box.
+ - Click 'OK' in the Missing Synthesis Results Dialog Box
+
+- This step will run both synthesis and implementation.
+
+AFI Creation
+~~~~~~~~~~~~
+
+The completed tarball file for a successfully implemented example design
+can be found in:
+
+.. code:: bash
+
+ $CL_DIR/build/scripts/example_projects/.runs/faas_1/build/checkpoints/to_aws/.Developer_CL.tar
+
+For information on how to create AFI from this tarball file, follow the
+`Submit Generated DCP for AFI
+Creation <./../README.html#step-6-submit-generated-dcp-for-afi-creation>`__
+section in the HDK step-by-step quick start guide.
+
+Runtime Example
+~~~~~~~~~~~~~~~
+
+The runtime software must be compiled before the AFI can run on F2
+instances. Copy the example's software directory to your preferred
+location and compile it using the following commands:
+
+.. code:: bash
+
+ source $AWS_FPGA_REPO_DIR/sdk_setup.sh
+    cp -r $HDK_COMMON_DIR/shell_stable/hlx/hlx_examples/build/IPI//software .
+ cd software
+ make all
+ sudo ./test_cl
+
+`Back to Vivado IPI GUI Setup Guide <./IPI-GUI-Vivado-Setup.html>`__
diff --git a/docs-rtd/source/hdk/docs/IPI-GUI-Vivado-Setup.rst b/docs-rtd/source/hdk/docs/IPI-GUI-Vivado-Setup.rst
new file mode 100644
index 000000000..31bfb1ad3
--- /dev/null
+++ b/docs-rtd/source/hdk/docs/IPI-GUI-Vivado-Setup.rst
@@ -0,0 +1,333 @@
+Vivado IP Integrator Setup
+==========================
+
+Table of Contents
+-----------------
+
+- `Vivado IP Integrator Setup <#vivado-ip-integrator-setup>`__
+
+ - `Table of Contents <#table-of-contents>`__
+ - `Overview <#overview>`__
+ - `Installation in Linux <#installation-in-linux>`__
+
+ - `Switching between HDK and HLx
+ flows <#switching-between-hdk-and-hlx-flows>`__
+
+ - `Vivado Overview <#vivado-overview>`__
+
+ - `Sources Tab <#sources-tab>`__
+
+ - `Hierarchy Tab <#hierarchy-tab>`__
+ - `IP Sources Tab <#ip-sources-tab>`__
+
+ - `Flow Navigator <#flow-navigator>`__
+
+ - `PROJECT MANAGER <#project-manager>`__
+ - `IP INTEGRATOR <#ip-integrator>`__
+ - `SIMULATION <#simulation>`__
+ - `RTL ANALYSIS <#rtl-analysis>`__
+ - `SYNTHESIS <#synthesis>`__
+ - `IMPLEMENTATION <#implementation>`__
+
+ - `TCL Commands <#tcl-commands>`__
+ - `Design Runs Tab <#design-runs-tab>`__
+
+ - `Vivado Flows Overview <#vivado-flows-overview>`__
+
+ - `IP Integration flow <#ip-integration-flow>`__
+ - `General Environment <#general-environment>`__
+
+ - `Design Constraints in
+ Project <#design-constraints-in-project>`__
+ - `Synthesis/Implementation <#synthesis-implementation>`__
+
+ - `Next Steps <#next-steps>`__
+
+Overview
+--------
+
+This document assumes you have cloned the developer kit and sourced the
+``hdk_setup.sh`` script. It is highly
+recommended that you get familiar with the HDK development flow by
+following the `step-by-step quick start guide for customer hardware
+development <../README.html>`__ prior to using the Vivado IP
+Integrator (IPI).
+
+After you become familiar with building an example AFI and running it on
+F2 instances, refer to `IP Integrator Quick Start
+Examples <./IPI-GUI-Examples.html>`__ documentation for help with example
+designs, new designs, and additional tutorials.
+
+Installation in Linux
+---------------------
+
+Using a text editor, open either ``~/.Xilinx/Vivado/init.tcl`` or
+``~/.Xilinx/Vivado/Vivado_init.tcl``. If neither file exists, run the
+following command to create one under ``~/.Xilinx/Vivado/``.
+
+.. code:: bash
+
+ touch Vivado_init.tcl
+
+To get the absolute path of ``$HDK_SHELL_DIR``, use this command:
+
+.. code:: bash
+
+ echo $HDK_SHELL_DIR
+
+**NOTE**: If your ``$HDK_SHELL_DIR`` is empty or does not display when
+echoed, you need to source the
+``hdk_setup.sh`` script.
+
+In ``init.tcl`` or ``Vivado_init.tcl``, append the following lines based
+upon the ``$HDK_SHELL_DIR`` path to the end of the file.
+
+.. code:: bash
+
+ set shell small_shell
+ source $::env(HDK_SHELL_DIR)/hlx/hlx_setup.tcl
+
+**NOTE**: A ``shell`` variable must be specified for the flow to pair
+the customer design with the correct shell variant. Valid values are
+``xdma_shell`` (coming soon) or ``small_shell``.
+
+Switching between HDK and HLx flows
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Vivado automatically sources either ``~/.Xilinx/Vivado/init.tcl`` or
+ ``~/.Xilinx/Vivado/Vivado_init.tcl`` at startup. After completing the
+ setup steps above, the IPI features will load automatically each time
+ you launch Vivado.
+
+- To switch back to the HDK flow, please remove the
+ ``source $::env(HDK_SHELL_DIR)/hlx/hlx_setup.tcl`` line from your
+ ``init.tcl`` or ``Vivado_init.tcl`` file.
+
+Vivado Overview
+---------------
+
+This section provides a basic overview of the Vivado GUI. The GUI
+environment enables developers of all experience levels to:
+
+- Quickly set project options and strategies to meet design requirements
+- Access interactive reports and design views
+- Efficiently resolve timing and area issues
+
+The IP Integrator (IPI) is a design entry tool in the Vivado HLx Design
+Suite. It allows developers to connect IPs at a block level and
+generates 'what you see is what you get' RTL files in either VHDL or
+Verilog format. The IPI flow enhances the standard RTL flow by providing
+designer assistance features, including:
+
+- Simplified connectivity of IPs through interface-based connections
+- Block automation that adds helper IPs (such as interconnects, DMAs,
+ and other support blocks) based on IP configuration
+- Connectivity automation for routing interfaces, clocks, and resets
+ between blocks
+- Design Rule Checks (DRCs) for ensuring proper interface connectivity
+ and clock domain crossing
+- Advanced hardware debug capabilities enabling transaction-level
+ debugging
+
+For detailed information and design methodology guidelines, refer to the
+following documentation:
+
+- Vivado Design Suite User Guide
+  (UG892)
+- Designing IP Subsystems Using IP Integrator
+  (UG994)
+- UltraFast Design Methodology Guide for FPGAs and SoCs
+  (UG949)
+
+To open the GUI, run command ``vivado``. After Vivado loads, create an
+empty project by selecting ``Create New Project`` and following the
+prompts until you see a blank canvas. The sections below describe the
+tabs and menus, refer to the screenshot below. Exploring these tabs and
+menus in your blank project is encouraged.
+
+|vivado_gui|
+
+Sources Tab
+~~~~~~~~~~~
+
+The box in yellow contains the design sources.
+
+Hierarchy Tab
+^^^^^^^^^^^^^
+
+The 'Sources' tab is divided into three different categories.
+
+1. Design Sources: contains synthesis/implementation sources
+2. Constraints: contains timing constraint (XDC) files
+3. Simulation Sources: contains simulation-only sources
+
+Clicking on a file displays its information in the 'Properties' tab
+(under 'Sources'). In this tab, you can specify how the file is used in
+the design flow:
+
+- RTL/IP sources are typically marked for:
+
+ - Synthesis, implementation, simulation
+ - Synthesis, implementation
+ - Simulation
+
+- XDC files are typically marked for:
+
+ - Synthesis, implementation
+ - Synthesis
+ - Implementation
+
+IP Sources Tab
+^^^^^^^^^^^^^^
+
+When an IP is added to your project, the 'IP Sources' tab becomes
+visible. This tab contains imported IP sources.
+
+Flow Navigator
+~~~~~~~~~~~~~~
+
+The 'Flow Navigator', located in the green box, allows you to launch
+predefined design flow steps, such as synthesis and implementation.
+
+PROJECT MANAGER
+^^^^^^^^^^^^^^^
+
+The 'PROJECT MANAGER' section allows you to add sources (RTL, IP, and
+XDC files), access Language Templates for common RTL constructs, XDCs
+and DEBUG, and use IP Catalog to add IPs to the project. This portion
+targets the RTL flow.
+
+The IP Catalog allows you to search for specific IPs or browse through
+IP categories. When using IP Catalog, you are responsible for adding and
+connecting the IP to your RTL design.
+
+IP INTEGRATOR
+^^^^^^^^^^^^^
+
+This section allows you to open and modify the 'Block Design' and
+generate the 'Block Design' after validation.
+
+**Note**: The HLx flow pre-creates the 'Block Design' framework with AWS
+IP and board, so 'Create Block Design' is not necessary.
+
+Double-clicking an IP in the 'Block Design' opens the 'Re-customize IP'
+dialog box, where you can review or modify IP settings. When connecting
+designs, you can use 'Run Connection Automation' to automatically
+connect interfaces.
+
+SIMULATION
+^^^^^^^^^^
+
+In this section, you can modify simulation settings by right-clicking
+'SIMULATION'. To run a simulation, select 'Run Simulation' → 'Run
+Behavioral Simulation'.
+
+RTL ANALYSIS
+^^^^^^^^^^^^
+
+Clicking 'Open Elaborate Design' analyzes the RTL files, allowing you to
+verify RTL structures and syntax before synthesis.
+
+SYNTHESIS
+^^^^^^^^^
+
+Right-clicking 'SYNTHESIS' allows you to view synthesis settings and
+launch a synthesis run. After synthesis completes, click 'Open
+Synthesized Design' to access the post-synthesis checkpoint for
+analysis. This stage is crucial for developing timing constraints for
+the CL.
+
+IMPLEMENTATION
+^^^^^^^^^^^^^^
+
+Right-clicking 'IMPLEMENTATION' allows you to view implementation
+settings and launch an implementation run. After implementation
+completes, click 'Open Implemented Design' to access the
+post-implementation checkpoint for analysis of the SH (Shell) and CL
+(Custom Logic).
+
+TCL Commands
+~~~~~~~~~~~~
+
+The orange box is where you enter Tcl commands. The 'Tcl Console' tab
+above displays the command outputs.
+
+Design Runs Tab
+~~~~~~~~~~~~~~~
+
+The 'Design Runs' are located in the blue box. This area provides
+functionality similar to the 'SYNTHESIS' and 'IMPLEMENTATION' sections
+in the 'Flow Navigator'. The examples and tutorials demonstrate how to
+use 'synth_1' and 'impl_1' runs to build your design.
+
+Vivado Flows Overview
+---------------------
+
+The Vivado HLx environment supports the IP Integrator (IPI) flow. This
+section provides a top-level overview of this flow. For detailed
+information, see `HLx GUI Flows with Vivado IP
+Integrator <./IPI-GUI-Flows.html>`__.
+
+IP Integration flow
+~~~~~~~~~~~~~~~~~~~
+
+You can easily create a full design by adding Vivado IP to the block
+diagram. Use RTL module referencing to add custom RTL as IP to the block
+diagram. This flow supports both RTL and IP additions as IP blocks. Find
+examples in the `IP Integrator Quick Start
+Examples <./IPI-GUI-Examples.html#hlx-examples-using-ip-integrator-flow>`__.
+
+|ipi_mod_ref|
+
+General Environment
+~~~~~~~~~~~~~~~~~~~
+
+Design Constraints in Project
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Top-level clocks from the Shell are provided for synthesis in:
+
+- cl_clocks_aws.xdc – Top-level clock constraints for the CL
+
+The following files are available for adding custom constraints:
+
+- cl_synth_user.xdc – User synthesis constraints
+- cl_pnr_user.xdc – User timing and floorplanning constraints
+
+Synthesis/Implementation
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+By default, synthesis uses the ``Default`` directive and all
+implementation steps use the ``Explore`` directive.
+
+To modify implementation settings, right-click 'IMPLEMENTATION', click
+'Implementation Settings...' and select the 'Implementation' option
+in 'Project Settings'. Modify directives only for
+
+- opt_design
+- place_design
+- phys_opt_design
+- route_design
+
+NOTE: Do not change the ``Strategy`` option, as this will override HLx
+environment settings.
+
+For getting started, refer to `IP Integrator Quick Start
+Examples <./IPI-GUI-Examples.html>`__.
+
+Next Steps
+----------
+
+1. Review the `AWS IP <./IPI-GUI-AWS-IP.html>`__ documentation to
+ familiarize yourself with shell features available in the IPI
+ environment.
+2. Test `building an IPI example design in Vivado
+ GUI <./IPI-GUI-Flows.html>`__
+3. Proceed to the `IPI Quick Start Examples <./IPI-GUI-Examples.html>`__
+ for guidance on creating example designs, developing new designs and
+ following additional tutorials.
+
+.. |vivado_gui| image:: ./../../_static/hlx_images/vivado_gui.png
+.. |ipi_mod_ref| image:: ./../../_static/hlx_images/ipi_mod_ref.png
+
+`Back to Home <./../../index.html>`__
diff --git a/docs-rtd/source/hdk/docs/Virtual-JTAG-XVC.rst b/docs-rtd/source/hdk/docs/Virtual-JTAG-XVC.rst
index 32b366524..854a8927a 100644
--- a/docs-rtd/source/hdk/docs/Virtual-JTAG-XVC.rst
+++ b/docs-rtd/source/hdk/docs/Virtual-JTAG-XVC.rst
@@ -95,7 +95,7 @@ guide /driver_v0.4/xvc_pcie_driver_base.c:306:25: error: too many arguments to function ‘class_create’
306 | xvc_dev_class = class_create(THIS_MODULE, "xil_xvc_class");
3. To resolve the error, update the ``xvc_pcie_driver_base.c`` file as follows:
diff --git a/docs-rtd/source/index.rst b/docs-rtd/source/index.rst
index 037fea17f..4126f2966 100644
--- a/docs-rtd/source/index.rst
+++ b/docs-rtd/source/index.rst
@@ -29,6 +29,9 @@ If you are new to AWS EC2 FPGA-accelerated instances, we recommend you read this
* - `Vitis (Software-Defined) <./vitis/README.html>`__
- Software developers with C/C++ or RTL experience
- Vitis HLS/RTL/Hardware Emulation
+ * - `HLx <./hdk/docs/IPI-GUI-Vivado-Setup.html>`__
+ - Developers with Block Design experience
+ - Vivado IP Integrator
Table of Contents
-----------------
@@ -43,6 +46,7 @@ Table of Contents
vitis/README
developer-resources/Amazon-DCV-Setup-Guide.rst
+ hdk/docs/IPI-GUI-Vivado-Setup
ERRATA
diff --git a/docs-rtd/source/sdk/apps/virtual-ethernet/doc/SDE-HW-Guide.rst b/docs-rtd/source/sdk/apps/virtual-ethernet/doc/SDE-HW-Guide.rst
index 4770d2e93..2bad56d0e 100644
--- a/docs-rtd/source/sdk/apps/virtual-ethernet/doc/SDE-HW-Guide.rst
+++ b/docs-rtd/source/sdk/apps/virtual-ethernet/doc/SDE-HW-Guide.rst
@@ -2589,11 +2589,13 @@ of write pointer plus 1 is equal to the read pointer.
write pointer value to determine how many valid metadata entries are
present in the circular buffer.
-.. _data_flow_model:
+.. _data-flow-model:
Data Flow Model
---------------
+.. _c2h-sde:
+
C2H
~~~
@@ -2644,6 +2646,8 @@ C2H
disabled and the software is not required to update SDE’s copy of
the read pointer.
+.. _h2c-sde:
+
H2C
~~~
diff --git a/docs-rtd/source/sitemap.xml b/docs-rtd/source/sitemap.xml
index 739c803fc..71f4557e6 100644
--- a/docs-rtd/source/sitemap.xml
+++ b/docs-rtd/source/sitemap.xml
@@ -2,201 +2,233 @@
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/ERRATA.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/RELEASE-NOTES.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/User-Guide-AWS-EC2-FPGA-Development-Kit.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/all-links.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/developer-resources/Amazon-DCV-Setup-Guide.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/cl/CHECKLIST-BEFORE-BUILDING-CL.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/cl/examples/CL-TEMPLATE/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/cl/examples/cl-dram-hbm-dma/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/cl/examples/cl-dram-hbm-dma/verif/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
+
+
+
+ https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/cl/examples/cl-ipi-cdma-test-hlx/README.html
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/cl/examples/cl-mem-perf/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/cl/examples/cl-mem-perf/verif/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/cl/examples/cl-sde/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/cl/examples/cl-sde/software/src/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/cl/examples/cl-sde/verif/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
+
+
+
+ https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/cl/examples/hello-world-hlx/README.html
+ 2025-09-25T00:00:01+00:00
+
+
+
+ https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/cl/examples/hello-world-mb-hlx/README.html
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/AWS-CLI-FPGA-Commands.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/AWS-CLK-GEN-spec.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/AWS-Fpga-Pcie-Memory-Map.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/AWS-Shell-ERRATA.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/AWS-Shell-Interface-Specification.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/Clock-Recipes-User-Guide.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
+
+
+ https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/IPI-GUI-AWS-IP.html
+ 2025-09-25T00:00:01+00:00
+
+
+ https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/IPI-GUI-Examples.html
+ 2025-09-25T00:00:01+00:00
+
+
+ https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/IPI-GUI-Flows.html
+ 2025-09-25T00:00:01+00:00
+
+
+
+ https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/IPI-GUI-Vivado-Setup.html
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/List-AFI-on-Marketplace.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/RTL-Simulation-Guide-for-HDK-Design-Flow.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/Supported-DDR-Modes.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/Virtual-JTAG-XVC.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/XDMA-Install.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/on-premise-licensing-help.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/hdk/docs/shell-floorplan.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/index.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/sdk/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/sdk/apps/msix-interrupts/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/sdk/apps/virtual-ethernet/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/sdk/apps/virtual-ethernet/doc/SDE-HW-Guide.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/sdk/apps/virtual-ethernet/doc/Virtual-Ethernet-Application-Guide.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/sdk/docs/F2-Software-Performance-Optimization-Guide.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/sdk/docs/Load-Times.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/sdk/userspace/cython-bindings/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/sdk/userspace/fpga_mgmt_examples/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/sdk/userspace/fpga_mgmt_tools/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/vitis/ERRATA.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/vitis/README.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/genindex.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
https://awsdocs-fpga-f2.readthedocs-hosted.com/latest/search.html
- 2025-08-18T00:00:01+00:00
+ 2025-09-25T00:00:01+00:00
\ No newline at end of file
diff --git a/docs-rtd/source/spelling_filters/hex_filter.py b/docs-rtd/source/spelling_filters/hex_filter.py
index f5784554c..312b053b0 100644
--- a/docs-rtd/source/spelling_filters/hex_filter.py
+++ b/docs-rtd/source/spelling_filters/hex_filter.py
@@ -1,7 +1,8 @@
from enchant.tokenize import Filter
+
class HexFilter(Filter):
- def _skip(self, word):
+ def _skip(self, word) -> bool:
if word.startswith(("0x", "0X")):
try:
int(word[2:], 16)
@@ -14,4 +15,4 @@ def _skip(self, word):
return True
except ValueError:
return False
- return False
\ No newline at end of file
+ return False
diff --git a/docs-rtd/source/spelling_filters/ordinal_filter.py b/docs-rtd/source/spelling_filters/ordinal_filter.py
index 92b8bde5e..2107975c1 100644
--- a/docs-rtd/source/spelling_filters/ordinal_filter.py
+++ b/docs-rtd/source/spelling_filters/ordinal_filter.py
@@ -1,19 +1,20 @@
from enchant.tokenize import Filter
+
class OrdinalFilter(Filter):
- def _skip(self, word):
+ def _skip(self, word) -> bool:
is_ordinal = any([word.endswith(suffix) for suffix in ("st", "nd", "rd", "th")])
if not is_ordinal:
return False
try:
number = int(word[:-2])
if number % 10 == 1 and number % 100 != 11:
- return word.endswith('st')
+ return word.endswith("st")
elif number % 10 == 2 and number % 100 != 12:
- return word.endswith('nd')
+ return word.endswith("nd")
elif number % 10 == 3 and number % 100 != 13:
- return word.endswith('rd')
+ return word.endswith("rd")
else:
- return word.endswith('th')
+ return word.endswith("th")
except ValueError:
- return False
\ No newline at end of file
+ return False
diff --git a/docs-rtd/source/spelling_filters/rtl_hex_filter.py b/docs-rtd/source/spelling_filters/rtl_hex_filter.py
index 8c5d2794e..dacb2f37a 100644
--- a/docs-rtd/source/spelling_filters/rtl_hex_filter.py
+++ b/docs-rtd/source/spelling_filters/rtl_hex_filter.py
@@ -1,30 +1,18 @@
import re
from enchant.tokenize import Filter
+
class RTLHexFilter(Filter):
- def _skip(self, word):
- rtl_hex_literal_prefix = "’h"
- if rtl_hex_literal_prefix in word:
- # If the literal makes use of underscores for readability
- if "_" in word:
- underscore_positions = [pos.start() for pos in re.finditer("_", word)]
- for pos in underscore_positions:
- aligned_word = ""
- try:
- aligned_word = word[pos + 1 : pos + 5]
- word_len = len(aligned_word)
- if word_len < 4:
- raise IndexError(f"Word {aligned_word} is only {word_len} hex digits long!")
- try:
- int(aligned_word, base=16)
- return True
- except ValueError as v:
- print("ERROR: Could not convert hex word literal {aligned_word} to a base-16 integer!")
- return False
- except IndexError as i:
- print(f"ERROR: Could not gather 4 hex digits after underscore: \n\t{aligned_word} \n\t{word}")
- print((i))
- return False
- else:
- return True
- return False
+ def _skip(self, word) -> bool:
+ if "'h" not in word:
+ return False
+
+ if "_" not in word:
+ return True
+
+ # Check if segments after underscores are valid 4-digit hex
+ for match in re.finditer("_", word):
+ segment = word[match.start() + 1 : match.start() + 5]
+ if len(segment) < 4 or not all(c in "0123456789abcdefABCDEF" for c in segment):
+ return False
+ return True
diff --git a/docs-rtd/source/spelling_filters/username_filter.py b/docs-rtd/source/spelling_filters/username_filter.py
new file mode 100644
index 000000000..ea7af22cb
--- /dev/null
+++ b/docs-rtd/source/spelling_filters/username_filter.py
@@ -0,0 +1,9 @@
+from enchant.tokenize import Filter
+
+
+class UsernameFilter(Filter):
+ def _skip(self, word) -> bool:
+ """Skip word if it starts or ends with '@'"""
+ if word.startswith("@") or word.endswith("@"):
+ return True
+ return False
diff --git a/docs-rtd/source/spelling_wordlist.txt b/docs-rtd/source/spelling_wordlist.txt
index caa58fa84..9726bc584 100644
--- a/docs-rtd/source/spelling_wordlist.txt
+++ b/docs-rtd/source/spelling_wordlist.txt
@@ -277,3 +277,14 @@ Startup
popups
morgnza
libsde
+afi
+el
+HLx
+hlx
+ipi
+mb
+cdma
+xdc
+impl
+pnr
+floorplanning
diff --git a/docs-rtd/source/vitis/ERRATA.rst b/docs-rtd/source/vitis/ERRATA.rst
index 28afe30d3..e407a8365 100644
--- a/docs-rtd/source/vitis/ERRATA.rst
+++ b/docs-rtd/source/vitis/ERRATA.rst
@@ -25,9 +25,7 @@ The following examples are not currently supported by AMD:
The following examples are currently under development by AMD:
-- ``rtl_kernels/rtl_streaming_free_running_k2k``
-- ``rtl_kernels/rtl_streaming_k2k_mm``
-- ``rtl_kernels/rtl_vadd_hw_debug``
+- ``performance/axi_burst_performance``
Hardware Emulation
------------------
diff --git a/docs-rtd/source/vitis/README.rst b/docs-rtd/source/vitis/README.rst
index 3d611b474..c9ded1c2f 100644
--- a/docs-rtd/source/vitis/README.rst
+++ b/docs-rtd/source/vitis/README.rst
@@ -161,6 +161,8 @@ command to insure that all files required for simulation are present. If
all required files are present, you will see
``All required simulation files are present!``.
+``NOTE: All paths shown below are identical for customers using Rocky Linux, substituting 'rocky' for 'ubuntu'``
+
.. code:: bash
ubuntu@ip-aaa-bb-cc-dd:~/aws-fpga/vitis/examples/vitis_examples/hello_world$ hw_file_check
diff --git a/hdk/README.md b/hdk/README.md
index 29ac54b12..0a847a5aa 100644
--- a/hdk/README.md
+++ b/hdk/README.md
@@ -6,6 +6,7 @@
- [Table of Contents](#table-of-contents)
- [HDK Overview](#hdk-overview)
- [Getting Started](#getting-started)
+ - [Quick Start HW/SW Example: Host-to-FPGA Communication via the OCL Interface](#quick-start-hwsw-example-host-to-fpga-communication-via-the-ocl-interface)
- [Build Accelerator AFI using HDK Design Flow](#build-accelerator-afi-using-hdk-design-flow)
- [Step 1. Setup Development Environment](#step-1-setup-development-environment)
- [Step 2. Clone Developer Kit Repository](#step-2-clone-developer-kit-repository)
@@ -39,13 +40,25 @@ The HDK design flow enables developers to create RTL-based accelerator designs f
## Getting Started
+### Quick Start HW/SW Example: Host-to-FPGA Communication via the OCL Interface
+
+The [test_aws_clk_gen.c software runtime example](./cl/examples/cl_mem_perf/software/runtime/test_aws_clk_gen.c)
+utilizes the [OCL AXI interface](./docs/AWS_Shell_Interface_Specification.md)
+to program the [AWS Clock Generation IP](./docs/AWS_CLK_GEN_spec.md) within the CL_MEM_PERF AFI.
+
+The example can be run by following the steps in the following documentation references:
+
+1. Build and ingest the [CL_MEM_PERF](./cl/examples/cl_mem_perf/README.md) example by following the [Build Accelerator AFI using HDK Design Flow](#build-accelerator-afi-using-hdk-design-flow) section below
+2. [Load the AGFI](#step-7-load-accelerator-afi-on-f2-instance) generated by the `create-fpga-image` command
+3. Follow the [CL_MEM_PERF software runtime compilation instructions](./cl/examples/cl_mem_perf/README.md#software) and execute `./test_aws_clk_gen`
+
### Build Accelerator AFI using HDK Design Flow
This section provides a step-by-step guide to build an F2 AFI using the HDK design flow. The flow starts with an existing Customer Logic (CL) example design. Steps 1 through 3 demonstrate how to set up the HDK development environment. Steps 4 through 5 show the commands used to generate CL Design Checkpoint (DCP) files and other build artifacts. Steps 6 and 7 demonstrate how to submit the DCP file to generate an AFI for use on F2 instances.
#### Step 1. Setup Development Environment
-Developers can either use the AWS-provided developer AMI for F2 or their on-premise development environment for this demo.
+Developers can either use the [AWS-provided developer AMI](./../User_Guide_AWS_EC2_FPGA_Development_Kit.md#fpga-developer-ami) for F2 or their [on-premise development environment](./docs/on_premise_licensing_help.md) for this demo.
#### Step 2. Clone Developer Kit Repository
@@ -129,100 +142,19 @@ Generated post-route DCP and design manifest files are archived into a tarball f
#### Step 6. Submit Generated DCP for AFI Creation
-To submit the DCP, create an S3 bucket and upload the DCP tarball file to the bucket. DCP submission requires the following information:
-
-- Name of the design (Optional).
-- Generic description of the logic design (Optional).
-- Destination location of the tarball file object in your S3 bucket.
-- Destination location of an S3 directory where AWS can save the logs for your AFI’s creation.
-
-To upload your tarball file to S3, you can use any of [the tools supported by S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/upload-objects.html).
-
-For example, you can use the AWS CLI as follows:
-
-Create a bucket and folder for your tarball, then copy to S3.
-
-Currently, `us-east-1` and `eu-west-2` are available as `REGION` options.
-
-```bash
-export DCP_BUCKET_NAME=''
-export DCP_FOLDER_NAME=''
-export REGION='us-east-1'
-export DCP_TARBALL_TO_INGEST='<$CL_DIR/build/checkpoints/YYYY_MM_DD-HHMMSS.Developer_CL.tar>'
-
-# Create an S3 bucket (choose a unique bucket name)
-aws s3 mb s3://${DCP_BUCKET_NAME} --region ${REGION}
-# Create folder for your tarball files
-aws s3 mb s3://${DCP_BUCKET_NAME}/${DCP_FOLDER_NAME}/
-# Upload the file to S3
-aws s3 cp ${DCP_TARBALL_TO_INGEST} s3://${DCP_BUCKET_NAME}/${DCP_FOLDER_NAME}/
-```
-
-**NOTE**: The trailing '/' is required after `${DCP_FOLDER_NAME}`
-
-Create a folder for your log files
-
-```bash
-export LOGS_BUCKET_NAME=''
-export LOGS_FOLDER_NAME=''
-
-# Create a folder to keep your logs
-aws s3 mb s3://${LOGS_BUCKET_NAME}/${LOGS_FOLDER_NAME}/ --region ${REGION}
-# Create a temp file
-touch LOGS_FILES_GO_HERE.txt
-# Create the folder on S3
-aws s3 cp LOGS_FILES_GO_HERE.txt s3://${LOGS_BUCKET_NAME}/${LOGS_FOLDER_NAME}/
-```
-
-**NOTE**: The trailing '/' is required after `${LOGS_FOLDER_NAME}`
-
-The output of this command includes two identifiers for your AFI:
+Once developers have built their DCP, they may submit their FPGA design for AFI creation in one of two ways:
-```bash
-export DCP_TARBALL_NAME=$(basename ${DCP_TARBALL_TO_INGEST})
-export CL_DESIGN_NAME=''
-export CL_DESIGN_DESCRIPTION='Description of ${CL_DESIGN_NAME}'
-
-# Call AWS CLI ingestion command
-aws ec2 create-fpga-image --name ${CL_DESIGN_NAME} --description "${CL_DESIGN_DESCRIPTION}" --input-storage-location Bucket=${DCP_BUCKET_NAME},Key=${DCP_FOLDER_NAME}/${DCP_TARBALL_NAME} --logs-storage-location Bucket=${LOGS_BUCKET_NAME},Key=${LOGS_FOLDER_NAME}/ --region ${REGION}
-
-{
- "FpgaImageId": "afi-09953582f46c45b17",
- "FpgaImageGlobalId": "agfi-0925b211f5a81b071"
-}
-```
+1. Execute the [create_afi.py utility](./scripts/create_afi.py) from anywhere within the `aws-fpga` repository:
+ - `$AWS_FPGA_REPO_DIR/hdk/scripts/create_afi.py`
+   - May require a Python virtual env which can be started with: `source $AWS_FPGA_REPO_DIR/hdk/scripts/start_venv.sh`
-- `FpgaImageId` or AFI ID: This is the main ID used to manage developer’s AFI through the AWS EC2 CLI and AWS SDK APIs. This ID is regional, i.e., if an AFI is copied across multiple regions, it will have a different, unique AFI ID in each region.
+2. OR: Upload the DCP to S3 and specify all fields to the `aws ec2 create-fpga-image` utility according to instructions in [Manual AFI Creation](./docs/Amazon_FPGA_Images_Afis_Guide.md#option-2-manual-afi-creation)
-- `FpgaImageGlobalId` or AGFI ID: This is a global ID used to refer to an AFI from within an F2 instance. For example, to load or clear an AFI from an FPGA slot, developers need to use the AGFI ID. Since the AGFI IDs is global (by design), it allows developers to copy a combination of AFI/AMI to multiple regions and they will work without any extra setup.
-
-The `describe-fpga-images` command allows developers to check the AFI’s state while the AFI creation process runs in the background. The AFI ID returned by the `create-fpga-image` command must be provided. The AFI is ready to be deployed once the creation completes and the state code returned is `available`.
-
-```bash
-aws ec2 describe-fpga-images --fpga-image-ids afi-09953582f46c45b17 --region us-east-1
-
- ...
-
- {
- "FpgaImages": [
- {
- "FpgaImageId": "afi-09953582f46c45b17",
- "FpgaImageGlobalId": "agfi-0925b211f5a81b071",
- "Name": "cl_sde_0x10212415",
- "Description": "Latest devkit build of cl_sde with 0x10212415 small shell release",
- ...
- "State": {
- "Code": "available"
- },
- ...
- }
- ]
- }
-```
+**NOTE: Additional information about AFIs and surrounding tools can be found in the [Amazon FPGA Images (AFIs) Guide](./docs/Amazon_FPGA_Images_Afis_Guide.md)**
#### Step 7. Load Accelerator AFI on F2 Instance
-Now that your AFI is available, it can be tested on an F2 instance. The instance can be launched using any preferred AMI, private or public, from the AWS Marketplace catalog. AWS recommends using AMIs with Ubuntu 20.04 and kernel version 5.15.
+Now that your AFI is available, it can be tested on an F2 instance. The instance can be launched using any preferred AMI, private or public, from the AWS EC2 AMI Catalog. AWS recommends using AMIs with [similar OS and kernel versions](../User_Guide_AWS_EC2_FPGA_Development_Kit.md#fpga-developer-ami) to those of our developer AMIs.
Now you need to install the FPGA Management tools by sourcing the `sdk_setup.sh` script:
diff --git a/hdk/cl/examples/CL_TEMPLATE/build/scripts/synth_CL_TEMPLATE.tcl b/hdk/cl/examples/CL_TEMPLATE/build/scripts/synth_CL_TEMPLATE.tcl
index 4cf67a181..a73cf60d3 100644
--- a/hdk/cl/examples/CL_TEMPLATE/build/scripts/synth_CL_TEMPLATE.tcl
+++ b/hdk/cl/examples/CL_TEMPLATE/build/scripts/synth_CL_TEMPLATE.tcl
@@ -28,7 +28,7 @@ print "Reading encrypted user source codes"
# Reading the .sv and .v files, as proper designs would not require reading
# .vh, nor .inc files
-read_verilog -sv [glob ${src_post_enc_dir}/*.?v]
+read_verilog -sv [glob ${src_post_enc_dir}/*.{s,}v]
#---- End of section replaced by User ----
diff --git a/hdk/cl/examples/cl_dram_hbm_dma/build/scripts/synth_cl_dram_hbm_dma.tcl b/hdk/cl/examples/cl_dram_hbm_dma/build/scripts/synth_cl_dram_hbm_dma.tcl
index e03d8978f..1e423d17d 100644
--- a/hdk/cl/examples/cl_dram_hbm_dma/build/scripts/synth_cl_dram_hbm_dma.tcl
+++ b/hdk/cl/examples/cl_dram_hbm_dma/build/scripts/synth_cl_dram_hbm_dma.tcl
@@ -26,9 +26,8 @@ print "Reading encrypted user source codes"
#---- User would replace this section -----
-# Reading the .sv and .v files, as proper designs would not require reading
-# .vh, nor .inc files
-read_verilog -sv [glob ${src_post_enc_dir}/*.?v]
+# Reading the .sv and .v files, and any .vh files that require handling
+read_verilog -sv [glob ${src_post_enc_dir}/*.{s,}v]
read_verilog -sv [glob ${src_post_enc_dir}/cl_dram_dma_defines.vh]
set_property file_type {Verilog Header} [get_files ${src_post_enc_dir}/cl_dram_dma_defines.vh]
diff --git a/hdk/cl/examples/cl_ipi_cdma_test_hlx/README.md b/hdk/cl/examples/cl_ipi_cdma_test_hlx/README.md
new file mode 100644
index 000000000..3817378cd
--- /dev/null
+++ b/hdk/cl/examples/cl_ipi_cdma_test_hlx/README.md
@@ -0,0 +1,23 @@
+# HLx Flow for CDMA Test IP Integrator Example
+
+## Table of Contents
+
+- [HLx Flow for CDMA Test IP Integrator Example](#hlx-flow-for-cdma-test-ip-integrator-example)
+ - [Table of Contents](#table-of-contents)
+ - [Overview](#overview)
+ - [Building and Testing Example](#building-and-testing-example)
+
+## Overview
+
+This example design exercises the following data interfaces:
+
+- AXIL_OCL: Polls the AXI GPIO to which the DDR and HBM calibration done signals are connected
+- AXI_PCIS: Writes 1K data pattern to DDR source buffer
+- AXIL_OCL: Configures AXI CDMA for 1K DMA transfer from DDR to HBM and polls AXI CDMA status register to determine transfer completion
+- AXI_PCIS: Reads 1K from HBM destination buffer and compares against original data pattern
+
+
+
+## Building and Testing Example
+
+Follow the common design steps specified in the [IPI example design flow document](./../../../docs/IPI-GUI-Flows.md) to build and test this example on F2 instances.
diff --git a/hdk/cl/examples/cl_mem_perf/build/scripts/synth_cl_mem_perf.tcl b/hdk/cl/examples/cl_mem_perf/build/scripts/synth_cl_mem_perf.tcl
index 09c687407..2b0673888 100644
--- a/hdk/cl/examples/cl_mem_perf/build/scripts/synth_cl_mem_perf.tcl
+++ b/hdk/cl/examples/cl_mem_perf/build/scripts/synth_cl_mem_perf.tcl
@@ -27,9 +27,8 @@ print "Reading encrypted user source codes"
#---- User would replace this section -----
-# Reading the .sv and .v files, as proper designs would not require reading
-# .vh, nor .inc files
-read_verilog -sv [glob ${src_post_enc_dir}/*.?v]
+# Reading the .sv and .v files, and any .vh files that require handling
+read_verilog -sv [glob ${src_post_enc_dir}/*.{s,}v]
read_verilog -sv [glob ${src_post_enc_dir}/cl_mem_perf_defines.vh]
set_property file_type {Verilog Header} [get_files ${src_post_enc_dir}/cl_mem_perf_defines.vh]
diff --git a/hdk/cl/examples/cl_sde/build/scripts/synth_cl_sde.tcl b/hdk/cl/examples/cl_sde/build/scripts/synth_cl_sde.tcl
index 73ce9b067..35014370a 100644
--- a/hdk/cl/examples/cl_sde/build/scripts/synth_cl_sde.tcl
+++ b/hdk/cl/examples/cl_sde/build/scripts/synth_cl_sde.tcl
@@ -27,13 +27,12 @@ print "Reading encrypted user source codes"
#---- User would replace this section -----
-# Reading the .sv and .v files, as proper designs would not require reading
-# .vh, nor .inc files
+# Reading the .sv and .v files, and any .vh files that require handling
read_verilog -sv [glob ${src_post_enc_dir}/cl_sde_defines.vh]
set_property file_type {Verilog Header} [get_files ${src_post_enc_dir}/cl_sde_defines.vh]
set_property is_global_include true [get_files ${src_post_enc_dir}/cl_sde_defines.vh]
-read_verilog -sv [glob ${src_post_enc_dir}/*.?v]
+read_verilog -sv [glob ${src_post_enc_dir}/*.{s,}v]
#---- End of section replaced by User ----
diff --git a/hdk/cl/examples/cl_sde/software/runtime/Makefile b/hdk/cl/examples/cl_sde/software/runtime/Makefile
index 206b0fac0..639d5708a 100644
--- a/hdk/cl/examples/cl_sde/software/runtime/Makefile
+++ b/hdk/cl/examples/cl_sde/software/runtime/Makefile
@@ -38,7 +38,7 @@ GLOBAL_SDE_OFFSET := -DGLOBAL_SDE_OFFSET=0x0
GLOBAL_ATG_OFFSET := -DGLOBAL_ATG_OFFSET=0x0
OPT := -DFPGA_ALLOW_NON_ROOT -DCONFIG_LOGLEVEL=1 $(GLOBAL_SDE_OFFSET) $(GLOBAL_ATG_OFFSET)
-LIB_CFLAGS := $(OPT) -g -Wall -Werror -W -Wno-parentheses -Wstrict-prototypes -Wmissing-prototypes $(INCLUDES) -fvisibility=hidden -mavx2 -MMD -MP
+LIB_CFLAGS := $(OPT) -g -Wall -Werror -W -Wno-parentheses -Wstrict-prototypes -Wmissing-prototypes $(INCLUDES) -fvisibility=hidden -mavx2 -MMD -MP -fPIC
EXAMPLE_CFLAGS := $(OPT) -g -Wall -Werror -W -Wno-parentheses -Wstrict-prototypes -Wmissing-prototypes $(INCLUDES)
SRCS := $(wildcard $(SDE_SRC_DIR)/*.c)
SDE_EXAMPLES := sde_c2h_perf_test sde_h2c_perf_test sde_c2h_simple sde_h2c_simple sde_loopback_simple sde_c2h_user_buffers sde_loopback_perf_test
@@ -86,7 +86,7 @@ install_sde_lib: $(SDELIB_SO)
$(SDELIB_SO): $(OBJS)
mkdir -p $(SDE_LIB_SO_DIR)
- $(CC) -o $(SDELIB_SO) $(LIBCFLAGS) -shared $(OBJS) -Wl,-soname,libsde.so.1
+ $(CC) -o $(SDELIB_SO) $(LIB_CFLAGS) -shared $(OBJS) -Wl,-soname,libsde.so.1
$(SDE_SRC_DIR)/%.o: $(SDE_SRC_DIR)/%.c
$(CC) $(LIB_CFLAGS) -c $< -o $@
diff --git a/hdk/cl/examples/hello_world_hlx/README.md b/hdk/cl/examples/hello_world_hlx/README.md
new file mode 100644
index 000000000..75b971bd1
--- /dev/null
+++ b/hdk/cl/examples/hello_world_hlx/README.md
@@ -0,0 +1,22 @@
+# HLx Flow for Hello World IP Integrator Example
+
+## Table of Contents
+
+- [HLx Flow for Hello World IP Integrator Example](#hlx-flow-for-hello-world-ip-integrator-example)
+ - [Table of Contents](#table-of-contents)
+ - [Overview](#overview)
+ - [Building and Testing Example](#building-and-testing-example)
+
+## Overview
+
+This IP Integrator design includes the AWS IP configured with an OCL interface (AXI4-Lite Master) that controls the VLED through AXI GPIO and a PCIS interface (AXI4 Master) that performs read and write operations to the AXI BRAM in the CL.
+
+The VLED is set based upon writing `0xAAAA` into the AXI GPIO (0x0) slave register to drive the VLED. The value is read using the Verilog task `tb.get_virtual_led` in simulation or `fpga-get-virtual-led` on F2 instance.
+
+The PCIS Interface writes ASCII data into the AXI BRAM memory space and reads back from these addresses to print out “Hello World!” in simulation or on a F2 instance.
+
+
+
+## Building and Testing Example
+
+Follow the common design steps specified in the [IPI example design flow document](./../../../docs/IPI-GUI-Flows.md) to build and test this example on F2 instances.
diff --git a/hdk/cl/examples/hello_world_mb_hlx/README.md b/hdk/cl/examples/hello_world_mb_hlx/README.md
new file mode 100644
index 000000000..edf83844e
--- /dev/null
+++ b/hdk/cl/examples/hello_world_mb_hlx/README.md
@@ -0,0 +1,60 @@
+# HLx Flow for Hello World MicroBlaze IP Integrator Example
+
+## Table of Contents
+
+- [HLx Flow for Hello World MicroBlaze IP Integrator Example](#hlx-flow-for-hello-world-microblaze-ip-integrator-example)
+ - [Table of Contents](#table-of-contents)
+ - [Overview](#overview)
+ - [Building and Testing Example](#building-and-testing-example)
+ - [MicroBlaze Debug Module (MCM)](#microblaze-debug-module-mcm)
+ - [BRAM Access through VJTAG and MDM](#bram-access-through-vjtag-and-mdm)
+
+## Overview
+
+This design shares the same basic structure as the [hello_world example](../hello_world_hlx/README.md).
+
+In addition, the design includes a MicroBlaze (MB) processor with LMB memory connections and a MicroBlaze Debug Module (MDM) for debugging purposes. The MicroBlaze uses its Data Port (DP) Master to access the AXI BRAM, which is also accessible by the PCIS Master. Through BSCAN, the host's XSDB program connects to the MDM, allowing it to issue commands to the MicroBlaze for reading and writing to the AXI BRAM.
+
+The example program executes in the following sequence:
+
+- After reset, MicroBlaze (MB) begins executing from an ELF file that is loaded into LMB Memory.
+- MicroBlaze writes into the shared memory and writes into bit 0 of the GPIO. MicroBlaze polls for bit 1 and bit 0 to be asserted.
+- The host polls GPIO bit 0 for assertion. It writes a pattern into the shared memory (`0xBEEF_DEAD`) and writes into bit 1 of the GPIO.
+- Once MicroBlaze polls GPIO bit 1 and bit 0 assertion, it verifies the write pattern (`0xBEEF_DEAD`) from the host and writes to GPIO bit 2.
+- The host code polls GPIO bit 2, bit 1, and bit 0 for assertion. After that, the application completes successfully.
+
+
+
+## Building and Testing Example
+
+Follow the common design steps specified in the [IPI example design flow document](./../../../docs/IPI-GUI-Flows.md) to build and test this example on F2 instances.
+
+### MicroBlaze Debug Module (MCM)
+
+- Before design implementation, enable the BSCAN ports in the CL by defining the `BSCAN_EN` macro.
+ **NOTE: This is required to use the MicroBlaze Debug Module (MDM) in the design.**
+
+ ```Tcl
+ set_property verilog_define BSCAN_EN=1 [current_fileset]
+ ```
+
+### BRAM Access through VJTAG and MDM
+
+The BRAM in the example can be accessed via the MicroBlaze Debug Module (MDM) IP through a virtual JTAG connection. Refer to the [Virtual JTAG/XVC user guide](./../../../docs/Virtual_JTAG_XVC.md) on how to connect the Virtual JTAG cable to the CL. Once the connection is established, type `xsdb` in a terminal to open the XSDB debugger.
+
+Type in the following commands to test the VJTAG/MDM access to BRAM:
+
+```bash
+xsdb% connect
+tcfchan#0
+xsdb% targets
+ 1 debug_bridge
+ 2 00000000
+ 3 00000000
+ 4 MicroBlaze Debug Module at USER1.2.2
+ 5 MicroBlaze #0 (Running)
+xsdb% target 5 # <------- Change the target to MicroBlaze
+xsdb% mwr 0xC0000100 0xDEADBEEF # <------- Test a memory write to the BRAM's start address
+xsdb% mrd 0xC0000100 # <------- Read to verify the test data has been stored in the BRAM successfully
+C0000100: DEADBEEF
+```
diff --git a/hdk/common/shell_stable/hlx b/hdk/common/shell_stable/hlx
new file mode 160000
index 000000000..2383c2b64
--- /dev/null
+++ b/hdk/common/shell_stable/hlx
@@ -0,0 +1 @@
+Subproject commit 2383c2b64572c75163b1b60fbd0abea482c637e6
diff --git a/hdk/docs/AWS_CLI_FPGA_Commands.md b/hdk/docs/AWS_CLI_FPGA_Commands.md
index 39700dc5f..7fe468ca4 100644
--- a/hdk/docs/AWS_CLI_FPGA_Commands.md
+++ b/hdk/docs/AWS_CLI_FPGA_Commands.md
@@ -197,11 +197,15 @@ Errors can occur when calling this API and this document provides the specificat
* `DCP_NOT_FOUND`
*No DCP file was found with the supplied filename. See AWS FPGA HDK documentation for valid input format. We recommend using the scripts provided with AWS FPGA HDK*
+* `UNSUPPORTED_DESIGN_LOGIC`
+ *The FPGA image bitstream generation failed during design rule validation. If an S3 LogsStorageLocation was provided in the CreateFpgaImage request, review the captured bitstream generation logs saved to S3 under the FpgaImageId for this AFI. Examples of failures include:*
+
+ *1. The design validation detected unsupported primitives in the customer logic. Certain FPGA primitives are restricted to maintain platform stability and ensure reliable operation of customer workloads. The following primitives are not supported: DNA_PORT, FRAME_ECC, MCAP, ICAP_TOP, ICAP_BOT, MASTER_JTAG, DCIRESET, EFUSE_USR, USR_ACCESS, STARTUP, BSCAN1, BSCAN2, BSCAN3, BSCAN4, SYSMON.* ***NOTE: This implementation follows the [design advisory issued by AMD](https://docs.amd.com/r/en-US/000038693). Refer to it for detailed information.***
+
+ *2. We found a combinatorial loop in the CL design. Bitstream generation logs might show errors like ERROR: [DRC LUTLP-1] Combinatorial Loop Alert: 2 LUT cells form a combinatorial loop. Combinatorial loops are not allowed in CL designs and AFIs are not generated in such cases.*
+
* `UNKNOWN_BITSTREAM_GENERATE_ERROR`
- *An error occurred generating the FPGA image bitstream. If an S3 LogsStorageLocation was provided in the CreateFpgaImage request, review the captured bitstream generation logs saved to S3 under the FpgaImageId for this AFI.*
- * **Note:** This is a catch-all error and could be caused due to a variety of issues, for eg:
- * We found a combinatorial loop in the CL design. Bitstream generation logs might show errors like `ERROR: [DRC LUTLP-1] Combinatorial Loop Alert: 2 LUT cells form a combinatorial loop.
- Combinatorial loops are not allowed in CL designs and AFI's are not generated in such a case.
+ *An unclassified error occurred generating the FPGA image bitstream. If an S3 LogsStorageLocation was provided in the CreateFpgaImage request, review the captured bitstream generation logs saved to S3 under the FpgaImageId for this AFI.*
## `delete-fpga-image`
diff --git a/hdk/docs/Amazon_FPGA_Images_Afis_Guide.md b/hdk/docs/Amazon_FPGA_Images_Afis_Guide.md
new file mode 100644
index 000000000..4161fff5b
--- /dev/null
+++ b/hdk/docs/Amazon_FPGA_Images_Afis_Guide.md
@@ -0,0 +1,147 @@
+# Amazon FPGA Images (AFIs) Guide
+
+## Overview
+
+Amazon FPGA Images (AFIs) are the compiled and encrypted FPGA designs that can be loaded onto AWS FPGA instances (F2). This guide explains how to create, manage, and understand AFIs in the AWS ecosystem.
+
+## What are AFIs and AGFIs?
+
+When you create an AFI, AWS provides two important identifiers:
+
+| Identifier | Scope | Usage | Example |
+|------------|-------|-------|---------|
+| **AFI ID** (`FpgaImageId`) | Regional | Managing AFIs via AWS EC2 CLI/SDK APIs | `afi-06d0ffc989feeea2a` |
+| **AGFI ID** (`FpgaImageGlobalId`) | Global | Loading AFIs onto FPGA slots from within instances | `agfi-0f0e045f919413242` |
+
+### Amazon FPGA Image (AFI)
+
+An AFI is a compiled, encrypted, and signed FPGA design that can be loaded onto AWS FPGA instances. AFIs are created from Design Checkpoint (DCP) files generated during the FPGA development process. An AFI ID is a regional identifier that changes when an AFI is copied across regions.
+
+### Amazon Global FPGA Image ID (AGFI)
+
+The AGFI is a **globally unique identifier** that references a specific AFI across all AWS regions enabling seamless AFI/AMI combinations. It's used by FPGA management tools within EC2 instances to load or manage AFIs on FPGA slots.
+
+## AFI Creation Methods
+
+### Prerequisites
+
+- Design Checkpoint (DCP) tarball file
+- Required AWS permissions
+
+### Option 1: Programmatic AFI Creation (Recommended)
+
+The AWS FPGA HDK provides a Python script for streamlined AFI creation once a DCP is generated. Developers can call [create_afi.py](../scripts/create_afi.py) (with required Python modules included in [start_venv.sh](../scripts/start_venv.sh)) without any arguments to interactively input their AFI parameters:
+
+```bash
+source $AWS_FPGA_REPO_DIR/hdk/scripts/start_venv.sh
+$AWS_FPGA_REPO_DIR/hdk/scripts/create_afi.py
+```
+
+Alternatively, developers can read more in the help menu on how to pass all parameters in together:
+
+```bash
+$AWS_FPGA_REPO_DIR/hdk/scripts/create_afi.py --help
+```
+
+### Option 2: Manual AFI Creation
+
+For more control over the AFI creation process, you can manually submit your DCP file using the AWS CLI tool.
+
+#### Step 1: Prepare Your Environment
+
+Set up your environment variables:
+
+```bash
+export DCP_BUCKET_NAME='your-dcp-bucket-name'
+export DCP_FOLDER_NAME='your-dcp-folder'
+export LOGS_BUCKET_NAME='your-logs-bucket-name'
+export LOGS_FOLDER_NAME='your-logs-folder'
+export REGION='aws-region-code-eg-us-east-1'
+export DCP_TARBALL_TO_INGEST='path/to/your/YYYY_MM_DD-HHMMSS.Developer_CL.tar'
+```
+
+**Note**: Confirm your region supports FPGA images by checking the [Amazon EC2 instance types by Region index](https://docs.aws.amazon.com/ec2/latest/instancetypes/ec2-instance-regions.html)
+
+#### Step 2: Create S3 Storage
+
+Create S3 buckets and upload your DCP file:
+
+```bash
+# Create S3 bucket for DCP files
+aws s3 mb s3://${DCP_BUCKET_NAME} --region ${REGION}
+
+# Create folder for DCP files
+aws s3 mb s3://${DCP_BUCKET_NAME}/${DCP_FOLDER_NAME}/
+
+# Upload DCP tarball to S3
+aws s3 cp ${DCP_TARBALL_TO_INGEST} s3://${DCP_BUCKET_NAME}/${DCP_FOLDER_NAME}/
+```
+
+Create storage for AFI creation logs:
+
+```bash
+# Create folder for logs
+aws s3 mb s3://${LOGS_BUCKET_NAME}/${LOGS_FOLDER_NAME}/ --region ${REGION}
+
+# Create placeholder file to establish the folder structure
+touch LOGS_FILES_GO_HERE.txt
+aws s3 cp LOGS_FILES_GO_HERE.txt s3://${LOGS_BUCKET_NAME}/${LOGS_FOLDER_NAME}/
+```
+
+**Important**: The trailing `/` is required after folder names in S3 paths.
+
+#### Step 3: Submit AFI Creation Request
+
+```bash
+export DCP_TARBALL_NAME=$(basename ${DCP_TARBALL_TO_INGEST})
+export CL_DESIGN_NAME='your-design-name'
+export CL_DESIGN_DESCRIPTION="Description of your FPGA design"
+
+# Submit AFI creation request
+aws ec2 create-fpga-image \
+ --name ${CL_DESIGN_NAME} \
+ --description "${CL_DESIGN_DESCRIPTION}" \
+ --input-storage-location Bucket=${DCP_BUCKET_NAME},Key=${DCP_FOLDER_NAME}/${DCP_TARBALL_NAME} \
+ --logs-storage-location Bucket=${LOGS_BUCKET_NAME},Key=${LOGS_FOLDER_NAME}/ \
+ --region ${REGION}
+
+# expected response format:
+{
+ "FpgaImageId": "afi-09953582f46c45b17",
+ "FpgaImageGlobalId": "agfi-0925b211f5a81b071"
+}
+```
+
+The [create-fpga-images API](https://docs.aws.amazon.com/cli/latest/reference/ec2/create-fpga-image.html#output) or [AWS CLI FPGA Commands](./AWS_CLI_FPGA_Commands.md) documentation can be used to interpret your results.
+
+## Monitoring AFI Creation
+
+### Check AFI Status
+
+Use the AFI ID returned by the `create-fpga-image` command to monitor the creation progress:
+
+```bash
+aws ec2 describe-fpga-images --fpga-image-ids afi-09953582f46c45b17 --region ${REGION}
+```
+
+The [describe-fpga-images API](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-fpga-images.html#output) or [AWS CLI FPGA Commands](./AWS_CLI_FPGA_Commands.md) documentation can be used to interpret your results.
+
+## Using AFIs in FPGA Instances
+
+Once your AFI is `available`, you can load it onto FPGA slots within F2 instances using the **AGFI ID**:
+
+```bash
+# Load AFI onto FPGA slot 0
+sudo fpga-load-local-image -S 0 -I agfi-0925b211f5a81b071
+
+# Verify AFI is loaded
+sudo fpga-describe-local-image -S 0
+```
+
+## Troubleshooting
+
+- **AFI creation fails**: Check the logs in your designated S3 logs folder
+- **S3 permissions**: Verify your AWS credentials have appropriate S3 and EC2 permissions
+- **DCP file format**: Ensure your DCP tarball follows [AWS FPGA HDK requirements](./../README.md#step-4-build-cl-design-check-point-dcp)
+- For any issues with the devkit documentation or code, please open a [GitHub issue](https://github.com/aws/aws-fpga/issues) with all steps to reproduce
+- For questions about F2 instances, please open a [re:Post issue with the 'FPGA Development' tag](https://repost.aws/tags/TAc7ofO5tbQRO57aX1lBYbjA/fpga-development)
diff --git a/hdk/docs/IPI-GUI-AWS-IP.md b/hdk/docs/IPI-GUI-AWS-IP.md
new file mode 100644
index 000000000..035458a51
--- /dev/null
+++ b/hdk/docs/IPI-GUI-AWS-IP.md
@@ -0,0 +1,49 @@
+# AWS FPGA IP for IP Integrator Overview
+
+## Table of Contents
+
+- [AWS FPGA IP for IP Integrator Overview](#aws-fpga-ip-for-ip-integrator-overview)
+ - [Table of Contents](#table-of-contents)
+ - [AWS IP Overview](#aws-ip-overview)
+ - [Enable IP Interfaces](#enable-ip-interfaces)
+ - [Clock Signals](#clock-signals)
+ - [CL Partition ID](#cl-partition-id)
+ - [Advanced](#advanced)
+
+## AWS IP Overview
+
+The AWS IP serves as a central component in the IP Integrator (IPI) designs, providing essential AXI interfaces (OCL, PCIS and PCIM) for Host-FPGA communication, configurable clock management through predefined recipes, and auxiliary signal ports like VLED/VDIP. It enables seamless integration between CL designs and the F2 Shell.
+
+To configure the AWS IP, double-click the AWS IP block in the 'Block Diagram'. The 'Re-customize IP' GUI displays four configuration categories.
+
+## Enable IP Interfaces
+
+Select the box to enable desired interfaces. The block diagram updates automatically to show enabled interfaces, ports, and clocks.
+
+For details about the shell interface, see [AWS Shell Interface Specification](./AWS_Shell_Interface_Specification.md).
+
+
+
+## Clock Signals
+
+Review the [Clock Recipes User Guide](./Clock_Recipes_User_Guide.md) to determine the number of clocks needed for Groups A, B, and C, and select appropriate clock recipes for all CL clocks.
+
+
+
+***NOTE***: `clk_main_a0_out` is a required clock and cannot be disabled.
+
+***NOTE***: You must select 'Enable Ports for HBM in CL' in the 'Enable IP Interfaces' tab to see HBM AXI clock recipe options.
+
+## CL Partition ID
+
+The PCIe Vendor ID, Device ID, Subsystem Vendor ID, and Subsystem ID can be configured. These default values match the AWS examples and should not be modified at this time.
+
+
+
+## Advanced
+
+Pipeline stages configuration:
+
+- Range: 1-4 pipeline stages
+- Applies to the `sh_cl_ddr_stat_` interface for DDR in the CL
+- Selection depends on design size and complexity
diff --git a/hdk/docs/IPI-GUI-Examples.md b/hdk/docs/IPI-GUI-Examples.md
new file mode 100644
index 000000000..83c589f5f
--- /dev/null
+++ b/hdk/docs/IPI-GUI-Examples.md
@@ -0,0 +1,167 @@
+# AWS GUI Workflow with Vivado IP Integrator Quick Start Examples
+
+## Table of Contents
+
+- [AWS GUI Workflow with Vivado IP Integrator Quick Start Examples](#aws-gui-workflow-with-vivado-ip-integrator-quick-start-examples)
+ - [Table of Contents](#table-of-contents)
+ - [Overview](#overview)
+ - [HLx Examples Using IP Integrator Flow](#hlx-examples-using-ip-integrator-flow)
+ - [Tutorial on How to Create HLx IPI Hello\_World Example with AXI GPIO and AXI BRAM](#tutorial-on-how-to-create-hlx-ipi-hello_world-example-with-axi-gpio-and-axi-bram)
+ - [Create Directory Structure and Vivado Project](#create-directory-structure-and-vivado-project)
+ - [Configure the Block Diagram](#configure-the-block-diagram)
+ - [Configure AWS IP](#configure-aws-ip)
+ - [Add and Configure AXI GPIO](#add-and-configure-axi-gpio)
+ - [Add/and Configure AXI BRAM](#addand-configure-axi-bram)
+ - [Connect the Design](#connect-the-design)
+ - [Address Editor Tab](#address-editor-tab)
+ - [Save and Validate the Design](#save-and-validate-the-design)
+ - [Add Simulation Sources from Example Design](#add-simulation-sources-from-example-design)
+ - [Run Simulation](#run-simulation)
+ - [Add Design Constraints](#add-design-constraints)
+ - [Implement the Design Tarball File](#implement-the-design-tarball-file)
+ - [CL Example Software](#cl-example-software)
+
+## Overview
+
+This document provides an overview of IP Integrator (IPI) examples in the HLx environment. Before starting, complete the [Vivado Setup Instructions](./IPI-GUI-Vivado-Setup.md) to familiarize yourself with the Vivado GUI and IP Integrator.
+
+All examples in this document have been integrated into an automated flow that directly generates Vivado projects.
+
+## HLx Examples Using IP Integrator Flow
+
+This section provides example designs to help you become familiar with the automated project generation flow and IP Integrator functionality.
+
+Available examples are:
+
+- [hello_world](./../cl/examples/hello_world_hlx/README.md)
+- [hello_world_mb](./../cl/examples/hello_world_mb_hlx/README.md)
+- [cl_ipi_cdma_test](./../cl/examples/cl_ipi_cdma_test_hlx/README.md)
+
+Click any example link above for detailed design information and getting started instructions.
+
+## Tutorial on How to Create HLx IPI Hello_World Example with AXI GPIO and AXI BRAM
+
+This tutorial demonstrates how to configure AWS IP with the OCL interface (AXI4-Lite Master) and the PCIS interface (AXI4 Master), similar to the ones in the [hello_world](./../cl/examples/hello_world_hlx/README.md) example.
+
+The AXI GPIO IP controls the virtual LEDs (VLEDs). Writing `0xAAAA` to the AXI GPIO (0x0) slave register drives VLEDs. The VLED value can be read using the verilog task `tb.get_virtual_led` in simulation or `fpga-get-virtual-led` on an F2 instance.
+
+The PCIS interface accesses the AXI BRAM, where the ASCII string 'Hello World!' can be written to a BRAM location and read back for display in either the simulation environment or on an F2 instance.
+
+### Create Directory Structure and Vivado Project
+
+Change directories to `hdk/cl/examples`
+
+Create a directory in examples like `hello_world_hlx_ipi`
+
+Change directories into `hello_world_hlx_ipi/`
+
+Start Vivado by typing `vivado` in the bash console.
+
+Create a project for any device by typing the following command in Vivado's TCL Tab.
+
+```Tcl
+create_project -name hello_world
+```
+
+Enter the following Tcl command to configure AWS project settings and create a block diagram with AWS IP:
+
+```Tcl
+aws::make_ipi
+```
+
+***NOTE***: The AWS IP instance name may incorrectly display as `f1_inst` due to a known Vivado issue. To manually change it to `f2_inst`, click the instance box to highlight it and change the name field in the 'Block Properties' window.
+
+### Configure the Block Diagram
+
+#### Configure AWS IP
+
+Configure the AWS IP block by double-clicking it and selecting three interfaces under 'IP Interfaces': 'Use OCL Register Interface (M_AXI_OCL)', 'Use PCI Slave-access Interface (M_AXI_PCIS)', and 'Use Auxiliary (non-AXI) Signal Ports'. For clock configuration, use Group-A Clock with the default clock recipe to set a 250 MHz frequency.
+
+#### Add and Configure AXI GPIO
+
+Right-click in the canvas and select 'Add IP...', then search for and double-click 'AXI GPIO'. Once added, double-click the axi_gpio_0 block in the canvas. In the 'Re-customize IP' dialog box, select 'All Outputs' under the GPIO section and set GPIO Width to 16, then click 'OK'.
+
+#### Add/and Configure AXI BRAM
+
+Right-click in the canvas and select 'Add IP...', then search for and double-click 'AXI BRAM Controller'. Once added, double-click the `axi_bram_ctrl_0` block in the canvas and set the Data Width to 512 to match the PCIS AXI4 Master Interface's data width, then click 'OK'.
+
+#### Connect the Design
+
+Click 'Run Connection Automation' at the top of the Block Diagram (green highlighted section). Configure the AXI BRAM controller by setting both `axi_bram_ctrl_0/BRAM_PORTA` and `BRAM_PORTB` to 'Auto', then set `axi_bram_ctrl_0/S_AXI` Master to `f2_inst/M_AXI_PCIS` with remaining options as 'Auto'. For the AXI GPIO, set `axi_gpio_0/S_AXI` Master to `f2_inst/M_AXI_OCL` with other options as 'Auto', then click 'OK'.
+
+After completing the automation, expand `axi_gpio_0/GPIO` by clicking the + symbol. Connect `gpio_io_o[15:0]` from the `f2_inst` block to `status_vled[15:0]`, then run 'Connection Automation'.
+
+#### Address Editor Tab
+
+In the 'Address Editor' tab above the block diagram, you can view the address configurations: the AXI BRAM instance has a default 64K address space starting at `0xC0000000` (adjustable by modifying the Range value), while the AXI GPIO instance uses a 4K address space with M_AXI_OCL starting at `0x00000000`.
+
+#### Save and Validate the Design
+
+Save the block diagram, then select 'Tools' -> 'Validate Design' and click 'OK' when validation completes successfully.
+
+### Add Simulation Sources from Example Design
+
+To add simulation sources, navigate to 'Project Manager' in the 'Flow Navigator' and select 'Add Sources' -> 'Add or create simulation sources' -> 'Select Add Files'. Add the test file `test_cl.sv` from the `hdk/common/shell_stable/hlx/hlx/hlx_examples/build/IPI/<example_name>/verif/` directory, and ensure you deselect the option to scan and add RTL include files.
+
+Configure the following simulation settings to import source files from external directories instead of copying them to the Vivado project:
+
+1. Source file options:
+ - Deselect 'Copy sources into project' (creates links instead)
+ - Select 'Add sources from subdirectories'
+ - Enable 'Include all design sources for simulation'
+ - Click 'Finish'
+
+2. Simulation settings:
+ - Right-click 'SIMULATION' in Project Manager
+ - Select 'Simulation Settings'
+ - In Verilog options, click the '...' box
+ - Verify/update the following:
+ - CL_NAME=cl_top
+ - TEST_NAME=test_cl
+ - Click 'OK'
+ - Click 'Apply'
+ - Click 'OK' to return to Vivado project
+
+#### Run Simulation
+
+From the 'Flow Navigator' tab, select 'Simulation' -> 'Run Simulation' -> 'Run Behavioral Simulation', then add your required simulation signals. In the Tcl console, enter the following command.
+
+```Tcl
+run -all
+```
+
+Note: If critical warnings appear, click 'OK' and run the command twice (this is a known issue that will be addressed in future versions).
+
+### Add Design Constraints
+
+No additional constraints are needed for this design.
+
+### Implement the Design Tarball File
+
+To implement the design, launch implementation:
+
+- Right-click 'impl_1'
+- Select 'Launch Runs...'
+- Click 'OK'
+- Click 'OK' on the 'Missing Synthesis Results' dialog
+
+This process will run both synthesis and implementation.
+
+The completed tarball file is located in:
+
+```bash
+<project_dir>/example_projects/<cl_name>.runs/faas_1/build/checkpoints/to_aws/<YYYY_MM_DD-HHMMSS>.Developer_CL.tar
+```
+
+For instructions on creating an F2 AFI from the design tarball, see [Submit Generated DCP for AFI Creation](./../README.md#step-6-submit-generated-dcp-for-afi-creation) in the HDK quick start guide.
+
+### CL Example Software
+
+Compile the runtime software required for F2 instance execution by copying the software directory to your target location and running these commands:
+
+```bash
+cp -r $HDK_COMMON_DIR/shell_stable/hlx/hlx_examples/build/IPI/hello_world/software .
+cd software
+make all
+sudo ./test_cl
+```
diff --git a/hdk/docs/IPI-GUI-Flows.md b/hdk/docs/IPI-GUI-Flows.md
new file mode 100644
index 000000000..e48ca4f62
--- /dev/null
+++ b/hdk/docs/IPI-GUI-Flows.md
@@ -0,0 +1,83 @@
+# HLx GUI Flows with Vivado IP Integrator
+
+## Table of Contents
+
+- [HLx GUI Flows with Vivado IP Integrator](#hlx-gui-flows-with-vivado-ip-integrator)
+ - [Table of Contents](#table-of-contents)
+ - [Overview](#overview)
+ - [Create IP Integrator Project with Example Design](#create-ip-integrator-project-with-example-design)
+ - [Setup HLx Environment](#setup-hlx-environment)
+ - [Create Design](#create-design)
+ - [Run Simulation](#run-simulation)
+ - [Run Implementation](#run-implementation)
+ - [AFI Creation](#afi-creation)
+ - [Runtime Example](#runtime-example)
+
+## Overview
+
+This document covers top level steps for using the HLx GUI flows.
+
+## Create IP Integrator Project with Example Design
+
+This section specifies the end-to-end flow for creating a pre-defined IPI example design and executing it on an F2 instance.
+
+### Setup HLx Environment
+
+Clone the `aws-fpga` repository and follow the [Vivado HLx Setup Instructions](./IPI-GUI-Vivado-Setup.md) to set up the HLx environment.
+
+### Create Design
+
+- To launch Vivado GUI
+ - Change to the `hdk/cl/examples/` directory, e.g. `hdk/cl/examples/hello_world_hlx`
+ - Invoke Vivado by typing `vivado` in the console
+ - In the Vivado Tcl console type in the following to create the HLx example.
+
+ ```Tcl
+  aws::make_ipi <example_name>
+ ```
+
+  ***NOTE***: To see which examples are available, type `aws::make_ipi -examples` into the Tcl console.
+  ***NOTE***: IPI example design names do not include the `_hlx` suffix, which differs from the CL directory name (e.g. `<example>_hlx`).
+
+  - The example will be generated in `cl/examples/<example>/example_projects`. The Vivado project is `example_projects/<example>.xpr`
+ - Once the Block diagram is opened, review the different IP blocks especially the settings in the AWS IP
+
+### Run Simulation
+
+The simulation settings are already configured.
+
+- To launch simulation from within the Vivado GUI
+ - Click on 'SIMULATION' -> 'Run Simulation' -> 'Run Behavioral Simulation'
+ - Add signals needed in the simulation
+ - Type `run -all` in the Tcl console
+
+### Run Implementation
+
+- To run implementation from within the GUI, use the Design Runs tab:
+ - Right click on 'impl_1' in the Design Runs tab and select Launch Runs…
+ - Click 'OK' in the Launch Runs Dialog Box.
+ - Click 'OK' in the Missing Synthesis Results Dialog Box
+
+- This step will run both synthesis and implementation.
+
+### AFI Creation
+
+The completed tarball file for a successfully implemented example design can be found in:
+
+```bash
+$CL_DIR/build/scripts/example_projects/<cl_name>.runs/faas_1/build/checkpoints/to_aws/<cl_name>.Developer_CL.tar
+```
+
+For information on how to create AFI from this tarball file, follow the [Submit Generated DCP for AFI Creation](./../README.md#step-6-submit-generated-dcp-for-afi-creation) section in the HDK step-by-step quick start guide.
+
+### Runtime Example
+
+The runtime software must be compiled before the AFI can run on F2 instances. Copy the example's software directory to your preferred location and compile it using the following commands:
+
+```bash
+source $AWS_FPGA_REPO_DIR/sdk_setup.sh
+cp -r $HDK_COMMON_DIR/shell_stable/hlx/hlx_examples/build/IPI/<example>/software .
+cd software
+make all
+sudo ./test_cl
+```
diff --git a/hdk/docs/IPI-GUI-Vivado-Setup.md b/hdk/docs/IPI-GUI-Vivado-Setup.md
new file mode 100644
index 000000000..6ab6e8089
--- /dev/null
+++ b/hdk/docs/IPI-GUI-Vivado-Setup.md
@@ -0,0 +1,205 @@
+# Vivado IP Integrator Setup
+
+## Table of Contents
+
+- [Vivado IP Integrator Setup](#vivado-ip-integrator-setup)
+ - [Table of Contents](#table-of-contents)
+ - [Overview](#overview)
+ - [Installation in Linux](#installation-in-linux)
+ - [Switching between HDK and HLx flows](#switching-between-hdk-and-hlx-flows)
+ - [Vivado Overview](#vivado-overview)
+ - [Sources Tab](#sources-tab)
+ - [Hierarchy Tab](#hierarchy-tab)
+ - [IP Sources Tab](#ip-sources-tab)
+ - [Flow Navigator](#flow-navigator)
+ - [PROJECT MANAGER](#project-manager)
+ - [IP INTEGRATOR](#ip-integrator)
+ - [SIMULATION](#simulation)
+ - [RTL ANALYSIS](#rtl-analysis)
+ - [SYNTHESIS](#synthesis)
+ - [IMPLEMENTATION](#implementation)
+ - [TCL Commands](#tcl-commands)
+ - [Design Runs Tab](#design-runs-tab)
+ - [Vivado Flows Overview](#vivado-flows-overview)
+ - [IP Integration flow](#ip-integration-flow)
+ - [General Environment](#general-environment)
+ - [Design Constraints in Project](#design-constraints-in-project)
+ - [Synthesis/Implementation](#synthesisimplementation)
+ - [Next Steps](#next-steps)
+
+## Overview
+
+This document assumes you have cloned the developer kit and sourced the [`hdk_setup.sh`](./../../hdk_setup.sh). It is highly recommended that you get familiar with the HDK development flow by following the [step-by-step quick start guide for customer hardware development](./../README.md) prior to using the Vivado IP Integrator (IPI).
+
+After you become familiar with building an example AFI and running it on F2 instances, refer to [IP Integrator Quick Start Examples](./IPI-GUI-Examples.md) documentation for help with example designs, new designs, and additional tutorials.
+
+## Installation in Linux
+
+Using a text editor, open either `~/.Xilinx/Vivado/init.tcl` or `~/.Xilinx/Vivado/Vivado_init.tcl`. If neither file exists, run the following command to create one under `~/.Xilinx/Vivado/`.
+
+``` bash
+touch Vivado_init.tcl
+```
+
+To get the absolute path of `$HDK_SHELL_DIR`, use this command:
+
+``` bash
+echo $HDK_SHELL_DIR
+```
+
+**NOTE**: If your `$HDK_SHELL_DIR` is empty or does not display when echoed, you need to source the [hdk_setup.sh](../../../../../hdk_setup.sh).
+
+In `init.tcl` or `Vivado_init.tcl`, append the following lines based upon the `$HDK_SHELL_DIR` path to the end of the file.
+
+``` bash
+set shell small_shell
+source $::env(HDK_SHELL_DIR)/hlx/hlx_setup.tcl
+```
+
+**NOTE**: A `shell` variable must be specified for the flow to pair the customer design with the correct shell variant. Valid values are `xdma_shell` (coming soon) or `small_shell`.
+
+### Switching between HDK and HLx flows
+
+- Vivado automatically sources either `~/.Xilinx/Vivado/init.tcl` or `~/.Xilinx/Vivado/Vivado_init.tcl` at startup. After completing the setup steps above, the IPI features will load automatically each time you launch Vivado.
+
+- To switch back to the HDK flow, please remove the `source $::env(HDK_SHELL_DIR)/hlx/hlx_setup.tcl` line from your `init.tcl` or `Vivado_init.tcl` file.
+
+## Vivado Overview
+
+This section provides a basic overview of the Vivado GUI. The GUI environment enables developers of all experience levels to:
+
+- Quickly set project options and strategies to meet design requirements
+- Access interactive reports and design views
+- Efficiently resolve timing and area issues
+
+The IP Integrator (IPI) is a design entry tool in the Vivado HLx Design Suite. It allows developers to connect IPs at a block level and generates 'what you see is what you get' RTL files in either VHDL or Verilog format. The IPI flow enhances the standard RTL flow by providing designer assistance features, including:
+
+- Simplified connectivity of IPs through interface-based connections
+- Block automation that adds helper IPs (such as interconnects, DMAs, and other support blocks) based on IP configuration
+- Connectivity automation for routing interfaces, clocks, and resets between blocks
+- Design Rule Checks (DRCs) for ensuring proper interface connectivity and clock domain crossing
+- Advanced hardware debug capabilities enabling transaction-level debugging
+
+For detailed information and design methodology guidelines, refer to the following documentation:
+
+- [Vivado Design Suite User Guide (UG892)](https://docs.amd.com/r/en-US/ug892-vivado-design-flows-overview)
+- [Designing IP Subsystems Using IP Integrator (UG994)](https://docs.amd.com/r/en-US/ug994-vivado-ip-subsystems)
+- [UltraFast Design Methodology Guide for FPGAs and SoCs (UG949)](https://docs.amd.com/r/en-US/ug949-vivado-design-methodology)
+
+To open the GUI, run command `vivado`. After Vivado loads, create an empty project by selecting `Create New Project` and following the prompts until you see a blank canvas. The sections below describe the tabs and menus, refer to the screenshot below. Exploring these tabs and menus in your blank project is encouraged.
+
+
+
+### Sources Tab
+
+The box in yellow contains the design sources.
+
+#### Hierarchy Tab
+
+The 'Sources' tab is divided into three different categories.
+
+1. Design Sources: contains synthesis/implementation sources
+2. Constraints: contains timing constraint (XDC) files
+3. Simulation Sources: contains simulation-only sources
+
+Clicking on a file displays its information in the 'Properties' tab (under 'Sources'). In this tab, you can specify how the file is used in the design flow:
+
+- RTL/IP sources are typically marked for:
+ - Synthesis, implementation, simulation
+ - Synthesis, implementation
+ - Simulation
+
+- XDC files are typically marked for:
+ - Synthesis, implementation
+ - Synthesis
+ - Implementation
+
+#### IP Sources Tab
+
+When an IP is added to your project, the 'IP Sources' tab becomes visible. This tab contains imported IP sources.
+
+### Flow Navigator
+
+The 'Flow Navigator', located in the green box, allows you to launch predefined design flow steps, such as synthesis and implementation.
+
+#### PROJECT MANAGER
+
+The 'PROJECT MANAGER' section allows you to add sources (RTL, IP, and XDC files), access Language Templates for common RTL constructs, XDCs and DEBUG, and use IP Catalog to add IPs to the project. This portion targets the RTL flow.
+
+The IP Catalog allows you to search for specific IPs or browse through IP categories. When using IP Catalog, you are responsible for adding and connecting the IP to your RTL design.
+
+#### IP INTEGRATOR
+
+This section allows you to open and modify the 'Block Design' and generate the 'Block Design' after validation.
+
+***Note***: The HLx flow pre-creates the 'Block Design' framework with AWS IP and board, so 'Create Block Design' is not necessary.
+
+Double-clicking an IP in the 'Block Design' opens the 'Re-customize IP' dialog box, where you can review or modify IP settings. When connecting designs, you can use 'Run Connection Automation' to automatically connect interfaces.
+
+#### SIMULATION
+
+In this section, you can modify simulation settings by right-clicking 'SIMULATION'. To run a simulation, select 'Run Simulation' → 'Run Behavioral Simulation'.
+
+#### RTL ANALYSIS
+
+Clicking 'Open Elaborate Design' analyzes the RTL files, allowing you to verify RTL structures and syntax before synthesis.
+
+#### SYNTHESIS
+
+Right-clicking 'SYNTHESIS' allows you to view synthesis settings and launch a synthesis run. After synthesis completes, click 'Open Synthesized Design' to access the post-synthesis checkpoint for analysis. This stage is crucial for developing timing constraints for the CL.
+
+#### IMPLEMENTATION
+
+Right-clicking 'IMPLEMENTATION' allows you to view implementation settings and launch an implementation run. After implementation completes, click 'Open Implemented Design' to access the post-implementation checkpoint for analysis of the SH (Shell) and CL (Custom Logic).
+
+### TCL Commands
+
+The orange box is where you enter Tcl commands. The 'Tcl Console' tab above displays the command outputs.
+
+### Design Runs Tab
+
+The 'Design Runs' are located in the blue box. This area provides functionality similar to the 'SYNTHESIS' and 'IMPLEMENTATION' sections in the 'Flow Navigator'. The examples and tutorials demonstrate how to use 'synth_1' and 'impl_1' runs to build your design.
+
+## Vivado Flows Overview
+
+The Vivado HLx environment supports IP Integrator (IPI) flow. This section provides a top-level overview of these flows. For detailed information, see [HLx GUI Flows with Vivado IP Integrator](./IPI-GUI-Flows.md).
+
+### IP Integration flow
+
+You can easily create a full design by adding Vivado IP to the block diagram. Use RTL module referencing to add custom RTL as IP to the block diagram. This flow supports both RTL and IP additions as IP blocks. Find examples in the [IP Integrator Quick Start Examples](./IPI-GUI-Examples.md#hlx-ipi-examples-using-ip-integration-flow).
+
+
+
+### General Environment
+
+#### Design Constraints in Project
+
+Top-level clocks from the Shell are provided for synthesis in:
+
+- cl_clocks_aws.xdc – Top-level clock constraints for the CL
+
+The following files are available for adding custom constraints:
+
+- cl_synth_user.xdc – User synthesis constraints
+- cl_pnr_user.xdc – User timing and floorplanning constraints
+
+#### Synthesis/Implementation
+
+By default, synthesis is using the `Default` directive and all implementation steps are using the `Explore` directive.
+
+To modify implementation settings, right-click 'IMPLEMENTATION', click 'Implementation Settings...' and select the 'Implementation' option in 'Project Settings'. Modify directives only for:
+
+- opt_design
+- place_design
+- phys_opt_design
+- route_design
+
+NOTE: Do not change the `Strategy` option, as this will override HLx environment settings.
+
+For getting started, refer to [IP Integrator Quick Start Examples](./IPI-GUI-Examples.md).
+
+## Next Steps
+
+1. Review the [AWS IP](./IPI-GUI-AWS-IP.md) documentation to familiarize yourself with shell features available in the IPI environment.
+2. Test [building an IPI example design in Vivado GUI](./IPI-GUI-Flows.md)
+3. Proceed to the [IPI Quick Start Examples](./IPI-GUI-Examples.md) for guidance on creating example designs, developing new designs and following additional tutorials.
diff --git a/hdk/docs/Virtual_JTAG_XVC.md b/hdk/docs/Virtual_JTAG_XVC.md
index 457f7c525..bd43903ac 100644
--- a/hdk/docs/Virtual_JTAG_XVC.md
+++ b/hdk/docs/Virtual_JTAG_XVC.md
@@ -61,7 +61,7 @@ To begin debugging a CL design, the developer must first install the XVC driver
:warning: Developers may encounter a compilation error in some operating systems due to driver incompatibility, like this:
```bash
-/home/ubuntu/driver_v0.4/xvc_pcie_driver_base.c:306:25: error: too many arguments to function ‘class_create’
+/home/<user>/driver_v0.4/xvc_pcie_driver_base.c:306:25: error: too many arguments to function ‘class_create’
306 | xvc_dev_class = class_create(THIS_MODULE, "xil_xvc_class");
```
diff --git a/hdk/scripts/create_afi.py b/hdk/scripts/create_afi.py
new file mode 100755
index 000000000..5e6ed5a9f
--- /dev/null
+++ b/hdk/scripts/create_afi.py
@@ -0,0 +1,567 @@
+#!/usr/bin/env python3
+
+# =============================================================================
+# Amazon FPGA Hardware Development Kit
+#
+# Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Amazon Software License (the "License"). You may not use
+# this file except in compliance with the License. A copy of the License is
+# located at
+#
+# http://aws.amazon.com/asl/
+#
+# or in the "license" file accompanying this file. This file is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
+# implied. See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+"""
+AFI Creation Helper Script
+
+Interactive tool to create Amazon FPGA Images (AFIs) from Design Checkpoint (DCP) files.
+Guides users through region selection, S3 setup, DCP selection, and AFI creation.
+"""
+
+import argparse
+import glob
+import json
+import os
+import re
+import subprocess
+import sys
+import tarfile
+import time
+import traceback
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple
+
+import boto3
+from mypy_boto3_ec2.client import EC2Client
+from mypy_boto3_ec2.type_defs import CreateFpgaImageResultTypeDef
+from mypy_boto3_s3.client import S3Client
+from mypy_boto3_s3.type_defs import ListObjectsV2OutputTypeDef
+from pydantic import BaseModel, Field, field_validator
+
+
+DEFAULT_POLL_INTERVAL = 300
+
+
+class AfiMetadata(BaseModel):
+    """Validated inputs for a single EC2 create-fpga-image request."""
+
+    # AWS caps AFI name/description length at 255 characters.
+    name: str = Field(..., min_length=1, max_length=255)
+    description: str = Field(..., min_length=1, max_length=255)
+    # Local path to the DCP tarball (validated below).
+    dcp_path: str = Field(...)
+    # S3 bucket plus key prefixes for the uploaded DCP and the creation logs.
+    bucket: str = Field(...)
+    dcp_s3_path: str = Field(...)
+    logs_s3_path: str = Field(...)
+    region: str = Field(...)
+
+    @field_validator("bucket")
+    @classmethod
+    def validate_bucket_name(cls, v: str) -> str:
+        """Reject names outside basic S3 bucket naming rules (3-63 chars).
+
+        NOTE(review): the pattern also disallows dots, which S3 permits —
+        confirm that is intentional.
+        """
+        if not re.match(r"^[a-z0-9][a-z0-9\-]*[a-z0-9]$", v) or not (3 <= len(v) <= 63):
+            raise ValueError("Invalid S3 bucket name format")
+        return v
+
+    @field_validator("dcp_path")
+    @classmethod
+    def validate_dcp_file(cls, v: str) -> str:
+        """Ensure the DCP path is a non-empty, readable .tar archive."""
+        print(f"🔎 Validating DCP file is a proper tar archive: {v}")
+        if not v.lower().endswith(".tar"):
+            raise ValueError("DCP file must have .tar extension")
+
+        try:
+            file_size = os.path.getsize(v)
+            if file_size == 0:
+                raise ValueError("DCP file is empty")
+
+            with tarfile.open(v, "r") as tar:
+                if not tar.getnames():
+                    raise ValueError("DCP tar file contains no files")
+
+        except (OSError, tarfile.TarError) as e:
+            # Wrap I/O and tar-format errors so pydantic reports them uniformly.
+            raise ValueError(f"Invalid DCP file: {e}")
+
+        print(f"✓ DCP file validation passed: {v} ({file_size / (1024 * 1024):.1f}MB)")
+        return v
+
+    def get_create_args(self) -> Dict[str, Any]:
+        """Map the validated fields onto create_fpga_image keyword arguments."""
+        return {
+            "InputStorageLocation": {"Bucket": self.bucket, "Key": self.dcp_s3_path},
+            "LogsStorageLocation": {"Bucket": self.bucket, "Key": self.logs_s3_path},
+            "Name": self.name,
+            "Description": self.description,
+        }
+
+
+class RegionManager:
+    """Discovery and 24h on-disk caching of AWS regions that offer F2 instances."""
+
+    # Cache lives alongside AWS CLI configuration.
+    CACHE_FILE = Path.home() / ".aws" / "fpga_regions_cache.json"
+    CACHE_TTL = 24 * 60 * 60  # 24 hours
+    # Fallback list used when no AWS credentials are available for live discovery.
+    KNOWN_F2_REGIONS = ["us-east-1", "us-west-2", "ap-southeast-2", "eu-west-2"]
+    F2_INSTANCE_TYPES = ["f2.6xlarge", "f2.12xlarge", "f2.48xlarge"]
+
+    @staticmethod
+    def get_supported_regions() -> List[str]:
+        """Return F2-capable regions, preferring a fresh cache over API probes."""
+        # Get the regions from the cache if it's less than 24 hours old
+        try:
+            if (
+                RegionManager.CACHE_FILE.exists()
+                and (time.time() - RegionManager.CACHE_FILE.stat().st_mtime) < RegionManager.CACHE_TTL
+            ):
+                with RegionManager.CACHE_FILE.open("r") as f:
+                    regions = json.load(f)["regions"]
+                    if regions:
+                        return regions
+        except Exception:
+            # Corrupt or unreadable cache: fall through to re-discovery.
+            pass
+
+        # Get regions from AWS or fallback to known list
+        regions = (
+            RegionManager._get_current_f2_region_list()
+            if boto3.Session().get_credentials()
+            else RegionManager.KNOWN_F2_REGIONS
+        )
+
+        # Save the latest regions in the cache
+        try:
+            RegionManager.CACHE_FILE.parent.mkdir(parents=True, exist_ok=True)
+            with RegionManager.CACHE_FILE.open("w") as f:
+                json.dump({"regions": regions, "timestamp": time.time()}, f)
+        except Exception:
+            # Cache write is best-effort; the discovered list is still returned.
+            pass
+
+        return regions
+
+    @staticmethod
+    def _get_current_f2_region_list() -> List[str]:
+        """Probe every EC2 region for F2 instance-type offerings (slow, O(regions) API calls)."""
+        print("🔍 Discovering F2 instance supported regions via AWS API...")
+        regions: List[str] = []
+        for region in boto3.Session().get_available_regions("ec2"):
+            try:
+                ec2 = boto3.client("ec2", region_name=region)
+                if ec2.describe_instance_type_offerings(
+                    Filters=[{"Name": "instance-type", "Values": RegionManager.F2_INSTANCE_TYPES}]
+                )["InstanceTypeOfferings"]:
+                    regions.append(region)
+                    print(f"  ✓ Found F2 support in {region}")
+            except Exception:
+                # Region unreachable with current credentials — skip it.
+                continue
+        return regions
+
+    @staticmethod
+    def validate_region_supports_f2(region: str) -> None:
+        """Raise ValueError when `region` is not in the supported list."""
+        if region not in RegionManager.get_supported_regions():
+            raise ValueError(
+                f"Region '{region}' does not support F2 instances.\n"
+                f"Supported regions: {', '.join(RegionManager.get_supported_regions())}"
+            )
+
+
+class UserInterface:
+ @staticmethod
+ def get_choice_from_options(prompt: str, options: List[str], default: int = 0) -> int:
+ print(f"\n{prompt}")
+ [print(f"{i + 1}) {opt}") for i, opt in enumerate(options)]
+
+ while True:
+ try:
+ choice = input(f"Choice [{default + 1}]: ").strip() or str(default + 1)
+ idx = int(choice) - 1
+ if 0 <= idx < len(options):
+ return idx
+ print("Invalid choice.")
+ except ValueError:
+ print("Enter a number.")
+
+ @staticmethod
+ def get_input(prompt: str) -> str:
+ while True:
+ value = input(f"\n{prompt}").strip()
+ if value:
+ return value
+ print("Input cannot be empty.")
+
+ @staticmethod
+ def confirm(message: str) -> bool:
+ return UserInterface.get_choice_from_options(message, ["Yes", "No"], default=1) == 0
+
+
+class DCPDiscovery:
+    """Locates the HDK tree and candidate DCP tarballs for upload."""
+
+    @staticmethod
+    def find_hdk_dir() -> Optional[str]:
+        """Return the hdk/ directory via $HDK_DIR, git toplevel, or a path walk."""
+        if os.environ.get("HDK_DIR"):
+            return os.environ.get("HDK_DIR")
+
+        try:
+            result = subprocess.run(["git", "rev-parse", "--show-toplevel"], capture_output=True, text=True, check=True)
+            hdk_path = os.path.join(result.stdout.strip(), "hdk")
+            if os.path.isdir(hdk_path):
+                print(f"✓ Found HDK directory: {hdk_path}")
+                return hdk_path
+        except (subprocess.CalledProcessError, FileNotFoundError):
+            # Not inside a git checkout (or git not installed) — fall back to
+            # walking up from this script's location.
+            pass
+
+        return DCPDiscovery.search_for_repo_root_from_current_script_dir()
+
+    @staticmethod
+    def search_for_repo_root_from_current_script_dir() -> Optional[str]:
+        """Walk parent directories looking for hdk_setup.sh next to an hdk/ dir."""
+        current_path = Path(__file__).resolve().parent
+        while current_path != current_path.parent:  # stops at filesystem root
+            hdk_dir = current_path / "hdk"
+            if (current_path / "hdk_setup.sh").is_file() and (hdk_dir).is_dir():
+                print(f"✓ Found HDK directory via repo root: {hdk_dir}")
+                return str(hdk_dir)
+            current_path = current_path.parent
+        print("⚠️ Could not find an HDK directory", file=sys.stderr)
+        return None
+
+    def find_dcp_files_in_hdk_workspace(self) -> List[Tuple[str, str]]:
+        """Return (path, display-name) pairs for example-design DCP tarballs.
+
+        NOTE(review): only scans cl/examples/*/build/checkpoints/*.tar; a
+        tarball under a to_aws/ subdirectory would be missed — confirm.
+        """
+        hdk_dir = DCPDiscovery.find_hdk_dir()
+        if not hdk_dir:
+            return []
+
+        dcp_paths = glob.glob(os.path.join(hdk_dir, "cl", "examples", "*", "build", "checkpoints", "*.tar"))
+        return [(path, self._create_display_name(path)) for path in sorted(dcp_paths)]
+
+    def _create_display_name(self, path: str) -> str:
+        """Build a 'name (Built: ..., Size: ...)' annotation for menu display."""
+        name = os.path.basename(path)
+        info = [name]
+
+        # Build timestamps are embedded in DCP names as YYYY_MM_DD-HHMMSS.
+        date_match = re.search(r"(\d{4}_\d{2}_\d{2}-\d{6})", name)
+        if date_match:
+            try:
+                build_date = datetime.strptime(date_match.group(1), "%Y_%m_%d-%H%M%S")
+                info.append(f"Built: {build_date.strftime('%b %d, %Y at %H:%M')}")
+            except ValueError:
+                # Timestamp-shaped but unparsable: omit the build date.
+                pass
+
+        if os.path.exists(path):
+            size_mb = os.path.getsize(path) / (1024 * 1024)
+            info.append(f"Size: {size_mb:.1f}MB")
+
+        return f"{info[0]} ({', '.join(info[1:])})" if len(info) > 1 else info[0]
+
+    def get_dcp_path_interactive(self) -> str:
+        """Ask whether to auto-scan for DCPs; return the chosen or typed path."""
+        if (
+            UserInterface.get_choice_from_options(
+                "Would you like to scan for DCP files in your HDK workspace?",
+                ["Yes, scan automatically", "No, I'll provide the path manually"],
+            )
+            != 0
+        ):
+            return UserInterface.get_input("Please enter the path to your DCP file: ")
+
+        dcp_files = self.find_dcp_files_in_hdk_workspace()
+        if not dcp_files:
+            return UserInterface.get_input("No DCP files found. Please enter the path: ")
+
+        options = [f"{display}\n   Path: {path}" for path, display in dcp_files]
+        options.append("Other path (specify manually)")
+
+        idx = UserInterface.get_choice_from_options("Select DCP file from your HDK workspace:", options)
+        return dcp_files[idx][0] if idx < len(dcp_files) else UserInterface.get_input("Enter DCP file path: ")
+
+
+class S3Manager:
+    """Bucket selection/creation and DCP upload helpers for one region."""
+
+    def __init__(self, region: str):
+        self.region = region
+        # NOTE(review): the client is created without region_name, so API
+        # calls use the default session region while 'self.region' only
+        # filters bucket listings — confirm this is intended.
+        self.s3_client: S3Client = boto3.client("s3")
+
+    def get_regional_buckets(self) -> List[str]:
+        """List buckets whose location matches self.region.
+
+        A None LocationConstraint means us-east-1 (legacy S3 behavior).
+        """
+        buckets: List[str] = []
+        for bucket_name in [b.get("Name", "") for b in self.s3_client.list_buckets()["Buckets"]]:
+            try:
+                location = self.s3_client.get_bucket_location(Bucket=bucket_name).get("LocationConstraint")
+                if location == self.region or (location is None and self.region == "us-east-1"):
+                    buckets.append(bucket_name)
+            except Exception:
+                # Skip buckets we cannot query (e.g. access denied).
+                continue
+        return buckets
+
+    def create_bucket(self, bucket_name: str) -> None:
+        """Create a bucket in self.region (us-east-1 takes no location config)."""
+        kwargs: Dict[str, Any] = {"Bucket": bucket_name}
+        if self.region != "us-east-1":
+            kwargs["CreateBucketConfiguration"] = {"LocationConstraint": self.region}
+        self.s3_client.create_bucket(**kwargs)
+        print(f"✓ Created S3 bucket: {bucket_name}")
+
+    def ensure_folder_exists(self, bucket: str, folder_path: str) -> None:
+        """Create a zero-byte 'folder' marker object if the prefix is empty."""
+        folder_path = folder_path.rstrip("/") + "/"
+        if not self.s3_client.list_objects_v2(Bucket=bucket, Prefix=folder_path).get("Contents"):
+            self.s3_client.put_object(Bucket=bucket, Key=folder_path)
+            print(f"✓ Created S3 folder: s3://{bucket}/{folder_path}")
+
+    def upload_file(self, local_path: str, bucket: str, key: str) -> None:
+        """Upload one local file to s3://bucket/key, logging progress."""
+        print(f"Uploading: {local_path} -> s3://{bucket}/{key}")
+        self.s3_client.upload_file(local_path, bucket, key)
+        print("✓ Upload completed")
+
+    def get_bucket_interactive(self) -> str:
+        """Let the user pick an existing regional bucket or create a new one."""
+        buckets = self.get_regional_buckets()
+        options = buckets + ["Create new bucket"]
+        # Default choice is 'Create new bucket' (the last entry).
+        idx = UserInterface.get_choice_from_options(f"Select {self.region} S3 bucket:", options, default=len(buckets))
+
+        if idx < len(buckets):
+            return buckets[idx]
+
+        bucket_name = UserInterface.get_input("Enter new bucket name: ")
+        self.create_bucket(bucket_name)
+        return bucket_name
+
+    def get_s3_paths_interactive(self, bucket: str) -> Tuple[str, str]:
+        """Return (dcp_prefix, logs_prefix), optionally sharing one directory."""
+        msg = "Store DCP file and logs in the same directory?"
+        same_dir = UserInterface.get_choice_from_options(msg, ["Yes, same directory", "No, separate directories"]) == 0
+
+        if same_dir:
+            base_path = self._get_s3_base_path(bucket)
+            return base_path, base_path
+
+        print("\nConfiguring DCP storage path:")
+        dcp_path = self._get_s3_base_path(bucket)
+        print("\nConfiguring logs storage path:")
+        logs_path = self._get_s3_base_path(bucket)
+        return dcp_path, logs_path
+
+    def _get_s3_base_path(self, bucket: str) -> str:
+        """Offer the bucket's top-level folders (first 10 only) or a custom path."""
+        response: ListObjectsV2OutputTypeDef = self.s3_client.list_objects_v2(Bucket=bucket, Delimiter="/", MaxKeys=10)
+        folders = [p.get("Prefix", "").rstrip("/") for p in response.get("CommonPrefixes", [])]
+
+        if folders:
+            options = folders + ["Enter custom path"]
+            idx = UserInterface.get_choice_from_options(
+                f"Select folder in bucket '{bucket}':", options, default=len(folders)
+            )
+            if idx < len(folders):
+                return folders[idx]
+
+        return UserInterface.get_input("Enter S3 path (without leading/trailing slashes): ").strip("/")
+
+
+class AFICreator:
+ def __init__(self, region: str, interactive: bool = True):
+ self.region = region
+ self.interactive = interactive
+ self.s3_manager = S3Manager(region)
+ self.dcp_discovery = DCPDiscovery()
+ self.ec2_client: EC2Client = boto3.client("ec2", region_name=region)
+
+ def create_afi(
+ self, afi_data: Dict[str, str], create_bucket: bool = False, poll_interval: Optional[int] = None
+ ) -> CreateFpgaImageResultTypeDef:
+ complete_data = self._complete_afi_data(afi_data)
+ afi_metadata = AfiMetadata(**complete_data)
+
+ self._prepare_s3_resources(afi_metadata, create_bucket)
+
+ if self.interactive:
+ self._confirm_operations(afi_metadata)
+
+ result = self.ec2_client.create_fpga_image(**afi_metadata.get_create_args())
+ print("✓ AFI creation started successfully!")
+ print(f" AFI ID: {result['FpgaImageId']}")
+ print(f" AGFI ID: {result['FpgaImageGlobalId']}")
+
+ if poll_interval:
+ self._handle_polling(result["FpgaImageId"], poll_interval)
+ return result
+
+ def _complete_afi_data(self, data: Dict[str, str]) -> Dict[str, str]:
+ if not data.get("name") and self.interactive:
+ data["name"] = UserInterface.get_input("Enter AFI name: ")
+
+ if not data.get("description") and self.interactive:
+ data["description"] = UserInterface.get_input("Enter AFI description: ")
+
+ if not data.get("dcp_path") and self.interactive:
+ data["dcp_path"] = self.dcp_discovery.get_dcp_path_interactive()
+
+ if not data.get("bucket") and self.interactive:
+ data["bucket"] = self.s3_manager.get_bucket_interactive()
+
+ if not data.get("dcp_s3_path") or not data.get("logs_s3_path") and self.interactive:
+ data["dcp_s3_path"], data["logs_s3_path"] = self.s3_manager.get_s3_paths_interactive(data["bucket"])
+
+ data["region"] = self.region
+ return data
+
+ def _prepare_s3_resources(self, afi: AfiMetadata, create_bucket: bool) -> None:
+ if create_bucket:
+ self.s3_manager.create_bucket(afi.bucket)
+
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ afi_folder = f"{afi.name}_{timestamp}"
+
+ final_dcp_path = f"{afi.dcp_s3_path}/{afi_folder}/"
+ final_logs_path = f"{afi.logs_s3_path}/{afi_folder}/logs/"
+
+ self.s3_manager.ensure_folder_exists(afi.bucket, final_dcp_path)
+ self.s3_manager.ensure_folder_exists(afi.bucket, final_logs_path)
+
+ dcp_key = f"{final_dcp_path}{os.path.basename(afi.dcp_path)}"
+ self.s3_manager.upload_file(afi.dcp_path, afi.bucket, dcp_key)
+
+ afi.dcp_s3_path = dcp_key
+ afi.logs_s3_path = final_logs_path
+
+ def _confirm_operations(self, afi: AfiMetadata) -> None:
+ print(f"""
+Operations to execute:
+1. Upload DCP: {afi.dcp_path} -> s3://{afi.bucket}/{afi.dcp_s3_path}
+2. Create AFI: '{afi.name}' - '{afi.description}' in {self.region}""")
+
+ if not UserInterface.confirm("Proceed with these operations?"):
+ raise KeyboardInterrupt("Operation cancelled by user")
+
+ def _handle_polling(self, afi_id: str, interval: int) -> None:
+ should_poll = not self.interactive or UserInterface.confirm(
+ f"Poll AFI status every {interval} seconds until completion?"
+ )
+
+ if should_poll:
+ self._poll_afi_status(afi_id, interval)
+
+ def _poll_afi_status(self, afi_id: str, interval: int) -> None:
+ print(f"\nPolling AFI status every {interval // 60} minutes...")
+
+ while True:
+ try:
+ state = self.ec2_client.describe_fpga_images(FpgaImageIds=[afi_id])["FpgaImages"][0]["State"]
+ status_code = state["Code"]
+
+ print(f"[{datetime.now():%Y-%m-%d %H:%M:%S}] AFI Status: {status_code}")
+
+ if status_code == "available":
+ print("\n🎉 AFI creation completed successfully!")
+ break
+ if status_code in ["failed", "unavailable"]:
+ print(f"\n❌ AFI creation failed: {status_code}")
+ print(f"Error: {state.get('Message', 'MISSING')}")
+ break
+
+ time.sleep(interval)
+
+ except KeyboardInterrupt:
+ print(f"\nPolling stopped. Check status with: aws ec2 describe-fpga-images --fpga-image-ids {afi_id}")
+ break
+ except Exception as e:
+ print(f"Error polling AFI status: {e}")
+ break
+
+ def provide_next_steps(self, agfi_id: str) -> None:
+ hdk_dir = DCPDiscovery.find_hdk_dir()
+ repo_root_str = "$AWS_FPGA_REPO_DIR" if not hdk_dir else os.path.dirname(hdk_dir)
+ # TODO: Insert documentation pointing to the AMI's
+ # TODO: Insert documentation pointing to the tools
+
+ print(f"""
+{"=" * 80}
+🚀 NEXT STEPS: Load Your FPGA Image
+{"=" * 80}
+
+Your AFI is now ready! Here's how to load it on an F2 instance:
+
+📋 STEP 1: Launch an Amazon EC2 F2 instance (f2.6xlarge, f2.12xlarge, or f2.48xlarge)
+ Make sure to use an FPGA Runtime AMI or the FPGA Developer AMI for the best experience
+
+📋 STEP 2: Set Up Your Environment from the aws-fpga repository root
+ cd {repo_root_str}
+ source sdk_setup.sh
+
+📋 STEP 3: Load Your FPGA Image with the AGFI ID
+ sudo fpga-load-local-image -S 0 -I {agfi_id}
+
+📋 STEP 4: Check that your AFI has 'loaded' successfully with your AGFI ID
+ sudo fpga-describe-local-image -S 0 -H
+
+{"=" * 80}""")
+
+
+# Parser is built at module scope so '--help' can list the discovered regions
+# in the '--region' choices.
+parser = argparse.ArgumentParser(
+    description="Create Amazon FPGA Images (AFIs) from Design Checkpoint files",
+    formatter_class=argparse.RawDescriptionHelpFormatter,
+    epilog=f"""
+Examples:
+# Interactive mode (default)
+python3 create_afi.py
+
+# Non-interactive mode
+python3 create_afi.py --region us-east-1 --name "my-design" \\
+--description "Custom FPGA logic" --dcp-path /path/to/dcp.tar \\
+--bucket my-afi-bucket --create-bucket --poll-interval {DEFAULT_POLL_INTERVAL}
+    """,
+)
+
+# NOTE(review): this may hit the AWS API at import time when the 24h region
+# cache is cold — confirm that is acceptable for anyone importing this module.
+supported_regions = RegionManager.get_supported_regions()
+parser.add_argument("--interactive", action="store_true", default=True, help="Run in interactive mode (default)")
+parser.add_argument("--non-interactive", dest="interactive", action="store_false", help="Run in non-interactive mode")
+parser.add_argument("--region", choices=supported_regions, help="AWS region for AFI creation")
+parser.add_argument("--name", help="AFI name")
+parser.add_argument("--description", help="AFI description")
+parser.add_argument("--dcp-path", help="Path to DCP tarball file")
+parser.add_argument("--bucket", help="S3 bucket name for DCP and logs")
+parser.add_argument("--dcp-s3-path", help="S3 path for DCP")
+parser.add_argument("--logs-s3-path", help="S3 path for logs")
+parser.add_argument("--create-bucket", action="store_true", help="Create S3 bucket if it doesn't exist")
+parser.add_argument(
+    "--poll-interval",
+    type=int,
+    default=DEFAULT_POLL_INTERVAL,
+    help=f"Polling interval in seconds (default: {DEFAULT_POLL_INTERVAL})",
+)
+
+
+class AFIManager:
+ def handle_interactive_mode(self, args: argparse.Namespace) -> str:
+ if args.interactive and not args.region:
+ idx = UserInterface.get_choice_from_options("Select AWS region:", supported_regions)
+ return supported_regions[idx]
+ return args.region
+
+ def create_afi_request(self, args: argparse.Namespace, region: str):
+ creator = AFICreator(region=region, interactive=args.interactive)
+ result = creator.create_afi(
+ afi_data=vars(args),
+ create_bucket=args.create_bucket,
+ poll_interval=args.poll_interval,
+ )
+
+ self.print_success(result, region, args.interactive)
+ creator.provide_next_steps(result["FpgaImageGlobalId"])
+ return result
+
+ @staticmethod
+ def print_success(result: CreateFpgaImageResultTypeDef, region: str, interactive: bool):
+ print("\n✅ AFI creation request submitted successfully!")
+ print(f"AFI ID: {result['FpgaImageId']}")
+ print(f"AGFI ID: {result['FpgaImageGlobalId']}")
+
+ if not interactive:
+ print("\nMonitor progress with:")
+ print(f"aws ec2 describe-fpga-images --fpga-image-ids {result['FpgaImageId']} --region {region}")
+
+
+def main():
+    """CLI entry point. Returns a process exit code (0 success, 1 failure)."""
+    try:
+        args = parser.parse_args()
+    except SystemExit as e:
+        # argparse exits for --help / bad args; translate to a return code so
+        # direct callers of main() never see SystemExit propagate.
+        return 0 if e.code == 0 else 1
+
+    try:
+        afi_manager = AFIManager()
+        region = afi_manager.handle_interactive_mode(args)
+        RegionManager.validate_region_supports_f2(region)
+        afi_manager.create_afi_request(args, region)
+        return 0
+
+    except KeyboardInterrupt:
+        # Falls through to 'return 1' below.
+        # NOTE(review): exit code 1 (not the conventional 130) for Ctrl-C — confirm.
+        print("\n⚠️ Operation cancelled by user", file=sys.stderr)
+    except Exception as e:
+        print(f"❌ Error: {e}", file=sys.stderr)
+        traceback.print_exc()
+    return 1
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/hdk/scripts/start_venv.sh b/hdk/scripts/start_venv.sh
new file mode 100755
index 000000000..c307ec91a
--- /dev/null
+++ b/hdk/scripts/start_venv.sh
@@ -0,0 +1,35 @@
#!/bin/bash

# Create a Python virtual environment next to this script, write a pinned
# requirements.txt, and install the packages into the venv. The script must
# be sourced so the activation persists in the caller's shell.

# Refuse to run unless sourced: activating a venv in a child process is lost.
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    echo "Error: This script must be sourced. Please run:" >&2
    echo "  source ${0}" >&2
    exit 1
fi

# Get the directory of the script, independent of the caller's cwd
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"

# Create a requirements.txt file in the script directory.
# Quoted delimiter: the content is literal, no expansion should occur.
cat > "$SCRIPT_DIR/requirements.txt" << 'EOF'
boto3>=1.33.13
boto3-stubs>=1.34.4
botocore>=1.33.13
botocore-stubs>=1.34.4
mypy-boto3-ec2>=1.34.4
mypy-boto3-s3>=1.34.0
pydantic==2.5.3
pydantic-core==2.14.6
EOF

echo "Creating and activating virtual environment in the script directory"
# Use 'return' (not 'exit') on failure so a sourced failure does not kill
# the caller's shell. Each step depends on the previous one succeeding.
if ! python3 -m venv "$SCRIPT_DIR/venv"; then
    echo "Error: failed to create virtual environment in $SCRIPT_DIR/venv" >&2
    return 1
fi
source "$SCRIPT_DIR/venv/bin/activate" || return 1

# Upgrade pip
python3 -m pip install --upgrade pip || return 1

# Install requirements
if ! python3 -m pip install -r "$SCRIPT_DIR/requirements.txt"; then
    echo "Error: failed to install requirements" >&2
    return 1
fi

echo "Virtual environment created in $SCRIPT_DIR/venv and packages installed successfully!"
diff --git a/hdk/scripts/test_create_afi.py b/hdk/scripts/test_create_afi.py
new file mode 100644
index 000000000..fe95933c2
--- /dev/null
+++ b/hdk/scripts/test_create_afi.py
@@ -0,0 +1,704 @@
+#!/usr/bin/env python3
+
+# =============================================================================
+# Amazon FPGA Hardware Development Kit
+#
+# Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Amazon Software License (the "License"). You may not use
+# this file except in compliance with the License. A copy of the License is
+# located at
+#
+# http://aws.amazon.com/asl/
+#
+# or in the "license" file accompanying this file. This file is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
+# implied. See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+
if __name__ == "__main__":
    # When run directly, measure coverage of the scripts in this directory
    # (excluding the test files themselves). Started before the imports below
    # so that import-time statements in create_afi are counted; the report is
    # produced in the matching __main__ block at the bottom of this file.
    import coverage
    import os

    current_dir = os.path.dirname(os.path.abspath(__file__))
    cov = coverage.Coverage(source=[current_dir], omit=["*test*.py"])
    cov.start()
+
# Standard library
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from io import StringIO
from pathlib import Path
from typing import List, Optional, Tuple
from unittest.mock import DEFAULT, MagicMock, Mock, mock_open, patch

# Third-party
import boto3
from botocore.exceptions import ClientError
from moto import mock_s3
from pydantic import ValidationError
+
+# Import the module under test
+from create_afi import (
+ AFICreator,
+ AfiMetadata,
+ DCPDiscovery,
+ RegionManager,
+ S3Manager,
+ UserInterface,
+ AFIManager,
+ main,
+)
+
+
class TestAfiMetadata(unittest.TestCase):
    """Tests for field validation on the AfiMetadata model in create_afi."""

    def setUp(self):
        # Baseline of valid constructor kwargs; individual tests copy and
        # mutate this dict to exercise specific validation failures.
        self.valid_data = {
            "name": "test-afi",
            "description": "Test AFI",
            "dcp_path": "/path/to/test.tar",
            "bucket": "test-bucket",
            "dcp_s3_path": "dcp/path",
            "logs_s3_path": "logs/path",
            "region": "us-east-1",
        }

    @patch("os.path.getsize")
    @patch("tarfile.open")
    def test_valid_metadata(self, mock_tarfile, mock_getsize):
        """A fully valid payload constructs a model with the given fields."""
        # The model inspects the DCP tarball on disk, so fake a non-zero file
        # size and a tar archive containing one member.
        mock_getsize.return_value = 1024
        mock_tarfile.return_value.__enter__.return_value.getnames.return_value = ["test.dcp"]

        metadata = AfiMetadata(**self.valid_data)
        self.assertEqual(metadata.name, "test-afi")
        self.assertEqual(metadata.bucket, "test-bucket")

    def test_bucket_validation(self):
        """Bucket names violating S3 naming rules are rejected; valid ones pass."""
        invalid_buckets = ["UPPER", "under_score", "ab", "a" * 64, "-bucket", "bucket-"]
        valid_buckets = ["test-bucket", "my-bucket-123", "abc"]

        for bucket in invalid_buckets:
            with self.subTest(bucket=bucket):
                data = self.valid_data.copy()
                data["bucket"] = bucket
                with self.assertRaises(ValidationError):
                    AfiMetadata(**data)

        for bucket in valid_buckets:
            with self.subTest(bucket=bucket):
                # Valid buckets still need the DCP-file checks mocked out.
                with patch("os.path.getsize", return_value=1024), patch("tarfile.open") as mock_tar:
                    mock_tar.return_value.__enter__.return_value.getnames.return_value = ["test.dcp"]
                    data = self.valid_data.copy()
                    data["bucket"] = bucket
                    metadata = AfiMetadata(**data)
                    self.assertEqual(metadata.bucket, bucket)

    @patch("os.path.getsize")
    @patch("tarfile.open")
    def test_dcp_validation(self, mock_tarfile, mock_getsize):
        """The DCP must be a .tar file, non-empty on disk, with tar members."""
        mock_getsize.return_value = 1024
        mock_tarfile.return_value.__enter__.return_value.getnames.return_value = ["test.dcp"]

        # Test invalid extension
        data = self.valid_data.copy()
        data["dcp_path"] = "test.zip"
        with self.assertRaises(ValidationError):
            AfiMetadata(**data)

        # Test empty file
        mock_getsize.return_value = 0
        with self.assertRaises(ValidationError):
            AfiMetadata(**self.valid_data)

        # Test empty tar
        mock_getsize.return_value = 1024
        mock_tarfile.return_value.__enter__.return_value.getnames.return_value = []
        with self.assertRaises(ValidationError):
            AfiMetadata(**self.valid_data)

    def test_get_create_args(self):
        """get_create_args() maps model fields onto create_fpga_image kwargs."""
        with patch("os.path.getsize", return_value=1024), patch("tarfile.open") as mock_tar:
            mock_tar.return_value.__enter__.return_value.getnames.return_value = ["test.dcp"]
            metadata = AfiMetadata(**self.valid_data)
            args = metadata.get_create_args()

            expected = {
                "InputStorageLocation": {"Bucket": "test-bucket", "Key": "dcp/path"},
                "LogsStorageLocation": {"Bucket": "test-bucket", "Key": "logs/path"},
                "Name": "test-afi",
                "Description": "Test AFI",
            }
            self.assertEqual(args, expected)
+
+
class TestRegionManager(unittest.TestCase):
    """Tests for F2 region discovery, result caching, and validation."""

    @classmethod
    def setUpClass(cls):
        # A scratch directory stands in for the user's home cache location.
        cls.test_regions = ["us-east-1", "us-west-2", "eu-west-1"]
        cls.temp_dir = tempfile.mkdtemp()
        cls.cache_file = Path(cls.temp_dir) / ".aws" / "fpga_regions_cache.json"
        cls.cache_file.parent.mkdir(parents=True)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.temp_dir)

    def setUp(self):
        # Start every test without a pre-existing cache file.
        if self.cache_file.exists():
            self.cache_file.unlink()

    def setup_cache_mock(self, mock_cache_file, mock_time, exists=True, mtime=999000):
        """Helper to set up common cache mocking.

        Freezes time.time() at 1000000, configures the mocked CACHE_FILE's
        exists()/stat() results, and returns a mock_open() for its handle.
        """
        mock_time.return_value = 1000000
        mock_cache_file.exists.return_value = exists
        mock_cache_file.stat.return_value.st_mtime = mtime
        return mock_open()

    @patch("create_afi.RegionManager.CACHE_FILE")
    @patch("time.time")
    def test_cache_operations(self, mock_time, mock_cache_file):
        """Test all cache-related scenarios."""
        # Each tuple: (scenario name, cache file exists, cache file content,
        #              expected region list, force json.dump to fail).
        test_cases: List[Tuple[str, bool, Optional[str], List[str], bool]] = [
            ("read valid", True, '{"regions": ["us-east-1"], "timestamp": 1000000}', ["us-east-1"], False),
            ("read invalid json", True, "invalid json", self.test_regions, False),
            ("read missing regions", True, '{"timestamp": 1000000}', self.test_regions, False),
            ("write error", False, None, self.test_regions, False),
            ("json dump error", False, None, self.test_regions, True),
        ]

        for scenario, exists, content, expected, json_error in test_cases:
            with self.subTest(scenario=scenario):
                mock_file = self.setup_cache_mock(mock_cache_file, mock_time, exists)
                if content:
                    mock_file.return_value.__enter__.return_value.read.return_value = content
                mock_cache_file.open = mock_file

                # Discovery is stubbed so every cache-miss path falls back to
                # the full test_regions list without touching AWS.
                with patch("boto3.Session") as mock_session:
                    mock_session.return_value.get_credentials.return_value = MagicMock()
                    with patch.object(RegionManager, "_get_current_f2_region_list", return_value=self.test_regions):
                        if json_error:
                            # Make json.dump raise an exception
                            with patch("json.dump", side_effect=TypeError("JSON serialization error")):
                                regions = RegionManager.get_supported_regions()
                        else:
                            regions = RegionManager.get_supported_regions()
                        self.assertEqual(regions, expected)

    @patch("boto3.Session")
    @patch("boto3.client")
    def test_region_discovery(self, mock_client, mock_session):
        """Test region discovery scenarios."""
        mock_session.return_value.get_available_regions.return_value = ["us-east-1", "us-west-2", "eu-west-1"]

        # Create mock EC2 clients with different behaviors:
        # one region offers F2, one offers nothing, one errors out.
        mock_ec2_clients = {
            "us-east-1": MagicMock(
                **{
                    "describe_instance_type_offerings.return_value": {
                        "InstanceTypeOfferings": [{"InstanceType": "f2.6xlarge"}]
                    }
                }
            ),
            "us-west-2": MagicMock(**{"describe_instance_type_offerings.return_value": {"InstanceTypeOfferings": []}}),
            "eu-west-1": MagicMock(**{"describe_instance_type_offerings.side_effect": Exception("API Error")}),
        }

        def mock_ec2_client(service, region_name):
            return mock_ec2_clients[region_name]

        mock_client.side_effect = mock_ec2_client

        # Only the region that actually offers an F2 type should survive.
        regions = RegionManager._get_current_f2_region_list()
        self.assertEqual(regions, ["us-east-1"])

        # Verify each region was checked with correct parameters
        for region, mock_ec2 in mock_ec2_clients.items():
            if region != "eu-west-1":  # Skip error case
                mock_ec2.describe_instance_type_offerings.assert_called_once_with(
                    Filters=[{"Name": "instance-type", "Values": RegionManager.F2_INSTANCE_TYPES}]
                )

    def test_region_validation(self):
        """Test region validation."""
        with patch.object(RegionManager, "get_supported_regions", return_value=self.test_regions):
            RegionManager.validate_region_supports_f2("us-east-1")  # Should not raise
            with self.assertRaisesRegex(ValueError, "does not support F2 instances"):
                RegionManager.validate_region_supports_f2("invalid-region")
+
+
class TestUserInterface(unittest.TestCase):
    """Tests for the interactive prompt helpers in create_afi.UserInterface."""

    def setUp(self):
        # Shared fixture: a two-entry menu and its prompt string.
        self.choices = ["Option 1", "Option 2"]
        self.question = "Select option:"

    @patch("builtins.input", return_value="")
    def test_get_choice_default(self, input_mock):
        """Pressing Enter yields the supplied default index."""
        chosen = UserInterface.get_choice_from_options(self.question, self.choices, default=1)
        self.assertEqual(chosen, 1)

    @patch("builtins.input", return_value="2")
    def test_get_choice_valid(self, input_mock):
        """A valid 1-based entry maps to the matching 0-based index."""
        chosen = UserInterface.get_choice_from_options(self.question, self.choices)
        self.assertEqual(chosen, 1)

    @patch("builtins.input", side_effect=["invalid", "0", "2"])
    def test_get_choice_invalid_then_valid(self, input_mock):
        """Non-numeric and zero entries are re-prompted until valid."""
        self.assertEqual(UserInterface.get_choice_from_options(self.question, self.choices), 1)
        self.assertEqual(input_mock.call_count, 3)

    @patch("builtins.input", side_effect=["5", "1"])
    def test_get_choice_out_of_range(self, input_mock):
        """An index past the end of the menu is rejected, then retried."""
        self.assertEqual(UserInterface.get_choice_from_options(self.question, self.choices), 0)
        self.assertEqual(input_mock.call_count, 2)

    @patch("builtins.input", side_effect=["", " ", "valid"])
    def test_get_input(self, input_mock):
        """Blank and whitespace-only answers are re-prompted."""
        self.assertEqual(UserInterface.get_input("Prompt:"), "valid")
        self.assertEqual(input_mock.call_count, 3)

    @patch.object(UserInterface, "get_choice_from_options")
    def test_confirm(self, choice_mock):
        """confirm() treats menu index 0 as yes and index 1 as no."""
        choice_mock.return_value = 0
        self.assertTrue(UserInterface.confirm("Confirm?"))
        choice_mock.return_value = 1
        self.assertFalse(UserInterface.confirm("Confirm?"))
+
+
class TestDCPDiscovery(unittest.TestCase):
    """Tests for locating the HDK directory and built DCP tarballs."""

    def setUp(self):
        self.dcp_discovery = DCPDiscovery()

    def test_find_hdk_dir_scenarios(self):
        """find_hdk_dir() checks env var, then git, then a directory walk."""
        # From environment variable
        with patch.dict(os.environ, {"HDK_DIR": "/test/hdk"}):
            self.assertEqual(DCPDiscovery.find_hdk_dir(), "/test/hdk")

        # From git
        with patch.dict(os.environ, {}, clear=True):
            with patch("subprocess.run") as mock_run:
                with patch("os.path.isdir", return_value=True):
                    mock_run.return_value.stdout = "/repo/root\n"
                    self.assertEqual(DCPDiscovery.find_hdk_dir(), "/repo/root/hdk")

        # Fallback case
        with patch.dict(os.environ, {}, clear=True):
            with patch("subprocess.run", side_effect=subprocess.CalledProcessError(1, "git")):
                with patch.object(
                    DCPDiscovery, "search_for_repo_root_from_current_script_dir", return_value="/fallback/hdk"
                ):
                    self.assertEqual(DCPDiscovery.find_hdk_dir(), "/fallback/hdk")

    @patch("create_afi.Path")
    def test_search_for_repo_root(self, mock_path):
        """Walking up from the script dir stops at the filesystem root."""
        # Simulate a two-level parent chain whose root is its own parent,
        # which is how Path signals the filesystem root.
        mock_parent, mock_root = MagicMock(), MagicMock()
        mock_path.return_value.resolve.return_value.parent = mock_parent
        mock_parent.parent = mock_root
        mock_root.parent = mock_root

        # Test HDK not found
        mock_parent.__truediv__.return_value.is_file.return_value = False
        self.assertIsNone(DCPDiscovery.search_for_repo_root_from_current_script_dir())

        # Test HDK found: the walk looks for hdk_setup.sh plus an hdk dir,
        # so route the / operator to the right mock per path component.
        mock_hdk_setup, mock_hdk_dir = MagicMock(), MagicMock()
        mock_hdk_setup.is_file.return_value = True
        mock_hdk_dir.is_dir.return_value = True
        mock_hdk_dir.__str__.return_value = "/test/repo/hdk"
        mock_parent.__truediv__ = MagicMock(
            side_effect=lambda p: mock_hdk_setup if p == "hdk_setup.sh" else mock_hdk_dir
        )
        self.assertEqual(DCPDiscovery.search_for_repo_root_from_current_script_dir(), "/test/repo/hdk")

    def test_dcp_operations(self):
        """Covers tarball globbing and display-name formatting."""
        # Test find_dcp_files scenarios
        with patch.object(DCPDiscovery, "find_hdk_dir") as mock_find_hdk, patch("glob.glob") as mock_glob:
            mock_find_hdk.return_value = None
            self.assertEqual(self.dcp_discovery.find_dcp_files_in_hdk_workspace(), [])

            mock_find_hdk.return_value = "/test/hdk"
            mock_glob.return_value = ["/test/path/test.tar"]
            results = self.dcp_discovery.find_dcp_files_in_hdk_workspace()
            self.assertEqual(len(results), 1)

        # Test display name creation: an unparseable timestamp (month 13)
        # falls back to the bare filename.
        self.assertEqual(
            self.dcp_discovery._create_display_name("/test/file_2024_13_99-999999.tar"), "file_2024_13_99-999999.tar"
        )

        # A parseable timestamp is rendered as a human-readable build date.
        with patch("os.path.exists", return_value=True), patch("os.path.getsize", return_value=2 * 1024 * 1024):
            self.assertIn(
                "Built: Jan 01, 2024 at 12:00",
                self.dcp_discovery._create_display_name("/test/file_2024_01_01-120000.tar"),
            )

    def test_interactive_path_selection(self):
        """Covers the menu flow for choosing a DCP path interactively."""
        with patch.object(DCPDiscovery, "find_dcp_files_in_hdk_workspace") as mock_find:
            with patch.object(UserInterface, "get_choice_from_options") as mock_choice:
                with patch.object(UserInterface, "get_input") as mock_input:
                    # Manual path
                    mock_choice.return_value = 1
                    mock_input.return_value = "/test/path.tar"
                    self.assertEqual(self.dcp_discovery.get_dcp_path_interactive(), "/test/path.tar")

                    # Empty list
                    mock_choice.return_value = 0
                    mock_find.return_value = []
                    self.assertEqual(self.dcp_discovery.get_dcp_path_interactive(), "/test/path.tar")

                    # Choose from list
                    mock_find.return_value = [("/path1.tar", "d1"), ("/path2.tar", "d2")]
                    mock_choice.side_effect = [0, 1]
                    self.assertEqual(self.dcp_discovery.get_dcp_path_interactive(), "/path2.tar")

                    # Choose other path
                    mock_choice.side_effect = [0, 2]
                    self.assertEqual(self.dcp_discovery.get_dcp_path_interactive(), "/test/path.tar")
+
+
class TestS3Manager(unittest.TestCase):
    """Tests for S3 bucket/folder/file operations using moto's mock S3."""

    def setUp(self):
        self.region = "us-east-1"
        self.bucket_name = "test-bucket"
        self.test_content = "test content"

    def setup_mock_bucket(self):
        """Create the shared test bucket and return a fresh S3Manager."""
        self.s3_client = boto3.client("s3", region_name=self.region)
        S3Manager(self.region).create_bucket(self.bucket_name)
        return S3Manager(self.region)

    @mock_s3
    def test_bucket_operations(self):
        """Regional bucket listing filters by region and tolerates errors."""
        self.s3_client = boto3.client("s3", region_name=self.region)
        s3_manager = S3Manager(self.region)

        # Test bucket creation in different regions
        self.s3_client.create_bucket(Bucket="us-east-1-bucket")

        # Test non-default region bucket creation
        west_manager = S3Manager("us-west-2")
        west_manager.create_bucket("west-bucket")  # This will use LocationConstraint

        location = self.s3_client.get_bucket_location(Bucket="west-bucket")["LocationConstraint"]
        self.assertEqual(location, "us-west-2")

        # Test error handling: make get_bucket_location fail for one bucket.
        original_get_location = s3_manager.s3_client.get_bucket_location

        def mock_get_location(**kwargs):
            if kwargs["Bucket"] == "error-bucket":
                # Fix: ClientError lives in botocore.exceptions. The previous
                # boto3.exceptions.ClientError attribute does not exist and
                # raised AttributeError instead of the intended client error.
                raise ClientError(
                    {"Error": {"Code": "AccessDenied", "Message": "Access Denied"}}, "GetBucketLocation"
                )
            return original_get_location(**kwargs)

        s3_manager.s3_client.get_bucket_location = mock_get_location
        self.s3_client.create_bucket(Bucket="error-bucket")

        # Only same-region, error-free buckets are reported.
        buckets = s3_manager.get_regional_buckets()
        self.assertIn("us-east-1-bucket", buckets)
        self.assertNotIn("west-bucket", buckets)
        self.assertNotIn("error-bucket", buckets)

    @mock_s3
    def test_folder_and_file_operations(self):
        """Folder markers are created idempotently; uploads round-trip."""
        s3_manager = self.setup_mock_bucket()
        folder_path = "test/folder"

        # Test folder operations: creating an existing folder must not
        # duplicate the marker object.
        s3_manager.ensure_folder_exists(self.bucket_name, folder_path)
        self.s3_client.put_object(Bucket=self.bucket_name, Key=f"{folder_path}/existing.txt", Body=b"test")
        s3_manager.ensure_folder_exists(self.bucket_name, folder_path)
        objects = self.s3_client.list_objects_v2(Bucket=self.bucket_name, Prefix=f"{folder_path}/")
        self.assertEqual(len(objects.get("Contents", [])), 2)

        # Test file upload
        with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
            f.write(self.test_content)
            temp_file = f.name
        try:
            s3_manager.upload_file(temp_file, self.bucket_name, "test/file.txt")
            content = self.s3_client.get_object(Bucket=self.bucket_name, Key="test/file.txt")["Body"].read().decode()
            self.assertEqual(content, self.test_content)
        finally:
            os.unlink(temp_file)

    @mock_s3
    @patch.object(UserInterface, "get_choice_from_options")
    @patch.object(UserInterface, "get_input")
    def test_interactive_operations(self, mock_input, mock_choice):
        """Interactive bucket and folder selection menus behave as expected."""
        s3_manager = self.setup_mock_bucket()
        self.s3_client.put_object(Bucket=self.bucket_name, Key="existing_folder1/")

        # Test bucket selection (both existing and new)
        mock_choice.side_effect = [0, 1]  # First select existing, then create new
        mock_input.return_value = "new-test-bucket"
        self.assertEqual(s3_manager.get_bucket_interactive(), self.bucket_name)
        self.assertEqual(s3_manager.get_bucket_interactive(), "new-test-bucket")

        # Test path selection - same directory
        mock_choice.reset_mock()
        mock_input.reset_mock()
        mock_choice.side_effect = [0, 0]  # Same directory, select existing folder
        dcp_path, logs_path = s3_manager.get_s3_paths_interactive(self.bucket_name)
        self.assertEqual(dcp_path, logs_path)
        self.assertEqual(dcp_path, "existing_folder1")

        # Test path selection - separate directories
        mock_choice.reset_mock()
        mock_input.reset_mock()
        mock_choice.side_effect = [1, 0, 2]  # Separate dirs, existing folder, custom path
        mock_input.return_value = "custom/path"
        dcp_path, logs_path = s3_manager.get_s3_paths_interactive(self.bucket_name)
        self.assertEqual(dcp_path, "existing_folder1")
        self.assertEqual(logs_path, "custom/path")
+
+
class TestAFICreator(unittest.TestCase):
    """Tests for the AFI creation workflow, polling, and next-step hints."""

    def setUp(self):
        self.region = "us-east-1"
        self.afi_creator = AFICreator(self.region, interactive=False)
        self.mock_afi_data = {
            "name": "test-afi",
            "description": "Test description",
            "dcp_path": "/path/to/test.tar",
            "bucket": "test-bucket",
            "dcp_s3_path": "dcp/path",
            "logs_s3_path": "logs/path",
        }
        # Set up string buffer to capture output printed by the creator,
        # restored in tearDown.
        self.output = StringIO()
        self._stdout = sys.stdout
        sys.stdout = self.output

    def tearDown(self):
        sys.stdout = self._stdout
        self.output.close()

    @patch("boto3.client")
    def test_initialization(self, _):
        """The constructor wires up region, mode, and its collaborators."""
        creator = AFICreator("us-west-2", interactive=True)
        self.assertEqual(creator.region, "us-west-2")
        self.assertTrue(creator.interactive)
        self.assertIsInstance(creator.s3_manager, S3Manager)
        self.assertIsInstance(creator.dcp_discovery, DCPDiscovery)

    @patch.multiple("create_afi.AFICreator", _complete_afi_data=DEFAULT, _poll_afi_status=DEFAULT)
    @patch.multiple("os.path", getsize=DEFAULT)
    @patch("boto3.client")
    @patch("tarfile.open")
    @patch("create_afi.UserInterface.confirm")
    def test_create_afi_workflow(self, mock_confirm, mock_tarfile, mock_client, **mocks):
        """Happy path submits the request; declining the prompt cancels it."""
        # Setup basic mocks: a plausible tarball plus completed AFI data, so
        # create_afi() proceeds straight to the EC2 call.
        mock_tarfile.return_value.__enter__.return_value.getnames.return_value = ["test.dcp"]
        mocks["getsize"].return_value = 1024 * 1024
        mocks["_complete_afi_data"].return_value = {**self.mock_afi_data, "region": self.region}

        self.afi_creator.ec2_client = MagicMock()
        self.afi_creator.ec2_client.create_fpga_image.return_value = {
            "FpgaImageId": "afi-12345",
            "FpgaImageGlobalId": "agfi-67890",
        }
        self.afi_creator.s3_manager = MagicMock()
        self.afi_creator.interactive = True

        # Test create_bucket and polling
        mock_confirm.side_effect = [True, True]  # First call
        result = self.afi_creator.create_afi({"name": "test"}, create_bucket=True, poll_interval=300)
        self.afi_creator.s3_manager.create_bucket.assert_called_once()
        mocks["_poll_afi_status"].assert_called_once_with("afi-12345", 300)

        # Reset mocks and set up for cancellation test
        mock_confirm.reset_mock()
        mock_confirm.side_effect = None  # Clear side_effect
        mock_confirm.return_value = False  # Second call
        with self.assertRaises(KeyboardInterrupt) as cm:
            self.afi_creator.create_afi({"name": "test"})
        self.assertEqual(str(cm.exception), "Operation cancelled by user")

    def test_afi_data_completion(self):
        """Complete data passes through; missing data is gathered via prompts."""
        result = self.afi_creator._complete_afi_data(self.mock_afi_data.copy())
        self.assertEqual(result, {**self.mock_afi_data, "region": self.region})

        # Interactive path: every helper that would prompt the user is stubbed.
        with patch.multiple(
            "create_afi.UserInterface", get_input=MagicMock(side_effect=["New AFI", "New Description"])
        ):
            with patch.multiple(
                "create_afi.S3Manager",
                get_s3_paths_interactive=MagicMock(return_value=("dcp/path", "logs/path")),
                get_bucket_interactive=MagicMock(return_value="test-bucket"),
            ):
                with patch.multiple(
                    "create_afi.DCPDiscovery", get_dcp_path_interactive=MagicMock(return_value="/path/to/dcp")
                ):
                    result = AFICreator(self.region, interactive=True)._complete_afi_data({})
                    self.assertEqual(result["name"], "New AFI")
                    self.assertEqual(result["description"], "New Description")

    @patch("time.sleep")
    def test_polling_scenarios(self, mock_sleep):
        """Polling reports success, failure, errors, and interrupts."""
        self.afi_creator.ec2_client = MagicMock()

        test_cases = [
            ({"Code": "available"}, "🎉 AFI creation completed successfully!"),
            ({"Code": "failed"}, "❌ AFI creation failed: failed"),
            ({"Code": "unavailable", "Message": "Error message"}, "Error: Error message"),
        ]

        # Capture all output
        for state, expected_output in test_cases:
            self.afi_creator.ec2_client.describe_fpga_images.return_value = {"FpgaImages": [{"State": state}]}
            self.afi_creator._poll_afi_status("afi-12345", 60)
            self.assertIn(expected_output, self.output.getvalue())
            # Clear the captured buffer between scenarios.
            self.output.truncate(0)
            self.output.seek(0)

        # Test interrupts: Ctrl-C during sleep stops polling gracefully.
        mock_sleep.side_effect = KeyboardInterrupt()
        self.afi_creator.ec2_client.describe_fpga_images.return_value = {"FpgaImages": [{"State": {"Code": "pending"}}]}
        self.afi_creator._poll_afi_status("afi-12345", 60)
        self.assertIn("Polling stopped", self.output.getvalue())
        self.output.truncate(0)
        self.output.seek(0)

        # Test generic exception
        self.afi_creator.ec2_client.describe_fpga_images.side_effect = Exception("Test error")
        self.afi_creator._poll_afi_status("afi-12345", 60)
        self.assertIn("Error polling AFI status: Test error", self.output.getvalue())

    @patch.object(DCPDiscovery, "find_hdk_dir")
    def test_provide_next_steps(self, mock_find_hdk):
        """provide_next_steps() runs with and without a discoverable HDK dir."""
        for hdk_dir in ["/path/to/hdk", None]:
            mock_find_hdk.return_value = hdk_dir
            self.afi_creator.provide_next_steps("agfi-12345")
+
+
class TestAFIManager(unittest.TestCase):
    """Tests for the CLI orchestration layer (AFIManager)."""

    def setUp(self):
        self.afi_manager = AFIManager()
        # Namespace-like stand-in for parsed argparse args.
        self.mock_args = Mock(interactive=False, region="us-east-1", create_bucket=False, poll_interval=30)
        self.mock_result = {"FpgaImageId": "afi-12345", "FpgaImageGlobalId": "agfi-67890"}

    @patch.object(UserInterface, "get_choice_from_options")
    def test_handle_interactive_mode(self, mock_get_choice):
        """Region comes from args unless interactive mode must prompt for it."""
        # Test non-interactive mode
        result = self.afi_manager.handle_interactive_mode(self.mock_args)
        self.assertEqual(result, "us-east-1")
        mock_get_choice.assert_not_called()

        # Test interactive mode with no region
        self.mock_args.interactive = True
        self.mock_args.region = None
        mock_get_choice.return_value = 0

        with patch("create_afi.supported_regions", ["us-east-1", "us-west-2"]):
            result = self.afi_manager.handle_interactive_mode(self.mock_args)

        self.assertEqual(result, "us-east-1")
        mock_get_choice.assert_called_once()

    @patch.object(AFICreator, "create_afi")
    @patch.object(AFICreator, "provide_next_steps")
    def test_create_afi_request(self, mock_provide_steps, mock_create_afi):
        """create_afi_request() forwards args, reports, and returns the result."""
        mock_create_afi.return_value = self.mock_result

        with patch.object(self.afi_manager, "print_success") as mock_print_success:
            result = self.afi_manager.create_afi_request(self.mock_args, "us-east-1")

        self.assertEqual(result, self.mock_result)
        mock_create_afi.assert_called_once_with(
            afi_data=vars(self.mock_args),
            create_bucket=self.mock_args.create_bucket,
            poll_interval=self.mock_args.poll_interval,
        )
        mock_provide_steps.assert_called_once_with("agfi-67890")
        mock_print_success.assert_called_once_with(self.mock_result, "us-east-1", False)

    def test_print_success(self):
        """print_success() emits 3 lines interactively, 5 in batch mode."""
        test_cases = [
            (True, 3),  # Interactive mode: 3 prints
            (False, 5),  # Non-interactive mode: 5 prints
        ]

        for interactive, expected_prints in test_cases:
            with self.subTest(interactive=interactive):
                with patch("builtins.print") as mock_print:
                    AFIManager.print_success(self.mock_result, "us-east-1", interactive)
                    self.assertEqual(mock_print.call_count, expected_prints)

                    # Verify the content of print calls
                    calls = [str(call) for call in mock_print.call_args_list]
                    self.assertIn("AFI creation request submitted successfully", calls[0])
                    self.assertIn("afi-12345", calls[1])
                    self.assertIn("agfi-67890", calls[2])

                    if not interactive:
                        self.assertIn("Monitor progress with", calls[3])
                        self.assertIn("describe-fpga-images", calls[4])
+
+
class TestMain(unittest.TestCase):
    """Tests for the top-level main() entry point."""

    @patch("create_afi.parser.parse_args")
    def test_parser_exit_scenarios(self, parse_args_mock):
        """main() maps argparse SystemExit codes onto process exit codes."""
        for exit_code, expected in ((0, 0), (1, 1)):
            with self.subTest(exit_code=exit_code):
                parse_args_mock.side_effect = SystemExit(exit_code)
                self.assertEqual(main(), expected)

    @patch("create_afi.parser.parse_args")
    @patch.object(AFIManager, "handle_interactive_mode")
    @patch.object(RegionManager, "validate_region_supports_f2")
    @patch.object(AFIManager, "create_afi_request")
    def test_main_success(self, create_mock, validate_mock, handle_mock, parse_args_mock):
        """The happy path validates the region and submits the request."""
        parsed = Mock()
        parse_args_mock.return_value = parsed
        handle_mock.return_value = "us-east-1"

        self.assertEqual(main(), 0)
        handle_mock.assert_called_once_with(parsed)
        validate_mock.assert_called_once_with("us-east-1")
        create_mock.assert_called_once_with(parsed, "us-east-1")

    @patch("create_afi.parser.parse_args")
    @patch.object(AFIManager, "handle_interactive_mode")
    def test_main_keyboard_interrupt(self, handle_mock, parse_args_mock):
        """Ctrl-C prints a cancellation notice and exits with code 1."""
        parse_args_mock.return_value = Mock()
        handle_mock.side_effect = KeyboardInterrupt()

        with patch("builtins.print") as print_mock:
            self.assertEqual(main(), 1)
            print_mock.assert_any_call("\n⚠️ Operation cancelled by user", file=sys.stderr)

    @patch("create_afi.parser.parse_args")
    @patch.object(AFIManager, "handle_interactive_mode")
    @patch("traceback.print_exc")
    def test_main_generic_exception(self, traceback_mock, handle_mock, parse_args_mock):
        """Unexpected errors print a message plus traceback and return 1."""
        parse_args_mock.return_value = Mock()
        handle_mock.side_effect = Exception("test error")

        with patch("builtins.print") as print_mock:
            self.assertEqual(main(), 1)
            print_mock.assert_any_call("❌ Error: test error", file=sys.stderr)
            traceback_mock.assert_called_once()
+
+
if __name__ == "__main__":
    # Run the suite without letting unittest call sys.exit(), so the
    # coverage report below always runs. `cov` was started in the matching
    # __main__ block at the top of this file.
    try:
        unittest.main(exit=False, buffer=True)
    except SystemExit:
        pass
    cov.stop()
    cov.save()
    cov.report(
        show_missing=True,
        skip_covered=True,  # Only show files that have missing lines
        skip_empty=True,  # Skip files with no executable statements
    )
    # Optional: Generate HTML report
    # cov.html_report()
diff --git a/hdk_setup.sh b/hdk_setup.sh
index ee819e827..2dc0c9f49 100644
--- a/hdk_setup.sh
+++ b/hdk_setup.sh
@@ -72,6 +72,7 @@ function check_git_lfs {
echo "ERROR: git-lfs is not installed" >&2
echo "Please install git-lfs:" >&2
echo " For Ubuntu/Debian: sudo apt-get install git-lfs" >&2
+ echo " For Rocky Linux/RHEL: sudo dnf install git-lfs" >&2
return 1
fi
@@ -211,7 +212,9 @@ if [ $skip_downloads -eq 0 ]; then
return 1
fi
git submodule sync --recursive
- git submodule update --init $cl_ip_path
+ # To ensure that users don't have to manually input git credentials
+ GIT_LFS_SKIP_SMUDGE=1 git submodule update --init $cl_ip_path
+ GIT_LFS_USERNAME="" GIT_LFS_PASSWORD="" git -C $cl_ip_path lfs pull
git -C $cl_ip_path checkout $cl_ip_branch
git -C $cl_ip_path pull origin $cl_ip_branch
else
@@ -251,6 +254,26 @@ else
fi
fi
info_msg "Setting up HLx environment"

# The HLx content lives in a git-lfs submodule; its release branch name
# embeds the submodule path (see .gitmodules).
hlx_path="hdk/common/shell_stable/hlx"
hlx_branch="Hlx_1.0-$hlx_path"

if [ $skip_downloads -eq 0 ]; then
    # git-lfs is required to fetch the large files tracked in the submodule.
    check_git_lfs
    if [ $? -ne 0 ]; then
        return 1
    fi
    git submodule sync --recursive
    # To ensure that users don't have to manually input git credentials
    GIT_LFS_SKIP_SMUDGE=1 git submodule update --init $hlx_path
    GIT_LFS_USERNAME="" GIT_LFS_PASSWORD="" git -C $hlx_path lfs pull
    git -C $hlx_path checkout $hlx_branch
    git -C $hlx_path pull origin $hlx_branch
else
    info_msg "Skipping shell downloads and submodule setup (--skip_downloads specified)"
fi
+
cd $current_dir
info_msg "AWS HDK setup PASSED."
diff --git a/release_version.txt b/release_version.txt
index 02b460e6d..82c9478c5 100644
--- a/release_version.txt
+++ b/release_version.txt
@@ -1 +1 @@
-RELEASE_VERSION=2.2.0
+RELEASE_VERSION=2.2.1
diff --git a/sdk/README.md b/sdk/README.md
index 5eac40f30..d93d5a97f 100644
--- a/sdk/README.md
+++ b/sdk/README.md
@@ -1,54 +1,74 @@
# AWS EC2 FPGA Software Development Kit
-This directory includes the drivers and runtime environment required by any EC2 FPGA Instance.
+The AWS FPGA SDK directory provides drivers and runtime tools for managing Amazon FPGA Images (AFIs) on EC2 FPGA instances. Use this SDK to load, clear, and interact with pre-built AFIs on F2 instances in Linux environments.
-The [SDK userspace directory](./userspace) contains the [Amazon FPGA Image (AFI) Management Tools](./userspace/fpga_mgmt_tools/README.md), which includes both the source code to the AFI Management Tools as well as detailed descriptions of the commands to use on an FPGA instance.
+**Note:** This SDK is for **deploying** AFIs, not building or registering them. For AFI development, see the [HDK](../hdk/README.md).
-The SDK is **NOT** used to build or register AFI, rather it is only used for managing and deploying pre-built AFIs. For building and registering AFIs, please refer to the [HDK](../hdk/README.md).
+## Quick Start
-**NOTE:** This SDK is designed and tested for Linux environments only.
+The AWS FPGA SDK requires `gcc` to be installed on a Linux distribution AMI: `sudo {yum|apt-get} install gcc`
-# Quick Start
+```bash
+# Clone the repo, then set up and install the SDK with env variables (if not already done)
+git clone https://github.com/aws/aws-fpga.git
+cd aws-fpga
+source sdk_setup.sh
-## Using an AFI on an EC2 FPGA Instance
+# Check FPGA management tools
+fpga-describe-local-image --help
+fpga-load-local-image --help
-You can setup and install the SDK with the following few steps. Note that the first two steps may be skipped if you have already ran them in the above HDK setup.
+# Verify SDK environment
+echo $SDK_DIR
-```bash
- # Fetch the HDK and SDK code
- git clone https://github.com/aws/aws-fpga.git
- # Move to the root directory of the repository before running the next script
- cd aws-fpga
- # Set up the envronment variables, build and install the SDK
- source sdk_setup.sh
+# Load an AFI (replace with your AFI ID and slot)
+sudo fpga-load-local-image -S 0 -I agfi-0123456789abcdef0
+
+# Verify AFI loaded
+sudo fpga-describe-local-image -S 0
+
+# Test management tools
+cd $SDK_DIR/userspace/fpga_mgmt_examples
+make
+sudo ./fpga_mgmt_example
```
-**NOTE:** The `sdk_setup.sh` would install the [FPGA management tools](./userspace/fpga_mgmt_tools/README.md) if they are not already available in `/usr/bin`. The `sdk_setup.sh` requires having `gcc` installed. if it is not installed, try running the next command to install it on Amazon Linux, Centos or Redhat distributions:
+## Core Tools
-## Notes on using AFI Management Tools
+Fully documented in [FPGA Management Tools](./userspace/fpga_mgmt_tools/README.md)
-Early release of the AFI management tools may return uninformative errors or unexpected responses. We recommend running commands a second time after waiting 15-30 seconds if an unexpected response is received. For example, if a loaded image does not show up when using the `fpga-describe-local-image` API, attempt rerunning the command prior to calling `fpga-load-local-image` again.
+| Tool | Purpose |
+|------|---------|
+| `fpga-describe-local-image-slots` | List available FPGA slots |
+| `fpga-load-local-image` | Load AFI to FPGA slot |
+| `fpga-describe-local-image` | Check AFI status |
+| `fpga-clear-local-image` | Clear AFI from slot |
-The `fpga-describe-local-image` API is currently asynchronous which will require polling with `fpga-describe-local-image` until the expected image appears. If the describe call does not provide the expected response, attempt the `fpga-load-local-image` one more time. Attempting to load images that are not compatible with the currently loaded shell will fail and may not return an informative error message. Please verify the design was built with the shell that is loaded on the instance.
+**All tools require `sudo` privileges.** Use `-help` flag for detailed options.
-Please reach out to the AWS FPGA team with any instability issues so we can help as soon as possible.
+## SDK Components
-## Additional SDK Documentation
+### Management Tools
-* [Virtual Ethernet](./apps/virtual-ethernet/README.md)
+- **[FPGA Management Tools](./userspace/fpga_mgmt_tools/README.md)** - Command-line AFI management
+- **[C API Examples](./userspace/fpga_mgmt_examples/README.md)** - Programmatic AFI control
+- **[Python Bindings](./userspace/cython_bindings/README.md)** - Python interface to FPGA APIs
-* [Virtual Ethernet SDE HW Guide](./apps/virtual-ethernet/doc/SDE_HW_Guide.md)
+### Applications
-* [Virtual Ethernet Application Guide](./apps/virtual-ethernet/doc/Virtual_Ethernet_Application_Guide.md)
+- **[Virtual Ethernet](./apps/virtual-ethernet/README.md)** - High-performance networking
+- **[MSI-X Interrupts](./apps/msix-interrupts/README.md)** - Interrupt handling implementation
-* [MSI-X Interrupts Implementation Guide](./apps/msix-interrupts/README.md)
+### Performance & Optimization
-* [FPGA Management Examples](./userspace/fpga_mgmt_examples/README.md)
+- **[Performance Optimization Guide](./docs/F2_Software_Performance_Optimization_Guide.md)**
+- **[Load Times Analysis](./docs/Load_Times.md)**
-* [Python Bindings](./userspace/cython_bindings/README.md)
+## Troubleshooting
-* [FPGA Management Tools](./userspace/fpga_mgmt_tools/README.md)
+Refer to the [FAQ section for FPGA Mgmt Tools](./userspace/fpga_mgmt_tools/README.md#faq) or respective applications and tools.
-* [F2 Software Performance Optimization Guide](./docs/F2_Software_Performance_Optimization_Guide.md)
+**Need help?**
-* [Load-Times](./docs/Load_Times.md)
+- [GitHub Issues](https://github.com/aws/aws-fpga/issues) - Code/documentation problems
+- [AWS re:Post](https://repost.aws/tags/TAc7ofO5tbQRO57aX1lBYbjA/fpga-development) - F2 instance questions
diff --git a/sdk/apps/virtual-ethernet/patches/spp-dpdk/0001-net-spp-f2-update-of-AWS-SPP-network-driver.patch b/sdk/apps/virtual-ethernet/patches/spp-dpdk/0001-net-spp-f2-update-of-AWS-SPP-network-driver.patch
index 7fb4a8264..71c47f822 100644
--- a/sdk/apps/virtual-ethernet/patches/spp-dpdk/0001-net-spp-f2-update-of-AWS-SPP-network-driver.patch
+++ b/sdk/apps/virtual-ethernet/patches/spp-dpdk/0001-net-spp-f2-update-of-AWS-SPP-network-driver.patch
@@ -1,10 +1,10 @@
-From 179266b3f16e1cdc32bb1507dc0230ee847abae9 Mon Sep 17 00:00:00 2001
-From:
-Date: Sat, 16 Nov 2024 02:44:00 +0000
+From c373650011a291ec6dc9e45fdce1a1825e4b62ac Mon Sep 17 00:00:00 2001
+From: aws-fpga-support@amazon.com
+Date: Thu, 18 Sep 2025 19:35:38 +0000
Subject: [PATCH] The AWS FPGA SPP (Streaming Packet Port) PMD uses the AWS SDE
- (Streaming Data Engine) to provide packet streaming connectivity between the
- AWS FPGA DPDK application and the AWS FPGA CL (Custom Logic). The SDE and
- the CL communicate using the AXI-Stream interface.
+ (Streaming Data Engine) to provide packet streaming connectivity between the
+ AWS FPGA DPDK application and the AWS FPGA CL (Custom Logic). The SDE and the
+ CL communicate using the AXI-Stream interface.
---
drivers/net/meson.build | 4 +
@@ -15,11 +15,11 @@ Subject: [PATCH] The AWS FPGA SPP (Streaming Packet Port) PMD uses the AWS SDE
drivers/net/spp/spp_hal.c | 1553 +++++++++++++++++++++++++++++
drivers/net/spp/spp_hal.h | 183 ++++
drivers/net/spp/spp_hal_dbg.c | 455 +++++++++
- drivers/net/spp/spp_hal_private.h | 258 +++++
+ drivers/net/spp/spp_hal_private.h | 296 ++++++
drivers/net/spp/spp_hal_regs.h | 520 ++++++++++
drivers/net/spp/spp_logs.h | 48 +
usertools/dpdk-devbind.py | 5 +-
- 12 files changed, 3653 insertions(+), 1 deletion(-)
+ 12 files changed, 3691 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/spp/meson.build
create mode 100644 drivers/net/spp/spp.h
create mode 100644 drivers/net/spp/spp_defs.h
@@ -2913,10 +2913,10 @@ index 0000000000..0f9f4e5c12
+#endif /* SPP_DBG_SW_LOOPBACK */
diff --git a/drivers/net/spp/spp_hal_private.h b/drivers/net/spp/spp_hal_private.h
new file mode 100644
-index 0000000000..c361019ca8
+index 0000000000..93700ee348
--- /dev/null
+++ b/drivers/net/spp/spp_hal_private.h
-@@ -0,0 +1,258 @@
+@@ -0,0 +1,296 @@
+/*
+ * Copyright 2015-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
@@ -2941,9 +2941,37 @@ index 0000000000..c361019ca8
+extern "C" {
+#endif
+
++static inline void
++spp_unaligned_memcpy(void *dst, const void *src, size_t len)
++{
++ size_t num_32bit_writes = len / 4; /* 4 bytes per write */
++ for (size_t i = 0; i < num_32bit_writes; i++) {
++ uint32_t source = *((const uint32_t *)src + i);
++ rte_write32_relaxed(source, dst);
++ }
++}
++
+#if !defined(SPP_DBG_SW_LOOPBACK)
+#if defined(SPP_USE_AVX2)
+
++static inline void
++spp_16byte_aligned_memcpy(void *dst, const void *src)
++{
++ __m128i xmm0;
++
++ xmm0 = _mm_loadu_si128((const __m128i *)src);
++ _mm_storeu_si128((__m128i *)dst, xmm0);
++}
++
++static inline void
++spp_32byte_aligned_memcpy(void *dst, const void *src)
++{
++ __m256i ymm0;
++
++ ymm0 = _mm256_load_si256((const __m256i *)src);
++ _mm256_store_si256((__m256i *)dst, ymm0);
++}
++
+#if defined(SPP_USE_COMPACT_DESCS)
+/* Non-debug FastPath */
+static inline void
@@ -2959,10 +2987,12 @@ index 0000000000..c361019ca8
+static inline void
+spp_rx_desc_memcpy(void *dst, const void *src)
+{
-+ __m128i xmm0;
-+
-+ xmm0 = _mm_loadu_si128((const __m128i *)src);
-+ _mm_storeu_si128((__m128i *)dst, xmm0);
++ bool pointers_aligned = (((uint64_t)dst & 0xF) == 0) && (((uint64_t)src & 0xF) == 0);
++ if (pointers_aligned) {
++ spp_16byte_aligned_memcpy(dst, src);
++ } else {
++ spp_unaligned_memcpy(dst, src, 16 /* len */);
++ }
+}
+
+/**
@@ -2972,10 +3002,12 @@ index 0000000000..c361019ca8
+static inline void
+spp_tx_desc_memcpy(void *dst, const void *src)
+{
-+ __m128i xmm0;
-+
-+ xmm0 = _mm_loadu_si128((const __m128i *)src);
-+ _mm_storeu_si128((__m128i *)dst, xmm0);
++ bool pointers_aligned = (((uint64_t)dst & 0xF) == 0) && (((uint64_t)src & 0xF) == 0);
++ if (pointers_aligned) {
++ spp_16byte_aligned_memcpy(dst, src);
++ } else {
++ spp_unaligned_memcpy(dst, src, 16 /* len */);
++ }
+}
+#else
+/* Non-debug FastPath */
@@ -2995,10 +3027,12 @@ index 0000000000..c361019ca8
+static inline void
+spp_rx_desc_memcpy(void *dst, const void *src)
+{
-+ __m128i xmm0;
-+
-+ xmm0 = _mm_loadu_si128((const __m128i *)src);
-+ _mm_storeu_si128((__m128i *)dst, xmm0);
++ bool pointers_aligned = (((uint64_t)dst & 0xF) == 0) && (((uint64_t)src & 0xF) == 0);
++ if (pointers_aligned) {
++ spp_16byte_aligned_memcpy(dst, src);
++ } else {
++ spp_unaligned_memcpy(dst, src, 16 /* len */);
++ }
+}
+
+/**
@@ -3008,11 +3042,14 @@ index 0000000000..c361019ca8
+static inline void
+spp_tx_desc_memcpy(void *dst, const void *src)
+{
-+ __m256i ymm0;
-+
-+ ymm0 = _mm256_loadu_si256((const __m256i *)src);
-+ _mm256_storeu_si256((__m256i *)dst, ymm0);
++ bool pointers_aligned = (((uint64_t)dst & 0x1F) == 0) && (((uint64_t)src & 0x1F) == 0);
++ if (pointers_aligned) {
++ spp_32byte_aligned_memcpy(dst, src);
++ } else {
++ spp_unaligned_memcpy(dst, src, 32 /* len */);
++ }
+}
++
+#endif
+
+#else /* !SPP_USE_AVX2 */
@@ -3175,6 +3212,7 @@ index 0000000000..c361019ca8
+#endif
+
+#endif
++
diff --git a/drivers/net/spp/spp_hal_regs.h b/drivers/net/spp/spp_hal_regs.h
new file mode 100644
index 0000000000..efe8e240b4
@@ -3779,5 +3817,4 @@ index a278f5e7f3..1e757d5990 100755
crypto_devices = [encryption_class, intel_processor_class]
dma_devices = [cnxk_dma, hisilicon_dma,
--
-2.40.1
-
+2.43.7
diff --git a/sdk/userspace/fpga_mgmt_tools/README.md b/sdk/userspace/fpga_mgmt_tools/README.md
index 14e0c56df..c5566ad23 100644
--- a/sdk/userspace/fpga_mgmt_tools/README.md
+++ b/sdk/userspace/fpga_mgmt_tools/README.md
@@ -1,64 +1,65 @@
# Amazon FPGA Image (AFI) Management Tools
-AWS provides the following set of command-line tools for Amazon FPGA Image (AFI) management while running on an FPGA-enabled EC2 instance (e.g., F2). **The tools currently support Linux Instances only.**
+AWS provides the following set of command-line tools for managing Amazon FPGA Images (AFIs) on FPGA-enabled EC2 instances (e.g., F2). **The tools currently support Linux Instances only and require `sudo` privileges.**
-* **`fpga-describe-local-image-slots`**
- * Returns the FPGA image slot numbers and device mappings to use for the `fpga-load-local-image`, `fpga-clear-local-image`, and `fpga-describe-local-image` commands.
+## Quick Reference
-* **`fpga-describe-local-image`**
- * Returns the status of the FPGA image for a specified FPGA image slot number. The *fpga-image-slot* parameter is an index that represents a given FPGA within an instance. Use `fpga-describe-local-image-slots` to return the available FPGA image slots for the instance.
+The FPGA management tools are automatically installed when you source the SDK.
-* **`fpga-load-local-image`**
- * Loads the specified FPGA image to the specified slot number, and returns the status of the command. The *fpga-image-slot* parameter is an index that represents a given FPGA within an instance. Use `fpga-describe-local-image` to return the FPGA image status, and `fpga-describe-local-image-slots` to return the available FPGA image slots for the instance.
+```bash
+git clone https://github.com/aws/aws-fpga.git
+cd aws-fpga
+source sdk_setup.sh # Installs tools to /usr/bin
+```
-* **`fpga-clear-local-image`**
- * Clears the specified FPGA image slot, including FPGA internal and external memories that are used by the slot. The *fpga-image-slot* parameter is an index that represents a given FPGA within an instance. Use `fpga-describe-local-image` to return the FPGA image status, and `fpga-describe-local-image-slots` to return the available FPGA image slots for the instance.
+### Essential Commands
-* **`fpga-start-virtual-jtag`**
- * Starts a Virtual JTAG XVC server, to debug tools like Vivado Lab Edition Hardware Manager to access debug cores inside the AFI. Please refer to [Virtual JTAG userguide](../../../hdk/docs/Virtual_JTAG_XVC.md).
+```bash
+# List available FPGA slots
+sudo fpga-describe-local-image-slots
-* **`fpga-get-virtual-led`**
- * Returns a bit-map representing the state (1/0) the Virtual LEDs exposed by the Custom Logic (CL) part of the AFI.
+# Load AFI to slot 0
+sudo fpga-load-local-image -S 0 -I agfi-0123456789abcdef0
-* **`fpga-get-virtual-dip-switch`**
- * Returns a bit-map representing the current setting for the Virtual DIP Switches that drives the Custom Logic (CL) part of the AFI.
+# Check AFI status
+sudo fpga-describe-local-image -S 0
-* **`fpga-set-virtual-dip-switch`**
- * Takes bit-map (in binary representation) to set for the Virtual DIP Switches that drives the Custom Logic (CL) part of the AFI.
+# Clear AFI from slot 0
+sudo fpga-clear-local-image -S 0
+```
-* **`fpga-describe-clkgen`**
- * Returns the currently loaded frequencies in MHz for each clock in each MMCM. The *fpga-image-slot* parameter is an index that represents a given FPGA within an instance. If the design loaded onto the FPGA does not have the clkgen IP, error clkgen-ip-not-found will be returned.
+### Command Reference Table
-* **`fpga-load-clkgen-dynamic`**
- * Loads a frequency into the first clock of each specified MMCMs. MMCMs that are not specified will be set to the default recipes. Returns the currently loaded frequencies in MHz for each clock in each MMCM. The *fpga-image-slot* parameter is an index that represents a given FPGA within an instance. If the design loaded onto the FPGA does not have the clkgen IP, error clkgen-ip-not-found will be returned.
+| Command | Purpose | Key Parameters |
+|---------|---------|----------------|
+| `fpga-describe-local-image-slots` | List FPGA slots and PCIe mappings | `-H` (human readable) |
+| `fpga-load-local-image` | Load AFI to specified slot and returns slot status | `-S <slot>` `-I <agfi-id>` `-A` (async) |
+| `fpga-describe-local-image` | Get AFI status for a slot | `-S <slot>` `-R` (rescan) `-H` (human readable) |
+| `fpga-clear-local-image` | Clear AFI from slot (includes internal & external FPGA memories) | `-S <slot>` `-A` (async) |
+| `fpga-start-virtual-jtag` | Start JTAG debug server described in [Virtual JTAG Guide](../../../hdk/docs/Virtual_JTAG_XVC.md) | `-S <slot>` `-P <port>` |
+| `fpga-get-virtual-led` | Read virtual LED states in binary bit-map | `-S <slot>` |
+| `fpga-set-virtual-dip-switch` | Set virtual DIP switches in binary bit-map | `-S <slot>` `-D <bitmap>` |
+| `fpga-get-virtual-dip-switch` | Read virtual DIP switches in binary bit-map | `-S <slot>` |
-* **`fpga-load-clkgen-recipe`**
- * Loads a clkgen recipe into the specified clocking groups. MMCMs that are not specified will be set to the default recipes. Returns the currently loaded frequencies in MHz for each clock in each MMCM. The *fpga-image-slot* parameter is an index that represents a given FPGA within an instance. If the design loaded onto the FPGA does not have the clkgen IP, error clkgen-ip-not-found will be returned.
+### Clock Management Commands
+If the design loaded onto the FPGA does not have the clkgen IP, error clkgen-ip-not-found will be returned. MMCMs that are not specified in `load` commands will be set to the default recipes.
-All of the AFI Management Tools support a `-help` option that may be used to display the full set of options.
+| Command | Purpose | Key Parameters |
+|---------|---------|----------------|
+| `fpga-describe-clkgen` | Returns the currently loaded frequencies in MHz for each clock in each MMCM | `-S <slot>` |
+| `fpga-load-clkgen-dynamic` | Loads a frequency into the first clock of each specified MMCMs and returns the current frequencies in MHz | `-S <slot>` `-A <freq>` `-B <freq>` `-C <freq>` |
+| `fpga-load-clkgen-recipe` | Loads a clkgen recipe into the specified clocking groups and returns the current frequencies in MHz | `-S <slot>` `-A <recipe>` `-B <recipe>` `-C <recipe>` |
-### `sudo` or `root` Privileges
+**Note:** Clock commands require AFI with clkgen IP. Returns `clkgen-ip-not-found` error if not available.
-The tools require sudo or root access rights since AFI loads and clears modify the underlying system hardware (also see the FAQ section "Q: How do the AFI Management Tools work?".
+## Key Concepts
-## Installs or Updates to the AFI Management Tools
+### FPGA Image Slots - Getting Inventory of the Available FPGA Slots
-The tools can be downloaded and installed from AWS SDK/HDK GitHub repository [aws-fpga](https://github.com/aws/aws-fpga), as follows:
-
-```bash
-git clone https://github.com/aws/aws-fpga.git
-cd aws-fpga
-source sdk_setup.sh
-```
-
-The `sdk_setup.sh` script will build the AFI Management Tools and install them in `/usr/bin`.
-
-## Quickstart
-
-Once you have the AFI Management Tools installed on your F2 instance, you can display the FPGA slot numbers and PCIe mappings for driver attachment (e.g., PCI Domain:Bus:Device:Function).
-
-### Getting Inventory of the Available FPGA Slots
+- Index representing a specific FPGA within an instance, passed via the `-S` argument to various commands
+- Use `fpga-describe-local-image-slots` to see available slots
+- F2.6xlarge has 1 slot (0), F2.48xlarge has 8 slots (0-7 shown in the example below)
```bash
sudo fpga-describe-local-image-slots -H
@@ -83,19 +84,11 @@ sudo fpga-describe-local-image-slots -H
AFIDEVICE 7 0x1d0f 0x9048 0000:b4:00.0
```
-* The above list displayed the slots in an F2.48xl instance that has 8 FPGAs on slot 0 through 7.
-
-* The VendorId is the PCIe Configuration space Vendor ID, with 0x1d0f representing the Amazon registered PCIe Vendor ID. The developer can choose the Vendor ID for their own AFIs.
-
-* The DeviceId is the PCIe Configuration space Device ID, with 0x9048 being the default.
-
-* The DBDF is the common PCIe bus topology representing the Domain:Bus#:Device#:Function#.
-
-** NOTE: ** *While each FPGA has more than one PCIe Physical Function, the AFI Management Tools will present the VendorId and DeviceId of the first PF only*.
+**NOTE:** *While each FPGA has more than one PCIe Physical Function, the AFI Management Tools will present the VendorId and DeviceId of the first PF only*.
### Describing the AFI Content Loaded on a Specific FPGA Slot
-The following command displays the current state for the given FPGA slot number. The output shows that the FPGA in the “cleared” state right after instance create.
+The output shows that the FPGA is in the “cleared” state right after instance launch or after `fpga-clear-local-image`.
```bash
sudo fpga-describe-local-image -S 0 -H
@@ -108,140 +101,144 @@ sudo fpga-describe-local-image -S 0 -H
AFIDEVICE 0 0x1d0f 0x9048 0000:34:00.0
```
-### Synchronous AFI Load and Clear Operations
-#### Synchronously Loading an AFI to a Specific FPGA Slot
+### Amazon Global FPGA Image ID (AGFI)
-To load the AFI, use the FPGA slot number and Amazon Global FPGA Image ID parameters (see FAQ for AGFI). In synchronous mode, this command will wait for the AFI to transition to the "loaded" state, perform a PCI device remove and rescan in order to expose the unique AFI Vendor and Device Id, and display the final state for the given FPGA slot number.
+- Globally unique identifier for AFIs (e.g., `agfi-0123456789abcdef0`)
+- Different from regional AFI IDs used in AWS APIs
+- Same AGFI works across all AWS regions
-```bash
-sudo fpga-load-local-image -S 0 -I agfi-0fedcba9876543210 -H
+### PCIe Device Information
- ...
+The developer can choose the Vendor and Device IDs for their own AFIs by following the [HDK section on AFI PCIe IDs](../../../hdk/README.md#afi-pcie-ids).
- Type FpgaImageSlot FpgaImageId StatusName StatusCode ErrorName ErrorCode ShVersion
- AFI 0 agfi-0fedcba9876543210 loaded 0 ok 0
- Type FpgaImageSlot VendorId DeviceId DBDF
- AFIDEVICE 0 0x6789 0x1d50 0000:34:00.0
-```
+- **VendorId**: The PCIe Configuration space Vendor ID, with 0x1d0f representing the Amazon registered PCIe Vendor ID
+- **DeviceId**: The PCIe Configuration space Device ID, with 0x9048 being the default
+- **DBDF**: The common PCIe bus topology representing the Domain:Bus:Device.Function PCIe address
+- **BAR**: Base Address Register for memory-mapped access
-#### Synchronously Clearing the FPGA Image on Specific Slot
+## Usage Patterns
-The following command will clear the FPGA image, including internal and external memories. In synchronous mode, this command will wait for the AFI to transition to the "cleared" state, perform a PCI device remove and rescan in order to expose the default AFI Vendor and Device Id, and display the final state for the given FPGA slot number.
+### Synchronous Operations (Default)
+
+Commands wait for completion and perform automatic PCIe rescan:
```bash
-sudo fpga-clear-local-image -S 0 -H
+# Waits for AFI to load, then rescans PCIe bus
+$ sudo fpga-load-local-image -S 0 -I agfi-0123456789abcdef0 -H
+Type FpgaImageSlot FpgaImageId StatusName StatusCode ErrorName ErrorCode ShVersion
+AFI 0 agfi-0123456789abcdef0 loaded 0 ok 0
+Type FpgaImageSlot VendorId DeviceId DBDF
+AFIDEVICE 0 0x6789 0x1d50 0000:34:00.0
+```
- ...
+### Asynchronous Operations
- Type FpgaImageSlot FpgaImageId StatusName StatusCode ErrorName ErrorCode ShVersion
- AFI 0 No AFI cleared 1 ok 0
- Type FpgaImageSlot VendorId DeviceId DBDF
- AFIDEVICE 0 0x1d0f 0x9048 0000:34:00.0
+Use `-A` flag for non-blocking operations:
+
+```bash
+sudo fpga-load-local-image -S 0 -I agfi-0123456789abcdef0 -A
+# Returns immediately, check status separately
+sudo fpga-describe-local-image -S 0 -R # -R rescans PCIe bus
```
-### Asynchronous AFI Load and Clear Operations
-#### Asynchronously Loading an AFI to a Specific FPGA Slot
+### Multi-FPGA Operations
-To load the AFI, use the FPGA slot number and Amazon Global FPGA Image ID parameters (see FAQ for AGFI). The "-A" is used for asynchronous AFI load operations.
+Commands can target different slots simultaneously:
```bash
-sudo fpga-load-local-image -S 0 -I agfi-0fedcba9876543210 -A
+# Load same AFI to multiple slots
+sudo fpga-load-local-image -S 0 -I agfi-0123456789abcdef0 &
+sudo fpga-load-local-image -S 1 -I agfi-0123456789abcdef0 &
+wait
```
-#### Describing the AFI content loaded on a specific FPGA slot after an asynchronous AFI load
+## FAQ
-Displays the current state for the given FPGA slot number. The output shows the FPGA in the “loaded” state after the FPGA image "load" operation. **_The "-R" option performs a PCI device remove and rescan in order to expose the unique AFI Vendor and Device Id._**
+### What do I do if my AFI fails to load, hangs, or my commands time out?
-```bash
-sudo fpga-describe-local-image -S 0 -R -H
+- Verify your AGFI ID is correct with `aws ec2 describe-fpga-images`
+- Ensure AFI is compatible with current shell version
+- Check instance has F2 FPGA slots: `sudo fpga-describe-local-image-slots`
+- Wait 15-30 seconds and retry
+- Check `dmesg` for kernel messages (`sudo dmesg | tail -20`)
+- All commands require `sudo` privileges
+- Tools access `/dev/kmsg` and PCIe sysfs files
- ...
+### What do I do if my PCIe device is not visible?
- Type FpgaImageSlot FpgaImageId StatusName StatusCode ErrorName ErrorCode ShVersion
- AFI 0 agfi-0fedcba9876543210 loaded 0 ok 0
- Type FpgaImageSlot VendorId DeviceId DBDF
- AFIDEVICE 0 0x6789 0x1d50 0000:34:00.0
-```
+- Use `-R` flag with `fpga-describe-local-image` to rescan
+- Verify AFI loaded successfully before accessing device
+- Check for Amazon PCIe devices with `lspci | grep -i amazon`
-#### Asynchronously Clearing the FPGA Image on Specific Slot
+### What is the Amazon Global FPGA Image ID (AGFI)?
-The following command will clear the FPGA image, including internal and external memories. The "-A" is used for asynchronous AFI clear operations.
+- The AGFI is an AWS **globally** unique identifier that is used to reference a specific Amazon FPGA Image (AFI). Please learn more in the [Amazon FPGA Images (AFIs) Guide](./../../../hdk/docs/Amazon_FPGA_Images_Afis_Guide.md)
-```bash
-sudo fpga-clear-local-image -S 0 -A
-```
+### What is an `fpga-image-slot`?
-#### Describing the AFI content loaded on a specific FPGA slot after an asynchronous AFI clear
+- The fpga-image-slot is an index that represents a given FPGA within an instance. Use `fpga-describe-local-image-slots` to return the available FPGA image slots for the instance.
-The following command displays the current state for the given FPGA slot number. It shows that the FPGA is in the “cleared” state after the FPGA image "clear" operation. **_The "-R" option performs a PCI device remove and rescan in order to expose the default AFI Vendor and Device Id._**
+### What are the Vendor and Device IDs listed in the `fpga-describe-local-image-slots` and `fpga-describe-local-image` output?
-```bash
-sudo fpga-describe-local-image -S 0 -R -H
+- The VendorId and DeviceId represent the unique identifiers for a PCI device as seen in the PCI Configuration Header Space. These identifiers are typically used by device drivers to know which devices to attach to. The identifiers are assigned by PCI-SIG. You can use Amazon's default DeviceId, or use your own during the `CreateFpgaImage` EC2 API.
- ...
+### What is a PF?
- Type FpgaImageSlot FpgaImageId StatusName StatusCode ErrorName ErrorCode ShVersion
- AFI 0 No AFI cleared 1 ok 0
- Type FpgaImageSlot VendorId DeviceId DBDF
- AFIDEVICE 0 0x1d0f 0x9048 0000:34:00.0
-```
+- A PF refers to a PCI Physical Function that is exposed by the FPGA hardware. For example, it is accessible by user-space programs via the sysfs filesystem in the path `/sys/bus/pci/devices/Domain:Bus:Device.Function`. The `Domain:Bus:Device.Function` syntax is the same as returned from `lspci` program output. Examples: **FPGA application PF** `0000:34:00.0`, **FPGA management PF** `0000:34:00.1`.
-## FAQ
+### What is a BAR?
+
+- A PCI Base Address Register (BAR) specifies the memory region where FPGA memory space may be accessed by an external entity (like the instance CPU or other FPGAs). Multiple BARs may be supported by a given PCI device. In this FAQ section (also see PF), BAR0 from a device may be accessed (for example) by opening and memory mapping the resource0 sysfs file in the path `/sys/bus/pci/devices/Domain:Bus:Device.Function/resource0`. Once BAR0 has been memory mapped, the BAR0 registers may be accessed through a pointer to the memory mapped region (refer to the open and mmap system calls).
+
+### What is the AFIDEVICE and how is it used?
-* **Q: What is the Amazon Global FPGA Image ID (AGFI)?**
- * The AGFI is an AWS **globally** unique identifier that is used to reference a specific Amazon FPGA Image (AFI).
- * It is used to refer to a specific AFI when using the FPGA Management tools from within an EC2 instance.
- * In the examples, `agfi-0fedcba9876543210` is specified in the `fpga-load-local-image` command in order to load a specific AFI
-into the given `fpga-image-slot`.
- * AGFI IDs should not be confused with AFI IDs. The latter are **regional** IDs that are used to refer to a specific AFI when using the AWS EC2 APIs to create or manage and AFI. For example, when copying an AFI across regions, it will preserve the same AGFI ID, but get a new regional AFI ID.
+- Within the `fpga-describe-local-image-slots` and `fpga-describe-local-image` commands the AFIDEVICE represents the PCI PF that is used to communicate with the AFI. The AFIDEVICE functionality exposed through the PF is dependent on the AFI that is loaded via the `fpga-load-local-image` command. For example, DMA and/or memory-mapped IO (MMIO) may be supported depending on the loaded AFI, which is then used to communicate with the AFI in order to perform an accelerated application-dependent task within the FPGA. User-space applications may access the AFIDEVICE PF through sysfs as is noted above in this FAQ section (also see PF).
-* **Q: What is a `fpga-image-slot`?**
- * The fpga-image-slot is an index that represents a given FPGA within an instance. Use `fpga-describe-local-image-slots` to return the available FPGA image slots for the instance.
+### How do the AFI Management Tools work?
-* **Q: What are the Vendor and Device IDs listed in the `fpga-describe-local-image-slots` and `fpga-describe-local-image` output?**
- * The VendorId and DeviceId represent the unique identifiers for a PCI device as seen in the PCI Configuration Header Space. These identifiers are typically used by device drivers to know which devices to attach to. The identifiers are assigned by PCI-SIG. You can use Amazon's default DeviceId, or use your own during the `CreateFpgaImage` EC2 API.
+- Within the F2 instance, the FPGAs expose a management PF (e.g. `0000:34:00.1`) that is used for control channel communication between the instance and AWS.
+- The FPGA management PF BAR0 is **reserved** for this communication path.
+- The FPGA application drivers **should not** access the FPGA management PF BAR0.
+- The AFI Management Tools memory map the FPGA management PF BAR0 and communicate with AWS using internally defined messages and hardware registers.
+- The Amazon FPGA Image Tools require `sudo` or `root` access level since AFI loads and clears are modifying the underlying system hardware.
+- `sudo` or `root` privilege is also required since the tools access the sysfs PCI subsystem and `/dev/kmsg` for `dmesg` logging.
-* **Q: What is a DBDF?**
- * A DBDF is simply an acronym for Domain:Bus:Device.Function (also see PF).
+### Can the AFI Management Tools work concurrently on multiple FPGA image slots?
-* **Q: What is a PF?**
- * A PF refers to a PCI Physical Function that is exposed by the FPGA hardware. For example, it is accessible by a user-space programs via the sysfs filesystem in the path `/sys/bus/pci/devices/Domain:Bus:Device.Function`. The `Domain:Bus:Device.Function` syntax is the same as returned from `lspci` program output. Examples: **FPGA application PF** `0000:34:00.0`, **FPGA management PF** `0000:34:00.1`.
+- The tools can be executed on multiple FPGAs concurrently. This may be done without synchronization between processes that are using the tools.
-* **Q: What is a BAR?**
- * A PCI Base Address Register (BAR) specifies the memory region where FPGA memory space may be accessed by an external entity (like the instance CPU or other FPGAs). Multiple BARs may be supported by a given PCI device. In this FAQ section (also see PF), BAR0 from a device may be accessed (for example) by opening and memory mapping the resource0 sysfs file in the path `/sys/bus/pci/devices/Domain:Bus:Device.Function/resource0`. Once BAR0 has been memory mapped, the BAR0 registers may be accessed through a pointer to the memory mapped region (refer to the open and mmap system calls).
+### Can the AFI Management Tools work concurrently from multiple processes on the same FPGA?
-* **Q: What is the AFIDEVICE and how is it used?**
- * Within the `fpga-describe-local-image-slots` and `fpga-describe-local-image` commands the AFIDEVICE represents the PCI PF that is used to communicate with the AFI. The AFIDEVICE functionality exposed through the PF is dependent on the AFI that is loaded via the `fpga-load-local-image` command. For example, DMA and/or memory-mapped IO (MMIO) may be supported depending on the loaded AFI, which is then used to communicate with the AFI in order to perform an accelerated application-dependent task within the FPGA. User-space applications may access the AFIDEVICE PF through sysfs as is noted above in this FAQ section (also see PF).
+- Without synchronization between processes, the tools should only be executed as one worker process per FPGA (highest level of concurrency), or one worker process across all FPGAs (least level of concurrency).
+- Multiple concurrent process access to the tools using the same FPGA without proper synchronization between processes will cause response timeouts, and other indeterminate results.
-* **Q: How do the AFI Management Tools work?**
- * Within the F2 instance, the FPGAs expose a management PF (e.g. `0000:34:00.1`) that is used for control channel communication between the instance and AWS.
- * The FPGA management PF BAR0 is **reserved** for this communication path.
- * The FPGA application drivers **should not** access the FPGA management PF BAR0.
- * The AFI Management Tools memory map the FPGA management PF BAR0 and communicate with AWS using internally defined messages and hardware registers.
- * The Amazon FPGA Image Tools require `sudo` or `root` access level since AFI loads and clears are modifying the underlying system hardware.
- * `sudo` or `root` privilege is also required since the tools access the sysfs PCI subsystem and `/dev/kmsg` for `dmesg` logging.
+### What is an afi-power-violation?
-* **Q: Can the AFI Management Tools work concurently on multiple FPGA image slots?**
- * The tools can be executed on multiple FPGAs concurrently. This may be done without synchronization between processes that are using the tools.
+- The F2 system can only reliably provide a certain amount of power to the FPGA. If an AFI consumes more than this amount of power, the F2 system will disable the input clocks to the AFI. For more information on preventing, detecting, and recovering from this state, see AFI power guide (COMING SOON)
-* **Q: Can the AFI Management Tools work concurrently from multiple processes on the same FPGA?**
- * Without synchronization between processes, the tools should only be executed as one worker process per FPGA (highest level of concurrency), or one worker process across all FPGAs (least level of concurrency).
- * Multiple concurrent process access to the tools using the same FPGA without proper synchronization between processes will cause response timeouts, and other indeterminate results.
+### How can I reset the AFI?
-* **Q: What is an afi-power-violation?**
- * The F2 system can only reliably provide a certain amount of power to the FPGA. If an AFI consumes more than this amount of power, the F2 system will disable the input clocks to the AFI. For more information on preventing, detecting, and recovering from this state, see AFI power guide (COMING SOON)
+- The AFI may be reset (reloaded) via `fpga-load-local-image`, and/or reset back to a fully clean slate via `fpga-clear-local-image` and `fpga-load-local-image`.
-* **Q: How can I reset the AFI?**
- * The AFI may be reset (reloaded) via fpga-load-local-image, and/or reset back to a fully clean slate via `fpga-clear-local-image` and `fpga-load-local-image`.
+### Where can I reach out for additional help?
-## References
-* AWS FPGA SDK/HDK on github [aws-fpga](https://github.com/aws/aws-fpga)
+- For any issues with the devkit documentation or code, please open a [GitHub issue](https://github.com/aws/aws-fpga/issues) with all steps to reproduce.
+- For questions about F2 instances, please open a [re:Post issue with the 'FPGA Development' tag](https://repost.aws/tags/TAc7ofO5tbQRO57aX1lBYbjA/fpga-development).
+
+## Related Documentation
+
+- AWS FPGA SDK/HDK on [aws-fpga GitHub](https://github.com/aws/aws-fpga)
+- [C API Examples](../fpga_mgmt_examples/README.md) - Programmatic AFI management
+- [Python Bindings](../cython_bindings/README.md) - Python interface
+- [Virtual JTAG Guide](../../../hdk/docs/Virtual_JTAG_XVC.md) - Debug setup
+- [Clock Recipes Guide](../../../hdk/docs/Clock_Recipes_User_Guide.md) - Clock configuration
### AWS EC2 References
-* [AWS EC2 Getting Started](https://aws.amazon.com/ec2/getting-started/)
-* [AWS EC2 Instance Types](https://aws.amazon.com/ec2/instance-types/)
-* [AWS EC2 User Guide](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts.html)
-* [AWS EC2 Networking and Security](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EC2_Network_and_Security.html)
-* [AWS EC2 Key Pairs](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
-* [AWS EC2 Attach EBS Volume](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html)
-* [AWS EC2 Troubleshooting](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-troubleshoot.html)
+
+- [AWS EC2 Getting Started](https://aws.amazon.com/ec2/getting-started/)
+- [AWS EC2 Instance Types](https://aws.amazon.com/ec2/instance-types/)
+- [AWS EC2 User Guide](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts.html)
+- [AWS EC2 Networking and Security](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EC2_Network_and_Security.html)
+- [AWS EC2 Key Pairs](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
+- [AWS EC2 Attach EBS Volume](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html)
+- [AWS EC2 Troubleshooting](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-troubleshoot.html)
diff --git a/shared/bin/check_doc_links.py b/shared/bin/check_doc_links.py
index c66a7cf03..b91bda0fe 100644
--- a/shared/bin/check_doc_links.py
+++ b/shared/bin/check_doc_links.py
@@ -18,414 +18,470 @@
# =============================================================================
import argparse
-import glob
+import atexit
import logging
import os
import re
import signal
import subprocess
import sys
+import time
from collections import defaultdict
+from dataclasses import dataclass
from enum import IntEnum
-from time import sleep
-from typing import Dict, List, Match, Set
-from urllib.parse import unquote
+from pathlib import Path
+from typing import Dict, List, Optional, Set, Tuple
+from urllib.parse import unquote, urljoin
import requests
+import urllib3
+import urllib3.util
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
+logging.basicConfig(level=logging.INFO, format="%(message)s", handlers=[logging.StreamHandler(sys.stdout)])
+logger = logging.getLogger(__name__)
+
+# Suppress SSL warnings for better output
+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+# Constants
SUCCESS = 200
NOT_FOUND = 404
LOCAL_HOST = "http://localhost:3000"
-REPO_ROOT_DIR = (
- subprocess.run(
- "git rev-parse --show-toplevel".split(),
- capture_output=True,
- cwd=os.path.dirname(__file__),
- check=True,
- )
- .stdout.decode("utf-8")
- .strip()
-)
+REQUEST_TIMEOUT = 10 # Increased timeout but with better handling
+MAX_RETRIES = 1
+BACKOFF_FACTOR = 1
+ERROR = "ERROR"
+EXCEPTION = "EXCEPTION"
class ResultEnum(IntEnum):
+ """Enumeration for link check results."""
+
OK = 0
ERROR = 1
EXCEPTION = 2
-class InternalLinkType(IntEnum):
- SAME_PAGE = 1
- OTHER_PAGE = 2
-
-
-def display_links_dict(in_dict: Dict[str, Dict[str, str]], logger: logging.Logger) -> None:
- logger.info("")
- for rst_f, links_dict in in_dict.items():
- logger.info(f"{rst_f}:")
- for link_text, link_body in links_dict.items():
- logger.info(f"\t{link_text} {link_body}")
- logger.info("")
-
-
-def check_section_exists_in_html(link: str, response: requests.Response) -> int:
- _, fragment = link.split("#", 1)
- fragment = unquote(fragment) # Handle URL-encoded fragments
-
- # Works when the RST tag matches the section name
- # This can't always be accommodated, because there cannot be two identical tags across any two RST documents.
- # IE Getting started in both HDK README and SDK README
- # Can't both be .. _getting-started:
- # One should be .. _getting-started-hdk: and the other .. _getting-started-sdk:
- section_id_pattern = f'section id="{fragment}"'
- span_id_pattern = f'span id="{fragment}"'
- header_id_pattern = f'h2 id="{fragment}"'
-
- content = response.text
- found_internal_section = any(
- [pattern in content for pattern in [section_id_pattern, span_id_pattern, header_id_pattern]]
- )
- if response.status_code == SUCCESS and found_internal_section:
- return SUCCESS
- else:
- return NOT_FOUND
-
-
-def perform_github_line_check(link: str, response: requests.Response) -> int:
- # Extract line numbers from URL
- line_match = re.search(r"#L(\d+)(?:-L?(\d+))?$", link)
- if not line_match:
- return SUCCESS
-
- start_line = int(line_match.group(1))
- # If group(2) exists, it's a range. Otherwise, end_line = start_line
- end_line = int(line_match.group(2)) if line_match.group(2) else start_line
-
- page_text = response.text.splitlines()
- total_lines = len(page_text)
-
- if 1 <= start_line <= end_line <= total_lines:
- return SUCCESS
- else:
- return NOT_FOUND
- return SUCCESS
-
-
-def validate_section_exists(link: str, response: requests.Response) -> int:
- # GitHub line number links are different from other page section links:
- if "github.com" in link:
- return perform_github_line_check(link, response)
- return check_section_exists_in_html(link, response)
-
-
-def get_link_to_self_html(rst_f: str, link_body: str, internal_link_type: InternalLinkType) -> str:
- if internal_link_type == InternalLinkType.SAME_PAGE:
- rst_to_html = rst_f.replace(".rst", ".html") # Recontextualize to html
- rst_to_html = rst_to_html.replace(
- "docs-rtd/source/", ""
- ) # Lop this off because we want nothing but the path that follows
- rst_to_html = rst_to_html.replace(REPO_ROOT_DIR, "") # Get rid of the full path as well
- rst_to_html = rst_to_html.replace(
- "./", ""
- ) # Get rid of any leading ./. Not that it causes problems, it's just confusing to look at in the output
-
- # Drop leading double //s. Not that they cause issues, they're just confusing to look at in the output
- if rst_to_html[0] == "/":
- rst_to_html = rst_to_html[1:]
- return f"{LOCAL_HOST}/{rst_to_html}{link_body}"
- else:
- filename_only = rst_f.split("/")[-1]
- relative_to_docs_rtd = rst_f.replace(REPO_ROOT_DIR, "")
- relative_to_docs_dir = relative_to_docs_rtd.replace("docs-rtd/source", "")
- final_relative_link = relative_to_docs_dir.replace(filename_only, "")
- revised_link = f"{final_relative_link}/{link_body}"
- web_server_internal_link = revised_link.replace("///", "/").replace("//", "/")
- if web_server_internal_link[0] == "/":
- web_server_internal_link = web_server_internal_link[1:]
- web_server_internal_link = f"{LOCAL_HOST}/{web_server_internal_link}"
- return web_server_internal_link
-
-
-def construct_relative_link(rst_f: str, link_body: str) -> str:
- # Start by going to the location of the file that contains the relative link
- os.chdir(os.path.dirname(rst_f))
-
- # Follow the relative link
- back_pos = link_body.find("../")
- while back_pos != -1:
- os.chdir("..")
- link_body = link_body[:back_pos] + link_body[back_pos + 3 :]
- back_pos = link_body.find("../")
-
- # Obtain the specified file path that current directory is relative to
- start = os.getcwd().replace(REPO_ROOT_DIR, "").replace("/docs-rtd/source/", "")
-
- # Reassemble the link relative to the repo root
- start = f"{start}/{link_body.replace('../', '').replace('./', '')}"
- if start[-1] == "/":
- start = start[:-1]
- return start
-
-
-def do_generic_html_request(rst_f: str, link_body: str, session: requests.Session) -> requests.Response:
- response = session.head(f"{LOCAL_HOST}/{link_body}")
- if response.status_code != SUCCESS:
- relative_link = construct_relative_link(rst_f, link_body)
- response = session.head(f"{LOCAL_HOST}/{relative_link}")
- return response
-
-
-def do_other_page_section_request(rst_f: str, link_body: str, session: requests.Session) -> requests.Response:
- web_server_internal_link = get_link_to_self_html(rst_f, link_body, InternalLinkType.OTHER_PAGE)
- response = session.get(
- web_server_internal_link,
- timeout=3,
- headers={"User-Agent": "Mozilla/5.0"},
- allow_redirects=True,
- verify=True,
- )
- response.status_code = validate_section_exists(web_server_internal_link, response)
- return response
-
-
-def do_same_page_link_request(rst_f: str, link_body: str, session: requests.Session) -> requests.Response:
- web_server_internal_link = get_link_to_self_html(rst_f, link_body, InternalLinkType.SAME_PAGE)
- response = session.get(web_server_internal_link, timeout=1)
- response.status_code = validate_section_exists(web_server_internal_link, response)
- return response
-
-
-def do_external_link_request(link_body: str, session: requests.Session) -> requests.Response:
- response = session.get(
- link_body,
- timeout=3,
- headers={
- "User-Agent": "Mozilla/5.0",
- "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
- "Accept-Language": "en-US,en;q=0.5",
- },
- allow_redirects=True,
- verify=True,
- )
- if "#" in link_body:
- response.status_code = validate_section_exists(link_body, response)
- return response
-
-
-def gather_link_exceptions(exceptions_file_path: str) -> Set[str]:
- exceptions_set: Set[str] = set()
- if exceptions_file_path:
- with open(exceptions_file_path, "r") as give_pass:
- contents = give_pass.readlines()
- exceptions_set = {str(line.split(": ")[0]).strip() for line in contents}
- return exceptions_set
-
-
-def perform_request(
- link_body: str,
- rst_f: str,
- preamble: str,
- exceptions_set: Set[str],
- logger: logging.Logger,
-) -> int:
- is_external_link = link_body.startswith("http")
- is_same_page_section_link = link_body.startswith("#")
- is_other_page_section_link = "http" not in link_body and "#" in link_body
- link_is_broken = False
-
- session = requests.Session()
- retry_strategy = Retry(
- total=6,
- backoff_factor=2,
- status_forcelist=[429, 500, 502, 503, 504],
- )
- adapter = HTTPAdapter(max_retries=retry_strategy)
- session.mount("http://", adapter)
- session.mount("https://", adapter)
-
- link_is_broken = False
- status = "ERROR"
- response: requests.Response = requests.Response()
-
- if link_body in exceptions_set:
- logger.info(preamble + ": " + "OK, Exception Granted")
- return ResultEnum.EXCEPTION
-
- try:
- if is_external_link:
- response = do_external_link_request(link_body, session)
-
- elif is_same_page_section_link:
- response = do_same_page_link_request(rst_f, link_body, session)
-
- elif is_other_page_section_link:
- response = do_other_page_section_request(rst_f, link_body, session)
-
- elif "html" in link_body:
- response = do_generic_html_request(rst_f, link_body, session)
-
- else:
- file_or_directory_link = f"{LOCAL_HOST}/{link_body}"
- response = session.head(file_or_directory_link, timeout=1)
-
- link_is_broken = response.status_code != SUCCESS
- status = "ERROR" if link_is_broken else "OK"
- if status == "OK":
- logger.info(preamble + ": " + f"{status}, {response.status_code}")
- else:
- logger.error(preamble + ": " + f"{status}, {response.status_code}")
- except Exception as _:
- link_is_broken = True
- logger.error(preamble + ": " + "ERROR, Request exception thrown")
- finally:
- session.close()
- return ResultEnum(link_is_broken)
-
-
-def navigate_to_rtd_build_html_dir() -> None:
- rtd_build_html_dir = "docs-rtd/build/html"
- os.chdir(f"{REPO_ROOT_DIR}/{rtd_build_html_dir}")
-
-
-def check_links(
- files_links_dict: Dict[str, List[List[str]]],
- exceptions_file_path: str,
- logger: logging.Logger,
-) -> None:
- navigate_to_rtd_build_html_dir()
- link_server = subprocess.Popen(
- [sys.executable, "-m", "http.server", "3000"],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- )
- sleep(1)
-
- total_links_in_error = 0
- errored_links: Dict[str, Dict[str, str]] = defaultdict(dict)
-
- exceptions_granted = 0
- links_granted_exceptions: Dict[str, Dict[str, str]] = defaultdict(dict)
-
- exceptions_set: Set[str] = set()
- if exceptions_file_path:
- exceptions_set = gather_link_exceptions(exceptions_file_path)
- logger.info(exceptions_set)
-
- try:
- for rst_f, link_info in files_links_dict.items():
- logger.info(f"Now testing links from: {rst_f}")
- for link in link_info:
- link_text = link[0]
- link_body = link[1]
- skip_link = any(["mailto" in link_body, "|" in link_body])
- if skip_link:
- continue
- preamble = f"\t{link_text}, {link_body}"
- result = perform_request(link_body, rst_f, preamble, exceptions_set, logger)
- if result == ResultEnum.EXCEPTION:
- exceptions_granted += 1
- links_granted_exceptions[rst_f][link_text] = link_body
- elif result == ResultEnum.ERROR:
- total_links_in_error += 1
- errored_links[rst_f][link_text] = link_body
+class LinkListEnum(IntEnum):
+ ERROR = 0
+ EXCEPTION = 1
+
+
+class NoFilesToCheckError(Exception):
+ pass
+
+
+class WebServerNotStartedError(Exception):
+ pass
+
+
+@dataclass
+class ResponseInfo:
+ succeeded: bool = False
+ status_code: int = 0
+ msg: str = ""
+
+
+class LinkChecker:
+ def __init__(self, exceptions_file: Optional[str] = None, worker_name: str = "links_logger"):
+ self.rtd_source_dir: Path = Path(f"{self._get_repo_root()}/docs-rtd/source")
+ self.rtd_build_dir: Path = Path(f"{self._get_repo_root()}/docs-rtd/build/html")
+ self.exceptions: set[str] = self._load_exceptions(exceptions_file) if exceptions_file else set()
+ self.session: requests.Session = self._create_session()
+ self.server_process: Optional[subprocess.Popen] = None
+
+ self.total_checked = 0
+ self.total_ok = 0
+ self.total_errors = 0
+ self.total_exceptions = 0
+ self.error_links: Dict[str, List[Tuple[str, str]]] = defaultdict(list)
+ self.exception_links: Dict[str, List[Tuple[str, str]]] = defaultdict(list)
+
+ # Register cleanup
+ atexit.register(self._cleanup)
+
+ def _get_repo_root(self) -> str:
+ try:
+ result = subprocess.run(
+ ["git", "rev-parse", "--show-toplevel"],
+ capture_output=True,
+ text=True,
+ cwd=os.path.dirname(__file__),
+ timeout=5,
+ check=True,
+ )
+ return result.stdout.strip()
+ except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
+ current_dir = Path(__file__).parent.parent.parent
+ return str(current_dir.resolve())
+
+ def _load_exceptions(self, exceptions_file: str) -> Set[str]:
+ exceptions = set()
+ try:
+ with open(exceptions_file, "r", encoding="utf-8") as f:
+ for line in f:
+ line = line.strip()
+ if line and not line.startswith("#"):
+ # Extract URL from "URL: filepath" format
+ # Split on the last ": " to handle URLs with colons (https://)
+ if ": " in line:
+ url = line.rsplit(": ", 1)[0].strip()
+ if url:
+ exceptions.add(url)
+ logger.info(f"Loaded {len(exceptions)} link exceptions")
+ except FileNotFoundError:
+ logger.exception(f"Exceptions file not found: {exceptions_file}")
+ raise
+ except Exception as e:
+ logger.exception(f"Error loading exceptions: {e}")
+ raise
+
+ return exceptions
+
+ def _create_session(self) -> requests.Session:
+ session = requests.Session()
+ retry_strategy = Retry(
+ total=MAX_RETRIES,
+ backoff_factor=BACKOFF_FACTOR,
+ status_forcelist=[429, 500, 502, 503, 504],
+ allowed_methods=["HEAD", "GET", "OPTIONS"],
+ )
+
+ adapter = HTTPAdapter(max_retries=retry_strategy, pool_connections=10, pool_maxsize=10)
+ session.mount("http://", adapter)
+ session.mount("https://", adapter)
+ session.headers.update(
+ {
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
+ "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
+ "Accept-Language": "en-US,en;q=0.5",
+ "Accept-Encoding": "gzip, deflate",
+ "Connection": "keep-alive",
+ }
+ )
+ return session
+
+ def _cleanup(self):
+ if self.session:
+ self.session.close()
+ self._stop_server()
+
+ def find_rst_files(self, file_patterns: List[str]) -> List[str]:
+ if not self.rtd_source_dir.exists():
+ raise FileNotFoundError(f"Documentation source directory not found: {self.rtd_source_dir}")
+
+ rst_file_paths = [str(f) for f in list(self.rtd_source_dir.glob("**/*.rst"))]
+ if file_patterns:
+ filtered_files = []
+ for pattern in file_patterns:
+ filtered_files.extend([f for f in rst_file_paths if pattern in f])
+ rst_file_paths = list(set(filtered_files)) # Remove duplicates
+
+ logger.info(f"Found {len(rst_file_paths)} RST files to check")
+ return rst_file_paths
+
+ def extract_links_from_file(self, rst_file: str) -> List[Tuple[str, str]]:
+ content = None
+ try:
+ with open(rst_file, "r", encoding="utf-8") as f:
+ content = f.read()
+ except Exception as e:
+ logger.exception(f"Error reading file {rst_file}: {e}")
+ raise
+
+ # Regex pattern to match RST links: `link text `__
+ # Avoiding double backticks to prevent matching code blocks
+ pattern = r"(?]+)>`__"
+
+ links: list[tuple[str, str]] = []
+ for match in re.finditer(pattern, content, re.DOTALL):
+ link_text = match.group(1).strip()
+ link_url = match.group(2).strip()
+ if any(skip in link_url for skip in ["mailto:", "|"]):
+ continue
+ links.append((link_text, link_url))
+ return links
+
+ def _start_local_server(self) -> None:
+ if not self.rtd_build_dir.exists():
+ raise FileNotFoundError(
+ f"Build directory not found: {self.rtd_build_dir}! "
+ "Please build the documentation first with 'make html' in docs-rtd/"
+ )
+
+ try:
+ self.server_process = subprocess.Popen(
+ [sys.executable, "-m", "http.server", "3000"],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ preexec_fn=os.setsid, # Create new process group
+ cwd=self.rtd_build_dir,
+ )
+ time.sleep(2)
+ try:
+ response = self.session.get(LOCAL_HOST, timeout=5)
+ if response.status_code == 200:
+ logger.info("Local HTTP server started successfully")
else:
- continue
- logger.info("")
- finally:
- link_server.send_signal(signal.SIGTERM)
- logger.info("Shutting down http server")
- logger.info(f"Total Exceptions Granted: {exceptions_granted}")
- display_links_dict(links_granted_exceptions, logger)
- logger.info(f"Total Links in Error: {total_links_in_error}")
- display_links_dict(errored_links, logger)
- link_server.wait()
-
-
-def get_link_text_and_link(link_match: Match[str]) -> List[str]:
- link_text = "".join([f"{word} " if word != "|" else "" for word in link_match.group(1).split()]).strip()
- link_body = "".join([word if word != "|" else "" for word in link_match.group(2).split()])
- link_text_link_body = [link_text, link_body]
- return link_text_link_body
-
-
-def process_file(rst_f: str, files_links_dict: Dict[str, List[List[str]]]) -> None:
- # Link looks like `Text that you would click on `__
- # Emphasized text is ``text``, so we don't want to be tricked by this.
- ignore_double_backtick = r"(?]+)>`__"
- link_pattern_regex = r"".join(
- [
- ignore_double_backtick,
- back_tick_that_starts_link,
- gets_link_text,
- gets_actual_link,
+ logger.error(f"Server responded with status {response.status_code}")
+ except Exception as e:
+ logger.exception(f"Server not responding: {e}")
+ raise WebServerNotStartedError("Local web server did not respond! Cannot check RTD doc links!")
+ except Exception as e:
+ logger.exception(f"Failed to start server: {e}")
+ raise WebServerNotStartedError("Failed to start local web server! Cannot check RTD doc links!")
+
+ def _stop_server(self) -> None:
+ if self.server_process:
+ try:
+ os.killpg(os.getpgid(self.server_process.pid), signal.SIGTERM)
+ self.server_process.wait(timeout=5)
+ logger.info("Local HTTP server stopped")
+ except (OSError, subprocess.TimeoutExpired):
+ try:
+ os.killpg(os.getpgid(self.server_process.pid), signal.SIGKILL)
+ self.server_process.wait(timeout=2)
+ except Exception:
+ pass
+
+ def _check_external_link(self, url: str) -> ResponseInfo:
+ try:
+ # For URLs with anchors, we need the content to validate the anchor
+ # so use GET request directly
+ response = None
+ if "#" in url:
+ response = self.session.get(url, timeout=REQUEST_TIMEOUT, allow_redirects=True)
+ else:
+ # Use HEAD request first for efficiency
+ response = self.session.head(url, timeout=REQUEST_TIMEOUT, allow_redirects=True)
+
+ # Some servers don't support HEAD, try GET if HEAD fails
+ if response.status_code in [405, 501]:
+ response = self.session.get(url, timeout=REQUEST_TIMEOUT, allow_redirects=True)
+
+ if "#" in url and response.status_code == 200:
+ return self._check_anchor_in_response(url, response)
+ return ResponseInfo(response.status_code == 200, response.status_code, "")
+ except Exception as e:
+ return ResponseInfo(
+ False, 0, f"Exception: {e.__class__.__name__} occurred during external link check: {str(e)}"
+ )
+
+ def _check_internal_link(self, rst_file: str, link_url: str) -> ResponseInfo:
+ try:
+ if "#" in link_url:
+ return self._check_anchor_link(rst_file, link_url)
+ else:
+ return self._check_internal_file_link(rst_file, link_url)
+
+ except Exception as e:
+ return ResponseInfo(
+ False, 0, f"Exception: {e.__class__.__name__} occurred during internal link check: {str(e)}"
+ )
+
+ def _check_anchor_link(self, rst_file: str, link_url: str) -> ResponseInfo:
+ SAME_PAGE_ANCHOR = "same-page anchor"
+ OTHER_PAGE_ANCHOR = "other-page anchor"
+
+ full_url, link_type = "", ""
+ if link_url.startswith("#"):
+ full_url = self._rst_path_to_html_url(rst_file) + link_url
+ link_type = SAME_PAGE_ANCHOR
+ else:
+ full_url = self._resolve_relative_link(rst_file, link_url)
+ link_type = OTHER_PAGE_ANCHOR
+
+ try:
+ response = self.session.get(full_url, timeout=REQUEST_TIMEOUT)
+ if response.status_code == 200:
+ return self._check_anchor_in_response(full_url, response)
+ return ResponseInfo(
+                False, response.status_code, f"Page not found when attempting to check anchor link: {full_url}!"
+ )
+ except Exception as e:
+ return ResponseInfo(
+ False, 0, f"Exception {e.__class__.__name__} occurred when checking {link_type}: {str(e)}"
+ )
+
+ def _check_internal_file_link(self, rst_file: str, link_url: str) -> ResponseInfo:
+ full_url = self._resolve_relative_link(rst_file, link_url)
+
+ try:
+ response = self.session.head(full_url, timeout=REQUEST_TIMEOUT)
+ return ResponseInfo(response.status_code == 200, response.status_code, "")
+ except Exception as e:
+ return ResponseInfo(
+ False, 0, f"Exception {e.__class__.__name__} occurred when checking for internal file: {str(e)}"
+ )
+
+ def _rst_path_to_html_url(self, rst_file: str) -> str:
+ rel_path = Path(rst_file).relative_to(self.rtd_source_dir)
+ html_path = rel_path.with_suffix(".html")
+ return f"{LOCAL_HOST}/{html_path}"
+
+ def _resolve_relative_link(self, rst_file: str, link_url: str) -> str:
+ relative_rst_file_path = Path(rst_file).relative_to(self.rtd_source_dir)
+ rst_file_dir_depth = len(relative_rst_file_path.parent.parts)
+ link_url_levels_up = len([part for part in Path(link_url).parts if part == ".."])
+ if link_url_levels_up > rst_file_dir_depth:
+ raise RuntimeError(f"Link: {link_url} tries to escape root dir!")
+
+ base_url = self._rst_path_to_html_url(rst_file)
+ return urljoin(base_url, link_url)
+
+ def _check_anchor_in_response(self, url: str, response: requests.Response) -> ResponseInfo:
+ _, fragment = url.split("#", 1)
+ fragment = unquote(fragment)
+
+ if urllib3.util.parse_url(url) == "github.com":
+ return self._check_github_line_numbers(fragment, response)
+
+ # Check for HTML anchors/sections
+ content = response.text.lower()
+ fragment_lower = fragment.lower()
+
+ # Common patterns for anchors in HTML - only look for actual anchor definitions
+ patterns = [
+ f'id="{fragment_lower}"',
+ f"id='{fragment_lower}'",
+ f'name="{fragment_lower}"',
+ f"name='{fragment_lower}'",
+ # Pattern for when fragment appears in content (like headings)
+ fragment_lower.replace("-", " ").replace("_", " "),
]
- )
- compiled_link_pattern = re.compile(link_pattern_regex, re.DOTALL)
-
- file_contents = ""
- with open(rst_f, "r", encoding="utf-8") as f:
- file_contents = f.read()
-
- for link_match in compiled_link_pattern.finditer(file_contents):
- files_links_dict[rst_f].append(get_link_text_and_link(link_match))
-
-
-def get_links_from_files(rst_files: List[str]) -> Dict[str, List[List[str]]]:
- files_links_dict: Dict[str, List[List[str]]] = defaultdict(list)
- for rst_f in rst_files:
- process_file(rst_f, files_links_dict)
- return files_links_dict
+ anchor_found = any(pattern in content for pattern in patterns)
+ if anchor_found:
+ return ResponseInfo(True, response.status_code, "")
+ return ResponseInfo(False, 404, f"Anchor '{fragment}' not found in link: {url}")
+
+ def _check_github_line_numbers(self, fragment: str, response: requests.Response) -> ResponseInfo:
+ line_match = re.search(r"L(\d+)(?:-L?(\d+))?$", fragment)
+ if not line_match:
+ return ResponseInfo(True, response.status_code, "") # Not a line number link
+
+ start_line = int(line_match.group(1))
+ end_line = int(line_match.group(2)) if line_match.group(2) else start_line
+ total_lines = len(response.text.splitlines())
+
+ valid_response = 1 <= start_line <= end_line <= total_lines
+ if valid_response:
+ return ResponseInfo(True, response.status_code, "")
+ return ResponseInfo(False, 404, f"Line range {start_line}-{end_line} not valid (file has {total_lines} lines)")
+
+ def _format_display_text(self, text: str, max_length: int = 150) -> str:
+ if not text.strip():
+ return "[empty link text]"
+
+ # Normalize whitespace - replace newlines, tabs, and multiple spaces with single spaces
+ normalized_text = re.sub(r"\s+", " ", text.strip())
+ return normalized_text if len(normalized_text) <= max_length else (normalized_text[: max_length - 3] + "...")
+
+ def _format_display_url(self, url: str, max_length: int = 150) -> str:
+ if len(url) <= max_length:
+ return url
+ prefix_len = max_length // 2 - 2
+ suffix_len = max_length - prefix_len - 3
+ return url[:prefix_len] + "..." + url[-suffix_len:]
+
+ def check_link(self, rst_file: str, link_text: str, link_url: str) -> ResultEnum:
+ self.total_checked += 1
+ display_text = self._format_display_text(link_text)
+
+ if link_url in self.exceptions:
+ self.total_exceptions += 1
+ self.exception_links[rst_file].append((link_text, link_url))
+ logger.warning(f" ⚠️ EXCEPTION | {display_text}")
+ logger.warning(f" | {link_url}")
+ return ResultEnum.EXCEPTION
+
+ resp_info: ResponseInfo
+ if link_url.startswith(("http://", "https://")):
+ resp_info = self._check_external_link(link_url)
+ else:
+ resp_info = self._check_internal_link(rst_file, link_url)
+
+ if resp_info.succeeded:
+ self.total_ok += 1
+ logger.info(f" ✅ OK ({resp_info.status_code:3d}) | {display_text}")
+ logger.info(f" | {link_url}")
+ return ResultEnum.OK
+
+ self.total_errors += 1
+ self.error_links[rst_file].append((link_text, link_url))
+ error_detail = f" - {resp_info.msg}" if resp_info.msg else ""
+ logger.error(f" ❌ ERROR ({resp_info.status_code:3d}) | {display_text}")
+ logger.error(f" | {link_url}{error_detail}")
+ return ResultEnum.ERROR
+
+ def check_files(self, rst_files: List[str]) -> None:
+ if not rst_files:
+ raise NoFilesToCheckError("No RST files to check!")
+
+ self._start_local_server()
+
+ logger.info(f"Checking links in {len(rst_files)} files...")
+ for rst_file in rst_files:
+ logger.info(f"\nChecking file: {rst_file}")
+ links = self.extract_links_from_file(rst_file)
+ if not links:
+ logger.warning(" No links found")
+ continue
+ logger.info(f" Found {len(links)} links")
+ for link_text, link_url in links:
+ self.check_link(rst_file, link_text, link_url)
+
+ def _print_error_or_exception_links(
+ self, to_print: Dict[str, List[Tuple[str, str]]], list_enum: LinkListEnum
+ ) -> None:
+ logger.error(f"\n{'-' * 80}")
+ logger.error(f"DETAILS for ({self.total_errors} {ERROR} links):")
+ logger.error(f"{'-' * 80}")
+ for rst_file, links in to_print.items():
+ short_file = Path(rst_file).relative_to(self.rtd_source_dir)
+ logger.error(f"\n📄 {short_file}:")
+ for link_text, link_url in links:
+ display_text = self._format_display_text(link_text, 50)
+ logger.error(f" ❌ {display_text}")
+ logger.error(f" {link_url}")
+
+ def print_summary(self) -> None:
+ logger.info("\n" + "=" * 80)
+ logger.info("LINK CHECK SUMMARY")
+ logger.info("=" * 80)
+ logger.info(f"Total links checked: {self.total_checked:4d}")
+ logger.info(f"✅ Valid links: {self.total_ok:4d}")
+ logger.info(f"❌ Broken links: {self.total_errors:4d}")
+ logger.info(f"⚠️ Exception links: {self.total_exceptions:4d}")
+ if self.error_links:
+ self._print_error_or_exception_links(self.error_links, LinkListEnum.ERROR)
-def navigate_to_rtd_sources_dir() -> None:
- rtd_sources_dir = "docs-rtd/source"
- os.chdir(f"{REPO_ROOT_DIR}/{rtd_sources_dir}")
-
-
-def gather_file_names(files_to_check: List[str]) -> List[str]:
- navigate_to_rtd_sources_dir()
- rst_file_ext = ".rst"
- rst_files = glob.glob(os.getcwd() + f"/**/*{rst_file_ext}", recursive=True)
- if not files_to_check:
- return rst_files
- return [rst_file for rst_file in rst_files for f in files_to_check if f in rst_file]
-
-
-def configure_logger(worker_name: str) -> logging.Logger:
- worker_id: str = worker_name
- worker_id = f"worker_{worker_id}"
- logger = logging.getLogger(worker_id)
- if not logger.handlers:
- logger.setLevel(logging.INFO)
-
- logs_dir_name = "links_logs"
- os.makedirs(name=logs_dir_name, exist_ok=True)
- fh = logging.FileHandler(f"{logs_dir_name}/{worker_id}.log", mode="w")
- formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
- fh.setFormatter(formatter)
- logger.addHandler(fh)
+def main():
+ parser = argparse.ArgumentParser(
+ description="RST files link checker", formatter_class=argparse.RawDescriptionHelpFormatter
+ )
+ parser.add_argument("-e", "--exceptions_file_path", type=str, help="Path to file containing link exceptions")
+ parser.add_argument("-f", "--check_file_paths", nargs="+", help="Specific file patterns to check (optional)")
+ parser.add_argument(
+ "-w",
+ "--worker_name",
+ type=str,
+ default="links_logger",
+ help="Name for the worker/logger (default: links_logger)",
+ )
- return logger
+ args = parser.parse_args()
+ checker = LinkChecker(exceptions_file=args.exceptions_file_path, worker_name=args.worker_name)
+ rst_files = checker.find_rst_files(args.check_file_paths)
+ assert rst_files, "No RST files found to check!"
-def main():
- p = argparse.ArgumentParser()
- p.add_argument("-e", "--exceptions_file_path", type=str, required=False, default="")
- p.add_argument("-f", "--check_file_paths", nargs="+", required=False, default="")
- p.add_argument("-w", "--worker_name", type=str, required=False, default="links_logger")
- args: argparse.Namespace = p.parse_args()
- logger = configure_logger(args.worker_name)
- if args.exceptions_file_path:
- logger.info(f"Exceptions path: {args.exceptions_file_path}")
- rst_files = gather_file_names(args.check_file_paths)
- files_links_dict = get_links_from_files(rst_files)
- check_links(files_links_dict, args.exceptions_file_path, logger)
+ checker.check_files(rst_files)
+ checker.print_summary()
+ assert checker.total_errors == 0, "Broken links found!"
if __name__ == "__main__":
diff --git a/vitis/ERRATA.md b/vitis/ERRATA.md
index 593ae5d0a..4cd20b377 100644
--- a/vitis/ERRATA.md
+++ b/vitis/ERRATA.md
@@ -22,9 +22,7 @@ The following examples are not currently supported by AMD:
The following examples are currently under development by AMD:
-* `rtl_kernels/rtl_streaming_free_running_k2k`
-* `rtl_kernels/rtl_streaming_k2k_mm`
-* `rtl_kernels/rtl_vadd_hw_debug`
+* `performance/axi_burst_performance`
## Hardware Emulation
diff --git a/vitis/README.md b/vitis/README.md
index 0e07e40eb..3988fe664 100644
--- a/vitis/README.md
+++ b/vitis/README.md
@@ -94,6 +94,8 @@ This section will walk you through the emulation process. The goal of hardware e
To perform hardware emulation for the `hello_world` example, run the following commands:
+`NOTE: All paths shown below also apply to Rocky Linux customers after substituting 'rocky' for 'ubuntu'`
+
``` bash
cd $AWS_FPGA_REPO_DIR/vitis/examples/vitis_examples/hello_world
ll
diff --git a/vitis/supported_oses.txt b/vitis/supported_oses.txt
index b630ae7da..3b43d7075 100644
--- a/vitis/supported_oses.txt
+++ b/vitis/supported_oses.txt
@@ -1,2 +1,3 @@
Ubuntu 20.04
Ubuntu 24.04
+Rocky Linux 8.10
diff --git a/vitis_setup.sh b/vitis_setup.sh
index b04a4bb48..fd9a50241 100644
--- a/vitis_setup.sh
+++ b/vitis_setup.sh
@@ -73,6 +73,8 @@ valid_tool_versions["2025.1"]="true"
declare -A valid_os
valid_os["Ubuntu"]="true"
+valid_os["RockyLinux"]="true"
+
function check_os_and_tool_ver {
if [[ "${valid_tool_versions[${VITIS_TOOL_VER}]}" != "true" ]]; then
@@ -212,7 +214,7 @@ function get_xsa_file {
sudo rm -f ${destination_dir}/*${missing_xsa_file_extension}.sha256
# Grab the new XSA
- if ! sudo wget "${vitis_xsa_s3_url}/${missing_xsa}" -O "${destination_dir}/${missing_xsa}" -q; then
+ if ! sudo wget "${vitis_xsa_s3_url}/${missing_xsa}" -O "${destination_dir}/${missing_xsa}" -q; then
err_msg "Download of Vitis XSA file ${missing_xsa} failed!"
return 1
fi
@@ -301,6 +303,7 @@ function setup_xsa {
}
+
function xrt_install_check {
xrt_path=/opt/xilinx/xrt
@@ -322,10 +325,16 @@ function xrt_install_check {
declare -A xrt_install_map
xrt_install_map["Ubuntu_pkg_ext"]="deb"
-xrt_install_map["Ubuntu_install_cmd"]="sudo dpkg -i"
+xrt_install_map["Ubuntu_install_cmd"]="sudo apt install -y"
xrt_install_map["Ubuntu_xrt_pkg_prefix"]="amd64-xrt"
xrt_install_map["Ubuntu_aws_pkg_prefix"]="amd64-aws"
+xrt_install_map["RockyLinux_pkg_ext"]="rpm"
+xrt_install_map["RockyLinux_install_cmd"]="sudo dnf install -y"
+xrt_install_map["RockyLinux_xrt_pkg_prefix"]="x86_64-xrt"
+xrt_install_map["RockyLinux_aws_pkg_prefix"]="x86_64-aws"
+
+
function build_and_install_xrt {
if ! sudo -E "./${xrt_deps_script_path}"; then
err_msg "Couldn't install XRT dependencies!"
@@ -333,7 +342,7 @@ function build_and_install_xrt {
fi
if ! ./$xrt_build_script_run; then
- err_msg "Couldn't build XRT dpkgs!"
+ err_msg "Couldn't build XRT packages!"
cd $AWS_FPGA_REPO_DIR && return 1
fi
@@ -347,13 +356,13 @@ function build_and_install_xrt {
# Base XRT install first
for file in $(ls *${xrt_pkg_prefix}.${install_pkg_ext}); do
info_msg "Installing $file"
- $install_cmd $file
+ $install_cmd ./$file
done
# AWS extension install
for file in $(ls *${aws_pkg_prefix}.${install_pkg_ext}); do
info_msg "Installing $file"
- $install_cmd $file
+ $install_cmd ./$file
done
if ! source $xrt_setup_script_path; then
@@ -395,7 +404,7 @@ function set_up_xrt_repo {
declare -A commit_hash_map
commit_hash_map["2024.1"]="a0729c69dba1ec05856d3008fbf9994a665f276c"
commit_hash_map["2024.2"]="d8cf77af92e92324b038d787773b78fb7a44f812"
-commit_hash_map["2025.1"]="fe6f99daac071f3973b20ce6d0d5457df76ada34"
+commit_hash_map["2025.1"]="db8c37afa751589e72f0c47436bf3daca444d45d"
function set_up_xrt_vars {
@@ -405,7 +414,7 @@ function set_up_xrt_vars {
fi
xrt_path="/opt/xilinx/xrt"
- xrt_dpkg_install_path="${xrt_path}/XRT/build/Release"
+ xrt_pkg_install_path="${xrt_path}/XRT/build/Release"
xrt_repo_name="XRT"
xrt_repo_url="https://github.com/Xilinx/XRT.git"