From 33b1923c3ef493389b449c172c12984c0081d057 Mon Sep 17 00:00:00 2001
From: Hyacinthe Cartiaux
Date: Thu, 12 Jun 2025 16:43:28 +0200
Subject: [PATCH 1/9] Update markdown_include to 0.8.1
---
docs/environment/spack.md | 288 ++++++++++++++++++++++++++++++++++++++
mkdocs.yml | 1 +
2 files changed, 289 insertions(+)
create mode 100644 docs/environment/spack.md
diff --git a/docs/environment/spack.md b/docs/environment/spack.md
new file mode 100644
index 000000000..0e0f79d08
--- /dev/null
+++ b/docs/environment/spack.md
@@ -0,0 +1,288 @@
+# The Spack: A Package Manager on the UL HPC Platform
+
+[
](https://spack.readthedocs.io/en/latest/#)
+
+
+
+!!! note
+ This guide is also applicable to other HPC clusters where users need to manage components such as MPI libraries, compilers, and other software through the `module` system.
+
+
+## Introduction to Spack
+
+
+A brief introduction to Spack will be added here.
+
+## Setting up Spack
+
+### Connection to a compute node
+
+
+```{.sh .copy}
+si -N 1 -n 16 -c 1 -t 0-02:00:00 # on iris: -C broadwell or -C skylake
+```
+
+??? note "Allocation Details"
+
+ `si` is a shell function that wraps the `salloc` command to simplify interactive Slurm job allocation.
+ It stands for:
+
+ ```bash
+ salloc -p interactive --qos debug -C batch ${options}
+ ```
+ - `${options}`: any additional arguments passed to `si` (e.g., `-N`, `-n`, `-c`, `-t`, etc.)
+
+ ```bash
+ si -N 1 -n 16 -c 1 -t 0-02:00:00
+ ```
+
+ This allocates:
+
+ - 1 node (`-N 1`)
+ - 16 MPI tasks (`-n 16`)
+ - 1 CPU per task (`-c 1`)
+ - for a wall time of 2 hours (`-t 0-02:00:00`)
+
+ !!! info "Iris Cluster"
+
+ On the **Iris** cluster,
+
+ - Use `-C broadwell` or `-C skylake`
+
+ **Examples:**
+ ```bash
+ si -N 1 -n 16 -c 1 -t 0-02:00:00 -C broadwell
+ ```
+
+
+### Clone & Setup Spack
+
+Clone and setup spack in `$HOME` - it has better much better performance for
+small files than `$SCRATCH`
+
+``` { .sh .copy }
+cd $HOME
+git clone --depth=2 https://github.com/spack/spack.git
+cd spack
+```
+To make Spack available in your shell session, source its environment setup script:
+
+``` { .sh .copy }
+source $HOME/spack/share/spack/setup-env.sh
+```
+You may want to add this line to the file .`bashrc`for convenience.
+
+### Define System-Provided Packages
+To avoid rebuilding packages already available as modules on your cluster (e.g., compilers, MPI, libraries), create a packages.yaml file under: `$HOME/.spack/packages.yaml`
+
+``` { .sh .copy }
+touch $HOME/.spack/packages.yaml
+```
+
+with the following contents:
+
+``` { .sh .copy }
+ packages:
+ gcc:
+ externals:
+ - spec: gcc@13.2.0+binutils languages:='c,c++,fortran'
+ modules:
+ - compiler/GCC/13.2.0
+ extra_attributes:
+ compilers:
+ c: /opt/apps/easybuild/systems/aion/rhel810-20250405/2023b/epyc/software/GCCcore/13.2.0/bin/gcc
+ cxx: /opt/apps/easybuild/systems/aion/rhel810-20250405/2023b/epyc/software/GCCcore/13.2.0/bin/g++
+ fortran: /opt/apps/easybuild/systems/aion/rhel810-20250405/2023b/epyc/software/GCCcore/13.2.0/bin/gfortran
+ buildable: false
+ binutils:
+ externals:
+ - spec: binutils@2.40
+ modules:
+ - tools/binutils/2.40-GCCcore-13.2.0
+ buildable: false
+ libevent:
+ externals:
+ - spec: libevent@2.1.12
+ modules:
+ - lib/libevent/2.1.12-GCCcore-13.2.0
+ buildable: false
+ libfabric:
+ externals:
+ - spec: libfabric@1.19.0
+ modules:
+ - lib/libfabric/1.19.0-GCCcore-13.2.0
+ buildable: false
+ libpciaccess:
+ externals:
+ - spec: libpciaccess@0.17
+ modules:
+ - system/libpciaccess/0.17-GCCcore-13.2.0
+ buildable: false
+ libxml2:
+ externals:
+ - spec: libxml2@2.11.5
+ modules:
+ - lib/libxml2/2.11.5-GCCcore-13.2.0
+ buildable: false
+ hwloc:
+ externals:
+ - spec: hwloc@2.9.2+libxml2
+ modules:
+ - system/hwloc/2.9.2-GCCcore-13.2.0
+ buildable: false
+ mpi:
+ buildable: false
+ munge:
+ externals:
+ - spec: munge@0.5.13
+ prefix: /usr
+ buildable: false
+ numactl:
+ externals:
+ - spec: numactl@2.0.16
+ modules:
+ - tools/numactl/2.0.16-GCCcore-13.2.0
+ buildable: false
+ openmpi:
+ variants: fabrics=ofi,ucx schedulers=slurm
+ externals:
+ - spec: openmpi@4.1.6
+ modules:
+ - mpi/OpenMPI/4.1.6-GCC-13.2.0
+ buildable: false
+ pmix:
+ externals:
+ - spec: pmix@4.2.6
+ modules:
+ - lib/PMIx/4.2.6-GCCcore-13.2.0
+ buildable: false
+ slurm:
+ externals:
+ - spec: slurm@23.11.10 sysconfdir=/etc/slurm
+ prefix: /usr
+ buildable: false
+ ucx:
+ externals:
+ - spec: ucx@1.15.0
+ modules:
+ - lib/UCX/1.15.0-GCCcore-13.2.0
+ buildable: false
+ zlib:
+ externals:
+ - spec: zlib@1.2.13
+ modules:
+ - lib/zlib/1.2.13-GCCcore-13.2.0
+ buildable: false
+
+```
+This tells Spack to use the system available GCC, binutils and OpenMPI with the native fabrics.
+
+## Building FEniCS
+
+Create an environment and install FEniCS
+``` { .sh .copy }
+cd ~
+spack env create -d fenicsx-main-20230126/
+spack env activate fenicsx-main-20230126/
+spack add py-fenics-dolfinx@main fenics-dolfinx+adios2 adios2+python petsc+mumps
+# Change @main to e.g. @0.7.2 in the above if you want a fixed version.
+spack concretize
+spack install -j16
+```
+or the same directly in `spack.yaml` in `$SPACK_ENV`
+
+``` { .sh .copy }
+spack:
+ # add package specs to the `specs` list
+ specs:
+ - py-fenics-dolfinx@main
+ - fenics-dolfinx@main+adios2
+ - petsc+mumps
+ - adios2+python
+ view: true
+ concretizer:
+ unify: true
+```
+The following are also commonly used in FEniCS scripts and may be useful
+
+``` { .sh .copy }
+spack add gmsh+opencascade py-numba py-scipy py-matplotlib
+```
+It is possible to build a specific version (git ref) of DOLFINx. Note that the hash must be the full hash. It is best to specify appropriate git refs on all components.
+
+``` { .sh .copy }
+# This is a Spack Environment file.
+#
+# It describes a set of packages to be installed, along with
+# configuration settings.
+spack:
+ # add package specs to the `specs` list
+ specs:
+ - fenics-dolfinx@git.4f575964c70efd02dca92f2cf10c125071b17e4d=main+adios2
+ - py-fenics-dolfinx@git.4f575964c70efd02dca92f2cf10c125071b17e4d=main
+
+ - py-fenics-basix@git.2e2a7048ea5f4255c22af18af3b828036f1c8b50=main
+ - fenics-basix@git.2e2a7048ea5f4255c22af18af3b828036f1c8b50=main
+
+ - py-fenics-ufl@git.b15d8d3fdfea5ad6fe78531ec4ce6059cafeaa89=main
+
+ - py-fenics-ffcx@git.7bc8be738997e7ce68ef0f406eab63c00d467092=main
+
+ - fenics-ufcx@git.7bc8be738997e7ce68ef0f406eab63c00d467092=main
+
+ - petsc+mumps
+ - adios2+python
+ view: true
+ concretizer:
+ unify: true
+```
+
+It is also possible to build only the C++ layer using
+
+
+``` { .sh .copy }
+spack add fenics-dolfinx@main+adios2 py-fenics-ffcx@main petsc+mumps
+```
+To rebuild FEniCSx from main branches inside an existing environment
+
+
+``` { .sh .copy }
+spack install --overwrite -j16 fenics-basix py-fenics-basix py-fenics-ffcx fenics-ufcx py-fenics-ufl fenics-dolfinx py-fenics-dolfinx
+```
+
+
+## Testing the build
+
+Quickly test the build with
+``` { .sh .copy }
+srun python -c "from mpi4py import MPI; import dolfinx"
+```
+
+## Using the build
+
+See the uni.lu documentation for full details - using the environment should be as
+simple as adding the following where `...` is the name/folder of your environment.
+
+``` { .sh .copy }
+#!/bin/bash -l
+source $HOME/spack/share/spack/setup-env.sh
+spack env activate ...
+```
+
+## Known issues
+
+Workaround for broken Python module find for gmsh on uni.lu cluster
+
+``` { .sh .copy }
+
+export PYTHONPATH=$SPACK_ENV/.spack-env/view/lib64/:$PYTHONPATH
+
+```
+Workaround for broken Python module find for adios2 (seems broken in Spack)
+
+``` { .sh .copy }
+
+export PYTHONPATH=$(find $SPACK_ENV/.spack-env -type d -name 'site-packages' | grep venv):$PYTHONPATH
+
+```
\ No newline at end of file
diff --git a/mkdocs.yml b/mkdocs.yml
index f2c736140..16e287976 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -68,6 +68,7 @@ nav:
- Easybuild: 'environment/easybuild.md'
- Containers: 'environment/containers.md'
- Conda: 'environment/conda.md'
+ - Spack: 'environment/spack.md'
###########
- Policies:
- Acceptable Use Policy (AUP): 'policies/aup.md'
From f639f09a6c152f363ed78645642753c7c40199e9 Mon Sep 17 00:00:00 2001
From: Md Jahid Hassan
Date: Sat, 19 Jul 2025 01:30:59 +0200
Subject: [PATCH 2/9] added spack section + primary docs( for feedback )
---
docs/environment/spack.md | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/docs/environment/spack.md b/docs/environment/spack.md
index 0e0f79d08..931e8c156 100644
--- a/docs/environment/spack.md
+++ b/docs/environment/spack.md
@@ -1,12 +1,7 @@
-# The Spack: A Package Manager on the UL HPC Platform
+# Spack: A Package Manager on the UL HPC Platform
[
](https://spack.readthedocs.io/en/latest/#)
-
-
-!!! note
- This guide is also applicable to other HPC clusters where users need to manage components such as MPI libraries, compilers, and other software through the `module` system.
## Introduction to Spack
@@ -16,6 +11,10 @@ A brief introduction to Spack will be added here.
## Setting up Spack
+!!! note
+ The guide is also applicable to other HPC clusters where users need to manage components such as MPI libraries, compilers, and other software through the `module` system.
+
+
### Connection to a compute node
@@ -71,15 +70,17 @@ To make Spack available in your shell session, source its environment setup scri
``` { .sh .copy }
source $HOME/spack/share/spack/setup-env.sh
```
-You may want to add this line to the file .`bashrc`for convenience.
+For convenience, this line can be added to the .`bashrc` file to make Spack automatically available in every new shell session.
### Define System-Provided Packages
-To avoid rebuilding packages already available as modules on your cluster (e.g., compilers, MPI, libraries), create a packages.yaml file under: `$HOME/.spack/packages.yaml`
+
+`packages.yaml` A spack configuration file used to tell Spack what tools and versions already exist on the cluster, so Spack can use those instead of building everything again.Create a packages.yaml file under: `$HOME/.spack/packages.yaml`
``` { .sh .copy }
touch $HOME/.spack/packages.yaml
```
+
with the following contents:
``` { .sh .copy }
From 9e49f3b79f771f294210168e2c58a33e9aaae493 Mon Sep 17 00:00:00 2001
From: Md Jahid Hassan
Date: Mon, 21 Jul 2025 21:18:33 +0200
Subject: [PATCH 3/9] Added initial Spack overview and link important docs
---
docs/environment/spack.md | 181 ++++++++++++++++++++++++++++++++++++--
mkdocs.yml | 4 +-
2 files changed, 177 insertions(+), 8 deletions(-)
diff --git a/docs/environment/spack.md b/docs/environment/spack.md
index 931e8c156..ee56eaff0 100644
--- a/docs/environment/spack.md
+++ b/docs/environment/spack.md
@@ -6,17 +6,102 @@
## Introduction to Spack
+ Spack is an open-source package manager designed for installing, building, and managing scientific software across a wide range of systems—from personal laptops to the world’s largest supercomputers. It supports multiple versions, compilers, and configurations of packages, all coexisting without conflict. Spack is especially popular in high-performance computing (HPC) environments due to its non-destructive installations, flexible dependency resolution, and robust support for complex software stacks.
-A brief introduction to Spack will be added here.
+### Why spack ?
+
+Spack simplifies the software installation process in scientific computing. It uses a concise spec syntax to let users define versions, compilers, build options, and dependencies in a readable format. Package recipes in Spack are written in pure Python, allowing contributors to manage many builds with a single file. With over 8,500 official packages available, Spack offers great flexibility—and it's not limited to pre-existing packages. Users can create custom ( e.g. package.py ) files for software not yet available in the Spack pre-defined packages. [1]
+
+### Key Features of Spack
+
+
+- Multiple Versions & Configurations: Easily install multiple versions of the same software with different compilers or build options.
+
+- Custom Dependencies: Flexibly control dependencies, even choosing between alternative implementations.
+
+- Non-destructive Installs: New installations do not interfere with existing packages.
+
+- Package Coexistence: Different builds of the same software can live side by side.
+
+ - Easy Package Creation: Write simple ( e.g.
package.py ) files in Python to add new software to Spack.
+
+ - Virtual Environments: Create isolated environments for experiments or projects.
+
+
+
+
+### Key Resources
+
+Below are essential resources including its official documentation, usage guides, packaging tutorials, and community links:
+
+=== "Important Resources"
+
+ - **Official Documentation**
+
+ - **Spack Tutorial**
+
+ - **Package Index**
+
+ - **Package Fundamentals**
+
+ - **Spack Environments**
+
+ - **Spack Build Cache**
+
+
+=== "Additional Resources"
+
+ - **Spack Configuration file **
+
+ - **Advance Packaging Guide**
+
+ - **Concretization Settings**
+
+
+ - **Spack Community & Discussions**
+
+???+ tabs "Spack Resources"
+
+ === "Important Resources"
+
+ - **Official Documentation**
+
+ - **Spack Tutorial**
+
+
+ - **Package Fundamentals**
+
+ - **Package Index**
+
+ - **Spack Environments**
+
+
+ - **Spack Build Cache**
+
+
+ === "Additional Resources"
+
+ - **Spack Configuration file **
+
+ - **Advance Packaging Guide**
+
+ - **Concretization Settings**
+
+ - **Spack Community & Discussions**
+
+
+
+
+
+## Setting up Spack.
-## Setting up Spack
!!! note
The guide is also applicable to other HPC clusters where users need to manage components such as MPI libraries, compilers, and other software through the `module` system.
### Connection to a compute node
-
+For all tests and compilation with Spack, it is essential to run on a **compute node**, not in the login/access node. Here's an example of how to allocate an [interactive session](../jobs/interactive.md) in **Aion cluster**.
```{.sh .copy}
si -N 1 -n 16 -c 1 -t 0-02:00:00 # on iris: -C broadwell or -C skylake
@@ -57,24 +142,106 @@ si -N 1 -n 16 -c 1 -t 0-02:00:00 # on iris: -C broadwell or -C skylake
### Clone & Setup Spack
-Clone and setup spack in `$HOME` - it has better much better performance for
-small files than `$SCRATCH`
+Cloning and setting up Spack in `$HOME` directory is recommended, as it provides significantly better performance for handling small files compared to `$SCRATCH`.
+To clone the Spack Repository:
``` { .sh .copy }
cd $HOME
git clone --depth=2 https://github.com/spack/spack.git
cd spack
```
-To make Spack available in your shell session, source its environment setup script:
+To make Spack available in the shell session, source its environment setup script:
``` { .sh .copy }
source $HOME/spack/share/spack/setup-env.sh
```
For convenience, this line can be added to the .`bashrc` file to make Spack automatically available in every new shell session.
+??? note "Test some basic functionality"
+
+ Once Spack is sourced, the installation can be verified and basic functionality explored using the following commands:
+
+ **Check Spack Version:**
+ ```sh
+ # Displays the currently installed version of Spack
+ spack --version
+ ```
+ **Search for Available Packages:**
+ ```sh
+ # Lists all available packages in Spack
+ spack list
+ ```
+
+ **Search for a specific one:**
+ ```sh
+ # Shows all packages whose names contain "cmake"
+ spack list cmake
+ ```
+
+ **Find Installed Packages:**
+ ```sh
+ # Lists all currently installed packages
+ spack find
+ ```
+ !!! note
+
+ If Spack was just installed, this list will likely be empty. Installed packages will appear here after the first successful build.
+
+ For more details :
+
+ - **Spack Basic Tutorial.**
+
+### Useful Spack Commands.
+
+
+The following tables summarizes the basic commands for managing software packages with Spack, from searching and installation to managing the software environment.
+
+| Spack Command | Description |
+|------------------------------|--------------------------------------------------------------|
+|`spack list`| Lists all available packages. |
+|`spack list ` | Searches for packages matching the name or keyword.|
+|` spack info ` | displays detailed information about that package|
+| `spack install ` | Installs a new package on the cluster. |
+| `spack uninstall ` | Removes an installed package from the cluster. |
+| `spack load ` | Makes a package ready for use in the current session. |
+| `spack unload ` | Removes a package from the current session's environment. |
+| `spack versions ` | Shows all available versions of a package for installation on the cluster. |
+| `spack help` | Displays general help and available subcommands. |
+| `spack help ` | Shows help for a specific subcommand. |
+|`spack config get`| Shows current Spack configuration settings |
+|`spack compiler find `| Detects and registers available compilers on the system |
+|`spack dependencies `| Lists dependencies of a package |
+
+??? info "Further Reference"
+ For a comprehensive list of commands and advanced usage options, refer to the official Spack documentation:Spack Command Index
+
+
+### Spack Environments
+
+A Spack environment is a powerful feature that allows users to manage sets of software packages, dependencies, and configurations in an isolated and reproducible way.
+
+Below is a list of commonly used Spack environment commands:
+
+| Spack Command | Description |
+|-----------------------------------|--------------------------------------------------------------|
+| `spack env status` | Displays the currently active Spack environment. |
+| `spack env list` | Lists all existing Spack environments. |
+| `spack env create ` | Creates a new Spack environment with the specified name. |
+| `spack env activate ` | Activates the specified Spack environment. |
+| `spack env deactivate` | Deactivates the currently active environment. |
+|`spack concretize`| Prepares a full dependency spec for an environment or package before install |
+| `spack install --add ` | Installs a package into the currently active environment. |
+
+
+??? info "Further Reference"
+ For more technical details, see the official Spack documentation:Spack Environments
+
+
+
### Define System-Provided Packages
-`packages.yaml` A spack configuration file used to tell Spack what tools and versions already exist on the cluster, so Spack can use those instead of building everything again.Create a packages.yaml file under: `$HOME/.spack/packages.yaml`
+Spack allows users to control how software is built using the`packages.yaml` configuration file. This enables users to choose preferred implementations for virtual dependencies (like MPI or BLAS/LAPACK), choose particular compilers, and even configure Spack to use external installed software that are already available on the system while avoiding the need to rebuild everything from source.[2]
Create a `packages.yaml` file under: `$HOME/.spack/packages.yaml`
+
``` { .sh .copy }
touch $HOME/.spack/packages.yaml
diff --git a/mkdocs.yml b/mkdocs.yml
index 16e287976..cf913382e 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -419,7 +419,9 @@ markdown_extensions:
social_url_shorthand: true
- pymdownx.snippets:
base_path: snippets
- - pymdownx.tabbed
+
+ - pymdownx.tabbed:
+ alternate_style: true
- pymdownx.smartsymbols
# code blocks with syntax highlighting, graphs
- pymdownx.superfences
From c3bbef3af1c6c3882dcea2a3051b1033dfab383e Mon Sep 17 00:00:00 2001
From: Md Jahid Hassan
Date: Thu, 31 Jul 2025 13:55:39 +0200
Subject: [PATCH 4/9] Update introductory section based on initial feedback
---
docs/environment/spack.md | 377 +++++++++++---------------------------
mkdocs.yml | 4 +-
2 files changed, 106 insertions(+), 275 deletions(-)
diff --git a/docs/environment/spack.md b/docs/environment/spack.md
index ee56eaff0..10813be4b 100644
--- a/docs/environment/spack.md
+++ b/docs/environment/spack.md
@@ -1,255 +1,139 @@
# Spack: A Package Manager on the UL HPC Platform
-[
](https://spack.readthedocs.io/en/latest/#)
-
-
+
## Introduction to Spack
- Spack is an open-source package manager designed for installing, building, and managing scientific software across a wide range of systems—from personal laptops to the world’s largest supercomputers. It supports multiple versions, compilers, and configurations of packages, all coexisting without conflict. Spack is especially popular in high-performance computing (HPC) environments due to its non-destructive installations, flexible dependency resolution, and robust support for complex software stacks.
-
-### Why spack ?
-
-Spack simplifies the software installation process in scientific computing. It uses a concise spec syntax to let users define versions, compilers, build options, and dependencies in a readable format. Package recipes in Spack are written in pure Python, allowing contributors to manage many builds with a single file. With over 8,500 official packages available, Spack offers great flexibility—and it's not limited to pre-existing packages. Users can create custom ( e.g. package.py ) files for software not yet available in the Spack pre-defined packages. [1]
-
-### Key Features of Spack
-
-
-- Multiple Versions & Configurations: Easily install multiple versions of the same software with different compilers or build options.
-
-- Custom Dependencies: Flexibly control dependencies, even choosing between alternative implementations.
-
-- Non-destructive Installs: New installations do not interfere with existing packages.
-
-- Package Coexistence: Different builds of the same software can live side by side.
-
- - Easy Package Creation: Write simple ( e.g.
package.py ) files in Python to add new software to Spack.
-
- - Virtual Environments: Create isolated environments for experiments or projects.
-
-
-
-
-### Key Resources
-
-Below are essential resources including its official documentation, usage guides, packaging tutorials, and community links:
-
-=== "Important Resources"
-
- - **Official Documentation**
-
- - **Spack Tutorial**
-
- - **Package Index**
-
- - **Package Fundamentals**
-
- - **Spack Environments**
-
- - **Spack Build Cache**
-
-
-=== "Additional Resources"
-
- - **Spack Configuration file **
-
- - **Advance Packaging Guide**
-
- - **Concretization Settings**
-
-
- - **Spack Community & Discussions**
-
-???+ tabs "Spack Resources"
-
- === "Important Resources"
+[Spack](https://spack.io/about/) is an open-source package manager designed for installing, building, and managing scientific software across a wide range of systems, from personal computers to supercomputers. It supports multiple versions, compilers, and configurations of software packages, all coexisting in a single system without conflict. Spack provides [over 8,500](https://packages.spack.io/) official software packages available since the `v1.0.0` release. Additionally, users can also create [custom packages](https://spack-tutorial.readthedocs.io/en/latest/tutorial_packaging.html) via `package.py` files for software not yet available in the Spack pre-defined [packages](https://spack.readthedocs.io/en/latest/package_fundamentals.html).
- - **Official Documentation**
- - **Spack Tutorial**
+Similar to [EasyBuild](https://docs.easybuild.io/), [Spack](https://spack.io/about/) is also available on the UL HPC platform for managing and installing scientific software in a more flexible and customizable way.
+At present, the UL HPC environment includes a pre-installed version of Spack, namely `devel/Spack/0.21.2`, which can be accessed via the module system.
+??? question "Why use automatic building tools like [Easybuild](https://docs.easybuild.io) or [Spack](https://spack.io/) on HPC environments?"
- - **Package Fundamentals**
+    While it may seem obvious to some of you, scientific software is often surprisingly difficult to build. Not all software packages rely on standard building tools like Autotools/Automake (the famous `configure; make; make install`) or CMake. Even with standard building tools, parsing the available options to ensure that the build matches the underlying hardware is time-consuming and error-prone. Furthermore, scientific software often contains hardcoded build parameters or the documentation on how to optimize the build is poorly maintained.
- - **Package Index**
+    Software build and installation frameworks like Easybuild or Spack allow reproducible builds, handle complex dependency trees, and automatically generate corresponding environment modulefiles (e.g., LMod) for easy usage. In the ULHPC platform, EasyBuild is the primary tool used to ensure optimized software builds. However, Spack is also available and can be valuable in more flexible or user-driven contexts. Some HPC sites use both [1].
- - **Spack Environments**
+ _Resources_
-
- - **Spack Build Cache**
-
-
- === "Additional Resources"
-
- - **Spack Configuration file **
+ 1. See this [talk](https://www.archer2.ac.uk/training/courses/200617-spack-easybuild/) by William Lucas at EPCC for instance.
- - **Advance Packaging Guide**
+??? question "When should one consider [Spack](https://spack.io/)?"
- - **Concretization Settings**
+ While EasyBuild is the primary and most integrated software management system on ULHPC, there are specific scenarios where users should consider using Spack.
- - **Spack Community & Discussions**
+    Spack is particularly suitable when users need greater flexibility and customization in building software. For example, if a user requires selecting specific compilers, enabling/disabling features like MPI or CUDA, or managing large and complex dependency chains easily, Spack offers a more configurable environment than EasyBuild. While EasyBuild is often favored by HPC system administrators for its robust and repeatable system-wide deployments, Spack is more focused on HPC developers and advanced users due to its easier-to-tweak nature.
+ Additionally, Spack supports user-level installations, allowing users to create isolated environments without administrative privileges, making it highly suitable for personal or experimental setups. Spack's environment definition files (e.g., spack.yaml) further enhance its utility by allowing users to precisely replicate the same software stack elsewhere.
+ In essence, Spack is the better choice when customization, portability, or broader package availability are required beyond what EasyBuild typically offers.
## Setting up Spack.
+For all tests and compilation with Spack, it is essential to run on a **compute node**, not in the login/access node.
-!!! note
- The guide is also applicable to other HPC clusters where users need to manage components such as MPI libraries, compilers, and other software through the `module` system.
-
-
-### Connection to a compute node
-For all tests and compilation with Spack, it is essential to run on a **compute node**, not in the login/access node. Here's an example of how to allocate an [interactive session](../jobs/interactive.md) in **Aion cluster**.
-
-```{.sh .copy}
-si -N 1 -n 16 -c 1 -t 0-02:00:00 # on iris: -C broadwell or -C skylake
-```
-
-??? note "Allocation Details"
-
- `si` is a shell function that wraps the `salloc` command to simplify interactive Slurm job allocation.
- It stands for:
+??? note "Connection to a compute node"
- ```bash
- salloc -p interactive --qos debug -C batch ${options}
- ```
- - `${options}`: any additional arguments passed to `si` (e.g., `-N`, `-n`, `-c`, `-t`, etc.)
+ Here's an example of how to allocate an [interactive session](../jobs/interactive.md) in **Aion cluster**.
- ```bash
+ ```{.sh .copy}
si -N 1 -n 16 -c 1 -t 0-02:00:00
```
- This allocates:
-
- - 1 node (`-N 1`)
- - 16 MPI tasks (`-n 16`)
- - 1 CPU per task (`-c 1`)
- - for a wall time of 2 hours (`-t 0-02:00:00`)
+    This command requests a [job](../jobs/submit.md) with 1 node, 16 MPI processes (`-n 16`), and 1 CPU core per process (`-c 1`). The `-n 16` option allows running up to 16 parallel processes, which can accelerate Spack builds. However, these values are only examples and are not mandatory. Users may adjust the resource allocation according to their requirements or omit certain options entirely for simpler use cases.
- !!! info "Iris Cluster"
-
- On the **Iris** cluster,
-
- - Use `-C broadwell` or `-C skylake`
-
- **Examples:**
- ```bash
- si -N 1 -n 16 -c 1 -t 0-02:00:00 -C broadwell
- ```
### Clone & Setup Spack
Cloning and setting up Spack in `$HOME` directory is recommended, as it provides significantly better performance for handling small files compared to `$SCRATCH`.
+
To clone the Spack Repository:
``` { .sh .copy }
cd $HOME
-git clone --depth=2 https://github.com/spack/spack.git
+git clone -c feature.manyFiles=true https://github.com/spack/spack.git
+```
+
+Cloning the Spack repository creates a directory named `spack`, and by default, it uses the `develop` branch. However, for improved stability, switching to the latest official [release](https://github.com/spack/spack/releases) is recommended. The current release tag at the time of writing is `v1.0.0`. To check out the most recent release `v1.0.0`:
+
+``` { .sh .copy }
cd spack
+git checkout v1.0.0
```
+
To make Spack available in the shell session, source its environment setup script:
``` { .sh .copy }
source $HOME/spack/share/spack/setup-env.sh
```
+
For convenience, this line can be added to the .`bashrc` file to make Spack automatically available in every new shell session.
-??? note "Test some basic functionality"
- Once Spack is sourced, the installation can be verified and basic functionality explored using the following commands:
- **Check Spack Version:**
- ```sh
- # Displays the currently installed version of Spack
- spack --version
- ```
- **Search for Available Packages:**
- ```sh
- # Lists all available packages in Spack
- spack list
- ```
+??? note "Verifying Spack installation"
- **Search for a specific one:**
- ```sh
- # Shows all packages whose names contain "cmake"
- spack list cmake
+    Once Spack is sourced, the following command should display the path to the Spack executable, confirming that the environment is correctly set up:
+ ``` { .sh .copy }
+ which spack
```
-
- **Find Installed Packages:**
- ```sh
- # Lists all currently installed packages
- spack find
- ```
- !!! note
-
- If Spack was just installed, this list will likely be empty. Installed packages will appear here after the first successful build.
+ !!! note "Expected output resembles:"
- For more details :
-
- - **Spack Basic Tutorial.**
-
-### Useful Spack Commands.
-
-
-The following tables summarizes the basic commands for managing software packages with Spack, from searching and installation to managing the software environment.
+ ``` { .sh }
+ spack ()
+ {
+ : this is a shell function from: /home/users//spack/share/spack/setup-env.sh;
+ : the real spack script is here: /home/users//spack/bin/spack;
+ _spack_shell_wrapper "$@";
+ return $?
+ }
+ ```
+ This confirms that Spack’s environment is correctly loaded and ready for use.
+
-| Spack Command | Description |
-|------------------------------|--------------------------------------------------------------|
-|`spack list`| Lists all available packages. |
-|`spack list ` | Searches for packages matching the name or keyword.|
-|` spack info ` | displays detailed information about that package|
-| `spack install ` | Installs a new package on the cluster. |
-| `spack uninstall ` | Removes an installed package from the cluster. |
-| `spack load ` | Makes a package ready for use in the current session. |
-| `spack unload ` | Removes a package from the current session's environment. |
-| `spack versions ` | Shows all available versions of a package for installation on the cluster. |
-| `spack help` | Displays general help and available subcommands. |
-| `spack help ` | Shows help for a specific subcommand. |
-|`spack config get`| Shows current Spack configuration settings |
-|`spack compiler find `| Detects and registers available compilers on the system |
-|`spack dependencies `| Lists dependencies of a package |
+### Spack Configuration Scopes
-??? info "Further Reference"
- For a comprehensive list of commands and advanced usage options, refer to the official Spack documentation:Spack Command Index
+Spack’s behavior is controlled by [configuration files](https://spack.readthedocs.io/en/latest/configuration.html) in different scopes, which determine settings like installation paths, compilers, package preferences, and so on. Spack’s default configuration settings reside in `$SPACK_ROOT/etc/spack/defaults`. Spack provides six distinct configuration scopes to handle this customization, applied in order of decreasing priority.
+| Scope | Directory |
+|--------------|------------------------------------------------|
+| Environment | In environment base directory (`spack.yaml`) |
+| Custom | Custom directory, specified with `--config-scope` |
+| User | `~/.spack/` |
+| Site | `$SPACK_ROOT/etc/spack/` |
+| System | `/etc/spack/` |
+| Defaults | `$SPACK_ROOT/etc/spack/defaults/` |
-### Spack Environments
+The user configuration scope, stored in `~/.spack/`, is ideal for defining personal preferences, compiler settings, and package defaults that apply across multiple projects and environments. The settings of this scope affect all instances of Spack. For more details, see the [official tutorials](https://spack-tutorial.readthedocs.io/en/isc22/tutorial_configuration.html#configs-tutorial).
-A Spack environment is a powerful feature that allows users to manage sets of software packages, dependencies, and configurations in an isolated and reproducible way.
+### Define System-Provided Packages
-Below is a list of commonly used Spack environment commands:
+Spack allows fine-grained control over how software is built through the [`packages.yaml`](https://spack.readthedocs.io/en/latest/packages_yaml.html) configuration file. This enables users to choose preferred implementations for virtual dependencies, choose particular compilers, and even configure Spack to use external installed software that are already available on the system while avoiding the need to rebuild everything from source.
-| Spack Command | Description |
-|-----------------------------------|--------------------------------------------------------------|
-| `spack env status` | Displays the currently active Spack environment. |
-| `spack env list` | Lists all existing Spack environments. |
-| `spack env create ` | Creates a new Spack environment with the specified name. |
-| `spack env activate ` | Activates the specified Spack environment. |
-| `spack env deactivate` | Deactivates the currently active environment. |
-|`spack concretize`| Prepares a full dependency spec for an environment or package before install |
-| `spack install --add ` | Installs a package into the currently active environment. |
+Spack’s build defaults are in the `etc/spack/defaults/packages.yaml` file. Most commonly, users define custom preferences in a user-level [configuration scope](https://spack.readthedocs.io/en/latest/configuration.html#configuration-scopes), which should be placed at `~/.spack/packages.yaml`.
+!!! question "Why is it crucial for users to define external packages in packages.yaml?"
-??? info "Further Reference"
- For more technical details, see the official Spack documentation:Spack Environments
+ While Spack can build everything from source, fundamental libraries like [MPI](../software/swsets/mpi.md) and [BLAS](https://www.netlib.org/blas/)/[LAPACK](https://www.netlib.org/lapack/) are often highly optimized and meticulously tuned by system administrators to leverage the specific hardware capabilities of the HPC clusters (e.g., network interconnects, CPU features, GPU architectures).
+ Using Spack's generic builds for these core libraries often results in sub-optimal performance compared to the finely-tuned system-provided versions. Declaring optimized external packages in `packages.yaml` ensures that Spack-built applications link against the most performant versions available in the [ULHPC software collection](https://hpc-docs.uni.lu/software/), thereby maximizing the efficiency of scientific computations. This avoids the overhead of rebuilding everything from source unnecessarily and guarantees users' code benefits from the HPC system's specialized hardware optimizations.
-### Define System-Provided Packages
-
-Spack allows users to control how software is built using the`packages.yaml` configuration file. This enables users to choose preferred implementations for virtual dependencies (like MPI or BLAS/LAPACK), choose particular compilers, and even configure Spack to use external installed software that are already available on the system while avoiding the need to rebuild everything from source.[2] Create a `packages.yaml` file under: `$HOME/.spack/packages.yaml`
+To create a `packages.yaml` file at the user-level configuration scope `~/.spack/`:
``` { .sh .copy }
+mkdir -p $HOME/.spack/
touch $HOME/.spack/packages.yaml
```
-with the following contents:
-
+Then, add the following contents, which instructs Spack to use system-provided versions of `GCC`, `binutils`, and `OpenMPI` configured with native fabrics:
``` { .sh .copy }
packages:
gcc:
@@ -344,113 +228,62 @@ with the following contents:
buildable: false
```
-This tells Spack to use the system available GCC, binutils and OpenMPI with the native fabrics.
-
-## Building FEniCS
-
-Create an environment and install FEniCS
-``` { .sh .copy }
-cd ~
-spack env create -d fenicsx-main-20230126/
-spack env activate fenicsx-main-20230126/
-spack add py-fenics-dolfinx@main fenics-dolfinx+adios2 adios2+python petsc+mumps
-# Change @main to e.g. @0.7.2 in the above if you want a fixed version.
-spack concretize
-spack install -j16
-```
-or the same directly in `spack.yaml` in `$SPACK_ENV`
-
-``` { .sh .copy }
-spack:
- # add package specs to the `specs` list
- specs:
- - py-fenics-dolfinx@main
- - fenics-dolfinx@main+adios2
- - petsc+mumps
- - adios2+python
- view: true
- concretizer:
- unify: true
-```
-The following are also commonly used in FEniCS scripts and may be useful
-
-``` { .sh .copy }
-spack add gmsh+opencascade py-numba py-scipy py-matplotlib
-```
-It is possible to build a specific version (git ref) of DOLFINx. Note that the hash must be the full hash. It is best to specify appropriate git refs on all components.
-
-``` { .sh .copy }
-# This is a Spack Environment file.
-#
-# It describes a set of packages to be installed, along with
-# configuration settings.
-spack:
- # add package specs to the `specs` list
- specs:
- - fenics-dolfinx@git.4f575964c70efd02dca92f2cf10c125071b17e4d=main+adios2
- - py-fenics-dolfinx@git.4f575964c70efd02dca92f2cf10c125071b17e4d=main
-
- - py-fenics-basix@git.2e2a7048ea5f4255c22af18af3b828036f1c8b50=main
- - fenics-basix@git.2e2a7048ea5f4255c22af18af3b828036f1c8b50=main
-
- - py-fenics-ufl@git.b15d8d3fdfea5ad6fe78531ec4ce6059cafeaa89=main
-
- - py-fenics-ffcx@git.7bc8be738997e7ce68ef0f406eab63c00d467092=main
-
- - fenics-ufcx@git.7bc8be738997e7ce68ef0f406eab63c00d467092=main
-
- - petsc+mumps
- - adios2+python
- view: true
- concretizer:
- unify: true
-```
+!!! note " Defining CUDA as an External Package"
+
+ Similarly, users can configure Spack to use a system-provided CUDA toolkit by adding the following example to the `packages.yaml` file. This helps Spack avoid rebuilding CUDA from source and ensures compatibility with the system GPU drivers and libraries:
+ ``` { .sh .copy }
+ packages:
+ cuda:
+ externals:
+ - spec:
+ modules:
+ -
+ buildable: false
+ ```
-It is also possible to build only the C++ layer using
+## Software installtion with Spack
+!!! Note
+ This section will include examples and detailed instructions on how to install software using Spack. Relevant official documentation will also be linked to guide users through advanced usage and best practices.
-``` { .sh .copy }
-spack add fenics-dolfinx@main+adios2 py-fenics-ffcx@main petsc+mumps
-```
-To rebuild FEniCSx from main branches inside an existing environment
+### Useful Spack Commands.
-``` { .sh .copy }
-spack install --overwrite -j16 fenics-basix py-fenics-basix py-fenics-ffcx fenics-ufcx py-fenics-ufl fenics-dolfinx py-fenics-dolfinx
-```
+The following tables summarizes the basic commands for managing software packages with Spack, from searching and installation to managing the software environment.
-## Testing the build
+| Spack Command | Description |
+|------------------------------|--------------------------------------------------------------|
+|`spack list ` | Searches for packages matching the name or keyword.|
+|` spack info ` | displays detailed information about that package|
+| `spack install ` | Installs a new package on the cluster. |
+| `spack uninstall ` | Removes an installed package from the cluster. |
+| `spack load ` | Makes a package ready for use in the current session. |
+| `spack unload ` | Removes a package from the current session's environment. |
+| `spack help` | Displays general help and available subcommands. |
-Quickly test the build with
-``` { .sh .copy }
-srun python -c "from mpi4py import MPI; import dolfinx"
-```
-## Using the build
+??? info "Further Reference"
+ For a comprehensive list of commands and advanced usage options, see the official Spack documentation:Spack Command Index
-See the uni.lu documentation for full details - using the environment should be as
-simple as adding the following where `...` is the name/folder of your environment.
-``` { .sh .copy }
-#!/bin/bash -l
-source $HOME/spack/share/spack/setup-env.sh
-spack env activate ...
-```
+### Spack Environments
-## Known issues
+A Spack environment is a powerful feature that allows users to manage sets of software packages, dependencies, and configurations in an isolated and reproducible way.
-Workaround for broken Python module find for gmsh on uni.lu cluster
+Below is a list of commonly used Spack environment commands:
-``` { .sh .copy }
+| Spack Command | Description |
+|-----------------------------------|-----------------------------------------------------------------------------|
+| `spack env create ` | Creates a new Spack environment with the specified name. |
+| `spack env activate ` | Activates the specified Spack environment. |
+| `spack env status` | Displays the currently active Spack environment. |
+| `spack env deactivate` | Deactivates the currently active environment. |
+| `spack concretize` | Prepares a full dependency spec for an environment or package before install. |
-export PYTHONPATH=$SPACK_ENV/.spack-env/view/lib64/:$PYTHONPATH
-```
-Workaround for broken Python module find for adios2 (seems broken in Spack)
+??? info "Further Reference"
+ For more technical details, see the official Spack documentation:Spack Environments
-``` { .sh .copy }
-export PYTHONPATH=$(find $SPACK_ENV/.spack-env -type d -name 'site-packages' | grep venv):$PYTHONPATH
-```
\ No newline at end of file
diff --git a/mkdocs.yml b/mkdocs.yml
index cf913382e..14d70e697 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -418,10 +418,8 @@ markdown_extensions:
repo_url_shorthand: true
social_url_shorthand: true
- pymdownx.snippets:
- base_path: snippets
-
+ base_path: snippets
- pymdownx.tabbed:
- alternate_style: true
- pymdownx.smartsymbols
# code blocks with syntax highlighting, graphs
- pymdownx.superfences
From c9c6f20ad671795f850ef2c68752fc7e5564176e Mon Sep 17 00:00:00 2001
From: Georgios Kafanas
Date: Fri, 1 Aug 2025 15:45:35 +0200
Subject: [PATCH 5/9] [REFACTOR:spack] Rebase past the build system update
---
mkdocs.yml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mkdocs.yml b/mkdocs.yml
index 14d70e697..4f777ff25 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -66,9 +66,9 @@ nav:
- Overview: 'environment/index.md'
- Modules: 'environment/modules.md'
- Easybuild: 'environment/easybuild.md'
+ - Spack: 'environment/spack.md'
- Containers: 'environment/containers.md'
- Conda: 'environment/conda.md'
- - Spack: 'environment/spack.md'
###########
- Policies:
- Acceptable Use Policy (AUP): 'policies/aup.md'
@@ -418,8 +418,8 @@ markdown_extensions:
repo_url_shorthand: true
social_url_shorthand: true
- pymdownx.snippets:
- base_path: snippets
- - pymdownx.tabbed:
+ base_path: snippets
+ - pymdownx.tabbed
- pymdownx.smartsymbols
# code blocks with syntax highlighting, graphs
- pymdownx.superfences
From 5f0a0c4511e212f2ff8d9fd9e9fa9d5fe0682e5d Mon Sep 17 00:00:00 2001
From: Md Jahid Hassan
Date: Tue, 5 Aug 2025 16:18:39 +0200
Subject: [PATCH 6/9] refine spack introductory section
---
docs/environment/spack.md | 75 +++++++++++++++++----------------------
1 file changed, 33 insertions(+), 42 deletions(-)
diff --git a/docs/environment/spack.md b/docs/environment/spack.md
index 10813be4b..f989e887e 100644
--- a/docs/environment/spack.md
+++ b/docs/environment/spack.md
@@ -7,8 +7,8 @@
[Spack](https://spack.io/about/) is an open-source package manager designed for installing, building, and managing scientific software across a wide range of system including from personal computers to super computers. It supports multiple versions, compilers, and configurations of software packages, all coexisting in a single system without conflict. Spack provides with [over 8,500 ](https://packages.spack.io/)official software packages available since the `v1.0.0` release.Additionally users can also create [custom packages](https://spack-tutorial.readthedocs.io/en/latest/tutorial_packaging.html) via `package.py` files for software not yet available in the Spack pre-defined [packages](https://spack.readthedocs.io/en/latest/package_fundamentals.html).
-Similar to [EasyBuild](https://docs.easybuild.io/), [Spack](https://spack.io/about/) is also available on the UL HPC platform for managing and installing scientific software in more flexible and customizable way.
-At present, the UL HPC environment includes a pre-installed version of Spack,namely `devel/Spack/0.21.2` which can be accessed via the module system.
+
??? question "Why use automatic building tools like [Easybuild](https://docs.easybuild.io) or [Spack](https://spack.io/) on HPC environments?"
@@ -34,18 +34,9 @@ At present, the UL HPC environment includes a pre-installed version of Spack,nam
## Setting up Spack.
-For all tests and compilation with Spack, it is essential to run on a **compute node**, not in the login/access node.
-
-??? note "Connection to a compute node"
-
- Here's an example of how to allocate an [interactive session](../jobs/interactive.md) in **Aion cluster**.
-
- ```{.sh .copy}
- si -N 1 -n 16 -c 1 -t 0-02:00:00
- ```
-
- This command requests a [job](../jobs/submit.md) with 1 node, 16 MPI processes (-n 16), and 1 CPU core per process (-c 1).The -n 16 option allows running up to 16 parallel processes, which can accelerate builds spack.However, these values are only examples and are not mandatory. Users may adjust the resource allocation according to their requirements or omit certain options entirely for simpler use cases.
+!!! warning "Connect to a compute node"
+ For all tests and compilation with Spack, it is essential to run on a [**compute node**](../systems/iris/compute.md), not on a [**login/access node**](../connect/access.md). For detailed information on resource allocation and job submission, visit the [**Slurm Job Management System**](../slurm/index.md).
### Clone & Setup Spack
@@ -56,10 +47,10 @@ To clone the Spack Repository:
``` { .sh .copy }
cd $HOME
-git clone -c feature.manyFiles=true https://github.com/spack/spack.git
+git clone --depth=2 --branch=releases/v1.0.0 https://github.com/spack/spack.git
```
-Cloning the Spack repository creates a directory named spack, and by default, it uses the develop branch. However for improved stability switching to the latest official [release](https://github.com/spack/spack/releases) is recommended. The current release tags at that time `v1.0.0`. and to checkout the most recnet release `v1.0.0` :
+Cloning the Spack repository creates a directory named `spack`, and by default, it uses the `develop` branch. However, for improved stability, switching to the latest official [release](https://github.com/spack/spack/releases) is recommended. The current release tag at the time of writing is `v1.0.0`. To checkout the most recent release `v1.0.0`:
``` { .sh .copy }
cd spack
@@ -229,6 +220,7 @@ Then, add the following contents, which instructs Spack to use system-provided v
```
!!! note " Defining CUDA as an External Package"
+ <!-- TODO: adjust this example to the ULHPC CUDA modules. -->
Similarly, users can configure Spack to use a system-provided CUDA toolkit by adding the following example to the `packages.yaml` file. This helps Spack avoid rebuilding CUDA from source and ensures compatibility with the system GPU drivers and libraries:
``` { .sh .copy }
@@ -242,48 +234,47 @@ Then, add the following contents, which instructs Spack to use system-provided v
```
-## Software installtion with Spack
+## Installing software with Spack
!!! Note
- This section will include examples and detailed instructions on how to install software using Spack. Relevant official documentation will also be linked to guide users through advanced usage and best practices.
+ This section includes examples and detailed instructions on how to install software using Spack, with links to the relevant official documentation.
+
+### Spack Environments
+A Spack [environment](https://spack.readthedocs.io/en/latest/environments.html) lets users manage software and dependencies in an isolated and reproducible way.
-### Useful Spack Commands.
+!!! info
+ On shared clusters, it's highly recommended to use Spack environments to keep installations clean, avoid conflicts, and simplify sharing or reproduction.
-The following tables summarizes the basic commands for managing software packages with Spack, from searching and installation to managing the software environment.
+To create and activate a Spack [environment](https://spack.readthedocs.io/en/latest/environments.html):
-| Spack Command | Description |
-|------------------------------|--------------------------------------------------------------|
-|`spack list ` | Searches for packages matching the name or keyword.|
-|` spack info ` | displays detailed information about that package|
-| `spack install ` | Installs a new package on the cluster. |
-| `spack uninstall ` | Removes an installed package from the cluster. |
-| `spack load ` | Makes a package ready for use in the current session. |
-| `spack unload ` | Removes a package from the current session's environment. |
-| `spack help` | Displays general help and available subcommands. |
+``` { .sh .copy }
+spack env create test-env
+spack env activate test-env
+```
+This command creates a Spack environment in the directory `$SPACK_ROOT/var/spack/environments/test-env`. It also generates a `spack.yaml` file—the main configuration file where users specify packages to install, compilers to use, and other settings specific to that `test-env` environment. For more details, see the official [Spack Environment Tutorial](https://spack-tutorial.readthedocs.io/en/latest/tutorial_environments.html).
-??? info "Further Reference"
- For a comprehensive list of commands and advanced usage options, see the official Spack documentation:Spack Command Index
+### Spack Packages Installation:
+Spack makes it easy to install software [packages](https://spack-tutorial.readthedocs.io/en/pearc22/tutorial_packaging.html#what-is-a-spack-package) from its extensive repository. To [install any package](https://spack.readthedocs.io/en/latest/package_fundamentals.html#installing-and-uninstalling) listed by `spack list`, use the following command: `spack install `
-### Spack Environments
-A Spack environment is a powerful feature that allows users to manage sets of software packages, dependencies, and configurations in an isolated and reproducible way.
+!!! details "Spack Packages Spec"
+
+ Spack uses a specific syntax to describe [package](https://spack.readthedocs.io/en/latest/packaging_guide_creation.html#structure-of-a-package) configurations during installation. Each configuration is called a [spec](https://spack.readthedocs.io/en/latest/spec_syntax.html) — a concise way to define package versions, compiler choices, variants, and dependencies.
+
+ ``` { .sh .copy }
+ spack install hdf5@1.10.7 +mpi ^mpich@3.3.2 ^zlib@1.2.11 %gcc@13.2.0
+ ```
-Below is a list of commonly used Spack environment commands:
+ This installs `HDF5` package in version `1.10.7` with MPI support, explicitly specifying `mpich` version 3.3.2 and `zlib` version 1.2.11 as dependencies, all built with GCC 13.2.0.
-| Spack Command | Description |
-|-----------------------------------|-----------------------------------------------------------------------------|
-| `spack env create ` | Creates a new Spack environment with the specified name. |
-| `spack env activate ` | Activates the specified Spack environment. |
-| `spack env status` | Displays the currently active Spack environment. |
-| `spack env deactivate` | Deactivates the currently active environment. |
-| `spack concretize` | Prepares a full dependency spec for an environment or package before install. |
+
-??? info "Further Reference"
- For more technical details, see the official Spack documentation:Spack Environments
+### Creating your own packages
+### Spack Binary Cache
From f31b55294c0f130cfdc0dd15dc6932dfb5a87543 Mon Sep 17 00:00:00 2001
From: Md Jahid Hassan
Date: Mon, 11 Aug 2025 23:58:03 +0200
Subject: [PATCH 7/9] rewrite Fenicsx build instruction with Spack
---
docs/software/cae/fenics.md | 313 ++++++++++++++++++++----------------
1 file changed, 177 insertions(+), 136 deletions(-)
diff --git a/docs/software/cae/fenics.md b/docs/software/cae/fenics.md
index 40fabfbb7..6f99e9dc1 100644
--- a/docs/software/cae/fenics.md
+++ b/docs/software/cae/fenics.md
@@ -1,161 +1,202 @@
[{: style="width:200px;float: right;" }](https://fenicsproject.org/)
-[FEniCS](https://fenicsproject.org/) is a popular open-source (LGPLv3) computing platform for
-solving partial differential equations (PDEs).
-FEniCS enables users to quickly translate scientific models
-into efficient finite element code. With the high-level
-Python and C++ interfaces to FEniCS, it is easy to get started,
-but FEniCS offers also powerful capabilities for more
-experienced programmers. FEniCS runs on a multitude of
-platforms ranging from laptops to high-performance clusters.
-
-## How to access the FEniCS through [Anaconda](https://www.anaconda.com/products/individual)
-The following steps provides information about how to installed
-on your local path.
-```bash
-# From your local computer
-$ ssh -X iris-cluster # OR ssh -Y iris-cluster on Mac
-# Reserve the node for interactive computation with grahics view (plots)
-$ si --x11 --ntasks-per-node 1 -c 4
-# salloc -p interactive --qos debug -C batch --x11 --ntasks-per-node 1 -c 4
+
-# Go to scratch directory
-$ cds
+[FEniCS](https://fenicsproject.org/) is a popular open-source computing platform for solving partial differential equations (PDEs) using the finite element method ([FEM](https://en.wikipedia.org/wiki/Finite_element_method)). Originally developed in 2003, the earlier version is now known as legacy FEniCS. In 2020, the next-generation framework [FEniCSx](https://docs.fenicsproject.org/) was introduced, with the latest stable [release v0.9.0](https://fenicsproject.org/blog/v0.9.0/) in October 2024. Though it builds on legacy FEniCS, it introduces significant improvements over the legacy libraries. FEniCSx comprises the libraries [UFL](https://github.com/FEniCS/ufl), [Basix](https://github.com/FEniCS/basix), [FFCx](https://github.com/FEniCS/ffcx), and [DOLFINx](https://github.com/FEniCS/dolfinx), which are its building blocks. New users are encouraged to adopt [FEniCSx](https://docs.fenicsproject.org/) for its modern features and active development support.
-/scratch/users/ $ Anaconda3-2020.07-Linux-x86_64.sh
-/scratch/users/ $ chmod +x Anaconda3-2020.07-Linux-x86_64.sh
-/scratch/users/ $ ./Anaconda3-2020.07-Linux-x86_64.sh
-Do you accept the license terms? [yes|no]
-yes
-Anaconda3 will now be installed into this location:
-/home/users//anaconda3
+
- - Press ENTER to confirm the location
- - Press CTRL-C to abort the installation
- - Or specify a different location below
-# You can choose your path where you want to install it
-[/home/users//anaconda3] >>> /scratch/users//Anaconda3
+
-# To activate the anaconda
-/scratch/users/ $ source /scratch/users//Anaconda3/bin/activate
+## Installation of FEniCSx
-# Install the fenics in anaconda environment
-/scratch/users/ $ conda create -n fenicsproject -c conda-forge fenics
+FEniCSx can be installed on [ULHPC](https://www.uni.lu/research-en/core-facilities/hpc/) systems using [Easybuild](https://docs.easybuild.io) or [Spack](https://spack.io/). Below are detailed instructions for each method.
-# Install matplotlib for the visualization
-/scratch/users/ $ conda install -c conda-forge matplotlib
-```
-Once you have installed the anaconda, you can always
-activate it by calling the `source activate` path where `anaconda`
-has been installed.
-## Working example
-### Interactive mode
-```bash
-# From your local computer
-$ ssh -X iris-cluster # or ssh -Y iris-cluster on Mac
-# Reserve the node for interactive computation with grahics view (plots)
-$ si --ntasks-per-node 1 -c 4 --x11
-# salloc -p interactive --qos debug -C batch --x11 --ntasks-per-node 1 -c 4
+### Building FEniCS With Spack
-# Activate anaconda
-$ source /${SCRATCH}/Anaconda3/bin/activate
-# activate the fenicsproject
-$ conda activate fenicsproject
+Building FEniCSx with Spack requires that Spack is already installed, configured, and its environment sourced on the ULHPC system. If Spack is not yet configured, follow the [Spack documentation](../../environment/spack.md) for installation and configuration.
-# execute the Poisson.py example (you can uncomment the plot lines in Poission.py example)
-$ python3 Poisson.py
-```
+!!! note
+ Spack can be a good choice to build FEniCSx with its many complex dependencies, leveraging the system-provided packages defined in `~/.spack/packages.yaml` for optimal performance.
+
+Create and Activate a Spack Environment:
+
+To maintain an isolated installation, create a dedicated Spack environment in a chosen directory.
+The following example builds FEniCSx in the `home` directory:
+
+ cd ~
+ spack env create -d fenicsx-main-20230126/
+ spack env activate fenicsx-main-20230126/
+
+
+Add the core FEniCSx components and common dependencies:
+
+ spack add py-fenics-dolfinx@0.9.0+petsc4py fenics-dolfinx+adios2+petsc adios2+python petsc+mumps
+
+ # Change @0.9.0 to any version in the above if you want a another version.
+ spack concretize
+ spack install -j16
+
+
+!!! note
+
+ `spack concretize` resolves all dependencies and selects compatible versions for the specified packages. `-j16` sets the number of parallel build jobs. Using a higher number can speed up the build but should be chosen based on available CPU cores and cluster policies.
+
+
+
+or the same directly in `spack.yaml` in `$SPACK_ENV`
+
+ spack:
+ # add package specs to the `specs` list
+ specs:
+ - py-fenics-dolfinx@0.9.0+petsc4py
+ - fenics-dolfinx+adios2+petsc
+ - petsc+mumps
+ - adios2+python
+
+ view: true
+ concretizer:
+ unify: true
+
+The following are also commonly used in FEniCS scripts and may be useful
+
+ spack add gmsh+opencascade py-numba py-scipy py-matplotlib
+
+It is possible to build a specific version (git ref) of DOLFINx.
+Note that the hash must be the full hash.
+It is best to specify appropriate git refs on all components.
+
+ # This is a Spack Environment file.
+ #
+ # It describes a set of packages to be installed, along with
+ # configuration settings.
+ spack:
+ # add package specs to the `specs` list
+ specs:
+ - fenics-dolfinx@git.4f575964c70efd02dca92f2cf10c125071b17e4d=main+adios2
+ - py-fenics-dolfinx@git.4f575964c70efd02dca92f2cf10c125071b17e4d=main
+
+ - py-fenics-basix@git.2e2a7048ea5f4255c22af18af3b828036f1c8b50=main
+ - fenics-basix@git.2e2a7048ea5f4255c22af18af3b828036f1c8b50=main
+
+ - py-fenics-ufl@git.b15d8d3fdfea5ad6fe78531ec4ce6059cafeaa89=main
+
+ - py-fenics-ffcx@git.7bc8be738997e7ce68ef0f406eab63c00d467092=main
+
+ - fenics-ufcx@git.7bc8be738997e7ce68ef0f406eab63c00d467092=main
+
+ - petsc+mumps
+ - adios2+python
+ view: true
+ concretizer:
+ unify: true
+
+It is also possible to build only the C++ layer using
+
+ spack add fenics-dolfinx@main+adios2 py-fenics-ffcx@main petsc+mumps
+
+To rebuild FEniCSx from main branches inside an existing environment
+
+ spack install --overwrite -j16 fenics-basix py-fenics-basix py-fenics-ffcx fenics-ufcx py-fenics-ufl fenics-dolfinx py-fenics-dolfinx
+
+#### Testing the build
+
+Quickly test the build with
+
+ srun python -c "from mpi4py import MPI; import dolfinx"
+
+#### Using the build
+
+See the uni.lu documentation for full details - using the environment should be as
+simple as adding the following where `...` is the name/folder of your environment.
+
+ #!/bin/bash -l
+ source $HOME/spack/share/spack/setup-env.sh
+ spack env activate ...
+
+#### Known issues
+
+Workaround for inability to find gmsh Python package:
+
+ export PYTHONPATH=$SPACK_ENV/.spack-env/view/lib64/:$PYTHONPATH
+
+Workaround for inability to find adios2 Python package:
+
+ export PYTHONPATH=$(find $SPACK_ENV/.spack-env -type d -name 'site-packages' | grep venv):$PYTHONPATH
+
+
+### Building FEniCS With EasyBuild
-### Batch script
-```bash
-#!/bin/bash -l
-#SBATCH -J FEniCS
-#SBATCH -N 1
-###SBATCH -A
-###SBATCH --ntasks-per-node=1
-#SBATCH -c 1
-#SBATCH --time=00:05:00
-#SBATCH -p batch
-
-echo "== Starting run at $(date)"
-echo "== Job ID: ${SLURM_JOBID}"
-echo "== Node list: ${SLURM_NODELIST}"
-echo "== Submit dir. : ${SLURM_SUBMIT_DIR}"
-
-# activate the anaconda source
-source ${SCRATCH}/Anaconda3/bin/activate
-
-# activate the fenicsproject from anaconda
-conda activate fenicsproject
-
-# execute the poisson.py through python
-srun python3 Poisson.py
-```
### Example (Poisson.py)
```bash
-# FEniCS tutorial demo program: Poisson equation with Dirichlet conditions.
-# Test problem is chosen to give an exact solution at all nodes of the mesh.
-# -Laplace(u) = f in the unit square
-# u = u_D on the boundary
-# u_D = 1 + x^2 + 2y^2
-# f = -6
-
-from __future__ import print_function
-from fenics import *
-import matplotlib.pyplot as plt
-
-# Create mesh and define function space
-mesh = UnitSquareMesh(8, 8)
-V = FunctionSpace(mesh, 'P', 1)
-
-# Define boundary condition
-u_D = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]', degree=2)
-
-def boundary(x, on_boundary):
- return on_boundary
-
-bc = DirichletBC(V, u_D, boundary)
-
-# Define variational problem
-u = TrialFunction(V)
-v = TestFunction(V)
-f = Constant(-6.0)
-a = dot(grad(u), grad(v))*dx
-L = f*v*dx
-
-# Compute solution
-u = Function(V)
-solve(a == L, u, bc)
-
-# Plot solution and mesh
-#plot(u)
-#plot(mesh)
-
-# Save solution to file in VTK format
-vtkfile = File('poisson/solution.pvd')
-vtkfile << u
-
-# Compute error in L2 norm
-error_L2 = errornorm(u_D, u, 'L2')
-
-# Compute maximum error at vertices
-vertex_values_u_D = u_D.compute_vertex_values(mesh)
-vertex_values_u = u.compute_vertex_values(mesh)
+
+# Demo Poisson problem
+# https://docs.fenicsproject.org/dolfinx/main/python/demos/demo_poisson.html
+
+from mpi4py import MPI
+from petsc4py.PETSc import ScalarType
+
import numpy as np
-error_max = np.max(np.abs(vertex_values_u_D - vertex_values_u))
-# Print errors
-print('error_L2 =', error_L2)
-print('error_max =', error_max)
+import ufl
+from dolfinx import fem, mesh
+from dolfinx.fem.petsc import LinearProblem
+
+# Create mesh
+msh = mesh.create_rectangle(
+ comm=MPI.COMM_WORLD,
+ points=((0.0, 0.0), (2.0, 1.0)),
+ n=(32, 16),
+ cell_type=mesh.CellType.triangle,
+)
+
+# Function space
+V = fem.functionspace(msh, ("Lagrange", 1))
+
+# Boundary facets (x=0 and x=2)
+facets = mesh.locate_entities_boundary(
+ msh,
+ dim=(msh.topology.dim - 1),
+ marker=lambda x: np.isclose(x[0], 0.0) | np.isclose(x[0], 2.0),
+)
+dofs = fem.locate_dofs_topological(V=V, entity_dim=1, entities=facets)
+
+# Dirichlet BC u = 0
+bc = fem.dirichletbc(value=ScalarType(0), dofs=dofs, V=V)
+
+# Variational problem
+u = ufl.TrialFunction(V)
+v = ufl.TestFunction(V)
+x = ufl.SpatialCoordinate(msh)
+f = 10 * ufl.exp(-((x[0] - 0.5) ** 2 + (x[1] - 0.5) ** 2) / 0.02)
+g = ufl.sin(5 * x[0])
+a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx
+L = ufl.inner(f, v) * ufl.dx + ufl.inner(g, v) * ufl.ds
+
+# Create problem (no petsc_options_prefix in 0.9.0)
+problem = LinearProblem(
+ a,
+ L,
+ bcs=[bc],
+ petsc_options={"ksp_type": "preonly", "pc_type": "lu", "ksp_error_if_not_converged": True},
+)
+
+# Solve
+uh = problem.solve()
+
+# Only print from rank 0 to avoid MPI spam
+if MPI.COMM_WORLD.rank == 0:
+ print("First 10 values of the solution vector:", uh.x.array[:10])
+
+assert isinstance(uh, fem.Function)
+
-# Hold plot
-#plt.show()
```
## Additional information
From c87a40ab8ac77059570760d55f6d7c765237ad09 Mon Sep 17 00:00:00 2001
From: Md Jahid Hassan
Date: Tue, 19 Aug 2025 12:35:31 +0200
Subject: [PATCH 8/9] refine docs and link to demos programs
---
docs/software/cae/fenics.md | 114 +++++++++---------------------------
1 file changed, 27 insertions(+), 87 deletions(-)
diff --git a/docs/software/cae/fenics.md b/docs/software/cae/fenics.md
index 6f99e9dc1..6d6dccb96 100644
--- a/docs/software/cae/fenics.md
+++ b/docs/software/cae/fenics.md
@@ -2,27 +2,27 @@
-[FEniCS](https://fenicsproject.org/) is a popular open-source computing platform for solving partial differential equations (PDEs) using the finite element method ([FEM](https://en.wikipedia.org/wiki/Finite_element_method)). Originally developed in 2003, the earlier version is now known as legacy FEniCS. In 2020, the next-generation framework [FEniCSx](https://docs.fenicsproject.org/) was introduced, with the latest stable [release v0.9.0](https://fenicsproject.org/blog/v0.9.0/) in October 2024. Though it builds on the legacy FEniCS but introduces significant improvements over the legacy libraries. FEniCSx is comprised of the libraries [UFL](https://github.com/FEniCS/ufl), [Basix](https://github.com/FEniCS/basix), [FFCx](https://github.com/FEniCS/ffcx), and [DOLFINx](https://github.com/FEniCS/dolfinx) which are the build blocks of it. And new users are encouraged to adopt [FEniCSx](https://docs.fenicsproject.org/) for its modern features and active development support.
+[FEniCS](https://fenicsproject.org/) is a popular open-source computing platform for solving partial differential equations (PDEs) using the finite element method ([FEM](https://en.wikipedia.org/wiki/Finite_element_method)). Originally developed in 2003, the earlier version is now known as legacy FEniCS. In 2020, the next-generation framework [FEniCSx](https://docs.fenicsproject.org/) was introduced, with the latest stable [release v0.9.0](https://fenicsproject.org/blog/v0.9.0/) in October 2024. Though it builds on legacy FEniCS, it introduces significant improvements over the legacy libraries. FEniCSx is composed of the following libraries that support typical workflows: [UFL](https://github.com/FEniCS/ufl) → [FFCx](https://github.com/FEniCS/ffcx) → [Basix](https://github.com/FEniCS/basix) → [DOLFINx](https://github.com/FEniCS/dolfinx), which are its building blocks. New users are encouraged to adopt [FEniCSx](https://fenicsproject.org/documentation/) for its modern features and active development support.
-
+(Maybe add a short intro into the stack the software depends on, and further more the internal dependencies UFL->FFCx ...)
-
-## Installation of FEniCSx
FEniCSx can be installed on [ULHPC](https://www.uni.lu/research-en/core-facilities/hpc/) systems using [Easybuild](https://docs.easybuild.io) or [Spack](https://spack.io/), Below are detailed instructions for each method,
-
+
### Building FEniCS With Spack
-Building FEniCSx with Spack requires that Spack is already installed, configured, and its environment sourced on the [ULHPC] system. If Spack is not yet configured, follow the [spack documentation](../../environment/spack.md) for installation and configuration.
+Building FEniCSx with Spack on the [ULHPC](https://www.uni.lu/research-en/core-facilities/hpc/) system requires that users have already installed Spack and sourced its environment on the cluster. If Spack is not yet configured, follow the [spack documentation](../../environment/spack.md) for installation and configuration.
+
!!! note
- Spack can a good choice to build FEniCSx with its many complex dependencies, leveraging the system-provided packages defined in ~/.spack/packages.yaml for optimal performance.
+
+    Spack would be a good choice for building FEniCSx because it automatically manages complex dependencies, isolates all installations in a dedicated environment, leverages system-provided packages in `~/.spack/packages.yaml` for optimal performance, and simplifies reproducibility and maintenance across different systems.
Create and Activate a Spack Environment:
@@ -30,8 +30,8 @@ To maintain an isolated installation, create a dedicated Spack environment in a
The following example builds FEniCSx in the `home` directory:
cd ~
- spack env create -d fenicsx-main-20230126/
- spack env activate fenicsx-main-20230126/
+ spack env create -d fenicsx-0.9.0/
+ spack env activate fenicsx-0.9.0/
Add the core FEniCSx components and common dependencies:
@@ -43,13 +43,13 @@ Add the core FEniCSx components and common dependencies:
spack install -j16
-!!! note
+!!! question " why concretize and -j16 ? "
- `spack concretize` resolves all dependencies and selects compatible versions for the specified packages. `-j16` sets the number of parallel build jobs. Using a higher number can speed up the build but should be chosen based on available CPU cores and cluster policies.
+ `spack concretize` resolves all dependencies and selects compatible versions for the specified packages. `-j16` sets the number cores to use for building. Using a higher number can speed up the build but should be chosen based on available CPU cores and cluster policies.
-or the same directly in `spack.yaml` in `$SPACK_ENV`
+or its also possible to define build packages in `$SPACK_ENV` in a `spack.yaml` file.
spack:
# add package specs to the `specs` list
@@ -63,7 +63,13 @@ or the same directly in `spack.yaml` in `$SPACK_ENV`
concretizer:
unify: true
-The following are also commonly used in FEniCS scripts and may be useful
+!!! question " why unify : true ? "
+
+ `unify: true` ensures all packages share the same dependency versions, preventing multiple builds of the same library. Without it, each `spec` could resolve dependencies independently, leading to potential conflicts and redundant installations.
+
+
+
+The following are also common dependencies used in FEniCS scripts:
spack add gmsh+opencascade py-numba py-scipy py-matplotlib
@@ -72,7 +78,6 @@ Note that the hash must be the full hash.
It is best to specify appropriate git refs on all components.
# This is a Spack Environment file.
- #
# It describes a set of packages to be installed, along with
# configuration settings.
spack:
@@ -96,11 +101,11 @@ It is best to specify appropriate git refs on all components.
concretizer:
unify: true
-It is also possible to build only the C++ layer using
+It is also possible to build only the C++ layer using (Need to comment about why we add Python dependencies?)
- spack add fenics-dolfinx@main+adios2 py-fenics-ffcx@main petsc+mumps
+ spack add fenics-dolfinx@0.9.0+adios2 py-fenics-ffcx@0.9.0 petsc+mumps
-To rebuild FEniCSx from main branches inside an existing environment
+To rebuild FEniCSx from main branches inside an existing environment:
spack install --overwrite -j16 fenics-basix py-fenics-basix py-fenics-ffcx fenics-ufcx py-fenics-ufl fenics-dolfinx py-fenics-dolfinx
@@ -110,14 +115,11 @@ Quickly test the build with
srun python -c "from mpi4py import MPI; import dolfinx"
-#### Using the build
+!!! info "Try the Build Explicitly"
-See the uni.lu documentation for full details - using the environment should be as
-simple as adding the following where `...` is the name/folder of your environment.
+ After installation, the [FEniCSx](https://fenicsproject.org/documentation/) build can be tried explicitly by running the demo problems corresponding to the installed release version, as provided in the [FEniCSx documentation](https://docs.fenicsproject.org/).
+ For [DOLFINx](https://docs.fenicsproject.org/dolfinx/main/python/) Python bindings, see for example the demos in the [stable release v0.9.0](https://docs.fenicsproject.org/dolfinx/v0.9.0/python/demos.html).
- #!/bin/bash -l
- source $HOME/spack/share/spack/setup-env.sh
- spack env activate ...
#### Known issues
@@ -129,75 +131,13 @@ Workaround for inability to find adios2 Python package:
export PYTHONPATH=$(find $SPACK_ENV/.spack-env -type d -name 'site-packages' | grep venv):$PYTHONPATH
+
-### Building FEniCS With EasyBuild
-
-
-### Example (Poisson.py)
-```bash
-# Demo possion problem
-# https://docs.fenicsproject.org/dolfinx/main/python/demos/demo_poisson.html
-
-from mpi4py import MPI
-from petsc4py.PETSc import ScalarType
-
-import numpy as np
-
-import ufl
-from dolfinx import fem, mesh
-from dolfinx.fem.petsc import LinearProblem
-
-# Create mesh
-msh = mesh.create_rectangle(
- comm=MPI.COMM_WORLD,
- points=((0.0, 0.0), (2.0, 1.0)),
- n=(32, 16),
- cell_type=mesh.CellType.triangle,
-)
-
-# Function space
-V = fem.functionspace(msh, ("Lagrange", 1))
-
-# Boundary facets (x=0 and x=2)
-facets = mesh.locate_entities_boundary(
- msh,
- dim=(msh.topology.dim - 1),
- marker=lambda x: np.isclose(x[0], 0.0) | np.isclose(x[0], 2.0),
-)
-dofs = fem.locate_dofs_topological(V=V, entity_dim=1, entities=facets)
-
-# Dirichlet BC u = 0
-bc = fem.dirichletbc(value=ScalarType(0), dofs=dofs, V=V)
-
-# Variational problem
-u = ufl.TrialFunction(V)
-v = ufl.TestFunction(V)
-x = ufl.SpatialCoordinate(msh)
-f = 10 * ufl.exp(-((x[0] - 0.5) ** 2 + (x[1] - 0.5) ** 2) / 0.02)
-g = ufl.sin(5 * x[0])
-a = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx
-L = ufl.inner(f, v) * ufl.dx + ufl.inner(g, v) * ufl.ds
-
-# Create problem (no petsc_options_prefix in 0.9.0)
-problem = LinearProblem(
- a,
- L,
- bcs=[bc],
- petsc_options={"ksp_type": "preonly", "pc_type": "lu", "ksp_error_if_not_converged": True},
-)
-
-# Solve
-uh = problem.solve()
-
-# Only print from rank 0 to avoid MPI spam
-if MPI.COMM_WORLD.rank == 0:
- print("First 10 values of the solution vector:", uh.x.array[:10])
+### Building FEniCS With EasyBuild
-assert isinstance(uh, fem.Function)
-```
## Additional information
FEniCS provides the [technical documentation](https://fenicsproject.org/documentation/),
From 429bce8f81e5645ed61c49227d8ff5d401141601 Mon Sep 17 00:00:00 2001
From: Md Jahid Hassan
Date: Mon, 29 Sep 2025 15:17:50 +0200
Subject: [PATCH 9/9] refine fenics sections
---
docs/software/cae/fenics.md | 62 ++++++++++++++++---------------------
1 file changed, 27 insertions(+), 35 deletions(-)
diff --git a/docs/software/cae/fenics.md b/docs/software/cae/fenics.md
index 6d6dccb96..551b6a0c6 100644
--- a/docs/software/cae/fenics.md
+++ b/docs/software/cae/fenics.md
@@ -4,22 +4,14 @@
[FEniCS](https://fenicsproject.org/) is a popular open-source computing platform for solving partial differential equations (PDEs) using the finite element method ([FEM](https://en.wikipedia.org/wiki/Finite_element_method)). Originally developed in 2003, the earlier version is now known as legacy FEniCS. In 2020, the next-generation framework [FEniCSx](https://docs.fenicsproject.org/) was introduced, with the latest stable [release v0.9.0](https://fenicsproject.org/blog/v0.9.0/) in October 2024. Though it builds on legacy FEniCS, it introduces significant improvements over the legacy libraries. FEniCSx is composed of the following libraries that support typical workflows: [UFL](https://github.com/FEniCS/ufl) → [FFCx](https://github.com/FEniCS/ffcx) → [Basix](https://github.com/FEniCS/basix) → [DOLFINx](https://github.com/FEniCS/dolfinx), which are its building blocks. New users are encouraged to adopt [FEniCSx](https://fenicsproject.org/documentation/) for its modern features and active development support.
-
-(Maybe add a short intro into the stack the software depends on, and further more the internal dependencies UFL->FFCx ...)
-
-
-
-
FEniCSx can be installed on [ULHPC](https://www.uni.lu/research-en/core-facilities/hpc/) systems using [Easybuild](https://docs.easybuild.io) or [Spack](https://spack.io/), Below are detailed instructions for each method,
### Building FEniCS With Spack
-
Building FEniCSx with Spack on the [ULHPC](https://www.uni.lu/research-en/core-facilities/hpc/) system requires that users have already installed Spack and sourced its environment on the cluster. If Spack is not yet configured, follow the [spack documentation](../../environment/spack.md) for installation and configuration.
-
!!! note
    Spack would be a good choice for building FEniCSx because it automatically manages complex dependencies, isolates all installations in a dedicated environment, leverages system-provided packages in `~/.spack/packages.yaml` for optimal performance, and simplifies reproducibility and maintenance across different systems.
@@ -27,47 +19,47 @@ Building FEniCSx with Spack on the [ULHPC](https://www.uni.lu/research-en/core-f
Create and Activate a Spack Environment:
To maintain an isolated installation, create a dedicated Spack environment in a chosen directory.
-The following example builds FEniCSx in the `home` directory:
+The following example sets up a stable release of FEniCSx `v0.9.0` in the `fenicsx-test` directory inside the `home` directory:
cd ~
- spack env create -d fenicsx-0.9.0/
- spack env activate fenicsx-0.9.0/
-
-
+ spack env create -d fenicsx-test/
+ spack env activate fenicsx-test/
+
Add the core FEniCSx components and common dependencies:
spack add py-fenics-dolfinx@0.9.0+petsc4py fenics-dolfinx+adios2+petsc adios2+python petsc+mumps
- # Change @0.9.0 to any version in the above if you want a another version.
- spack concretize
- spack install -j16
-
-
-!!! question " why concretize and -j16 ? "
-
- `spack concretize` resolves all dependencies and selects compatible versions for the specified packages. `-j16` sets the number cores to use for building. Using a higher number can speed up the build but should be chosen based on available CPU cores and cluster policies.
-
+!!! note "Additional information"
+    The Spack `add` command adds abstract specs of packages to the currently active environment and registers them as root `specs` in the environment’s `spack.yaml` file. Alternatively, packages can be predefined directly in the `spack.yaml` file located in `$SPACK_ENV`.
-or its also possible to define build packages in `$SPACK_ENV` in a `spack.yaml` file.
+ spack:
+ # add package specs to the `specs` list
+ specs:
+ - py-fenics-dolfinx@0.9.0+petsc4py
+ - fenics-dolfinx+adios2+petsc
+ - petsc+mumps
+ - adios2+python
- spack:
- # add package specs to the `specs` list
- specs:
- - py-fenics-dolfinx@0.9.0+petsc4py
- - fenics-dolfinx+adios2+petsc
- - petsc+mumps
- - adios2+python
-
- view: true
- concretizer:
- unify: true
+ view: true
+ concretizer:
+ unify: true
+ !!! note
+        Replace `@0.9.0` with a different version if you prefer to install another release.
-!!! question " why unify : true ? "
+??? question " why unify : true ? "
`unify: true` ensures all packages share the same dependency versions, preventing multiple builds of the same library. Without it, each `spec` could resolve dependencies independently, leading to potential conflicts and redundant installations.
+Once package `specs` have been added to the current environment, they need to be concretized.
+
+ spack concretize
+ spack install -j16
+
+!!! note
+    Here, [`spack concretize`](https://spack.readthedocs.io/en/latest/environments.html#spec-concretization) resolves all dependencies and selects compatible versions for the specified packages. In addition to adding individual specs to an environment, the `spack install` command installs the entire environment at once, and the `-j16` option sets the number of CPU cores used for building, which can speed up the installation.
+ Once installed, the FEniCSx environment is ready to use on the cluster.
The following are also common dependencies used in FEniCS scripts: