1 change: 1 addition & 0 deletions .gitignore
@@ -4,3 +4,4 @@ data
*.pyc
*.venv
*.log
test_output/
34 changes: 33 additions & 1 deletion doc/connecting-pull-lab.md
@@ -1,5 +1,5 @@
---
title: "Connecting Pull Labs runtime"
title: "Connecting and Running Pull Labs runtime"
date: 2025-02-14
description: "Connecting a PULL_LABS compatible lab to the KernelCI pipeline"
weight: 4
@@ -9,6 +9,9 @@ KernelCI supports labs that follow the [PULL_LABS protocol](https://github.com/k
LAVA- and Kubernetes-based integrations. This guide shows the minimum
configuration needed to make a lab instance visible to the pipeline.

There is an example payload script in `tools/example_pull_lab.py` which provides
a simple way to execute these pull-lab payloads.

The examples below mirror the demo entries committed in this repository.
Replace the names and tokens with the values that match your deployment.

@@ -90,3 +93,32 @@ Ensure the `scheduler` service is started with the `--runtimes pull-labs-demo`
argument in the relevant `docker-compose` file so the new runtime becomes active.
The lab will see the generated events once it authenticates with the callback
token value paired with the token name defined in the pipeline configuration.
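
As a rough sketch, assuming the service is simply named `scheduler` in your
`docker-compose` file, recreating it after adding the argument could look like:

```bash
# Assumption: the scheduler service is named "scheduler" and already has
# "--runtimes pull-labs-demo" added to its command line
docker-compose up -d --force-recreate scheduler
```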

## Running the Example Pull Lab Script

The `tools/example_pull_lab.py` script provides a simple way to execute pull-lab
payloads using tuxrun for QEMU-based virtual targets.

### Prerequisites

Tuxrun is required to run the jobs, and tuxrun in turn requires podman to be
set up in order to execute them. A quick sanity check is shown after the list below.

- Install tuxrun: `pip install tuxrun`
- Install podman: `sudo apt install podman`
- Tuxrun handles downloads and QEMU VM execution automatically
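
A minimal sanity check, assuming both tools are already on your `PATH`:

```bash
# Confirm that tuxrun and podman are installed and callable
tuxrun --help | head -n 5
podman --version
```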

### Running the Script

```bash
python tools/example_pull_lab.py
```

The script will:
- Detect the architecture from the job definitions
- Support filtering by platform, group, device, and runtime (see the example after this list)
- Use `--cache-dir` to choose where caches and outputs are stored (default: `./test_output/`)
- Save each job's output to a date- and job-ID-stamped subdirectory of the cache directory
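
For example, using flags defined in the script's argument parser (the platform
value below is only an illustrative assumption):

```bash
# Run only the jobs matching a given platform; outputs go to the default cache dir
python tools/example_pull_lab.py --platform qemu-x86_64

# Run without saving tuxrun outputs to disk
python tools/example_pull_lab.py --no-save-outputs
```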

**TODO:** Support for FVP (Fixed Virtual Platform) and DUT (Device Under Test)
jobs will be added in future versions, along with publishing to KCIDB.
40 changes: 32 additions & 8 deletions tools/example_pull_lab.py
@@ -12,6 +12,8 @@
import time
import subprocess
import shlex
import re
from urllib.parse import urlparse


BASE_URI = "https://staging.kernelci.org:9000/latest"
@@ -38,6 +40,19 @@ def retrieve_job_definition(url):
return response.json()


def parse_job_definition_url(url):
"""Extract date and job ID from job definition URL."""
try:
parsed = urlparse(url)
path = parsed.path
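        # Expected URL shape (an assumption inferred from the regex below):
        #   https://<host>/.../pull_labs_jobs/<YYYYMMDD>/<hex job id>.json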
        match = re.search(r'/pull_labs_jobs/(\d{8})/([a-f0-9]+)\.json', path)
        if match:
            return match.group(1), match.group(2)
    except Exception as e:
        print(f"Failed to parse job definition URL: {e}")
    return None, None


def run_tuxrun(kernel_url, modules_url, device="qemu-x86_64", tests=None, rootfs_url=None, cache_dir=None):
"""
Launch a test using tuxrun
@@ -67,8 +82,8 @@ def run_tuxrun(kernel_url, modules_url, device="qemu-x86_64", tests=None, rootfs

if cache_dir:
os.makedirs(cache_dir, exist_ok=True)
cmd.extend(["--save-outputs", "--cache-dir", cache_dir])
print(f"Outputs will be saved to: {cache_dir}")
cmd.extend(["--save-outputs", "--cache-dir", cache_dir, "--log-file", "-"])
print(f"Outputs will be saved to: {cache_dir}")

print(f"Executing command: {' '.join(shlex.quote(arg) for arg in cmd)}")

@@ -80,7 +95,7 @@ def run_tuxrun(kernel_url, modules_url, device="qemu-x86_64", tests=None, rootfs
return result.returncode
except (subprocess.CalledProcessError, FileNotFoundError) as e:
print(f"\n✗ Error running tuxrun: {e}")
return e.returncode
return e.returncode if hasattr(e, 'returncode') else 1


def prepare_and_run(artifacts, device="qemu-x86_64", tests=None, rootfs_override=None, cache_dir=None):
@@ -96,7 +111,6 @@ def prepare_and_run(artifacts, device="qemu-x86_64", tests=None, rootfs_override
"""
kernel_url = artifacts.get("kernel")
modules_url = artifacts.get("modules")
# Try rootfs first, then ramdisk as fallback
rootfs_url = rootfs_override if rootfs_override else (artifacts.get("rootfs") or artifacts.get("ramdisk"))

if not kernel_url or not modules_url:
@@ -137,7 +151,13 @@ def main():
)
parser.add_argument(
"--cache-dir",
help="Directory to save tuxrun outputs and cache (e.g., ./outputs). Enables --save-outputs flag.",
default="./test_output",
help="Directory to save tuxrun outputs and cache (default: ./test_output). Use --no-save-outputs to disable.",
)
parser.add_argument(
"--no-save-outputs",
action="store_true",
help="Disable saving outputs (overrides --cache-dir)",
)
parser.add_argument(
"--platform",
@@ -242,9 +262,13 @@
continue

cache_dir = None
if args.cache_dir:
node_id = node.get("id", "unknown")
cache_dir = os.path.join(args.cache_dir, node_id)
if not args.no_save_outputs and args.cache_dir:
date, job_id = parse_job_definition_url(job_definition_url)
if date and job_id:
cache_dir = os.path.join(args.cache_dir, date, job_id)
else:
node_id = node.get("id", "unknown")
cache_dir = os.path.join(args.cache_dir, node_id)

prepare_and_run(
job_artifacts,