diff --git a/.github/workflows/driver-vm-linux.yml b/.github/workflows/driver-vm-linux.yml
index 8f1c71bec..9647bda05 100644
--- a/.github/workflows/driver-vm-linux.yml
+++ b/.github/workflows/driver-vm-linux.yml
@@ -32,7 +32,7 @@ jobs:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
with:
ref: ${{ inputs['checkout-ref'] }}
@@ -46,17 +46,14 @@ jobs:
for platform in linux-aarch64 linux-x86_64; do
asset="vm-runtime-${platform}.tar.zst"
echo "Downloading ${asset}..."
- asset_url=$(curl -fsSL \
- -H "Accept: application/vnd.github+json" \
- -H "Authorization: Bearer ${GH_TOKEN}" \
- "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/tags/vm-dev" \
- | jq -r --arg asset "$asset" '.assets[] | select(.name == $asset) | .browser_download_url' \
- | head -n1)
- if [ -z "$asset_url" ]; then
- echo "::error::No ${asset} asset found on vm-dev release"
+ if ! gh release download vm-runtime \
+ --repo "${GITHUB_REPOSITORY}" \
+ --pattern "${asset}" \
+ --dir runtime-artifacts \
+ --clobber; then
+ echo "::error::No ${asset} asset found on vm-runtime release"
exit 1
fi
- curl -fL -o "runtime-artifacts/${asset}" "$asset_url"
done
ls -lah runtime-artifacts/
@@ -71,7 +68,7 @@ jobs:
- name: Upload runtime artifacts
uses: actions/upload-artifact@v4
with:
- name: driver-vm-kernel-runtime-tarballs
+ name: vm-driver-kernel-runtime-tarballs
path: runtime-artifacts/vm-runtime-*.tar.zst
retention-days: 1
@@ -103,7 +100,7 @@ jobs:
MISE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
OPENSHELL_IMAGE_TAG: ${{ inputs['image-tag'] }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
with:
ref: ${{ inputs['checkout-ref'] }}
fetch-depth: 0
@@ -130,7 +127,7 @@ jobs:
- name: Download kernel runtime tarball
uses: actions/download-artifact@v4
with:
- name: driver-vm-kernel-runtime-tarballs
+ name: vm-driver-kernel-runtime-tarballs
path: runtime-download/
- name: Stage compressed runtime for embedding
@@ -186,6 +183,13 @@ jobs:
OPENSHELL_VM_RUNTIME_COMPRESSED_DIR="${PWD}/target/vm-runtime-compressed" \
mise x -- cargo build --release -p openshell-driver-vm
+ - name: Verify packaged binary
+ run: |
+ set -euo pipefail
+ OUTPUT="$(target/release/openshell-driver-vm --version)"
+ echo "$OUTPUT"
+ grep -q '^openshell-driver-vm ' <<<"$OUTPUT"
+
- name: sccache stats
if: always()
run: mise x -- sccache --show-stats
diff --git a/.github/workflows/driver-vm-macos.yml b/.github/workflows/driver-vm-macos.yml
new file mode 100644
index 000000000..ecaef0615
--- /dev/null
+++ b/.github/workflows/driver-vm-macos.yml
@@ -0,0 +1,186 @@
+name: Driver VM macOS
+
+on:
+ workflow_call:
+ inputs:
+ cargo-version:
+ required: true
+ type: string
+ image-tag:
+ required: true
+ type: string
+ checkout-ref:
+ required: true
+ type: string
+
+permissions:
+ contents: read
+ packages: read
+
+defaults:
+ run:
+ shell: bash
+
+jobs:
+ download-kernel-runtime:
+ name: Download Kernel Runtime
+ runs-on: linux-amd64-cpu8
+ timeout-minutes: 10
+ container:
+ image: ghcr.io/nvidia/openshell/ci:latest
+ credentials:
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ steps:
+ - uses: actions/checkout@v6
+ with:
+ ref: ${{ inputs['checkout-ref'] }}
+
+ - name: Download macOS runtime tarball
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ set -euo pipefail
+ mkdir -p runtime-artifacts
+
+ asset="vm-runtime-darwin-aarch64.tar.zst"
+ echo "Downloading ${asset}..."
+ if ! gh release download vm-runtime \
+ --repo "${GITHUB_REPOSITORY}" \
+ --pattern "${asset}" \
+ --dir runtime-artifacts \
+ --clobber; then
+ echo "::error::No ${asset} asset found on vm-runtime release"
+ exit 1
+ fi
+
+ ls -lah runtime-artifacts/
+
+ - name: Verify download
+ run: test -f runtime-artifacts/vm-runtime-darwin-aarch64.tar.zst
+
+ - name: Upload runtime artifact
+ uses: actions/upload-artifact@v4
+ with:
+ name: vm-driver-macos-kernel-runtime-tarball
+ path: runtime-artifacts/vm-runtime-darwin-aarch64.tar.zst
+ retention-days: 1
+
+ build-driver-vm-macos:
+ name: Build Driver VM (macOS)
+ needs: [download-kernel-runtime]
+ runs-on: linux-amd64-cpu8
+ timeout-minutes: 60
+ container:
+ image: ghcr.io/nvidia/openshell/ci:latest
+ credentials:
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ options: --privileged
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ env:
+ MISE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ SCCACHE_MEMCACHED_ENDPOINT: ${{ vars.SCCACHE_MEMCACHED_ENDPOINT }}
+ steps:
+ - uses: actions/checkout@v6
+ with:
+ ref: ${{ inputs['checkout-ref'] }}
+ fetch-depth: 0
+
+ - name: Mark workspace safe for git
+ run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
+
+ - name: Fetch tags
+ run: git fetch --tags --force
+
+ - name: Log in to GHCR
+ run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u "${{ github.actor }}" --password-stdin
+
+ - name: Set up Docker Buildx
+ uses: ./.github/actions/setup-buildx
+
+ - name: Install zstd
+ run: apt-get update && apt-get install -y --no-install-recommends zstd && rm -rf /var/lib/apt/lists/*
+
+ - name: Download kernel runtime tarball
+ uses: actions/download-artifact@v4
+ with:
+ name: vm-driver-macos-kernel-runtime-tarball
+ path: runtime-download/
+
+ - name: Prepare compressed runtime directory
+ run: |
+ set -euo pipefail
+ COMPRESSED_DIR="${PWD}/target/vm-runtime-compressed-macos"
+ mkdir -p "$COMPRESSED_DIR"
+
+ EXTRACT_DIR=$(mktemp -d)
+ zstd -d "runtime-download/vm-runtime-darwin-aarch64.tar.zst" --stdout \
+ | tar -xf - -C "$EXTRACT_DIR"
+
+ echo "Extracted darwin runtime files:"
+ ls -lah "$EXTRACT_DIR"
+
+ for file in "$EXTRACT_DIR"/*; do
+ [ -f "$file" ] || continue
+ name=$(basename "$file")
+ [ "$name" = "provenance.json" ] && continue
+ zstd -19 -f -q -T0 -o "${COMPRESSED_DIR}/${name}.zst" "$file"
+ done
+
+ echo "Staged macOS compressed runtime artifacts:"
+ ls -lah "$COMPRESSED_DIR"
+
+ - name: Build bundled supervisor
+ run: |
+ set -euo pipefail
+ docker buildx build \
+ --file deploy/docker/Dockerfile.images \
+ --platform linux/arm64 \
+ --build-arg OPENSHELL_CARGO_VERSION="${{ inputs['cargo-version'] }}" \
+ --build-arg OPENSHELL_IMAGE_TAG="${{ inputs['image-tag'] }}" \
+ --target supervisor-output \
+ --output type=local,dest=supervisor-out/ \
+ .
+
+ zstd -19 -T0 -f supervisor-out/openshell-sandbox \
+ -o "${PWD}/target/vm-runtime-compressed-macos/openshell-sandbox.zst"
+
+ - name: Verify embedded driver inputs
+ run: |
+ set -euo pipefail
+ for file in libkrun.dylib.zst libkrunfw.5.dylib.zst gvproxy.zst openshell-sandbox.zst; do
+ test -s "target/vm-runtime-compressed-macos/${file}"
+ done
+
+ - name: Build macOS binary via Docker
+ run: |
+ set -euo pipefail
+ docker buildx build \
+ --file deploy/docker/Dockerfile.driver-vm-macos \
+ --build-arg OPENSHELL_CARGO_VERSION="${{ inputs['cargo-version'] }}" \
+ --build-arg OPENSHELL_IMAGE_TAG="${{ inputs['image-tag'] }}" \
+ --build-arg CARGO_TARGET_CACHE_SCOPE="${{ github.sha }}" \
+ --build-context vm-runtime-compressed="${PWD}/target/vm-runtime-compressed-macos" \
+ --target binary \
+ --output type=local,dest=out/ \
+ .
+
+ - name: Verify packaged binary shape
+ run: test -x out/openshell-driver-vm
+
+ - name: Package binary
+ run: |
+ set -euo pipefail
+ mkdir -p artifacts
+ tar -czf artifacts/openshell-driver-vm-aarch64-apple-darwin.tar.gz \
+ -C out openshell-driver-vm
+ ls -lh artifacts/
+
+ - name: Upload artifact
+ uses: actions/upload-artifact@v4
+ with:
+ name: driver-vm-macos
+ path: artifacts/*.tar.gz
+ retention-days: 5
diff --git a/.github/workflows/release-canary.yml b/.github/workflows/release-canary.yml
index d4a9dcc6f..8afcb6ac8 100644
--- a/.github/workflows/release-canary.yml
+++ b/.github/workflows/release-canary.yml
@@ -67,6 +67,79 @@ jobs:
echo "Version check passed: found $EXPECTED in output"
fi
+ install-dev:
+ name: Install Debian package (${{ matrix.arch }})
+ if: ${{ github.event_name == 'workflow_dispatch' || github.event.workflow_run.conclusion == 'success' }}
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - arch: amd64
+ runner: build-amd64
+ - arch: arm64
+ runner: build-arm64
+ runs-on: ${{ matrix.runner }}
+ timeout-minutes: 10
+ container:
+ image: ghcr.io/nvidia/openshell/ci:latest
+ credentials:
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ steps:
+ - name: Determine release tag
+ id: release
+ run: |
+ set -euo pipefail
+ if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
+ echo "tag=${{ inputs.tag }}" >> "$GITHUB_OUTPUT"
+ else
+ WORKFLOW_NAME="${{ github.event.workflow_run.name }}"
+ if [ "$WORKFLOW_NAME" = "Release Dev" ]; then
+ echo "tag=dev" >> "$GITHUB_OUTPUT"
+ elif [ "$WORKFLOW_NAME" = "Release Tag" ]; then
+ TAG="${{ github.event.workflow_run.head_branch }}"
+ if [ -z "$TAG" ]; then
+ echo "::error::Could not determine release tag from workflow_run"
+ exit 1
+ fi
+ echo "tag=${TAG}" >> "$GITHUB_OUTPUT"
+ else
+ echo "::error::Unexpected triggering workflow: ${WORKFLOW_NAME}"
+ exit 1
+ fi
+ fi
+
+ - name: Install Debian package
+ run: |
+ set -euo pipefail
+ curl -LsSf https://raw.githubusercontent.com/NVIDIA/OpenShell/main/install-dev.sh \
+ | OPENSHELL_VERSION=${{ steps.release.outputs.tag }} sh
+
+ - name: Verify gateway and VM driver versions
+ run: |
+ set -euo pipefail
+ command -v openshell-gateway
+ test -x /usr/libexec/openshell/openshell-driver-vm
+
+ GATEWAY_ACTUAL="$(openshell-gateway --version)"
+ DRIVER_ACTUAL="$(/usr/libexec/openshell/openshell-driver-vm --version)"
+ echo "Gateway: ${GATEWAY_ACTUAL}"
+ echo "Driver: ${DRIVER_ACTUAL}"
+
+ TAG="${{ steps.release.outputs.tag }}"
+ if [[ "$TAG" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
+ EXPECTED="${TAG#v}"
+ for actual in "$GATEWAY_ACTUAL" "$DRIVER_ACTUAL"; do
+ if [[ "$actual" != *"$EXPECTED"* ]]; then
+ echo "::error::Version mismatch: expected '$EXPECTED' in '$actual'"
+ exit 1
+ fi
+ done
+ echo "Version check passed: found $EXPECTED in both binaries"
+ else
+ echo "Non-release tag ($TAG), skipping version check"
+ fi
+
canary:
name: Canary ${{ matrix.mode }} (${{ matrix.arch }})
if: ${{ github.event_name == 'workflow_dispatch' || github.event.workflow_run.conclusion == 'success' }}
diff --git a/.github/workflows/release-dev.yml b/.github/workflows/release-dev.yml
index 0165b132c..a359dde59 100644
--- a/.github/workflows/release-dev.yml
+++ b/.github/workflows/release-dev.yml
@@ -626,6 +626,16 @@ jobs:
checkout-ref: ${{ github.sha }}
secrets: inherit
+ build-driver-vm-macos:
+ name: Build Driver VM macOS
+ needs: [compute-versions]
+ uses: ./.github/workflows/driver-vm-macos.yml
+ with:
+ cargo-version: ${{ needs.compute-versions.outputs.cargo_version }}
+ image-tag: dev
+ checkout-ref: ${{ github.sha }}
+ secrets: inherit
+
build-deb:
name: Build Debian Packages
needs: [compute-versions, build-cli-linux, build-gateway-binary-linux, build-driver-vm-linux]
@@ -644,13 +654,17 @@
secrets: inherit
# ---------------------------------------------------------------------------
- # Create / update the dev GitHub Release with CLI binaries and wheels
+ # Create / update the dev GitHub Release with CLI, gateway, driver, and wheels
# ---------------------------------------------------------------------------
release-dev:
name: Release Dev
- needs: [compute-versions, build-cli-linux, build-cli-macos, build-gateway-binary-linux, build-gateway-binary-macos, build-supervisor-binary-linux, build-python-wheels-linux, build-python-wheel-macos, build-deb, build-rpm]
+ needs: [compute-versions, build-cli-linux, build-cli-macos, build-gateway-binary-linux, build-gateway-binary-macos, build-supervisor-binary-linux, build-python-wheels-linux, build-python-wheel-macos, build-driver-vm-linux, build-driver-vm-macos, build-deb, build-rpm]
runs-on: linux-amd64-cpu8
timeout-minutes: 10
+ permissions:
+ contents: write
+ id-token: write
+ attestations: write
outputs:
wheel_filenames: ${{ steps.wheel_filenames.outputs.wheel_filenames }}
steps:
@@ -677,6 +692,13 @@ jobs:
path: release/
merge-multiple: true
+ - name: Download VM driver artifacts
+ uses: actions/download-artifact@v4
+ with:
+ pattern: driver-vm-*
+ path: release/
+ merge-multiple: true
+
- name: Download wheel artifacts
uses: actions/download-artifact@v4
with:
@@ -728,6 +750,14 @@ jobs:
openshell-sandbox-aarch64-unknown-linux-gnu.tar.gz > openshell-sandbox-checksums-sha256.txt
cat openshell-sandbox-checksums-sha256.txt
+ - name: Attest VM driver artifacts
+ uses: actions/attest@v4
+ with:
+ subject-path: |
+ release/openshell-driver-vm-x86_64-unknown-linux-gnu.tar.gz
+ release/openshell-driver-vm-aarch64-unknown-linux-gnu.tar.gz
+ release/openshell-driver-vm-aarch64-apple-darwin.tar.gz
+
- name: Prune stale wheel, deb, and rpm assets from dev release
uses: actions/github-script@v7
env:
@@ -760,8 +790,8 @@ jobs:
core.info(` ${String(a.id).padStart(12)} ${a.name}`);
}
- // Delete stale wheels, debs, and rpms
- let kept = 0, deleted = 0, debDeleted = 0, rpmDeleted = 0;
+ // Delete stale wheels, debs, rpms, and removed VM checksum assets.
+ let kept = 0, deleted = 0, debDeleted = 0, rpmDeleted = 0, removedVmChecksums = 0;
for (const asset of assets) {
if (asset.name.endsWith('.deb')) {
core.info(`Deleting stale deb package: ${asset.name} (id=${asset.id})`);
@@ -771,6 +801,10 @@ jobs:
core.info(`Deleting stale rpm package: ${asset.name} (id=${asset.id})`);
await github.rest.repos.deleteReleaseAsset({ owner, repo, asset_id: asset.id });
rpmDeleted++;
+ } else if (asset.name === 'openshell-driver-vm-checksums-sha256.txt') {
+ core.info(`Deleting removed VM checksum asset: ${asset.name} (id=${asset.id})`);
+ await github.rest.repos.deleteReleaseAsset({ owner, repo, asset_id: asset.id });
+ removedVmChecksums++;
} else if (asset.name.endsWith('.whl') && asset.name.startsWith(currentPrefix)) {
core.info(`Keeping current wheel: ${asset.name}`);
kept++;
@@ -780,7 +814,7 @@ jobs:
deleted++;
}
}
- core.info(`Summary: kept_wheels=${kept}, deleted_wheels=${deleted}, deleted_debs=${debDeleted}, deleted_rpms=${rpmDeleted}`);
+ core.info(`Summary: kept_wheels=${kept}, deleted_wheels=${deleted}, deleted_debs=${debDeleted}, deleted_rpms=${rpmDeleted}, deleted_removed_vm_checksums=${removedVmChecksums}`);
- name: Move dev tag
run: |
@@ -818,6 +852,9 @@ jobs:
release/openshell-gateway-aarch64-apple-darwin.tar.gz
release/openshell-sandbox-x86_64-unknown-linux-gnu.tar.gz
release/openshell-sandbox-aarch64-unknown-linux-gnu.tar.gz
+ release/openshell-driver-vm-x86_64-unknown-linux-gnu.tar.gz
+ release/openshell-driver-vm-aarch64-unknown-linux-gnu.tar.gz
+ release/openshell-driver-vm-aarch64-apple-darwin.tar.gz
release/*.whl
release/openshell-checksums-sha256.txt
release/openshell-gateway-checksums-sha256.txt
diff --git a/.github/workflows/release-tag.yml b/.github/workflows/release-tag.yml
index 639188571..b8abb5a89 100644
--- a/.github/workflows/release-tag.yml
+++ b/.github/workflows/release-tag.yml
@@ -653,6 +653,16 @@ jobs:
checkout-ref: ${{ inputs.tag || github.ref }}
secrets: inherit
+ build-driver-vm-macos:
+ name: Build Driver VM macOS
+ needs: [compute-versions]
+ uses: ./.github/workflows/driver-vm-macos.yml
+ with:
+ cargo-version: ${{ needs.compute-versions.outputs.cargo_version }}
+ image-tag: ${{ needs.compute-versions.outputs.semver }}
+ checkout-ref: ${{ inputs.tag || github.ref }}
+ secrets: inherit
+
build-deb:
name: Build Debian Packages
needs: [compute-versions, build-cli-linux, build-gateway-binary-linux, build-driver-vm-linux]
@@ -671,13 +681,17 @@
secrets: inherit
# ---------------------------------------------------------------------------
- # Create a tagged GitHub Release with CLI binaries and wheels
+ # Create a tagged GitHub Release with CLI, gateway, driver, and wheels
# ---------------------------------------------------------------------------
release:
name: Release
- needs: [compute-versions, build-cli-linux, build-cli-macos, build-gateway-binary-linux, build-gateway-binary-macos, build-supervisor-binary-linux, build-python-wheels-linux, build-python-wheel-macos, tag-ghcr-release, build-deb, build-rpm]
+ needs: [compute-versions, build-cli-linux, build-cli-macos, build-gateway-binary-linux, build-gateway-binary-macos, build-supervisor-binary-linux, build-python-wheels-linux, build-python-wheel-macos, tag-ghcr-release, build-driver-vm-linux, build-driver-vm-macos, build-deb, build-rpm]
runs-on: linux-amd64-cpu8
timeout-minutes: 10
+ permissions:
+ contents: write
+ id-token: write
+ attestations: write
outputs:
wheel_filenames: ${{ steps.wheel_filenames.outputs.wheel_filenames }}
steps:
@@ -706,6 +721,13 @@ jobs:
path: release/
merge-multiple: true
+ - name: Download VM driver artifacts
+ uses: actions/download-artifact@v4
+ with:
+ pattern: driver-vm-*
+ path: release/
+ merge-multiple: true
+
- name: Download wheel artifacts
uses: actions/download-artifact@v4
with:
@@ -757,6 +779,38 @@
openshell-sandbox-aarch64-unknown-linux-gnu.tar.gz > openshell-sandbox-checksums-sha256.txt
cat openshell-sandbox-checksums-sha256.txt
+ - name: Attest VM driver artifacts
+ uses: actions/attest@v4
+ with:
+ subject-path: |
+ release/openshell-driver-vm-x86_64-unknown-linux-gnu.tar.gz
+ release/openshell-driver-vm-aarch64-unknown-linux-gnu.tar.gz
+ release/openshell-driver-vm-aarch64-apple-darwin.tar.gz
+
+ - name: Prune removed VM checksum asset
+ uses: actions/github-script@v7
+ env:
+ RELEASE_TAG: ${{ inputs.tag || github.ref_name }}
+ with:
+ script: |
+ const [owner, repo] = process.env.GITHUB_REPOSITORY.split('/');
+ let release;
+ try {
+ release = await github.rest.repos.getReleaseByTag({ owner, repo, tag: process.env.RELEASE_TAG });
+ } catch (err) {
+ if (err.status === 404) {
+ core.info(`No existing ${process.env.RELEASE_TAG} release; skipping VM checksum pruning.`);
+ return;
+ }
+ throw err;
+ }
+ for (const asset of release.data.assets) {
+ if (asset.name === 'openshell-driver-vm-checksums-sha256.txt') {
+ core.info(`Deleting removed VM checksum asset: ${asset.name}`);
+ await github.rest.repos.deleteReleaseAsset({ owner, repo, asset_id: asset.id });
+ }
+ }
+
- name: Create GitHub Release
uses: softprops/action-gh-release@v2
with:
@@ -784,6 +836,9 @@ jobs:
release/openshell-gateway-aarch64-apple-darwin.tar.gz
release/openshell-sandbox-x86_64-unknown-linux-gnu.tar.gz
release/openshell-sandbox-aarch64-unknown-linux-gnu.tar.gz
+ release/openshell-driver-vm-x86_64-unknown-linux-gnu.tar.gz
+ release/openshell-driver-vm-aarch64-unknown-linux-gnu.tar.gz
+ release/openshell-driver-vm-aarch64-apple-darwin.tar.gz
release/*.whl
release/openshell-checksums-sha256.txt
release/openshell-gateway-checksums-sha256.txt
diff --git a/.github/workflows/release-vm-dev.yml b/.github/workflows/release-vm-dev.yml
deleted file mode 100644
index 0676be051..000000000
--- a/.github/workflows/release-vm-dev.yml
+++ /dev/null
@@ -1,807 +0,0 @@
-name: Release VM Dev
-
-# Build openshell-vm binaries for all supported platforms and upload them to
-# the rolling "vm-dev" GitHub Release. Each binary is self-extracting: it
-# embeds pre-built kernel runtime artifacts (from release-vm-kernel.yml) and a
-# base rootfs tarball.
-#
-# Prerequisites: the vm-dev release must already contain kernel runtime
-# tarballs. Run the "Release VM Kernel" workflow first if they are missing.
-
-on:
- push:
- branches: [main]
- workflow_dispatch:
-
-permissions:
- contents: write
- packages: read
-
-# Serialize with release-vm-kernel.yml — both update the vm-dev release.
-concurrency:
- group: vm-dev-release
- cancel-in-progress: false
-
-defaults:
- run:
- shell: bash
-
-jobs:
- # ---------------------------------------------------------------------------
- # Compute versions (reuse the same logic as release-dev.yml)
- # ---------------------------------------------------------------------------
- compute-versions:
- name: Compute Versions
- runs-on: build-amd64
- timeout-minutes: 5
- container:
- image: ghcr.io/nvidia/openshell/ci:latest
- credentials:
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
- outputs:
- cargo_version: ${{ steps.v.outputs.cargo }}
- steps:
- - uses: actions/checkout@v6
- with:
- fetch-depth: 0
-
- - name: Mark workspace safe for git
- run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
-
- - name: Fetch tags
- run: git fetch --tags --force
-
- - name: Compute versions
- id: v
- run: |
- set -euo pipefail
- echo "cargo=$(uv run python tasks/scripts/release.py get-version --cargo)" >> "$GITHUB_OUTPUT"
-
- # ---------------------------------------------------------------------------
- # Download kernel runtime tarballs from the vm-dev release
- # ---------------------------------------------------------------------------
- download-kernel-runtime:
- name: Download Kernel Runtime
- runs-on: build-amd64
- timeout-minutes: 10
- container:
- image: ghcr.io/nvidia/openshell/ci:latest
- credentials:
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
- steps:
- - uses: actions/checkout@v6
-
- - name: Download all runtime tarballs
- env:
- GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- set -euo pipefail
- mkdir -p runtime-artifacts
-
- for platform in linux-aarch64 linux-x86_64 darwin-aarch64; do
- echo "Downloading vm-runtime-${platform}.tar.zst..."
- gh release download vm-dev \
- --repo "${GITHUB_REPOSITORY}" \
- --pattern "vm-runtime-${platform}.tar.zst" \
- --dir runtime-artifacts \
- --clobber
- done
-
- echo "Downloaded runtime artifacts:"
- ls -lah runtime-artifacts/
-
- - name: Verify downloads
- run: |
- set -euo pipefail
- for platform in linux-aarch64 linux-x86_64 darwin-aarch64; do
- file="runtime-artifacts/vm-runtime-${platform}.tar.zst"
- if [ ! -f "$file" ]; then
- echo "ERROR: Missing ${file}" >&2
- echo "" >&2
- echo "The vm-dev release does not have kernel runtime artifacts." >&2
- echo "Run the 'Release VM Kernel' workflow first:" >&2
- echo " gh workflow run release-vm-kernel.yml" >&2
- exit 1
- fi
- echo "OK: ${file} ($(du -sh "$file" | cut -f1))"
- done
-
- - name: Upload as workflow artifact
- uses: actions/upload-artifact@v4
- with:
- name: kernel-runtime-tarballs
- path: runtime-artifacts/vm-runtime-*.tar.zst
- retention-days: 1
-
- # ---------------------------------------------------------------------------
- # Build base rootfs tarballs (architecture-specific)
- # ---------------------------------------------------------------------------
- build-rootfs:
- name: Build Rootfs (${{ matrix.arch }})
- needs: [compute-versions]
- strategy:
- matrix:
- include:
- - arch: arm64
- runner: build-arm64
- guest_arch: aarch64
- - arch: amd64
- runner: build-amd64
- guest_arch: x86_64
- runs-on: ${{ matrix.runner }}
- timeout-minutes: 30
- container:
- image: ghcr.io/nvidia/openshell/ci:latest
- credentials:
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
- options: --privileged
- volumes:
- - /var/run/docker.sock:/var/run/docker.sock
- env:
- MISE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- OPENSHELL_IMAGE_TAG: dev
- steps:
- - uses: actions/checkout@v6
-
- - name: Mark workspace safe for git
- run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
-
- - name: Log in to GHCR
- run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u "${{ github.actor }}" --password-stdin
-
- - name: Install tools
- run: mise install --locked
-
- - name: Install zstd
- run: apt-get update && apt-get install -y --no-install-recommends zstd && rm -rf /var/lib/apt/lists/*
-
- - name: Build base rootfs tarball
- run: |
- set -euo pipefail
- crates/openshell-vm/scripts/build-rootfs.sh \
- --base \
- --arch ${{ matrix.guest_arch }} \
- target/rootfs-build
-
- mkdir -p target/vm-runtime-compressed
- tar -C target/rootfs-build -cf - . \
- | zstd -19 -T0 -o target/vm-runtime-compressed/rootfs.tar.zst
-
- echo "Rootfs tarball: $(du -sh target/vm-runtime-compressed/rootfs.tar.zst | cut -f1)"
-
- - name: Upload rootfs artifact
- uses: actions/upload-artifact@v4
- with:
- name: rootfs-${{ matrix.arch }}
- path: target/vm-runtime-compressed/rootfs.tar.zst
- retention-days: 1
-
- # ---------------------------------------------------------------------------
- # Build openshell-vm binary (Linux — native on each arch)
- # ---------------------------------------------------------------------------
- build-vm-linux:
- name: Build VM (Linux ${{ matrix.arch }})
- needs: [compute-versions, download-kernel-runtime, build-rootfs]
- strategy:
- matrix:
- include:
- - arch: arm64
- runner: build-arm64
- target: aarch64-unknown-linux-gnu
- platform: linux-aarch64
- guest_arch: aarch64
- - arch: amd64
- runner: build-amd64
- target: x86_64-unknown-linux-gnu
- platform: linux-x86_64
- guest_arch: x86_64
- runs-on: ${{ matrix.runner }}
- timeout-minutes: 30
- container:
- image: ghcr.io/nvidia/openshell/ci:latest
- credentials:
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
- options: --privileged
- env:
- MISE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- SCCACHE_MEMCACHED_ENDPOINT: ${{ vars.SCCACHE_MEMCACHED_ENDPOINT }}
- OPENSHELL_IMAGE_TAG: dev
- steps:
- - uses: actions/checkout@v6
- with:
- fetch-depth: 0
-
- - name: Mark workspace safe for git
- run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
-
- - name: Fetch tags
- run: git fetch --tags --force
-
- - name: Install tools
- run: mise install --locked
-
- - name: Cache Rust target and registry
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2
- with:
- shared-key: vm-linux-${{ matrix.arch }}
- cache-directories: .cache/sccache
- cache-targets: "true"
-
- - name: Install zstd
- run: apt-get update && apt-get install -y --no-install-recommends zstd && rm -rf /var/lib/apt/lists/*
-
- - name: Download kernel runtime tarball
- uses: actions/download-artifact@v4
- with:
- name: kernel-runtime-tarballs
- path: runtime-download/
-
- - name: Download rootfs tarball
- uses: actions/download-artifact@v4
- with:
- name: rootfs-${{ matrix.arch }}
- path: rootfs-download/
-
- - name: Stage compressed runtime for embedding
- run: |
- set -euo pipefail
- COMPRESSED_DIR="${PWD}/target/vm-runtime-compressed"
- mkdir -p "$COMPRESSED_DIR"
-
- # Extract kernel runtime tarball and re-compress individual files
- EXTRACT_DIR=$(mktemp -d)
- zstd -d "runtime-download/vm-runtime-${{ matrix.platform }}.tar.zst" --stdout \
- | tar -xf - -C "$EXTRACT_DIR"
-
- echo "Extracted runtime files:"
- ls -lah "$EXTRACT_DIR"
-
- for file in "$EXTRACT_DIR"/*; do
- [ -f "$file" ] || continue
- name=$(basename "$file")
- [ "$name" = "provenance.json" ] && continue
- zstd -19 -f -q -T0 -o "${COMPRESSED_DIR}/${name}.zst" "$file"
- done
-
- # Copy rootfs tarball (already zstd-compressed)
- cp rootfs-download/rootfs.tar.zst "${COMPRESSED_DIR}/rootfs.tar.zst"
-
- echo "Staged compressed artifacts:"
- ls -lah "$COMPRESSED_DIR"
-
- - name: Scope workspace to VM crates
- run: |
- set -euo pipefail
- sed -i 's|members = \["crates/\*"\]|members = ["crates/openshell-vm", "crates/openshell-core", "crates/openshell-bootstrap", "crates/openshell-policy"]|' Cargo.toml
-
- - name: Patch workspace version
- if: needs.compute-versions.outputs.cargo_version != ''
- run: |
- set -euo pipefail
- sed -i -E '/^\[workspace\.package\]/,/^\[/{s/^version[[:space:]]*=[[:space:]]*".*"/version = "'"${{ needs.compute-versions.outputs.cargo_version }}"'"/}' Cargo.toml
-
- - name: Build openshell-vm
- run: |
- set -euo pipefail
- OPENSHELL_VM_RUNTIME_COMPRESSED_DIR="${PWD}/target/vm-runtime-compressed" \
- mise x -- cargo build --release -p openshell-vm
-
- - name: sccache stats
- if: always()
- run: mise x -- sccache --show-stats
-
- - name: Package binary
- run: |
- set -euo pipefail
- mkdir -p artifacts
- tar -czf "artifacts/openshell-vm-${{ matrix.target }}.tar.gz" \
- -C target/release openshell-vm
- ls -lh artifacts/
-
- - name: Upload artifact
- uses: actions/upload-artifact@v4
- with:
- name: vm-linux-${{ matrix.arch }}
- path: artifacts/*.tar.gz
- retention-days: 5
-
- # ---------------------------------------------------------------------------
- # Build openshell-vm binary (macOS ARM64 via osxcross)
- # ---------------------------------------------------------------------------
- build-vm-macos:
- name: Build VM (macOS)
- needs: [compute-versions, download-kernel-runtime, build-rootfs]
- runs-on: build-amd64
- timeout-minutes: 60
- container:
- image: ghcr.io/nvidia/openshell/ci:latest
- credentials:
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
- options: --privileged
- volumes:
- - /var/run/docker.sock:/var/run/docker.sock
- env:
- MISE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- SCCACHE_MEMCACHED_ENDPOINT: ${{ vars.SCCACHE_MEMCACHED_ENDPOINT }}
- steps:
- - uses: actions/checkout@v6
- with:
- fetch-depth: 0
-
- - name: Mark workspace safe for git
- run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
-
- - name: Fetch tags
- run: git fetch --tags --force
-
- - name: Log in to GHCR
- run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u "${{ github.actor }}" --password-stdin
-
- - name: Set up Docker Buildx
- uses: ./.github/actions/setup-buildx
-
- - name: Install zstd
- run: apt-get update && apt-get install -y --no-install-recommends zstd && rm -rf /var/lib/apt/lists/*
-
- - name: Download kernel runtime tarball
- uses: actions/download-artifact@v4
- with:
- name: kernel-runtime-tarballs
- path: runtime-download/
-
- - name: Download rootfs tarball (arm64)
- uses: actions/download-artifact@v4
- with:
- name: rootfs-arm64
- path: rootfs-download/
-
- - name: Prepare compressed runtime directory
- run: |
- set -euo pipefail
- COMPRESSED_DIR="${PWD}/target/vm-runtime-compressed-macos"
- mkdir -p "$COMPRESSED_DIR"
-
- # Extract the darwin runtime tarball and re-compress for embedding.
- # The macOS embedded.rs expects: libkrun.dylib.zst, libkrunfw.5.dylib.zst, gvproxy.zst
- EXTRACT_DIR=$(mktemp -d)
- zstd -d "runtime-download/vm-runtime-darwin-aarch64.tar.zst" --stdout \
- | tar -xf - -C "$EXTRACT_DIR"
-
- echo "Extracted darwin runtime files:"
- ls -lah "$EXTRACT_DIR"
-
- for file in "$EXTRACT_DIR"/*; do
- [ -f "$file" ] || continue
- name=$(basename "$file")
- [ "$name" = "provenance.json" ] && continue
- zstd -19 -f -q -T0 -o "${COMPRESSED_DIR}/${name}.zst" "$file"
- done
-
- # The macOS VM guest is always Linux ARM64, so use the arm64 rootfs
- cp rootfs-download/rootfs.tar.zst "${COMPRESSED_DIR}/rootfs.tar.zst"
-
- echo "Staged macOS compressed artifacts:"
- ls -lah "$COMPRESSED_DIR"
-
- - name: Build macOS binary via Docker (osxcross)
- run: |
- set -euo pipefail
- docker buildx build \
- --file deploy/docker/Dockerfile.vm-macos \
- --build-arg OPENSHELL_CARGO_VERSION="${{ needs.compute-versions.outputs.cargo_version }}" \
- --build-arg OPENSHELL_IMAGE_TAG=dev \
- --build-arg CARGO_TARGET_CACHE_SCOPE="${{ github.sha }}" \
- --build-context vm-runtime-compressed="${PWD}/target/vm-runtime-compressed-macos" \
- --target binary \
- --output type=local,dest=out/ \
- .
-
- - name: Package binary
- run: |
- set -euo pipefail
- mkdir -p artifacts
- tar -czf artifacts/openshell-vm-aarch64-apple-darwin.tar.gz \
- -C out openshell-vm
- ls -lh artifacts/
-
- - name: Upload artifact
- uses: actions/upload-artifact@v4
- with:
- name: vm-macos
- path: artifacts/*.tar.gz
- retention-days: 5
-
- # ---------------------------------------------------------------------------
- # Build openshell-driver-vm binary (Linux — native on each arch)
- # ---------------------------------------------------------------------------
- build-driver-vm-linux:
- name: Build Driver VM (Linux ${{ matrix.arch }})
- needs: [compute-versions, download-kernel-runtime]
- strategy:
- matrix:
- include:
- - arch: arm64
- runner: build-arm64
- target: aarch64-unknown-linux-gnu
- platform: linux-aarch64
- guest_arch: aarch64
- - arch: amd64
- runner: build-amd64
- target: x86_64-unknown-linux-gnu
- platform: linux-x86_64
- guest_arch: x86_64
- runs-on: ${{ matrix.runner }}
- timeout-minutes: 30
- container:
- image: ghcr.io/nvidia/openshell/ci:latest
- credentials:
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
- options: --privileged
- env:
- MISE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- SCCACHE_MEMCACHED_ENDPOINT: ${{ vars.SCCACHE_MEMCACHED_ENDPOINT }}
- OPENSHELL_IMAGE_TAG: dev
- steps:
- - uses: actions/checkout@v6
- with:
- fetch-depth: 0
-
- - name: Mark workspace safe for git
- run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
-
- - name: Fetch tags
- run: git fetch --tags --force
-
- - name: Install tools
- run: mise install --locked
-
- - name: Cache Rust target and registry
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2
- with:
- shared-key: driver-vm-linux-${{ matrix.arch }}
- cache-directories: .cache/sccache
- cache-targets: "true"
-
- - name: Install zstd
- run: apt-get update && apt-get install -y --no-install-recommends zstd && rm -rf /var/lib/apt/lists/*
-
- - name: Download kernel runtime tarball
- uses: actions/download-artifact@v4
- with:
- name: kernel-runtime-tarballs
- path: runtime-download/
-
- - name: Stage compressed runtime for embedding
- run: |
- set -euo pipefail
- COMPRESSED_DIR="${PWD}/target/vm-runtime-compressed"
- mkdir -p "$COMPRESSED_DIR"
-
- # Extract kernel runtime tarball and re-compress individual files
- EXTRACT_DIR=$(mktemp -d)
- zstd -d "runtime-download/vm-runtime-${{ matrix.platform }}.tar.zst" --stdout \
- | tar -xf - -C "$EXTRACT_DIR"
-
- echo "Extracted runtime files:"
- ls -lah "$EXTRACT_DIR"
-
- for file in "$EXTRACT_DIR"/*; do
- [ -f "$file" ] || continue
- name=$(basename "$file")
- [ "$name" = "provenance.json" ] && continue
- zstd -19 -f -q -T0 -o "${COMPRESSED_DIR}/${name}.zst" "$file"
- done
-
- echo "Staged compressed artifacts:"
- ls -lah "$COMPRESSED_DIR"
-
- - name: Build bundled supervisor
- run: |
- set -euo pipefail
- OPENSHELL_VM_RUNTIME_COMPRESSED_DIR="${PWD}/target/vm-runtime-compressed" \
- tasks/scripts/vm/build-supervisor-bundle.sh --arch "${{ matrix.guest_arch }}"
-
- - name: Scope workspace to driver-vm crates
- run: |
- set -euo pipefail
- sed -i 's|members = \["crates/\*"\]|members = ["crates/openshell-driver-vm", "crates/openshell-core"]|' Cargo.toml
-
- - name: Patch workspace version
- if: needs.compute-versions.outputs.cargo_version != ''
- run: |
- set -euo pipefail
- sed -i -E '/^\[workspace\.package\]/,/^\[/{s/^version[[:space:]]*=[[:space:]]*".*"/version = "'"${{ needs.compute-versions.outputs.cargo_version }}"'"/}' Cargo.toml
-
- - name: Build openshell-driver-vm
- run: |
- set -euo pipefail
- OPENSHELL_VM_RUNTIME_COMPRESSED_DIR="${PWD}/target/vm-runtime-compressed" \
- mise x -- cargo build --release -p openshell-driver-vm
-
- - name: sccache stats
- if: always()
- run: mise x -- sccache --show-stats
-
- - name: Package binary
- run: |
- set -euo pipefail
- mkdir -p artifacts
- tar -czf "artifacts/openshell-driver-vm-${{ matrix.target }}.tar.gz" \
- -C target/release openshell-driver-vm
- ls -lh artifacts/
-
- - name: Upload artifact
- uses: actions/upload-artifact@v4
- with:
- name: driver-vm-linux-${{ matrix.arch }}
- path: artifacts/*.tar.gz
- retention-days: 5
-
- # ---------------------------------------------------------------------------
- # Build openshell-driver-vm binary (macOS ARM64 via osxcross)
- # ---------------------------------------------------------------------------
- build-driver-vm-macos:
- name: Build Driver VM (macOS)
- needs: [compute-versions, download-kernel-runtime]
- runs-on: build-amd64
- timeout-minutes: 60
- container:
- image: ghcr.io/nvidia/openshell/ci:latest
- credentials:
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
- options: --privileged
- volumes:
- - /var/run/docker.sock:/var/run/docker.sock
- env:
- MISE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- SCCACHE_MEMCACHED_ENDPOINT: ${{ vars.SCCACHE_MEMCACHED_ENDPOINT }}
- steps:
- - uses: actions/checkout@v6
- with:
- fetch-depth: 0
-
- - name: Mark workspace safe for git
- run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
-
- - name: Fetch tags
- run: git fetch --tags --force
-
- - name: Log in to GHCR
- run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u "${{ github.actor }}" --password-stdin
-
- - name: Set up Docker Buildx
- uses: ./.github/actions/setup-buildx
-
- - name: Install zstd
- run: apt-get update && apt-get install -y --no-install-recommends zstd && rm -rf /var/lib/apt/lists/*
-
- - name: Download kernel runtime tarball
- uses: actions/download-artifact@v4
- with:
- name: kernel-runtime-tarballs
- path: runtime-download/
-
- - name: Prepare compressed runtime directory
- run: |
- set -euo pipefail
- COMPRESSED_DIR="${PWD}/target/vm-runtime-compressed-macos"
- mkdir -p "$COMPRESSED_DIR"
-
- # Extract the darwin runtime tarball and re-compress for embedding.
- # The macOS embedded.rs expects: libkrun.dylib.zst, libkrunfw.5.dylib.zst, gvproxy.zst
- EXTRACT_DIR=$(mktemp -d)
- zstd -d "runtime-download/vm-runtime-darwin-aarch64.tar.zst" --stdout \
- | tar -xf - -C "$EXTRACT_DIR"
-
- echo "Extracted darwin runtime files:"
- ls -lah "$EXTRACT_DIR"
-
- for file in "$EXTRACT_DIR"/*; do
- [ -f "$file" ] || continue
- name=$(basename "$file")
- [ "$name" = "provenance.json" ] && continue
- zstd -19 -f -q -T0 -o "${COMPRESSED_DIR}/${name}.zst" "$file"
- done
-
- echo "Staged macOS compressed artifacts:"
- ls -lah "$COMPRESSED_DIR"
-
- - name: Build bundled supervisor
- run: |
- set -euo pipefail
- docker buildx build \
- --file deploy/docker/Dockerfile.images \
- --platform linux/arm64 \
- --build-arg OPENSHELL_CARGO_VERSION="${{ needs.compute-versions.outputs.cargo_version }}" \
- --build-arg OPENSHELL_IMAGE_TAG=dev \
- --target supervisor-output \
- --output type=local,dest=supervisor-out/ \
- .
-
- zstd -19 -T0 -f supervisor-out/openshell-sandbox \
- -o "${PWD}/target/vm-runtime-compressed-macos/openshell-sandbox.zst"
-
- - name: Build macOS binary via Docker (osxcross)
- run: |
- set -euo pipefail
- docker buildx build \
- --file deploy/docker/Dockerfile.driver-vm-macos \
- --build-arg OPENSHELL_CARGO_VERSION="${{ needs.compute-versions.outputs.cargo_version }}" \
- --build-arg OPENSHELL_IMAGE_TAG=dev \
- --build-arg CARGO_TARGET_CACHE_SCOPE="${{ github.sha }}" \
- --build-context vm-runtime-compressed="${PWD}/target/vm-runtime-compressed-macos" \
- --target binary \
- --output type=local,dest=out/ \
- .
-
- - name: Package binary
- run: |
- set -euo pipefail
- mkdir -p artifacts
- tar -czf artifacts/openshell-driver-vm-aarch64-apple-darwin.tar.gz \
- -C out openshell-driver-vm
- ls -lh artifacts/
-
- - name: Upload artifact
- uses: actions/upload-artifact@v4
- with:
- name: driver-vm-macos
- path: artifacts/*.tar.gz
- retention-days: 5
-
- # ---------------------------------------------------------------------------
- # Upload all VM binaries to the vm-dev rolling release
- # ---------------------------------------------------------------------------
- release-vm-dev:
- name: Release VM Dev
- needs:
- - build-vm-linux
- - build-vm-macos
- - build-driver-vm-linux
- - build-driver-vm-macos
- runs-on: build-amd64
- timeout-minutes: 10
- steps:
- - uses: actions/checkout@v6
-
- - name: Download all VM binary artifacts
- uses: actions/download-artifact@v4
- with:
- pattern: "{vm-*,driver-vm-*}"
- path: release/
- merge-multiple: true
-
- - name: Filter to only binary tarballs
- run: |
- set -euo pipefail
- mkdir -p release-final
- # Include both openshell-vm and openshell-driver-vm binary tarballs.
- # Exclude kernel runtime tarballs (they come from release-vm-kernel.yml).
- for pattern in 'openshell-vm-*.tar.gz' 'openshell-driver-vm-*.tar.gz'; do
- for file in release/${pattern}; do
- [ -f "$file" ] || continue
- cp "$file" release-final/
- done
- done
- vm_count=$(ls release-final/openshell-vm-*.tar.gz 2>/dev/null | wc -l)
- driver_count=$(ls release-final/openshell-driver-vm-*.tar.gz 2>/dev/null | wc -l)
- if [ "$vm_count" -eq 0 ] || [ "$driver_count" -eq 0 ]; then
- echo "ERROR: Missing binary tarballs (openshell-vm=${vm_count}, openshell-driver-vm=${driver_count})" >&2
- ls -la release/ || true
- exit 1
- fi
- echo "Release artifacts (openshell-vm=${vm_count}, openshell-driver-vm=${driver_count}):"
- ls -lh release-final/
-
- - name: Generate checksums
- run: |
- set -euo pipefail
- cd release-final
- sha256sum openshell-vm-*.tar.gz openshell-driver-vm-*.tar.gz \
- > vm-binary-checksums-sha256.txt
- cat vm-binary-checksums-sha256.txt
-
- - name: Ensure vm-dev tag exists
- run: |
- git config user.name "github-actions[bot]"
- git config user.email "github-actions[bot]@users.noreply.github.com"
- git tag -fa vm-dev -m "VM Development Build" "${GITHUB_SHA}"
- git push --force origin vm-dev
-
- - name: Prune stale VM binary assets from vm-dev release
- uses: actions/github-script@v7
- with:
- script: |
- const [owner, repo] = process.env.GITHUB_REPOSITORY.split('/');
- let release;
- try {
- release = await github.rest.repos.getReleaseByTag({ owner, repo, tag: 'vm-dev' });
- } catch (err) {
- if (err.status === 404) {
- core.info('No existing vm-dev release; will create fresh.');
- return;
- }
- throw err;
- }
- // Delete old VM binary assets (keep kernel runtime assets)
- for (const asset of release.data.assets) {
- if (
- asset.name.startsWith('openshell-vm-') ||
- asset.name.startsWith('openshell-driver-vm-') ||
- asset.name === 'vm-binary-checksums-sha256.txt'
- ) {
- core.info(`Deleting stale asset: ${asset.name}`);
- await github.rest.repos.deleteReleaseAsset({ owner, repo, asset_id: asset.id });
- }
- }
-
- - name: Upload to vm-dev GitHub Release
- uses: softprops/action-gh-release@v2
- with:
- name: OpenShell VM Development Build
- prerelease: true
- tag_name: vm-dev
- target_commitish: ${{ github.sha }}
- body: |
- Rolling development build of **openshell-vm** — the MicroVM runtime for OpenShell.
-
- > **NOTE**: This is a development build, not a tagged release, and may be unstable.
-
- ### Kernel Runtime Artifacts
-
- Pre-built kernel runtime (libkrunfw + libkrun + gvproxy) for embedding into
- the openshell-vm binary. These are rebuilt when the kernel config or pinned
- dependency versions change.
-
- | Platform | Artifact |
- |----------|----------|
- | Linux ARM64 | `vm-runtime-linux-aarch64.tar.zst` |
- | Linux x86_64 | `vm-runtime-linux-x86_64.tar.zst` |
- | macOS ARM64 | `vm-runtime-darwin-aarch64.tar.zst` |
-
- ### VM Binaries
-
- Self-extracting openshell-vm binaries with embedded kernel runtime and base
- rootfs. These are rebuilt on every push to main.
-
- | Platform | Artifact |
- |----------|----------|
- | Linux ARM64 | `openshell-vm-aarch64-unknown-linux-gnu.tar.gz` |
- | Linux x86_64 | `openshell-vm-x86_64-unknown-linux-gnu.tar.gz` |
- | macOS ARM64 | `openshell-vm-aarch64-apple-darwin.tar.gz` |
-
- ### VM Compute Driver Binaries
-
- `openshell-driver-vm` binaries with embedded kernel runtime and bundled sandbox supervisor.
- Launched by the gateway when `--drivers=vm` is configured. Rebuilt on every
- push to main alongside the openshell-vm binaries.
-
- | Platform | Artifact |
- |----------|----------|
- | Linux ARM64 | `openshell-driver-vm-aarch64-unknown-linux-gnu.tar.gz` |
- | Linux x86_64 | `openshell-driver-vm-x86_64-unknown-linux-gnu.tar.gz` |
- | macOS ARM64 | `openshell-driver-vm-aarch64-apple-darwin.tar.gz` |
-
- ### Quick install
-
- ```
- curl -fsSL https://raw.githubusercontent.com/NVIDIA/OpenShell/main/install-vm.sh | sh
- ```
-
- Auto-detects your platform, verifies checksums, and codesigns on macOS.
-
- files: |
- release-final/openshell-vm-aarch64-unknown-linux-gnu.tar.gz
- release-final/openshell-vm-x86_64-unknown-linux-gnu.tar.gz
- release-final/openshell-vm-aarch64-apple-darwin.tar.gz
- release-final/openshell-driver-vm-aarch64-unknown-linux-gnu.tar.gz
- release-final/openshell-driver-vm-x86_64-unknown-linux-gnu.tar.gz
- release-final/openshell-driver-vm-aarch64-apple-darwin.tar.gz
- release-final/vm-binary-checksums-sha256.txt
diff --git a/.github/workflows/release-vm-kernel.yml b/.github/workflows/release-vm-kernel.yml
index 4f63a5a4b..8bdaab11f 100644
--- a/.github/workflows/release-vm-kernel.yml
+++ b/.github/workflows/release-vm-kernel.yml
@@ -1,16 +1,16 @@
name: Release VM Kernel
# Build custom libkrunfw (kernel firmware) + libkrun (VMM) + gvproxy for all
-# supported openshell-vm platforms. Artifacts are uploaded to the rolling
-# "vm-dev" GitHub Release and consumed by release-vm-dev.yml when building the
-# openshell-vm binary.
+# supported openshell-driver-vm platforms. Artifacts are uploaded to the
+# rolling "vm-runtime" GitHub Release and consumed by normal dev/tag release
+# workflows when building the openshell-driver-vm binary.
#
# The Linux kernel is compiled once on aarch64 Linux. The resulting kernel.c
# (a C source file containing the kernel as a byte array) is shared with the
# macOS job, which only needs to compile it into a .dylib — no krunvm, no
# Fedora VM, no kernel rebuild. This cuts macOS CI from ~45 min to ~5 min.
#
-# This workflow runs on-demand (or when kernel config / pins change). It is
+# This workflow runs on demand when kernel config or pins change. It is
# intentionally decoupled from the per-commit VM binary build because the
# kernel rarely changes and takes 15-45 minutes to compile.
@@ -21,9 +21,9 @@ permissions:
contents: write
packages: read
-# Serialize with release-vm-dev.yml — both update the vm-dev release.
+# Serialize runtime release updates.
concurrency:
- group: vm-dev-release
+ group: vm-runtime-release
cancel-in-progress: false
defaults:
@@ -36,7 +36,7 @@ jobs:
# ---------------------------------------------------------------------------
build-runtime-linux-arm64:
name: Build Runtime (Linux ARM64)
- runs-on: build-arm64
+ runs-on: linux-arm64-cpu8
timeout-minutes: 60
container:
image: ghcr.io/nvidia/openshell/ci:latest
@@ -86,7 +86,7 @@ jobs:
# ---------------------------------------------------------------------------
build-runtime-linux-amd64:
name: Build Runtime (Linux AMD64)
- runs-on: build-amd64
+ runs-on: linux-amd64-cpu8
timeout-minutes: 60
container:
image: ghcr.io/nvidia/openshell/ci:latest
@@ -163,13 +163,18 @@ jobs:
retention-days: 5
# ---------------------------------------------------------------------------
- # Upload all runtime tarballs to the vm-dev rolling release
+ # Upload all runtime tarballs to the vm-runtime rolling release
# ---------------------------------------------------------------------------
release-kernel:
name: Release Kernel Runtime
needs: [build-runtime-linux-arm64, build-runtime-linux-amd64, build-runtime-macos-arm64]
- runs-on: build-amd64
+ runs-on: linux-amd64-cpu8
timeout-minutes: 10
+ permissions:
+ contents: write
+ id-token: write
+ attestations: write
+ artifact-metadata: write
steps:
- uses: actions/checkout@v6
@@ -180,36 +185,37 @@ jobs:
path: release/
merge-multiple: true
- - name: Generate checksums
- run: |
- set -euo pipefail
- cd release
- sha256sum vm-runtime-*.tar.zst > vm-runtime-checksums-sha256.txt
- cat vm-runtime-checksums-sha256.txt
+ - name: Attest VM runtime artifacts
+ uses: actions/attest@v4
+ with:
+ subject-path: |
+ release/vm-runtime-linux-aarch64.tar.zst
+ release/vm-runtime-linux-x86_64.tar.zst
+ release/vm-runtime-darwin-aarch64.tar.zst
- - name: Ensure vm-dev tag exists
+ - name: Ensure vm-runtime tag exists
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
- git tag -fa vm-dev -m "VM Development Build" "${GITHUB_SHA}"
- git push --force origin vm-dev
+ git tag -fa vm-runtime -m "VM Runtime Development Build" "${GITHUB_SHA}"
+ git push --force origin vm-runtime
- - name: Prune stale runtime assets from vm-dev release
+ - name: Prune stale runtime assets from vm-runtime release
uses: actions/github-script@v7
with:
script: |
const [owner, repo] = process.env.GITHUB_REPOSITORY.split('/');
let release;
try {
- release = await github.rest.repos.getReleaseByTag({ owner, repo, tag: 'vm-dev' });
+ release = await github.rest.repos.getReleaseByTag({ owner, repo, tag: 'vm-runtime' });
} catch (err) {
if (err.status === 404) {
- core.info('No existing vm-dev release; will create fresh.');
+ core.info('No existing vm-runtime release; will create fresh.');
return;
}
throw err;
}
- // Delete old runtime tarballs and checksums (keep vm binary assets)
+ // Delete old runtime assets, including removed checksum files.
for (const asset of release.data.assets) {
if (asset.name.startsWith('vm-runtime-')) {
core.info(`Deleting stale asset: ${asset.name}`);
@@ -217,25 +223,23 @@ jobs:
}
}
- - name: Create / update vm-dev GitHub Release
+ - name: Create / update vm-runtime GitHub Release
uses: softprops/action-gh-release@v2
with:
- name: OpenShell VM Development Build
+ name: OpenShell VM Runtime
prerelease: true
- tag_name: vm-dev
+ tag_name: vm-runtime
target_commitish: ${{ github.sha }}
body: |
- Rolling development build of **openshell-vm** — the MicroVM runtime for OpenShell.
+ Build of the OpenShell VM runtime artifacts used by `openshell-driver-vm`.
- > **NOTE**: This is a development build, not a tagged release, and may be unstable.
- > The VM implementation itself is also experimental and may change or break without
- > notice.
+ > **NOTE**: This is a development build.
### Kernel Runtime Artifacts
Pre-built kernel runtime (libkrunfw + libkrun + gvproxy) for embedding into
- the openshell-vm binary. These are rebuilt when the kernel config or pinned
- dependency versions change.
+ the `openshell-driver-vm` binary. These are rebuilt on demand when the kernel
+ config or pinned dependency versions change.
| Platform | Artifact |
|----------|----------|
@@ -243,27 +247,14 @@ jobs:
| Linux x86_64 | `vm-runtime-linux-x86_64.tar.zst` |
| macOS ARM64 | `vm-runtime-darwin-aarch64.tar.zst` |
- ### VM Binaries
-
- Self-extracting openshell-vm binaries with embedded kernel runtime and base
- rootfs. These are rebuilt on every push to main.
+ ### Verify
- | Platform | Artifact |
- |----------|----------|
- | Linux ARM64 | `openshell-vm-aarch64-unknown-linux-gnu.tar.gz` |
- | Linux x86_64 | `openshell-vm-x86_64-unknown-linux-gnu.tar.gz` |
- | macOS ARM64 | `openshell-vm-aarch64-apple-darwin.tar.gz` |
-
- ### Quick install
-
- ```
- curl -fsSL https://raw.githubusercontent.com/NVIDIA/OpenShell/main/install-vm.sh | sh
+ ```bash
+ gh release download vm-runtime -R NVIDIA/OpenShell -p vm-runtime-linux-x86_64.tar.zst
+ gh attestation verify vm-runtime-linux-x86_64.tar.zst -R NVIDIA/OpenShell
```
- Auto-detects your platform, verifies checksums, and codesigns on macOS.
-
files: |
release/vm-runtime-linux-aarch64.tar.zst
release/vm-runtime-linux-x86_64.tar.zst
release/vm-runtime-darwin-aarch64.tar.zst
- release/vm-runtime-checksums-sha256.txt
diff --git a/Cargo.lock b/Cargo.lock
index 2bf308fd2..f3576ca19 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3702,33 +3702,6 @@ dependencies = [
"tracing",
]
-[[package]]
-name = "openshell-vm"
-version = "0.0.0"
-dependencies = [
- "base64 0.22.1",
- "clap",
- "indicatif",
- "libc",
- "libloading",
- "miette",
- "nix",
- "openshell-bootstrap",
- "openshell-core",
- "rustls",
- "rustls-pemfile",
- "serde",
- "serde_json",
- "tar",
- "thiserror 2.0.18",
- "tokio",
- "tokio-rustls",
- "tonic",
- "tracing",
- "tracing-subscriber",
- "zstd",
-]
-
[[package]]
name = "openssh"
version = "0.11.6"
diff --git a/Cargo.toml b/Cargo.toml
index 9bc3f9ea2..c9bfe6c91 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -4,6 +4,7 @@
[workspace]
resolver = "2"
members = ["crates/*"]
+exclude = ["crates/openshell-vm"]
[workspace.package]
version = "0.0.0"
diff --git a/architecture/ci-e2e.md b/architecture/ci-e2e.md
index 0041f981b..4fc007fca 100644
--- a/architecture/ci-e2e.md
+++ b/architecture/ci-e2e.md
@@ -34,7 +34,7 @@ OS-49 Phase 5 added non-required shadow workflows for the non-release workflows
The `mise-lockfile` job regenerates `mise.lock` with the CI image's pinned mise version and requires the checked-in file to match exactly. This intentionally includes generated metadata so contributors catch toolchain-version drift instead of letting different mise versions churn the lockfile.
-OS-49 Phase 7 moves the release-facing CPU jobs in `release-canary.yml`, `release-dev.yml`, and `release-tag.yml` to the same shared CPU labels. The release workflows also call `driver-vm-linux.yml` and `deb-package.yml`, so those reusable workers use the same labels to avoid retaining a hidden ARC dependency in the release path. `release-vm-dev.yml` and `release-vm-kernel.yml` remain on the old labels until the VM runtime decision is recorded for OS-131.
+OS-49 Phase 7 moves the release-facing CPU jobs in `release-canary.yml`, `release-dev.yml`, and `release-tag.yml` to the same shared CPU labels. The release workflows also call `driver-vm-linux.yml`, `driver-vm-macos.yml`, and `deb-package.yml`, so those reusable workers use the same labels to avoid retaining a hidden ARC dependency in the release path. `release-vm-kernel.yml` uses the shared CPU labels for its Linux runtime and release jobs; the macOS runtime job stays on `macos-latest-xlarge` because it builds native macOS dylibs.
## Trigger taxonomy
diff --git a/architecture/custom-vm-runtime.md b/architecture/custom-vm-runtime.md
index 4f6bffa34..9f723d8d7 100644
--- a/architecture/custom-vm-runtime.md
+++ b/architecture/custom-vm-runtime.md
@@ -140,9 +140,9 @@ still replace-the-rootfs semantics, so VM images must remain base-compatible
with the sandbox guest init path. Distroless or `scratch` images are not
expected to work.
-The separate `openshell-vm` binary still uses `vm:rootfs` to build a standalone
-embedded guest filesystem, but `openshell-driver-vm` no longer consumes that
-artifact.
+The legacy `openshell-vm` crate remains in the repository for later
+deprecation, but it is excluded from the normal workspace and release paths.
+`openshell-driver-vm` owns the active VM runtime build inputs.
## Network Plane
@@ -203,7 +203,7 @@ specific runtime artifact.
```mermaid
graph LR
- subgraph Source["crates/openshell-vm/runtime/"]
+ subgraph Source["crates/openshell-driver-vm/runtime/"]
KCONF["kernel/openshell.kconfig
Kernel config fragment"]
end
@@ -254,7 +254,7 @@ stock libkrunfw kernel:
| Landlock | `CONFIG_SECURITY_LANDLOCK` | Sandbox supervisor filesystem sandboxing |
| Seccomp filter | `CONFIG_SECCOMP_FILTER` | Sandbox supervisor syscall filtering |
-See `crates/openshell-vm/runtime/kernel/openshell.kconfig` for the full
+See `crates/openshell-driver-vm/runtime/kernel/openshell.kconfig` for the full
fragment with inline comments explaining why each option is needed.
## Verification
@@ -278,8 +278,8 @@ mise run gateway:vm
# With custom kernel (optional, adds ~20 min)
FROM_SOURCE=1 mise run vm:setup
-# Wipe everything and start over
-mise run vm:clean
+# Remove the staged compressed runtime when you need a clean rebuild
+rm -rf target/vm-runtime-compressed
```
See `crates/openshell-driver-vm/README.md` for the full driver workflow,
@@ -288,43 +288,45 @@ examples.
## CI/CD
-Two GitHub Actions workflows back the driver's release artifacts, both
-publishing to a rolling `vm-dev` GitHub Release:
+The driver release path is split between on-demand runtime builds and normal
+OpenShell releases:
### Kernel Runtime (`release-vm-kernel.yml`)
Builds the custom libkrunfw (kernel firmware), libkrun (VMM), and gvproxy for
-all supported platforms. Runs on-demand or when the kernel config / pinned
+all supported platforms. Run it on demand when the kernel config or pinned
versions change.
| Platform | Runner | Build Method |
|----------|--------|-------------|
-| Linux ARM64 | `build-arm64` (self-hosted) | Native `build-libkrun.sh` |
-| Linux x86_64 | `build-amd64` (self-hosted) | Native `build-libkrun.sh` |
+| Linux ARM64 | `linux-arm64-cpu8` | Native `build-libkrun.sh` |
+| Linux x86_64 | `linux-amd64-cpu8` | Native `build-libkrun.sh` |
| macOS ARM64 | `macos-latest-xlarge` (GitHub-hosted) | `build-libkrun-macos.sh` |
Artifacts: `vm-runtime-{platform}.tar.zst` containing libkrun, libkrunfw,
gvproxy, and provenance metadata. Each platform builds its own libkrunfw and
libkrun natively; the kernel inside libkrunfw is always Linux regardless of
-host platform.
+host platform. The workflow publishes GitHub artifact attestations for each
+runtime tarball instead of a separate runtime checksum file.
-### Driver Binary (`release-vm-dev.yml`)
+### Driver Binary (`release-dev.yml` / `release-tag.yml`)
Builds the self-contained `openshell-driver-vm` binary for every platform,
-with the kernel runtime + bundled sandbox supervisor embedded. Runs on every
-push to `main` that touches VM-related crates.
+with the kernel runtime + bundled sandbox supervisor embedded. Development
+driver binaries are published to the rolling `dev` release; tagged driver
+binaries are published to the corresponding `v*` release.
-The `download-kernel-runtime` job pulls the current `vm-runtime-<platform>.tar.zst`
-from the `vm-dev` release; the `build-openshell-driver-vm` jobs set
+The reusable driver workflows pull the current `vm-runtime-<platform>.tar.zst`
+from the `vm-runtime` release; their build jobs set
`OPENSHELL_VM_RUNTIME_COMPRESSED_DIR=$PWD/target/vm-runtime-compressed` and
run `cargo build --release -p openshell-driver-vm`. The macOS driver is
cross-compiled via osxcross (no macOS runner needed for the binary build —
only for the kernel build).
-macOS driver binaries produced via osxcross are not codesigned. Development
-builds are signed automatically by `tasks/scripts/gateway-vm.sh`
-(registered as `mise run gateway:vm`); a packaged release needs signing in
-CI.
+macOS driver binaries produced via osxcross are not codesigned. Local
+development builds are signed automatically by `tasks/scripts/gateway-vm.sh`
+(registered as `mise run gateway:vm`). Release tarball users on macOS must
+ad-hoc sign `openshell-driver-vm` before running VM sandboxes.
## Rollout Strategy
@@ -335,5 +337,6 @@ CI.
fast if missing.
3. For development, override with `OPENSHELL_VM_RUNTIME_DIR` to use a local
directory instead of the extracted cache.
-4. In CI, the kernel runtime is pre-built and cached in the `vm-dev` release.
- The driver build downloads it via `download-kernel-runtime.sh`.
+4. In CI, the kernel runtime is pre-built and cached in the `vm-runtime` release.
+ Dev and tagged release builds download that runtime, embed it into
+ `openshell-driver-vm`, and publish the driver next to `openshell-gateway`.
diff --git a/crates/openshell-driver-vm/README.md b/crates/openshell-driver-vm/README.md
index dbb90bb67..e71f926ed 100644
--- a/crates/openshell-driver-vm/README.md
+++ b/crates/openshell-driver-vm/README.md
@@ -169,11 +169,24 @@ The VM guest's serial console is appended to `//console.l
builds the image and the VM driver exports it via the local Docker daemon
- `gh` CLI (used by `mise run vm:setup` to download pre-built runtime artifacts)
+## Releases
+
+`openshell-driver-vm` is published as a normal OpenShell release artifact:
+
+- development builds: the rolling `dev` release
+- tagged builds: the corresponding `v*` release
+- runtime tarballs: the rolling `vm-runtime` release, rebuilt on demand by
+ `release-vm-kernel.yml`
+
+On Linux amd64 and arm64, `install-dev.sh` installs the Debian package from the
+selected `OPENSHELL_VERSION` release tag. That package includes
+`openshell-gateway` and `openshell-driver-vm`.
+
## Relationship to `openshell-vm`
-`openshell-vm` is a separate, legacy crate that runs the **whole OpenShell gateway inside a single VM**. `openshell-driver-vm` is the compute driver called by a host-resident gateway to spawn **per-sandbox VMs**. Both embed libkrun but share no Rust code — the driver vendors its own rootfs handling and runtime loader so `openshell-server` never has to link libkrun.
+`openshell-vm` is a separate, legacy crate that runs the **whole OpenShell gateway inside a single VM**. It remains in the repository for later deprecation or removal, but is excluded from normal workspace builds and release paths. `openshell-driver-vm` is the active compute driver called by a host-resident gateway to spawn **per-sandbox VMs**. The driver vendors its own rootfs handling and runtime loader so `openshell-server` never has to link libkrun.
## TODOs
- The gateway still configures the driver via CLI args; this will move to a gRPC bootstrap call so the driver interface is uniform across backends. See the `TODO(driver-abstraction)` notes in `crates/openshell-server/src/lib.rs` and `crates/openshell-server/src/compute/vm.rs`.
-- macOS codesigning is handled by `tasks/scripts/gateway-vm.sh`; a packaged release would need signing in CI.
+- macOS local builds are codesigned by `tasks/scripts/gateway-vm.sh`; release tarball users must ad-hoc sign `openshell-driver-vm` before running VM sandboxes.
diff --git a/crates/openshell-driver-vm/runtime/README.md b/crates/openshell-driver-vm/runtime/README.md
new file mode 100644
index 000000000..74afeb2be
--- /dev/null
+++ b/crates/openshell-driver-vm/runtime/README.md
@@ -0,0 +1,75 @@
+# VM Driver Runtime
+
+> Status: Experimental. VM support is under active development and may change.
+
+This directory owns the pinned runtime inputs for `openshell-driver-vm`:
+
+```text
+runtime/
+ pins.env
+ kernel/
+ openshell.kconfig
+```
+
+`openshell-driver-vm` embeds libkrun, libkrunfw, gvproxy, and the bundled
+`openshell-sandbox` supervisor. The legacy `crates/openshell-vm` crate remains
+in the repository, but normal workspace builds and release workflows do not use
+it.
+
+## Why
+
+The stock `libkrunfw` kernel does not include the bridge, netfilter,
+conntrack, cgroup, seccomp, and Landlock features the sandbox supervisor needs
+inside each microVM. `kernel/openshell.kconfig` extends the libkrunfw kernel so
+VM sandboxes can run the same supervisor enforcement path as other backends.
+
+## Build Scripts
+
+| Script | Platform | Purpose |
+|---|---|---|
+| `tasks/scripts/vm/build-libkrun.sh` | Linux | Builds libkrunfw and libkrun from source with the custom kernel config |
+| `tasks/scripts/vm/build-libkrun-macos.sh` | macOS | Builds portable libkrunfw and libkrun from a prebuilt `kernel.c` |
+| `tasks/scripts/vm/package-vm-runtime.sh` | Any | Packages `vm-runtime-<platform>.tar.zst` with libraries, gvproxy, and provenance |
+| `tasks/scripts/vm/download-kernel-runtime.sh` | Any | Downloads runtime tarballs from the `vm-runtime` release and stages compressed files |
+
+## Local Flow
+
+```shell
+# Download the current pre-built runtime and stage compressed artifacts
+mise run vm:setup
+
+# Build the bundled guest supervisor
+mise run vm:supervisor
+
+# Build the gateway and VM driver with embedded runtime artifacts
+OPENSHELL_VM_RUNTIME_COMPRESSED_DIR=$PWD/target/vm-runtime-compressed \
+ cargo build -p openshell-server -p openshell-driver-vm
+```
+
+Use `FROM_SOURCE=1 mise run vm:setup` to build the runtime from source instead
+of downloading `vm-runtime-<platform>.tar.zst`.
+
+## CI Ownership
+
+`release-vm-kernel.yml` is the on-demand producer for:
+
+- `vm-runtime-linux-aarch64.tar.zst`
+- `vm-runtime-linux-x86_64.tar.zst`
+- `vm-runtime-darwin-aarch64.tar.zst`
+
+Those artifacts stay on the rolling `vm-runtime` release. Normal `dev` and `v*`
+release workflows download them, embed them into `openshell-driver-vm`, and
+publish the driver binary next to `openshell-gateway`.
+
+## Provenance
+
+`package-vm-runtime.sh` writes `provenance.json` into each runtime tarball with
+the platform, libkrunfw commit, kernel version, GitHub SHA, and build time. The
+driver logs this metadata when it extracts and loads a runtime bundle.
+
+The release workflow also publishes GitHub artifact attestations for each
+runtime tarball. Verify a downloaded runtime with:
+
+```bash
+gh attestation verify vm-runtime-linux-x86_64.tar.zst -R NVIDIA/OpenShell
+```
diff --git a/crates/openshell-vm/runtime/kernel/openshell.kconfig b/crates/openshell-driver-vm/runtime/kernel/openshell.kconfig
similarity index 100%
rename from crates/openshell-vm/runtime/kernel/openshell.kconfig
rename to crates/openshell-driver-vm/runtime/kernel/openshell.kconfig
diff --git a/crates/openshell-vm/pins.env b/crates/openshell-driver-vm/runtime/pins.env
similarity index 97%
rename from crates/openshell-vm/pins.env
rename to crates/openshell-driver-vm/runtime/pins.env
index b3d802292..4a60c0225 100644
--- a/crates/openshell-vm/pins.env
+++ b/crates/openshell-driver-vm/runtime/pins.env
@@ -1,7 +1,7 @@
# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
-# Pinned dependency versions for openshell-vm builds.
+# Pinned dependency versions for the openshell-driver-vm runtime.
#
# This file is sourced by build-rootfs.sh and
# build-libkrun.sh. It centralises version pins and content-addressed
diff --git a/crates/openshell-vm/README.md b/crates/openshell-vm/README.md
index 32632e349..266818741 100644
--- a/crates/openshell-vm/README.md
+++ b/crates/openshell-vm/README.md
@@ -1,18 +1,16 @@
# openshell-vm
-> Status: Experimental and work in progress (WIP). VM support is under active development and may change.
+> Status: Legacy. This crate remains in the repository for later deprecation or
+> removal, but it is excluded from normal workspace builds, CI, and release
+> paths. Active VM sandbox work lives in `crates/openshell-driver-vm`.
MicroVM runtime for OpenShell, powered by [libkrun](https://github.com/containers/libkrun). Boots a lightweight ARM64 Linux VM on macOS (Apple Hypervisor.framework) or Linux (KVM) running a single-node k3s cluster with the OpenShell control plane.
-## Quick Start
+## Current Path
-```bash
-# One-time setup: download pre-built runtime (~30s)
-mise run vm:setup
-
-# Build and run the VM
-mise run vm
-```
+Use `mise run gateway:vm` for the supported per-sandbox VM driver workflow. The
+standalone `openshell-vm` tasks and wrappers are intentionally not part of the
+normal task surface.
## Prerequisites
@@ -27,7 +25,8 @@ mise run vm
### macOS-Specific
-The binary must be codesigned with the Hypervisor.framework entitlement. The `mise run vm` flow handles this automatically. To codesign manually:
+The binary must be codesigned with the Hypervisor.framework entitlement. To
+codesign manually:
```bash
codesign --entitlements crates/openshell-vm/entitlements.plist --force -s - target/debug/openshell-vm
@@ -37,7 +36,8 @@ codesign --entitlements crates/openshell-vm/entitlements.plist --force -s - targ
### Download Pre-Built Runtime (Default)
-Downloads libkrun, libkrunfw, and gvproxy from the `vm-dev` GitHub Release:
+Downloads libkrun, libkrunfw, and gvproxy from the `vm-runtime` GitHub Release for
+the active VM driver runtime:
```bash
mise run vm:setup
@@ -55,25 +55,14 @@ On macOS this builds a custom libkrunfw (kernel firmware with bridge/netfilter s
## Build
-Build the openshell-vm binary with embedded runtime:
-
-```bash
-mise run vm:build
-```
-
-This compresses runtime artifacts, compiles the Rust binary with `include_bytes!()` embedding, codesigns it (macOS), and stages the sidecar runtime bundle.
+There is no first-class `mise` build task for the standalone binary. This crate
+is no longer part of normal CI or release builds.
## Rootfs
-The rootfs is an Ubuntu filesystem containing k3s, pre-loaded container images, and the OpenShell binaries. Build it with:
-
-```bash
-# Base rootfs (~200-300MB, cold starts in ~30-60s)
-mise run vm:rootfs -- --base
-
-# Full rootfs (~2GB+, pre-initialized, boots in ~3-5s)
-mise run vm:rootfs
-```
+The legacy rootfs scripts are kept with this crate for historical reference.
+They are not used by `openshell-driver-vm`, which derives each sandbox guest
+rootfs from a container image at create time.
## Run
@@ -81,11 +70,7 @@ mise run vm:rootfs
Boots the full OpenShell gateway -- k3s + openshell-server + openshell-sandbox:
-```bash
-mise run vm
-```
-
-Or run the binary directly:
+Run the binary directly after manually building and signing it:
```bash
./target/debug/openshell-vm
@@ -143,39 +128,11 @@ Subcommands:
exec Execute a command inside a running VM
```
-## mise Tasks Reference
-
-| Task | Description |
-|------|-------------|
-| `vm` | Build and run the VM |
-| `vm:build` | Build openshell-vm binary with embedded runtime |
-| `vm:setup` | One-time setup: download (or build) the VM runtime |
-| `vm:rootfs` | Build the VM rootfs tarball (`-- --base` for lightweight) |
-| `vm:clean` | Remove all VM cached artifacts |
-| `e2e:vm` | Boot VM and run smoke e2e tests |
-
-### Common Workflows
-
-```bash
-# First time setup
-mise run vm:setup # download pre-built runtime (~30s)
-mise run vm # build + run
-
-# Day-to-day iteration
-mise run vm # incremental build + run
-
-# Need fresh rootfs
-mise run vm:rootfs -- --base # rebuild base rootfs
-mise run vm:build # rebuild binary with new rootfs
+## Tasks
-# Something broken, start over
-mise run vm:clean # wipe everything
-mise run vm:setup # re-download runtime
-mise run vm # full rebuild + run
-
-# Custom kernel work (rare)
-FROM_SOURCE=1 mise run vm:setup
-```
+Standalone `openshell-vm` tasks have been removed from the normal task surface.
+The remaining VM tasks (`vm:setup`, `vm:supervisor`, `gateway:vm`, `e2e:vm`,
+and `vm:smoke:orphan-cleanup`) support `openshell-driver-vm`.
## Architecture
diff --git a/crates/openshell-vm/runtime/README.md b/crates/openshell-vm/runtime/README.md
deleted file mode 100644
index 76646a5ba..000000000
--- a/crates/openshell-vm/runtime/README.md
+++ /dev/null
@@ -1,183 +0,0 @@
-# Custom libkrunfw Runtime
-
-> Status: Experimental and work in progress (WIP). VM support is under active development and may change.
-
-This directory contains the kernel config fragment for a custom `libkrunfw` runtime
-that enables bridge CNI and netfilter support in the OpenShell gateway VM.
-
-## Why
-
-The stock `libkrunfw` (from Homebrew) ships a kernel without bridge, netfilter,
-or conntrack support. This means the VM cannot:
-
-- Create `cni0` bridge interfaces (required by the bridge CNI plugin)
-- Run kube-proxy (requires nftables)
-- Route service VIP traffic (requires NAT/conntrack)
-
-The custom runtime builds libkrunfw with an additional kernel config fragment
-that enables these networking and sandboxing features.
-
-## Directory Structure
-
-```text
-runtime/
- kernel/
- openshell.kconfig # Kernel config fragment (networking + sandboxing)
-```
-
-## Build Pipeline
-
-Each platform builds its own kernel and runtime natively.
-
-```text
-Linux ARM64: builds aarch64 kernel -> .so (parallel)
-Linux AMD64: builds x86_64 kernel -> .so (parallel)
-macOS ARM64: builds aarch64 kernel -> .dylib
-```
-
-### Build Scripts
-
-| Script | Platform | What it does |
-|--------|----------|-------------|
-| `tasks/scripts/vm/build-libkrun.sh` | Linux | Builds libkrunfw + libkrun from source |
-| `tasks/scripts/vm/build-libkrun-macos.sh` | macOS | Builds libkrunfw + libkrun from source |
-| `tasks/scripts/vm/package-vm-runtime.sh` | Any | Packages runtime tarball (libs + gvproxy + provenance) |
-
-### Quick Build (Linux)
-
-```bash
-# Build both libkrunfw and libkrun from source
-tasks/scripts/vm/build-libkrun.sh
-
-# Or build the full runtime from source via mise:
-FROM_SOURCE=1 mise run vm:setup
-```
-
-### Quick Build (macOS)
-
-```bash
-# Download pre-built runtime (recommended, ~30s):
-mise run vm:setup
-
-# Or build from source:
-FROM_SOURCE=1 mise run vm:setup
-```
-
-### Output
-
-Build artifacts are placed in `target/libkrun-build/`:
-
-```text
-target/libkrun-build/
- libkrun.so / libkrun.dylib # The VMM library
- libkrunfw.so* / libkrunfw.dylib # Kernel firmware library
-```
-
-## Networking
-
-The VM uses bridge CNI for pod networking with nftables-mode kube-proxy for
-service VIP / ClusterIP support. The kernel config fragment enables both
-iptables (for CNI bridge masquerade) and nftables (for kube-proxy).
-
-k3s is started with `--kube-proxy-arg=proxy-mode=nftables` because the
-bundled iptables binaries in k3s have revision-negotiation issues with the
-libkrun kernel's xt_MARK module. nftables mode uses the kernel's nf_tables
-subsystem directly and avoids this entirely.
-
-## Runtime Provenance
-
-At VM boot, the openshell-vm binary logs provenance information about the loaded
-runtime:
-
-```text
-runtime: /path/to/openshell-vm.runtime
- libkrunfw: libkrunfw.dylib
- sha256: a1b2c3d4e5f6...
- type: custom (OpenShell-built)
- libkrunfw-commit: abc1234
- kernel-version: 6.6.30
- build-timestamp: 2026-03-23T10:00:00Z
-```
-
-For stock runtimes:
-
-```text
-runtime: /path/to/openshell-vm.runtime
- libkrunfw: libkrunfw.dylib
- sha256: f6e5d4c3b2a1...
- type: stock (system/homebrew)
-```
-
-## Verification
-
-### Capability Check (inside VM)
-
-```bash
-# Run inside the VM to verify kernel capabilities:
-/srv/check-vm-capabilities.sh
-
-# JSON output for CI:
-/srv/check-vm-capabilities.sh --json
-```
-
-### Rollback
-
-To revert to the stock runtime:
-
-```bash
-# Unset the custom runtime source:
-unset OPENSHELL_VM_RUNTIME_SOURCE_DIR
-
-# Re-download pre-built runtime and rebuild:
-mise run vm:setup
-mise run vm:build
-
-# Boot:
-mise run vm
-```
-
-## Troubleshooting
-
-### "FailedCreatePodSandBox" bridge errors
-
-The kernel does not have bridge support. Verify:
-
-```bash
-# Inside VM:
-ip link add test0 type bridge && echo "bridge OK" && ip link del test0
-```
-
-If this fails, you are running the stock runtime. Build and use the custom one.
-
-### kube-proxy CrashLoopBackOff
-
-kube-proxy runs in nftables mode. If it crashes, verify nftables support:
-
-```bash
-# Inside VM:
-nft list ruleset
-```
-
-If this fails, the kernel may lack `CONFIG_NF_TABLES`. Use the custom runtime.
-
-Common errors:
-
-- `unknown option "--xor-mark"`: kube-proxy is running in iptables mode instead
- of nftables. Verify `--kube-proxy-arg=proxy-mode=nftables` is in the k3s args.
-
-### Runtime mismatch after upgrade
-
-If libkrunfw is updated (e.g., via `brew upgrade`), the stock runtime may
-change. Check provenance:
-
-```bash
-# Look for provenance info in VM boot output
-grep "runtime:" ~/.local/share/openshell/openshell-vm/console.log
-```
-
-Re-build the custom runtime if needed:
-
-```bash
-FROM_SOURCE=1 mise run vm:setup
-mise run vm:build
-```
diff --git a/crates/openshell-vm/scripts/build-rootfs.sh b/crates/openshell-vm/scripts/build-rootfs.sh
index 02f72cdfe..bfafe8c85 100755
--- a/crates/openshell-vm/scripts/build-rootfs.sh
+++ b/crates/openshell-vm/scripts/build-rootfs.sh
@@ -43,10 +43,10 @@ else
fi
# Source pinned dependency versions (digests, checksums, commit SHAs).
-# Environment variables override pins — see pins.env for details.
-PINS_FILE="${SCRIPT_DIR}/../pins.env"
+# Environment variables override pins.
+PINS_FILE="${SCRIPT_DIR}/../../openshell-driver-vm/runtime/pins.env"
if [ -f "$PINS_FILE" ]; then
- # shellcheck source=../pins.env
+ # shellcheck source=../../openshell-driver-vm/runtime/pins.env
source "$PINS_FILE"
fi
diff --git a/crates/openshell-vm/scripts/check-vm-capabilities.sh b/crates/openshell-vm/scripts/check-vm-capabilities.sh
index 2e758f5e0..f88a1340c 100755
--- a/crates/openshell-vm/scripts/check-vm-capabilities.sh
+++ b/crates/openshell-vm/scripts/check-vm-capabilities.sh
@@ -224,7 +224,7 @@ else
echo ""
echo "FAIL: $FAIL required capabilities missing."
echo "The VM kernel needs to be rebuilt with the missing features."
- echo "See: crates/openshell-vm/runtime/kernel/README.md"
+ echo "See: crates/openshell-driver-vm/runtime/README.md"
exit 1
else
echo ""
diff --git a/deploy/docker/Dockerfile.vm-macos b/deploy/docker/Dockerfile.vm-macos
deleted file mode 100644
index 4527217bc..000000000
--- a/deploy/docker/Dockerfile.vm-macos
+++ /dev/null
@@ -1,125 +0,0 @@
-# syntax=docker/dockerfile:1.6
-
-# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-
-# Cross-compile the openshell-vm binary for macOS aarch64 (Apple Silicon)
-# using the osxcross toolchain.
-#
-# The openshell-vm binary loads libkrun/libkrunfw at runtime via dlopen, so it
-# does NOT need Hypervisor.framework headers at build time. Pre-compressed
-# runtime artifacts (libkrun, libkrunfw, gvproxy, rootfs) are injected via
-# the vm-runtime-compressed build context and embedded into the binary via
-# include_bytes!().
-#
-# Usage:
-# docker buildx build -f deploy/docker/Dockerfile.vm-macos \
-# --build-arg OPENSHELL_CARGO_VERSION=0.6.0 \
-# --build-context vm-runtime-compressed=/path/to/compressed-dir \
-# --output type=local,dest=out/ .
-
-ARG OSXCROSS_IMAGE=crazymax/osxcross:latest
-
-FROM ${OSXCROSS_IMAGE} AS osxcross
-
-FROM python:3.12-slim AS builder
-
-ARG CARGO_TARGET_CACHE_SCOPE=default
-
-ENV PATH="/root/.cargo/bin:/usr/local/bin:/osxcross/bin:${PATH}"
-ENV LD_LIBRARY_PATH="/osxcross/lib"
-
-COPY --from=osxcross /osxcross /osxcross
-
-RUN apt-get update && apt-get install -y --no-install-recommends \
- build-essential \
- ca-certificates \
- clang \
- cmake \
- curl \
- pkg-config \
- && rm -rf /var/lib/apt/lists/*
-
-RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.95.0
-
-RUN rustup target add aarch64-apple-darwin
-
-WORKDIR /build
-
-ENV CC_aarch64_apple_darwin=oa64-clang
-ENV CXX_aarch64_apple_darwin=oa64-clang++
-ENV AR_aarch64_apple_darwin=aarch64-apple-darwin25.1-ar
-ENV CARGO_TARGET_AARCH64_APPLE_DARWIN_LINKER=oa64-clang
-ENV CARGO_TARGET_AARCH64_APPLE_DARWIN_AR=aarch64-apple-darwin25.1-ar
-
-# aws-lc-sys workaround (in case it ends up in the dep tree via feature unification)
-RUN ln -sf /osxcross/bin/arm64-apple-darwin25.1-ld /usr/local/bin/arm64-apple-macosx-ld
-
-# ---------------------------------------------------------------------------
-# Stage 1: dependency caching — copy only manifests, create dummy sources,
-# build dependencies. This layer is cached unless Cargo.toml/lock changes.
-# ---------------------------------------------------------------------------
-COPY Cargo.toml Cargo.lock ./
-COPY crates/openshell-vm/Cargo.toml crates/openshell-vm/Cargo.toml
-COPY crates/openshell-vm/build.rs crates/openshell-vm/build.rs
-COPY crates/openshell-core/Cargo.toml crates/openshell-core/Cargo.toml
-COPY crates/openshell-core/build.rs crates/openshell-core/build.rs
-COPY crates/openshell-bootstrap/Cargo.toml crates/openshell-bootstrap/Cargo.toml
-COPY crates/openshell-policy/Cargo.toml crates/openshell-policy/Cargo.toml
-COPY proto/ proto/
-
-# Scope workspace to VM crates only.
-RUN sed -i 's|members = \["crates/\*"\]|members = ["crates/openshell-vm", "crates/openshell-core", "crates/openshell-bootstrap", "crates/openshell-policy"]|' Cargo.toml
-
-RUN mkdir -p crates/openshell-vm/src \
- crates/openshell-core/src \
- crates/openshell-bootstrap/src \
- crates/openshell-policy/src && \
- echo "fn main() {}" > crates/openshell-vm/src/main.rs && \
- touch crates/openshell-vm/src/lib.rs && \
- touch crates/openshell-core/src/lib.rs && \
- touch crates/openshell-bootstrap/src/lib.rs && \
- touch crates/openshell-policy/src/lib.rs
-
-# Build deps only (cached layer).
-RUN --mount=type=cache,id=cargo-registry-vm-macos,sharing=locked,target=/root/.cargo/registry \
- --mount=type=cache,id=cargo-git-vm-macos,sharing=locked,target=/root/.cargo/git \
- --mount=type=cache,id=cargo-target-vm-macos-${CARGO_TARGET_CACHE_SCOPE},sharing=locked,target=/build/target \
- cargo build --release --target aarch64-apple-darwin -p openshell-vm 2>/dev/null || true
-
-# ---------------------------------------------------------------------------
-# Stage 2: real build with compressed runtime artifacts
-# ---------------------------------------------------------------------------
-COPY crates/ crates/
-
-# Copy compressed VM runtime artifacts for embedding.
-# These are passed in via --build-context vm-runtime-compressed=...
-COPY --from=vm-runtime-compressed / /build/vm-runtime-compressed/
-
-# Touch source files to ensure they're rebuilt (not the cached dummy).
-RUN touch crates/openshell-vm/src/main.rs \
- crates/openshell-vm/src/lib.rs \
- crates/openshell-vm/build.rs \
- crates/openshell-bootstrap/src/lib.rs \
- crates/openshell-core/src/lib.rs \
- crates/openshell-core/build.rs \
- crates/openshell-policy/src/lib.rs \
- proto/*.proto
-
-# Declare version ARGs here (not earlier) so the git-hash-bearing values do not
-# invalidate the expensive dependency-build layers above on every commit.
-ARG OPENSHELL_CARGO_VERSION
-ARG OPENSHELL_IMAGE_TAG
-RUN --mount=type=cache,id=cargo-registry-vm-macos,sharing=locked,target=/root/.cargo/registry \
- --mount=type=cache,id=cargo-git-vm-macos,sharing=locked,target=/root/.cargo/git \
- --mount=type=cache,id=cargo-target-vm-macos-${CARGO_TARGET_CACHE_SCOPE},sharing=locked,target=/build/target \
- if [ -n "${OPENSHELL_CARGO_VERSION:-}" ]; then \
- sed -i -E '/^\[workspace\.package\]/,/^\[/{s/^version[[:space:]]*=[[:space:]]*".*"/version = "'"${OPENSHELL_CARGO_VERSION}"'"/}' Cargo.toml; \
- fi && \
- OPENSHELL_VM_RUNTIME_COMPRESSED_DIR=/build/vm-runtime-compressed \
- OPENSHELL_IMAGE_TAG="${OPENSHELL_IMAGE_TAG:-dev}" \
- cargo build --release --target aarch64-apple-darwin -p openshell-vm && \
- cp target/aarch64-apple-darwin/release/openshell-vm /openshell-vm
-
-FROM scratch AS binary
-COPY --from=builder /openshell-vm /openshell-vm
diff --git a/e2e/rust/e2e-vm.sh b/e2e/rust/e2e-vm.sh
index 551f9b41e..dc462bf98 100755
--- a/e2e/rust/e2e-vm.sh
+++ b/e2e/rust/e2e-vm.sh
@@ -23,7 +23,7 @@
# mise run e2e:vm
#
# What the script does:
-# 1. Ensures the VM runtime (libkrun + gvproxy + rootfs) is staged.
+# 1. Ensures the VM runtime (libkrun + gvproxy) and bundled supervisor are staged.
# 2. Builds `openshell-gateway`, `openshell-driver-vm`, and the
# `openshell` CLI with the embedded runtime.
# 3. On macOS, codesigns the VM driver (libkrun needs the
@@ -36,8 +36,8 @@
# log and every VM serial console log for post-mortem.
#
# Prerequisites (handled automatically by this script if missing):
-# - `mise run vm:setup` — downloads / builds the libkrun runtime.
-# - `mise run vm:rootfs -- --base` — builds the sandbox rootfs tarball.
+# - `mise run vm:setup` — downloads / builds the libkrun runtime.
+# - `mise run vm:supervisor` — builds the bundled sandbox supervisor.
set -euo pipefail
@@ -56,25 +56,26 @@ DRIVER_BIN="${ROOT}/target/debug/openshell-driver-vm"
STATE_DIR_ROOT="/tmp"
# Smoke test timeouts. First boot extracts the embedded libkrun runtime
-# (~60–90MB of zstd per architecture) and the sandbox rootfs (~200MB).
-# The guest then runs k3s-free sandbox supervisor startup; a cold
-# microVM is typically ready within ~15s.
+# (~60-90MB of zstd per architecture) and prepares a sandbox rootfs from the
+# configured image. The guest then runs k3s-free sandbox supervisor startup; a
+# cold microVM is typically ready within ~15s after image preparation.
GATEWAY_READY_TIMEOUT=60
SANDBOX_PROVISION_TIMEOUT=180
# ── Build prerequisites ──────────────────────────────────────────────
-if [ ! -f "${COMPRESSED_DIR}/rootfs.tar.zst" ]; then
- echo "==> Building base VM rootfs tarball (mise run vm:rootfs -- --base)"
- mise run vm:rootfs -- --base
-fi
+mkdir -p "${COMPRESSED_DIR}"
-if [ ! -f "${COMPRESSED_DIR}/rootfs.tar.zst" ] \
- || ! find "${COMPRESSED_DIR}" -maxdepth 1 -name 'libkrun*.zst' | grep -q .; then
+if ! find "${COMPRESSED_DIR}" -maxdepth 1 -name 'libkrun*.zst' | grep -q .; then
echo "==> Preparing embedded VM runtime (mise run vm:setup)"
mise run vm:setup
fi
+if [ ! -f "${COMPRESSED_DIR}/openshell-sandbox.zst" ]; then
+ echo "==> Building bundled VM supervisor (mise run vm:supervisor)"
+ mise run vm:supervisor
+fi
+
export OPENSHELL_VM_RUNTIME_COMPRESSED_DIR="${OPENSHELL_VM_RUNTIME_COMPRESSED_DIR:-${COMPRESSED_DIR}}"
echo "==> Building openshell-gateway, openshell-driver-vm, openshell (CLI)"
@@ -164,9 +165,9 @@ echo "==> Starting openshell-gateway on 127.0.0.1:${HOST_PORT} (state: ${RUN_STA
# Pin --driver-dir to the workspace `target/debug/` so we always pick up
# the driver we just cargo-built. Without this, the gateway's
# `resolve_compute_driver_bin` fallback prefers
-# `~/.local/libexec/openshell/openshell-driver-vm` when present
-# (install-vm.sh installs there), which silently shadows development
-# builds — a subtle source of stale-binary bugs in e2e runs.
+# `~/.local/libexec/openshell/openshell-driver-vm` when present,
+# which silently shadows development builds — a subtle source of
+# stale-binary bugs in e2e runs.
# --grpc-endpoint is the URL the VM driver passes into each guest as
# OPENSHELL_ENDPOINT. The supervisor inside the VM dials this address.
# Use `host.containers.internal` rather than `127.0.0.1` so gvproxy's
diff --git a/e2e/rust/tests/smoke.rs b/e2e/rust/tests/smoke.rs
index c380efc8c..172afa22b 100644
--- a/e2e/rust/tests/smoke.rs
+++ b/e2e/rust/tests/smoke.rs
@@ -7,7 +7,7 @@
//! command inside it, and tear it down.
//!
//! This test is cluster-agnostic — it works against any running gateway
-//! (Docker-based cluster or openshell-vm microVM). The `e2e:vm` mise
+//! (Docker-based cluster or openshell-driver-vm microVM). The `e2e:vm` mise
//! task uses it to validate the VM gateway after boot.
use std::process::Stdio;
diff --git a/install-dev.sh b/install-dev.sh
index dc0666cd6..fb98a841d 100755
--- a/install-dev.sh
+++ b/install-dev.sh
@@ -2,17 +2,18 @@
# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
-# Install the OpenShell development build from the rolling GitHub `dev` release.
+# Install an OpenShell Debian package from a GitHub release.
#
-# This script is intended as a convenient installer for development builds. It
-# currently supports Debian packages on Linux amd64 and arm64 only.
+# This script defaults to the rolling `dev` release and supports Debian
+# packages on Linux amd64 and arm64 only. The package installs the CLI,
+# gateway, and VM compute driver.
#
set -e
APP_NAME="openshell"
REPO="NVIDIA/OpenShell"
GITHUB_URL="https://github.com/${REPO}"
-RELEASE_TAG="dev"
+RELEASE_TAG="${OPENSHELL_VERSION:-dev}"
CHECKSUMS_NAME="openshell-checksums-sha256.txt"
info() {
@@ -26,7 +27,7 @@ error() {
usage() {
cat <&2
-}
-
-warn() {
- printf '%s: warning: %s\n' "$APP_NAME" "$*" >&2
-}
-
-error() {
- printf '%s: error: %s\n' "$APP_NAME" "$*" >&2
- exit 1
-}
-
-# ---------------------------------------------------------------------------
-# HTTP helpers — prefer curl, fall back to wget
-# ---------------------------------------------------------------------------
-
-has_cmd() {
- command -v "$1" >/dev/null 2>&1
-}
-
-check_downloader() {
- if has_cmd curl; then
- return 0
- elif has_cmd wget; then
- return 0
- else
- error "either 'curl' or 'wget' is required to download files"
- fi
-}
-
-download() {
- _url="$1"
- _output="$2"
-
- if has_cmd curl; then
- curl -fLsS --retry 3 --max-redirs 5 -o "$_output" "$_url"
- elif has_cmd wget; then
- wget -q --tries=3 --max-redirect=5 -O "$_output" "$_url"
- fi
-}
-
-# Follow a URL and print the final resolved URL (for detecting redirect targets).
-resolve_redirect() {
- _url="$1"
-
- if has_cmd curl; then
- curl -fLsS -o /dev/null -w '%{url_effective}' "$_url"
- elif has_cmd wget; then
- wget --spider --max-redirect=10 "$_url" 2>&1 | sed -n 's/^.*Location: \([^ ]*\).*/\1/p' | tail -1
- fi
-}
-
-# Validate that a download URL resolves to the expected GitHub origin.
-# A MITM or DNS hijack could redirect to an attacker-controlled domain,
-# which would also serve a matching checksums file (making checksum
-# verification useless). See: https://github.com/NVIDIA/OpenShell/issues/638
-validate_download_origin() {
- _vdo_url="$1"
- _resolved="$(resolve_redirect "$_vdo_url")" || return 0 # best-effort
-
- case "$_resolved" in
- https://github.com/${REPO}/*) ;;
- https://objects.githubusercontent.com/*) ;;
- https://release-assets.githubusercontent.com/*) ;;
- *)
- error "unexpected redirect target: ${_resolved} (expected github.com/${REPO}/...)"
- ;;
- esac
-}
-
-# ---------------------------------------------------------------------------
-# Platform detection
-# ---------------------------------------------------------------------------
-
-# Both binaries ship the same set of triples under the same naming scheme.
-get_target() {
- _arch="$(uname -m)"
- _os="$(uname -s)"
-
- case "$_os" in
- Darwin)
- case "$_arch" in
- arm64|aarch64) echo "aarch64-apple-darwin" ;;
- *) error "macOS x86_64 is not supported; use Apple Silicon" ;;
- esac
- ;;
- Linux)
- case "$_arch" in
- x86_64|amd64) echo "x86_64-unknown-linux-gnu" ;;
- aarch64|arm64) echo "aarch64-unknown-linux-gnu" ;;
- *) error "unsupported architecture: $_arch" ;;
- esac
- ;;
- *) error "unsupported OS: $_os" ;;
- esac
-}
-
-# ---------------------------------------------------------------------------
-# Checksum verification
-# ---------------------------------------------------------------------------
-
-verify_checksum() {
- _vc_archive="$1"
- _vc_checksums="$2"
- _vc_filename="$3"
-
- if ! has_cmd shasum && ! has_cmd sha256sum; then
- error "neither 'shasum' nor 'sha256sum' found; cannot verify download integrity"
- fi
-
- _vc_expected="$(grep -F "$_vc_filename" "$_vc_checksums" | awk '{print $1}')"
-
- if [ -z "$_vc_expected" ]; then
- error "no checksum entry found for $_vc_filename in checksums file"
- fi
-
- if has_cmd shasum; then
- echo "$_vc_expected $_vc_archive" | shasum -a 256 -c --quiet 2>/dev/null
- elif has_cmd sha256sum; then
- echo "$_vc_expected $_vc_archive" | sha256sum -c --quiet 2>/dev/null
- fi
-}
-
-# ---------------------------------------------------------------------------
-# Install locations
-# ---------------------------------------------------------------------------
-
-get_gateway_install_dir() {
- if [ -n "${OPENSHELL_INSTALL_DIR:-}" ]; then
- echo "$OPENSHELL_INSTALL_DIR"
- else
- echo "${HOME}/.local/bin"
- fi
-}
-
-# Default per-user install dir for the VM compute driver. Newer gateways also
-# auto-discover conventional system installs under `/usr/local/libexec`.
-get_driver_install_dir() {
- if [ -n "${OPENSHELL_DRIVER_DIR:-}" ]; then
- echo "$OPENSHELL_DRIVER_DIR"
- else
- echo "${HOME}/.local/libexec/openshell"
- fi
-}
-
-is_on_path() {
- case ":${PATH}:" in
- *":$1:"*) return 0 ;;
- *) return 1 ;;
- esac
-}
-
-# ---------------------------------------------------------------------------
-# macOS codesign — the VM driver runs libkrun and needs the hypervisor
-# entitlement. The gateway does not.
-# ---------------------------------------------------------------------------
-
-codesign_driver_vm() {
- _binary="$1"
- _cs_tmpdir="$2" # reuse caller's tmpdir for cleanup-safe temp files
-
- if [ "$(uname -s)" != "Darwin" ]; then
- return 0
- fi
-
- if ! has_cmd codesign; then
- warn "codesign not found; ${DRIVER_VM_BIN} will fail without the Hypervisor entitlement"
- return 0
- fi
-
- info "codesigning ${DRIVER_VM_BIN} with Hypervisor entitlement..."
- _entitlements="${_cs_tmpdir}/entitlements.plist"
- cat > "$_entitlements" <<'PLIST'
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
-    <key>com.apple.security.hypervisor</key>
-    <true/>
-</dict>
-</plist>
-PLIST
- codesign --entitlements "$_entitlements" --force -s - "$_binary"
-}
-
-# ---------------------------------------------------------------------------
-# Download + install a single binary release asset
-# ---------------------------------------------------------------------------
-
-# Args:
-# $1 binary name (e.g. openshell-gateway)
-# $2 release tag (e.g. dev, vm-dev)
-# $3 target triple (e.g. aarch64-apple-darwin)
-# $4 checksums filename in the release (e.g. openshell-gateway-checksums-sha256.txt)
-# $5 destination directory
-# $6 tmp working dir (caller-owned; will be cleaned up outside)
-install_release_binary() {
- _bin="$1"
- _tag="$2"
- _target="$3"
- _checksums_name="$4"
- _dest_dir="$5"
- _work_dir="$6"
-
- _filename="${_bin}-${_target}.tar.gz"
- _download_url="${GITHUB_URL}/releases/download/${_tag}/${_filename}"
- _checksums_url="${GITHUB_URL}/releases/download/${_tag}/${_checksums_name}"
-
- info "downloading ${_bin} from release '${_tag}' (${_target})..."
-
- validate_download_origin "$_download_url"
-
- if ! download "$_download_url" "${_work_dir}/${_filename}"; then
- error "failed to download ${_download_url}"
- fi
-
- if ! download "$_checksums_url" "${_work_dir}/${_bin}-checksums.txt"; then
- error "failed to download checksums file from ${_checksums_url}"
- fi
-
- info "verifying ${_bin} checksum..."
- if ! verify_checksum "${_work_dir}/${_filename}" "${_work_dir}/${_bin}-checksums.txt" "$_filename"; then
- error "checksum verification failed for ${_filename}"
- fi
-
- info "extracting ${_bin}..."
- tar -xzf "${_work_dir}/${_filename}" -C "${_work_dir}" --no-same-owner --no-same-permissions "${_bin}"
-
- # Install into destination dir, escalating with sudo if needed.
- if mkdir -p "$_dest_dir" 2>/dev/null && [ -w "$_dest_dir" ]; then
- install -m 755 "${_work_dir}/${_bin}" "${_dest_dir}/${_bin}"
- else
- info "elevated permissions required to install to ${_dest_dir}"
- sudo mkdir -p "$_dest_dir"
- sudo install -m 755 "${_work_dir}/${_bin}" "${_dest_dir}/${_bin}"
- fi
-}
-
-# ---------------------------------------------------------------------------
-# Main
-# ---------------------------------------------------------------------------
-
-usage() {
- cat </dev/null || echo "${GATEWAY_RELEASE_TAG}")"
- info "installed ${_gateway_version} to ${_gateway_dir}/${GATEWAY_BIN}"
- info "installed ${DRIVER_VM_BIN} to ${_driver_dir}/${DRIVER_VM_BIN}"
-
- # Warn if the gateway dir isn't on PATH.
- if ! is_on_path "$_gateway_dir"; then
- echo ""
- info "${_gateway_dir} is not on your PATH."
- info ""
- info "Add it by appending the following to your shell configuration file"
- info "(e.g. ~/.bashrc, ~/.zshrc, or ~/.config/fish/config.fish):"
- info ""
-
- _current_shell="$(basename "${SHELL:-sh}" 2>/dev/null || echo "sh")"
- case "$_current_shell" in
- fish) info " fish_add_path ${_gateway_dir}" ;;
- *) info " export PATH=\"${_gateway_dir}:\$PATH\"" ;;
- esac
-
- info ""
- info "Then restart your shell or run the command above in your current session."
- fi
-
- # ---------------------------------------------------------------------------
- # Next steps — print a working command to start the gateway.
- #
- # The VM compute driver requires:
- # * --driver-dir — only needed when the driver is installed
- # outside the built-in search paths:
- # ~/.local/libexec/openshell,
- # /usr/local/libexec/openshell,
- # /usr/local/libexec, or next to the gateway.
- # * --grpc-endpoint — URL the VM guest uses to call the gateway
- # back. Loopback is accepted; scheme must
- # match TLS mode.
- # * --ssh-handshake-secret — shared secret for gateway↔sandbox SSH.
- # ---------------------------------------------------------------------------
-
- echo ""
- info "Next steps — start the gateway with the VM compute driver:"
- echo ""
-
- _driver_dir_arg=""
- case "$_driver_dir" in
- "${HOME}/.local/libexec/openshell"|"/usr/local/libexec/openshell"|"/usr/local/libexec") ;;
- *)
- _driver_dir_arg=" --driver-dir '${_driver_dir}' \\
-"
- ;;
- esac
-
- cat >&2 </dev/null
-fi
-exec "$BINARY" "$@"
diff --git a/tasks/scripts/vm/_lib.sh b/tasks/scripts/vm/_lib.sh
index b925492a3..3f70330cd 100755
--- a/tasks/scripts/vm/_lib.sh
+++ b/tasks/scripts/vm/_lib.sh
@@ -2,7 +2,7 @@
# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
-# Shared helpers for openshell-vm build scripts.
+# Shared helpers for VM runtime build scripts.
# Source this file from other scripts:
# source "$(dirname "${BASH_SOURCE[0]}")/_lib.sh"
diff --git a/tasks/scripts/vm/build-libkrun-macos.sh b/tasks/scripts/vm/build-libkrun-macos.sh
index 4e89deef7..339a73666 100755
--- a/tasks/scripts/vm/build-libkrun-macos.sh
+++ b/tasks/scripts/vm/build-libkrun-macos.sh
@@ -276,7 +276,7 @@ if $ALL_GOOD; then
echo ""
echo "All libraries are portable!"
echo ""
- echo "Next step: mise run vm:build"
+ echo "Next step: mise run vm:supervisor && cargo build -p openshell-driver-vm"
else
echo ""
echo "Warning: Some libraries have non-portable paths"
diff --git a/tasks/scripts/vm/build-libkrun.sh b/tasks/scripts/vm/build-libkrun.sh
index ec636f2a3..46e9c041d 100755
--- a/tasks/scripts/vm/build-libkrun.sh
+++ b/tasks/scripts/vm/build-libkrun.sh
@@ -28,12 +28,12 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/_lib.sh"
ROOT="$(vm_lib_root)"
-# Source pinned dependency versions
-source "${ROOT}/crates/openshell-vm/pins.env" 2>/dev/null || true
+# Source pinned dependency versions.
+source "${ROOT}/crates/openshell-driver-vm/runtime/pins.env" 2>/dev/null || true
BUILD_DIR="${ROOT}/target/libkrun-build"
OUTPUT_DIR="${BUILD_DIR}"
-KERNEL_CONFIG="${ROOT}/crates/openshell-vm/runtime/kernel/openshell.kconfig"
+KERNEL_CONFIG="${ROOT}/crates/openshell-driver-vm/runtime/kernel/openshell.kconfig"
if [ "$(uname -s)" != "Linux" ]; then
echo "Error: This script only runs on Linux" >&2
@@ -66,7 +66,7 @@ install_deps() {
if command -v apt-get &>/dev/null; then
# Debian/Ubuntu
- DEPS="build-essential git python3 python3-pip python3-pyelftools flex bison libelf-dev libssl-dev bc curl libclang-dev cpio zstd jq"
+ DEPS="build-essential git python3 python3-pip python3-pyelftools flex bison libelf-dev libssl-dev libcap-ng-dev bc curl libclang-dev cpio zstd jq"
MISSING=""
for dep in $DEPS; do
if ! dpkg -s "$dep" &>/dev/null; then
@@ -83,14 +83,14 @@ install_deps() {
elif command -v dnf &>/dev/null; then
# Fedora/RHEL
- DEPS="make git python3 python3-pyelftools gcc flex bison elfutils-libelf-devel openssl-devel bc glibc-static curl clang-devel cpio zstd jq"
+ DEPS="make git python3 python3-pyelftools gcc flex bison elfutils-libelf-devel openssl-devel libcap-ng-devel bc glibc-static curl clang-devel cpio zstd jq"
echo " Installing dependencies via dnf..."
$SUDO dnf install -y $DEPS
else
echo "Warning: Unknown package manager. Please install manually:" >&2
echo " build-essential git python3 python3-pyelftools flex bison" >&2
- echo " libelf-dev libssl-dev bc curl cpio" >&2
+ echo " libelf-dev libssl-dev libcap-ng-dev bc curl cpio" >&2
fi
}
@@ -237,6 +237,21 @@ make -j"$(nproc)"
# Copy output
cp libkrunfw.so* "$OUTPUT_DIR/"
+if [ ! -f ABI_VERSION ]; then
+ ABI_VERSION_VALUE="$(awk '/^ABI_VERSION[[:space:]]*[:?+]?=/ { value=$0; sub(/^[^=]*=/, "", value); gsub(/[[:space:]]/, "", value); print value; exit }' Makefile)"
+ if [ -z "$ABI_VERSION_VALUE" ]; then
+ echo "ERROR: could not determine libkrunfw ABI_VERSION from Makefile" >&2
+ exit 1
+ fi
+ printf '%s\n' "$ABI_VERSION_VALUE" > ABI_VERSION
+fi
+for artifact in kernel.c ABI_VERSION; do
+ if [ ! -f "$artifact" ]; then
+ echo "ERROR: expected libkrunfw export missing: $artifact" >&2
+ exit 1
+ fi
+ cp "$artifact" "$OUTPUT_DIR/"
+done
echo " Built: $(ls "$OUTPUT_DIR"/libkrunfw.so* | xargs -n1 basename | tr '\n' ' ')"
cd "$BUILD_DIR"
@@ -418,4 +433,4 @@ echo " Artifacts:"
ls -lah "$OUTPUT_DIR"/*.so*
echo ""
-echo "Next step: mise run vm:build"
+echo "Next step: mise run vm:supervisor && cargo build -p openshell-driver-vm"
diff --git a/tasks/scripts/vm/build-rootfs-tarball.sh b/tasks/scripts/vm/build-rootfs-tarball.sh
deleted file mode 100755
index 87abca27e..000000000
--- a/tasks/scripts/vm/build-rootfs-tarball.sh
+++ /dev/null
@@ -1,406 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-
-# Build rootfs and compress to tarball for embedding in openshell-vm binary.
-#
-# This script:
-# 1. Builds the rootfs using build-rootfs.sh
-# 2. Compresses it to a zstd tarball for embedding
-#
-# Usage:
-# ./build-rootfs-tarball.sh [--base|--gpu|--gpu-cuda]
-#
-# Options:
-# --base Build a base rootfs (~200-300MB) without pre-loaded images.
-# First boot will be slower but binary size is much smaller.
-# Default: full rootfs with pre-loaded images (~2GB+).
-# --gpu Build a GPU-augmented rootfs that layers kmod, nvidia kernel
-# modules, and nvidia firmware on top of the base rootfs.
-# Output: target/vm-runtime-compressed/rootfs-gpu.tar.zst
-# --gpu-cuda Like --gpu but also includes CUDA driver libraries
-# (libcuda.so, libnvidia-ptxjitcompiler.so) for CUDA workloads.
-#
-# The resulting tarball is placed at target/vm-runtime-compressed/rootfs.tar.zst
-# (or rootfs-gpu.tar.zst for --gpu) for inclusion in the embedded binary build.
-
-set -euo pipefail
-
-source "$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)/container-engine.sh"
-
-ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." && pwd)"
-ROOTFS_BUILD_DIR="${ROOT}/target/rootfs-build"
-OUTPUT_DIR="${ROOT}/target/vm-runtime-compressed"
-OUTPUT="${OUTPUT_DIR}/rootfs.tar.zst"
-
-KERNEL_VERSION="6.12.76"
-NVIDIA_MODULES_DIR="${ROOT}/target/libkrun-build/nvidia-modules"
-NVIDIA_USERSPACE_DIR="${ROOT}/target/libkrun-build/nvidia-userspace"
-
-# Parse arguments
-BASE_ONLY=false
-GPU_BUILD=false
-GPU_CUDA=false
-for arg in "$@"; do
- case "$arg" in
- --base)
- BASE_ONLY=true
- ;;
- --gpu)
- GPU_BUILD=true
- ;;
- --gpu-cuda)
- GPU_CUDA=true
- GPU_BUILD=true
- ;;
- --help|-h)
- echo "Usage: $0 [--base|--gpu|--gpu-cuda]"
- echo ""
- echo "Options:"
- echo " --base Build base rootfs (~200-300MB) without pre-loaded images"
- echo " First boot will be slower but binary size is much smaller"
- echo " --gpu Build GPU rootfs with kmod, nvidia modules, and firmware"
- echo " Layers on top of base rootfs, output: rootfs-gpu.tar.zst"
- echo " --gpu-cuda Like --gpu but also includes CUDA driver libraries"
- echo " (libcuda.so, libnvidia-ptxjitcompiler.so)"
- exit 0
- ;;
- *)
- echo "Unknown option: $arg"
- echo "Use --help for usage information"
- exit 1
- ;;
- esac
-done
-
-if [ "$GPU_BUILD" = true ]; then
- GPU_OUTPUT="${OUTPUT_DIR}/rootfs-gpu.tar.zst"
- GPU_ROOTFS_DIR="${ROOT}/target/rootfs-gpu-build"
- trap 'echo "ERROR: GPU rootfs build failed; cleaning up ${GPU_ROOTFS_DIR}" >&2; rm -rf "${GPU_ROOTFS_DIR}"' ERR
-
- echo "==> Building GPU rootfs for embedding"
- echo " Build dir: ${GPU_ROOTFS_DIR}"
- echo " Output: ${GPU_OUTPUT}"
- echo ""
-
- # Build base rootfs first if it doesn't exist
- if [ ! -d "${ROOTFS_BUILD_DIR}" ]; then
- echo "==> Step 1/3: Base rootfs not found, building it first..."
- "${ROOT}/crates/openshell-vm/scripts/build-rootfs.sh" --base "${ROOTFS_BUILD_DIR}"
- echo ""
- fi
-
- echo "==> Step 2/3: Layering GPU tools onto base rootfs..."
-
- rm -rf "${GPU_ROOTFS_DIR}"
- cp -a "${ROOTFS_BUILD_DIR}" "${GPU_ROOTFS_DIR}"
-
- # --- kmod ---
- KMOD_BIN="$(command -v kmod 2>/dev/null || true)"
- if [ -z "${KMOD_BIN}" ]; then
- echo "WARNING: kmod not found on host; skipping kmod installation"
- else
- echo " Installing kmod from ${KMOD_BIN}"
- mkdir -p "${GPU_ROOTFS_DIR}/bin"
- cp "${KMOD_BIN}" "${GPU_ROOTFS_DIR}/bin/kmod"
- chmod 755 "${GPU_ROOTFS_DIR}/bin/kmod"
-
- # Copy shared libraries required by kmod (host and guest must share compatible glibc)
- if command -v ldd &>/dev/null; then
- mkdir -p "${GPU_ROOTFS_DIR}/lib" "${GPU_ROOTFS_DIR}/lib64"
- ldd "${KMOD_BIN}" 2>/dev/null | while read -r line; do
- lib_path="$(echo "${line}" | sed -n 's/.* => \(\/[^ ]*\).*/\1/p')"
- if [ -n "${lib_path}" ] && [ -f "${lib_path}" ]; then
- # Skip core system libraries that already exist in the base rootfs.
- # The host glibc may be older and overwriting breaks rootfs binaries.
- lib_basename="$(basename "${lib_path}")"
- case "${lib_basename}" in
- libc.so*|libm.so*|libpthread.so*|libdl.so*|librt.so*|ld-linux*) continue ;;
- esac
- lib_dir="$(dirname "${lib_path}")"
- mkdir -p "${GPU_ROOTFS_DIR}${lib_dir}"
- cp -Lf "${lib_path}" "${GPU_ROOTFS_DIR}${lib_path}" 2>/dev/null || true
- fi
- done
- fi
-
- # Fix broken .so symlinks left by Docker export (e.g. libzstd.so.1.5.5 -> itself).
- # These cause ELOOP when the dynamic linker resolves the SONAME chain.
- # Use -xtype l to find symlinks whose targets are missing or circular.
- find "${GPU_ROOTFS_DIR}" -xtype l -name '*.so*' 2>/dev/null | while read -r broken; do
- sobase="$(basename "$broken" | sed 's/\.so.*/\.so/')"
- host_real="$(find /usr/lib /lib -name "${sobase}*" -type f 2>/dev/null | head -1)"
- if [ -n "$host_real" ]; then
- rm -f "$broken"
- cp -L "$host_real" "$broken" 2>/dev/null || true
- fi
- done || true
-
- mkdir -p "${GPU_ROOTFS_DIR}/usr/sbin"
- for tool in modprobe insmod rmmod lsmod depmod; do
- ln -sf ../../bin/kmod "${GPU_ROOTFS_DIR}/usr/sbin/${tool}"
- done
- echo " Created symlinks: modprobe insmod rmmod lsmod depmod -> ../../bin/kmod"
- fi
-
- # --- nvidia kernel modules ---
- MODULES_DST="${GPU_ROOTFS_DIR}/lib/modules/${KERNEL_VERSION}/kernel/drivers/video"
- if [ -d "${NVIDIA_MODULES_DIR}" ]; then
- ko_files=("${NVIDIA_MODULES_DIR}"/*.ko)
- if [ -e "${ko_files[0]}" ]; then
- mkdir -p "${MODULES_DST}"
- cp "${NVIDIA_MODULES_DIR}"/*.ko "${MODULES_DST}/"
- echo " Installed nvidia kernel modules into lib/modules/${KERNEL_VERSION}/kernel/drivers/video/"
- ls -1 "${MODULES_DST}"/*.ko | xargs -I{} basename {} | sed 's/^/ /'
- if command -v depmod &>/dev/null; then
- depmod -b "${GPU_ROOTFS_DIR}" "${KERNEL_VERSION}" 2>/dev/null || true
- echo " Generated modules.dep"
- fi
- else
- echo "WARNING: ${NVIDIA_MODULES_DIR} exists but contains no .ko files"
- fi
- else
- echo "WARNING: nvidia kernel modules not found at ${NVIDIA_MODULES_DIR}"
- echo " GPU rootfs will not contain nvidia drivers"
- fi
-
- # Determine the kernel module driver version so we can match firmware + userspace.
- NV_DRIVER_VERSION=""
- if command -v modinfo &>/dev/null && [ -f "${NVIDIA_MODULES_DIR}/nvidia.ko" ]; then
- NV_DRIVER_VERSION="$(modinfo -F version "${NVIDIA_MODULES_DIR}/nvidia.ko" 2>/dev/null || true)"
- fi
- if [ -n "${NV_DRIVER_VERSION}" ]; then
- echo " Kernel module driver version: ${NV_DRIVER_VERSION}"
- fi
-
- # --- nvidia firmware (GSP) ---
- # Prefer version-matched firmware from nvidia-firmware/ directory.
- # Fall back to host /lib/firmware/nvidia if version-matched is unavailable.
- rm -rf "${GPU_ROOTFS_DIR}/lib/firmware/nvidia" 2>/dev/null || true
- NVIDIA_FW_MATCHED_DIR="${ROOT}/target/libkrun-build/nvidia-firmware/${NV_DRIVER_VERSION}"
- FW_DST="${GPU_ROOTFS_DIR}/lib/firmware/nvidia/${NV_DRIVER_VERSION}"
- if [ -n "${NV_DRIVER_VERSION}" ] && [ -d "${NVIDIA_FW_MATCHED_DIR}" ]; then
- mkdir -p "${FW_DST}"
- cp "${NVIDIA_FW_MATCHED_DIR}"/*.bin "${FW_DST}/" 2>/dev/null || true
- echo " Installed nvidia firmware from ${NVIDIA_FW_MATCHED_DIR} (version-matched)"
- else
- HOST_FW_DIR=""
- for candidate in /lib/firmware/nvidia /usr/lib/firmware/nvidia; do
- if [ -d "${candidate}" ]; then
- HOST_FW_DIR="${candidate}"
- break
- fi
- done
- if [ -n "${HOST_FW_DIR}" ]; then
- mkdir -p "${GPU_ROOTFS_DIR}/lib/firmware/nvidia"
- cp -r "${HOST_FW_DIR}"/* "${GPU_ROOTFS_DIR}/lib/firmware/nvidia/" 2>/dev/null || true
- echo " Installed nvidia firmware from ${HOST_FW_DIR}"
- if [ -n "${NV_DRIVER_VERSION}" ]; then
- echo " WARNING: host firmware version may not match kernel module version ${NV_DRIVER_VERSION}"
- fi
- else
- echo "WARNING: nvidia firmware not found"
- echo " GPU guests may fail to initialize the GPU without GSP firmware"
- fi
- fi
-
- # --- nvidia userspace (nvidia-smi + NVML) ---
-
- # Remove any pre-existing nvidia userspace from the base rootfs to avoid
- # version conflicts. The base image may ship nvidia-smi and libs from a
- # different driver version than the kernel modules we're installing.
- for search_dir in "${GPU_ROOTFS_DIR}/usr/lib/x86_64-linux-gnu" \
- "${GPU_ROOTFS_DIR}/usr/lib64" \
- "${GPU_ROOTFS_DIR}/usr/lib"; do
- rm -f "${search_dir}"/libnvidia-ml.so* 2>/dev/null || true
- rm -f "${search_dir}"/libcuda.so* 2>/dev/null || true
- rm -f "${search_dir}"/libnvidia-ptxjitcompiler.so* 2>/dev/null || true
- done
- rm -f "${GPU_ROOTFS_DIR}/usr/bin/nvidia-smi" 2>/dev/null || true
- echo " Cleaned pre-existing nvidia userspace from base rootfs"
-
- # Prefer pre-extracted version-matched userspace from nvidia-userspace/.
- # Fall back to host binaries only if the pre-extracted ones don't exist.
- if [ -f "${NVIDIA_USERSPACE_DIR}/nvidia-smi" ]; then
- mkdir -p "${GPU_ROOTFS_DIR}/usr/bin"
- cp "${NVIDIA_USERSPACE_DIR}/nvidia-smi" "${GPU_ROOTFS_DIR}/usr/bin/nvidia-smi"
- chmod 755 "${GPU_ROOTFS_DIR}/usr/bin/nvidia-smi"
- echo " Installed nvidia-smi from ${NVIDIA_USERSPACE_DIR} (version-matched)"
- else
- NV_SMI="$(command -v nvidia-smi 2>/dev/null || true)"
- if [ -n "${NV_SMI}" ]; then
- mkdir -p "${GPU_ROOTFS_DIR}/usr/bin"
- cp "${NV_SMI}" "${GPU_ROOTFS_DIR}/usr/bin/nvidia-smi"
- chmod 755 "${GPU_ROOTFS_DIR}/usr/bin/nvidia-smi"
- echo " Installed nvidia-smi from host: ${NV_SMI}"
- echo " WARNING: host nvidia-smi version may not match kernel module version ${NV_DRIVER_VERSION}"
- else
- echo "WARNING: nvidia-smi not found; GPU rootfs will use mknod fallback"
- fi
- fi
-
- # libnvidia-ml.so — required by nvidia-smi (dlopen'd at runtime)
- if [ -f "${NVIDIA_USERSPACE_DIR}/libnvidia-ml.so.${NV_DRIVER_VERSION}" ]; then
- NV_ML_REAL="${NVIDIA_USERSPACE_DIR}/libnvidia-ml.so.${NV_DRIVER_VERSION}"
- NV_LIB_DEST="${GPU_ROOTFS_DIR}/usr/lib/x86_64-linux-gnu"
- mkdir -p "${NV_LIB_DEST}"
- cp "${NV_ML_REAL}" "${NV_LIB_DEST}/libnvidia-ml.so.${NV_DRIVER_VERSION}"
- ln -sf "libnvidia-ml.so.${NV_DRIVER_VERSION}" "${NV_LIB_DEST}/libnvidia-ml.so.1"
- ln -sf libnvidia-ml.so.1 "${NV_LIB_DEST}/libnvidia-ml.so"
- echo " Installed libnvidia-ml.so.${NV_DRIVER_VERSION} (version-matched)"
- else
- NV_ML_REAL=""
- for search_dir in /usr/lib/x86_64-linux-gnu /usr/lib64 /usr/lib; do
- NV_ML_REAL="$(find "${search_dir}" -maxdepth 1 -name 'libnvidia-ml.so.*.*.*' -type f 2>/dev/null | head -1)"
- [ -n "${NV_ML_REAL}" ] && break
- done
- if [ -n "${NV_ML_REAL}" ]; then
- NV_LIB_DIR="$(dirname "${NV_ML_REAL}")"
- mkdir -p "${GPU_ROOTFS_DIR}${NV_LIB_DIR}"
- cp "${NV_ML_REAL}" "${GPU_ROOTFS_DIR}${NV_ML_REAL}"
- ln -sf "$(basename "${NV_ML_REAL}")" "${GPU_ROOTFS_DIR}${NV_LIB_DIR}/libnvidia-ml.so.1"
- ln -sf libnvidia-ml.so.1 "${GPU_ROOTFS_DIR}${NV_LIB_DIR}/libnvidia-ml.so"
- echo " Installed libnvidia-ml.so from host: ${NV_ML_REAL}"
- echo " WARNING: host library version may not match kernel module version ${NV_DRIVER_VERSION}"
- else
- echo "WARNING: libnvidia-ml.so not found; nvidia-smi may not work at runtime"
- fi
- fi
-
- # --- CUDA driver libraries (optional, via --gpu-cuda) ---
- if [ "${GPU_CUDA}" = true ]; then
- echo " Installing CUDA driver libraries..."
-
- # libcuda.so
- if [ -f "${NVIDIA_USERSPACE_DIR}/libcuda.so.${NV_DRIVER_VERSION}" ]; then
- NV_LIB_DEST="${GPU_ROOTFS_DIR}/usr/lib/x86_64-linux-gnu"
- mkdir -p "${NV_LIB_DEST}"
- cp "${NVIDIA_USERSPACE_DIR}/libcuda.so.${NV_DRIVER_VERSION}" "${NV_LIB_DEST}/"
- ln -sf "libcuda.so.${NV_DRIVER_VERSION}" "${NV_LIB_DEST}/libcuda.so.1"
- ln -sf libcuda.so.1 "${NV_LIB_DEST}/libcuda.so"
- echo " Installed libcuda.so.${NV_DRIVER_VERSION} (version-matched)"
- else
- CUDA_REAL=""
- for search_dir in /usr/lib/x86_64-linux-gnu /usr/lib64 /usr/lib; do
- CUDA_REAL="$(find "${search_dir}" -maxdepth 1 -name 'libcuda.so.*.*.*' -type f 2>/dev/null | head -1)"
- [ -n "${CUDA_REAL}" ] && break
- done
- if [ -n "${CUDA_REAL}" ]; then
- CUDA_LIB_DIR="$(dirname "${CUDA_REAL}")"
- mkdir -p "${GPU_ROOTFS_DIR}${CUDA_LIB_DIR}"
- cp "${CUDA_REAL}" "${GPU_ROOTFS_DIR}${CUDA_REAL}"
- ln -sf "$(basename "${CUDA_REAL}")" "${GPU_ROOTFS_DIR}${CUDA_LIB_DIR}/libcuda.so.1"
- ln -sf libcuda.so.1 "${GPU_ROOTFS_DIR}${CUDA_LIB_DIR}/libcuda.so"
- echo " Installed libcuda.so from host: ${CUDA_REAL}"
- echo " WARNING: host library version may not match kernel module version ${NV_DRIVER_VERSION}"
- else
- echo "WARNING: libcuda.so not found; CUDA workloads will not work"
- fi
- fi
-
- # libnvidia-ptxjitcompiler.so
- if [ -f "${NVIDIA_USERSPACE_DIR}/libnvidia-ptxjitcompiler.so.${NV_DRIVER_VERSION}" ]; then
- NV_LIB_DEST="${GPU_ROOTFS_DIR}/usr/lib/x86_64-linux-gnu"
- mkdir -p "${NV_LIB_DEST}"
- cp "${NVIDIA_USERSPACE_DIR}/libnvidia-ptxjitcompiler.so.${NV_DRIVER_VERSION}" "${NV_LIB_DEST}/"
- ln -sf "libnvidia-ptxjitcompiler.so.${NV_DRIVER_VERSION}" "${NV_LIB_DEST}/libnvidia-ptxjitcompiler.so.1"
- ln -sf libnvidia-ptxjitcompiler.so.1 "${NV_LIB_DEST}/libnvidia-ptxjitcompiler.so"
- echo " Installed libnvidia-ptxjitcompiler.so.${NV_DRIVER_VERSION} (version-matched)"
- else
- PTX_REAL=""
- for search_dir in /usr/lib/x86_64-linux-gnu /usr/lib64 /usr/lib; do
- PTX_REAL="$(find "${search_dir}" -maxdepth 1 -name 'libnvidia-ptxjitcompiler.so.*.*.*' -type f 2>/dev/null | head -1)"
- [ -n "${PTX_REAL}" ] && break
- done
- if [ -n "${PTX_REAL}" ]; then
- PTX_LIB_DIR="$(dirname "${PTX_REAL}")"
- mkdir -p "${GPU_ROOTFS_DIR}${PTX_LIB_DIR}"
- cp "${PTX_REAL}" "${GPU_ROOTFS_DIR}${PTX_REAL}"
- ln -sf "$(basename "${PTX_REAL}")" "${GPU_ROOTFS_DIR}${PTX_LIB_DIR}/libnvidia-ptxjitcompiler.so.1"
- ln -sf libnvidia-ptxjitcompiler.so.1 "${GPU_ROOTFS_DIR}${PTX_LIB_DIR}/libnvidia-ptxjitcompiler.so"
- echo " Installed libnvidia-ptxjitcompiler.so from host: ${PTX_REAL}"
- echo " WARNING: host library version may not match kernel module version ${NV_DRIVER_VERSION}"
- else
- echo "WARNING: libnvidia-ptxjitcompiler.so not found; PTX JIT may not work"
- fi
- fi
- fi
-
- # Ensure nvidia library path is in ld.so.conf for dlopen resolution
- mkdir -p "${GPU_ROOTFS_DIR}/etc/ld.so.conf.d"
- echo "/usr/lib/x86_64-linux-gnu" > "${GPU_ROOTFS_DIR}/etc/ld.so.conf.d/nvidia.conf"
- if command -v ldconfig &>/dev/null; then
- ldconfig -r "${GPU_ROOTFS_DIR}" 2>/dev/null || true
- fi
-
- echo ""
- echo "==> Step 3/3: Compressing GPU rootfs to tarball..."
- mkdir -p "${OUTPUT_DIR}"
- rm -f "${GPU_OUTPUT}"
-
- echo " Uncompressed size: $(du -sh "${GPU_ROOTFS_DIR}" | cut -f1)"
- echo " Compressing with zstd (level 3)..."
- tar -C "${GPU_ROOTFS_DIR}" -cf - . | zstd -3 -T0 -o "${GPU_OUTPUT}"
-
- echo ""
- echo "==> GPU rootfs tarball created successfully!"
- echo " Output: ${GPU_OUTPUT}"
- echo " Compressed: $(du -sh "${GPU_OUTPUT}" | cut -f1)"
- echo " Type: gpu (kmod + nvidia modules + firmware)"
- echo ""
- echo "Next step: mise run vm:build"
- trap - ERR
- exit 0
-fi
-
-# Check if container engine is running
-if ! ce info &>/dev/null; then
- echo "Error: container engine is not running" >&2
- echo "Please start your container engine and try again" >&2
- exit 1
-fi
-
-if [ "$BASE_ONLY" = true ]; then
- echo "==> Building BASE rootfs for embedding"
- echo " Build dir: ${ROOTFS_BUILD_DIR}"
- echo " Output: ${OUTPUT}"
- echo " Mode: base (no pre-loaded images, ~200-300MB)"
- echo ""
-
- echo "==> Step 1/2: Building base rootfs..."
- "${ROOT}/crates/openshell-vm/scripts/build-rootfs.sh" --base "${ROOTFS_BUILD_DIR}"
-else
- echo "==> Building FULL rootfs for embedding"
- echo " Build dir: ${ROOTFS_BUILD_DIR}"
- echo " Output: ${OUTPUT}"
- echo " Mode: full (pre-loaded images, pre-initialized, ~2GB+)"
- echo ""
-
- echo "==> Step 1/2: Building full rootfs (this may take 10-15 minutes)..."
- "${ROOT}/crates/openshell-vm/scripts/build-rootfs.sh" "${ROOTFS_BUILD_DIR}"
-fi
-
-echo ""
-echo "==> Step 2/2: Compressing rootfs to tarball..."
-mkdir -p "${OUTPUT_DIR}"
-
-rm -f "${OUTPUT}"
-
-echo " Uncompressed size: $(du -sh "${ROOTFS_BUILD_DIR}" | cut -f1)"
-
-# -19 = high compression (slower but smaller)
-# -T0 = use all available threads
-echo " Compressing with zstd (level 19, this may take a few minutes)..."
-tar -C "${ROOTFS_BUILD_DIR}" -cf - . | zstd -19 -T0 -o "${OUTPUT}"
-
-echo ""
-echo "==> Rootfs tarball created successfully!"
-echo " Output: ${OUTPUT}"
-echo " Compressed: $(du -sh "${OUTPUT}" | cut -f1)"
-if [ "$BASE_ONLY" = true ]; then
- echo " Type: base (first boot ~30-60s, images pulled on demand)"
-else
- echo " Type: full (first boot ~3-5s, images pre-loaded)"
-fi
-echo ""
-echo "Next step: mise run vm:build"
diff --git a/tasks/scripts/vm/bundle-vm-runtime.sh b/tasks/scripts/vm/bundle-vm-runtime.sh
deleted file mode 100755
index 6c21e511d..000000000
--- a/tasks/scripts/vm/bundle-vm-runtime.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-
-# Stage the openshell-vm sidecar runtime bundle next to local build outputs.
-#
-# Copies the uncompressed VM runtime libraries (libkrun, libkrunfw, gvproxy)
-# from target/vm-runtime/ into the .runtime sidecar directories alongside
-# each build output. This is required for:
-# - build-rootfs.sh pre-initialization (boots the real VM to pre-bake k3s state)
-# - Direct invocation of target/debug/openshell-vm without embedding
-#
-# The source artifacts are collected by compress-vm-runtime.sh into
-# target/vm-runtime/ before compression; this script re-uses that work dir.
-#
-# Usage:
-# ./tasks/scripts/vm/bundle-vm-runtime.sh
-
-set -euo pipefail
-
-ROOT="$(git rev-parse --show-toplevel 2>/dev/null)" || ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
-
-SOURCE_DIR="${ROOT}/target/vm-runtime"
-
-if [ ! -d "${SOURCE_DIR}" ]; then
- echo "ERROR: VM runtime source not found at ${SOURCE_DIR}"
- echo " Run: mise run vm:setup"
- exit 1
-fi
-
-# Verify required files are present
-for required in libkrun.so gvproxy; do
- if ! ls "${SOURCE_DIR}/${required}" >/dev/null 2>&1; then
- # Try platform-specific variants
- if [ "$required" = "libkrun.so" ] && ls "${SOURCE_DIR}"/libkrun.dylib >/dev/null 2>&1; then
- continue
- fi
- echo "ERROR: Required runtime file not found: ${SOURCE_DIR}/${required}"
- echo " Run: mise run vm:setup"
- exit 1
- fi
-done
-
-TARGETS=(
- "${ROOT}/target/debug"
- "${ROOT}/target/release"
-)
-
-for target_dir in "${TARGETS[@]}"; do
- # Only stage if the binary exists (avoid creating orphan runtime dirs)
- if [ ! -f "${target_dir}/openshell-vm" ] && [ ! -f "${target_dir}/openshell-vm.d" ]; then
- continue
- fi
-
- runtime_dir="${target_dir}/openshell-vm.runtime"
- mkdir -p "${runtime_dir}"
-
- for file in "${SOURCE_DIR}"/*; do
- [ -f "$file" ] || continue
- name="$(basename "$file")"
- install -m 0755 "$file" "${runtime_dir}/${name}"
- done
-
- echo "staged runtime bundle in ${runtime_dir}"
-done
diff --git a/tasks/scripts/vm/codesign-openshell-vm.sh b/tasks/scripts/vm/codesign-openshell-vm.sh
deleted file mode 100755
index 0aeeca9b1..000000000
--- a/tasks/scripts/vm/codesign-openshell-vm.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-
-set -euo pipefail
-
-if [ "$(uname -s)" != "Darwin" ]; then
- exit 0
-fi
-
-ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." && pwd)"
-codesign --entitlements "${ROOT}/crates/openshell-vm/entitlements.plist" --force -s - "${ROOT}/target/debug/openshell-vm"
diff --git a/tasks/scripts/vm/compress-vm-runtime.sh b/tasks/scripts/vm/compress-vm-runtime.sh
index db5fbbd5b..50e0bc543 100755
--- a/tasks/scripts/vm/compress-vm-runtime.sh
+++ b/tasks/scripts/vm/compress-vm-runtime.sh
@@ -6,7 +6,7 @@
#
# This script collects libkrun, libkrunfw, and gvproxy from local sources
# (Homebrew on macOS, built from source on Linux) and compresses them with
-# zstd for embedding into the openshell-vm binary.
+# zstd for embedding into the openshell-driver-vm binary.
#
# Usage:
# ./compress-vm-runtime.sh
@@ -26,8 +26,8 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/_lib.sh"
ROOT="$(vm_lib_root)"
-# Source pins for gvproxy version
-source "${ROOT}/crates/openshell-vm/pins.env" 2>/dev/null || true
+# Source pins for gvproxy version.
+source "${ROOT}/crates/openshell-driver-vm/runtime/pins.env" 2>/dev/null || true
GVPROXY_VERSION="${GVPROXY_VERSION:-v0.8.8}"
# ── macOS dylib portability helpers ─────────────────────────────────────
@@ -83,7 +83,7 @@ if [ -z "${VM_RUNTIME_TARBALL:-}" ] && _check_compressed_artifacts "$OUTPUT_DIR"
echo "==> Compressed artifacts already present in ${OUTPUT_DIR} — skipping compression."
ls -lah "$OUTPUT_DIR"
- # Decompress artifacts into WORK_DIR so bundle-vm-runtime.sh can find them.
+ # Decompress artifacts into WORK_DIR for local inspection.
echo ""
echo "==> Decompressing artifacts into ${WORK_DIR} for runtime bundle..."
rm -rf "$WORK_DIR"
@@ -100,7 +100,7 @@ if [ -z "${VM_RUNTIME_TARBALL:-}" ] && _check_compressed_artifacts "$OUTPUT_DIR"
ls -lah "$WORK_DIR"
echo ""
- echo "Next step: cargo build -p openshell-vm"
+ echo "Next step: mise run vm:supervisor && cargo build -p openshell-driver-vm"
exit 0
fi
@@ -133,7 +133,7 @@ if [ -n "${VM_RUNTIME_TARBALL:-}" ]; then
else
echo ""
echo "Note: rootfs.tar.zst not found."
- echo " To build one, run: mise run vm:rootfs -- --base"
+ echo " openshell-driver-vm does not embed a standalone rootfs."
fi
echo ""
@@ -143,7 +143,7 @@ if [ -n "${VM_RUNTIME_TARBALL:-}" ]; then
echo ""
echo "==> Total compressed size: ${TOTAL}"
echo ""
- echo "Next step: mise run vm:build"
+ echo "Next step: mise run vm:supervisor && cargo build -p openshell-driver-vm"
exit 0
fi
@@ -279,9 +279,7 @@ if [ -f "$ROOTFS_TARBALL" ]; then
else
echo ""
echo "Note: rootfs.tar.zst not found."
- echo " To build one, run: mise run vm:rootfs -- --base"
- echo " Without it, the binary will still work but require the rootfs"
- echo " to be built separately on first run."
+ echo " openshell-driver-vm does not embed a standalone rootfs."
fi
echo ""
@@ -292,4 +290,4 @@ TOTAL=$(du -sh "$OUTPUT_DIR" | cut -f1)
echo ""
echo "==> Total compressed size: ${TOTAL}"
echo ""
-echo "Next step: mise run vm:build"
+echo "Next step: mise run vm:supervisor && cargo build -p openshell-driver-vm"
diff --git a/tasks/scripts/vm/download-kernel-runtime.sh b/tasks/scripts/vm/download-kernel-runtime.sh
index 8f0427af9..4b75b0217 100755
--- a/tasks/scripts/vm/download-kernel-runtime.sh
+++ b/tasks/scripts/vm/download-kernel-runtime.sh
@@ -2,17 +2,17 @@
# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
-# Download pre-built VM kernel runtime artifacts from the vm-dev GitHub Release
-# and stage them for the openshell-vm cargo build.
+# Download pre-built VM kernel runtime artifacts from the vm-runtime GitHub Release
+# and stage them for the openshell-driver-vm cargo build.
#
-# This script is used by CI (release-vm-dev.yml) and can also be used locally
+# This script is used by driver release CI and can also be used locally
# to avoid building libkrun/libkrunfw from source.
#
# Usage:
# ./download-kernel-runtime.sh [--platform PLATFORM]
#
# Environment:
-# VM_RUNTIME_RELEASE_TAG - GitHub Release tag (default: vm-dev)
+# VM_RUNTIME_RELEASE_TAG - GitHub Release tag (default: vm-runtime)
# GITHUB_REPOSITORY - owner/repo (default: NVIDIA/OpenShell)
# OPENSHELL_VM_RUNTIME_COMPRESSED_DIR - Output directory (default: target/vm-runtime-compressed)
#
@@ -24,7 +24,7 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/_lib.sh"
ROOT="$(vm_lib_root)"
-RELEASE_TAG="${VM_RUNTIME_RELEASE_TAG:-vm-dev}"
+RELEASE_TAG="${VM_RUNTIME_RELEASE_TAG:-vm-runtime}"
REPO="${GITHUB_REPOSITORY:-NVIDIA/OpenShell}"
OUTPUT_DIR="${OPENSHELL_VM_RUNTIME_COMPRESSED_DIR:-${ROOT}/target/vm-runtime-compressed}"
@@ -38,12 +38,12 @@ while [[ $# -gt 0 ]]; do
--help|-h)
echo "Usage: $0 [--platform PLATFORM]"
echo ""
- echo "Download pre-built VM kernel runtime from the vm-dev GitHub Release."
+ echo "Download pre-built VM kernel runtime from the vm-runtime GitHub Release."
echo ""
echo "Platforms: linux-aarch64, linux-x86_64, darwin-aarch64"
echo ""
echo "Environment:"
- echo " VM_RUNTIME_RELEASE_TAG Release tag (default: vm-dev)"
+ echo " VM_RUNTIME_RELEASE_TAG Release tag (default: vm-runtime)"
echo " GITHUB_REPOSITORY owner/repo (default: NVIDIA/OpenShell)"
echo " OPENSHELL_VM_RUNTIME_COMPRESSED_DIR Output directory"
exit 0
@@ -90,7 +90,7 @@ gh release download "${RELEASE_TAG}" \
if [ ! -f "${DOWNLOAD_DIR}/${TARBALL_NAME}" ]; then
echo "Error: Download failed — ${TARBALL_NAME} not found." >&2
echo "" >&2
- echo "The vm-dev release may not have kernel runtime artifacts yet." >&2
+ echo "The vm-runtime release may not have kernel runtime artifacts yet." >&2
echo "Run the 'Release VM Kernel' workflow first:" >&2
echo " gh workflow run release-vm-kernel.yml" >&2
exit 1
@@ -120,17 +120,6 @@ ls -lah "$EXTRACT_DIR"
echo ""
compress_dir "$EXTRACT_DIR" "$OUTPUT_DIR"
-# ── Check for rootfs (may already be present from a separate build step) ──
-
-if [ -f "${OUTPUT_DIR}/rootfs.tar.zst" ]; then
- echo ""
- echo " rootfs.tar.zst: $(du -h "${OUTPUT_DIR}/rootfs.tar.zst" | cut -f1) (pre-existing)"
-else
- echo ""
- echo "Note: rootfs.tar.zst not found in ${OUTPUT_DIR}."
- echo " Build it with: mise run vm:rootfs -- --base"
-fi
-
echo ""
echo "==> Staged artifacts in ${OUTPUT_DIR}:"
ls -lah "$OUTPUT_DIR"
@@ -138,4 +127,4 @@ ls -lah "$OUTPUT_DIR"
echo ""
echo "==> Done."
echo ""
-echo "Next step: mise run vm:build"
+echo "Next step: mise run vm:supervisor && cargo build -p openshell-driver-vm"
diff --git a/tasks/scripts/vm/ensure-vm-rootfs.sh b/tasks/scripts/vm/ensure-vm-rootfs.sh
deleted file mode 100755
index 3cf9ddfc6..000000000
--- a/tasks/scripts/vm/ensure-vm-rootfs.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-
-set -euo pipefail
-
-ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." && pwd)"
-GATEWAY_BIN="${ROOT}/target/debug/openshell-vm"
-
-NAME="default"
-ROOTFS_ARGS=()
-
-while [[ $# -gt 0 ]]; do
- case "$1" in
- --name)
- NAME="$2"
- shift 2
- ;;
- --name=*)
- NAME="${1#--name=}"
- shift
- ;;
- --rootfs)
- ROOTFS_ARGS=("$1" "$2")
- shift 2
- ;;
- --rootfs=*)
- ROOTFS_ARGS=("$1")
- shift
- ;;
- *)
- echo "Unknown argument: $1" >&2
- exit 1
- ;;
- esac
-done
-
-if [ ! -x "${GATEWAY_BIN}" ]; then
- echo "ERROR: openshell-vm binary not found at ${GATEWAY_BIN}" >&2
- echo " Run: mise run vm:build" >&2
- exit 1
-fi
-
-prepare_args=(--name "${NAME}")
-if [ "${#ROOTFS_ARGS[@]}" -gt 0 ]; then
- prepare_args=("${ROOTFS_ARGS[@]}" "${prepare_args[@]}")
-fi
-if [ "${OPENSHELL_VM_FORCE_ROOTFS_REBUILD:-}" = "1" ]; then
- prepare_args+=(prepare-rootfs --force)
-else
- prepare_args+=(prepare-rootfs)
-fi
-
-if ROOTFS_PATH="$("${GATEWAY_BIN}" "${prepare_args[@]}" 2>/dev/null)"; then
- echo "using openshell-vm rootfs at ${ROOTFS_PATH}"
- exit 0
-fi
-
-# prepare-rootfs failed — no embedded rootfs in the binary.
-# Fall back to target/rootfs-build if it exists (rootfs was built separately
-# but not yet compressed for embedding via mise run vm:rootfs).
-if [ "${#ROOTFS_ARGS[@]}" -eq 0 ]; then
- FALLBACK_ROOTFS="${ROOT}/target/rootfs-build"
- if [ -d "${FALLBACK_ROOTFS}/srv" ]; then
- echo "using openshell-vm rootfs at ${FALLBACK_ROOTFS}"
- exit 0
- fi
-fi
-
-echo "ERROR: No rootfs available." >&2
-echo " Run: mise run vm:rootfs -- --base # build rootfs (~5-10 min, requires Docker)" >&2
-exit 1
diff --git a/tasks/scripts/vm/package-vm-runtime.sh b/tasks/scripts/vm/package-vm-runtime.sh
index f97eec870..a986a81d2 100755
--- a/tasks/scripts/vm/package-vm-runtime.sh
+++ b/tasks/scripts/vm/package-vm-runtime.sh
@@ -5,7 +5,7 @@
# Package VM runtime artifacts into a release tarball.
#
# Used by CI (release-vm-kernel.yml) to bundle libkrun, libkrunfw, and gvproxy
-# into a platform-specific tarball for the vm-dev GitHub Release. Handles
+# into a platform-specific tarball for the vm-runtime GitHub Release. Handles
# gvproxy download, provenance metadata generation, and tarball creation.
#
# Usage:
@@ -28,8 +28,8 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/_lib.sh"
ROOT="$(vm_lib_root)"
-# Source pins for gvproxy version
-source "${ROOT}/crates/openshell-vm/pins.env" 2>/dev/null || true
+# Source pins for gvproxy version.
+source "${ROOT}/crates/openshell-driver-vm/runtime/pins.env" 2>/dev/null || true
GVPROXY_VERSION="${GVPROXY_VERSION:-v0.8.8}"
PLATFORM=""
diff --git a/tasks/scripts/vm/run-vm.sh b/tasks/scripts/vm/run-vm.sh
deleted file mode 100755
index 630d1eecd..000000000
--- a/tasks/scripts/vm/run-vm.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-
-set -euo pipefail
-
-ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." && pwd)"
-RUNTIME_DIR="${ROOT}/target/debug/openshell-vm.runtime"
-GATEWAY_BIN="${ROOT}/target/debug/openshell-vm"
-
-if [ "$(uname -s)" = "Darwin" ]; then
- export DYLD_FALLBACK_LIBRARY_PATH="${RUNTIME_DIR}${DYLD_FALLBACK_LIBRARY_PATH:+:${DYLD_FALLBACK_LIBRARY_PATH}}"
-fi
-
-args=("$@")
-name="default"
-rootfs_args=()
-expect_name=0
-expect_rootfs=0
-subcommand=""
-skip_prepare=0
-
-for arg in "${args[@]}"; do
- if [ "${expect_name}" -eq 1 ]; then
- name="${arg}"
- expect_name=0
- continue
- fi
-
- if [ "${expect_rootfs}" -eq 1 ]; then
- rootfs_args=(--rootfs "${arg}")
- expect_rootfs=0
- continue
- fi
-
- case "${arg}" in
- --name)
- expect_name=1
- ;;
- --name=*)
- name="${arg#--name=}"
- ;;
- --rootfs)
- expect_rootfs=1
- ;;
- --rootfs=*)
- rootfs_args=("${arg}")
- ;;
- --help|-h|--version)
- skip_prepare=1
- ;;
- exec|prepare-rootfs)
- subcommand="${arg}"
- break
- ;;
- esac
-done
-
-if [ "${skip_prepare}" -eq 0 ] && [ -z "${subcommand}" ]; then
- prep_args=(--name "${name}")
- if [ "${#rootfs_args[@]}" -gt 0 ]; then
- prep_args=("${rootfs_args[@]}" "${prep_args[@]}")
- fi
- resolved_rootfs="$("${ROOT}/tasks/scripts/vm/ensure-vm-rootfs.sh" "${prep_args[@]}" \
- | tail -n 1 | sed 's/^using openshell-vm rootfs at //')"
- "${ROOT}/tasks/scripts/vm/sync-vm-rootfs.sh" "${prep_args[@]}"
-
- # When no --rootfs was supplied by the caller, inject the resolved rootfs path
- # so the binary finds the rootfs regardless of whether it is embedded.
- if [ "${#rootfs_args[@]}" -eq 0 ] && [ -n "${resolved_rootfs}" ]; then
- args=(--rootfs "${resolved_rootfs}" "${args[@]}")
- fi
-fi
-
-exec "${GATEWAY_BIN}" "${args[@]}"
diff --git a/tasks/scripts/vm/sync-vm-rootfs.sh b/tasks/scripts/vm/sync-vm-rootfs.sh
deleted file mode 100755
index f7b93b971..000000000
--- a/tasks/scripts/vm/sync-vm-rootfs.sh
+++ /dev/null
@@ -1,188 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-
-# Sync mutable development artifacts into the existing VM rootfs.
-# Runs on every `mise run vm` so that script changes, helm chart
-# updates, manifest changes, and supervisor binary rebuilds are
-# picked up without a full rootfs rebuild.
-#
-# This is fast (<1s) — it only copies files, no Docker or VM boot.
-
-set -euo pipefail
-
-source "$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)/container-engine.sh"
-
-ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." && pwd)"
-SCRIPT_DIR="${ROOT}/crates/openshell-vm/scripts"
-IMAGE_REPO_BASE="${IMAGE_REPO_BASE:-openshell}"
-IMAGE_TAG="${IMAGE_TAG:-dev}"
-SERVER_IMAGE="${IMAGE_REPO_BASE}/gateway:${IMAGE_TAG}"
-NAME="default"
-ROOTFS_ARGS=()
-
-while [[ $# -gt 0 ]]; do
- case "$1" in
- --name)
- NAME="$2"
- shift 2
- ;;
- --name=*)
- NAME="${1#--name=}"
- shift
- ;;
- --rootfs)
- ROOTFS_ARGS=("$1" "$2")
- shift 2
- ;;
- --rootfs=*)
- ROOTFS_ARGS=("$1")
- shift
- ;;
- *)
- echo "Unknown argument: $1" >&2
- exit 1
- ;;
- esac
-done
-
-ensure_args=(--name "${NAME}")
-if [ "${#ROOTFS_ARGS[@]}" -gt 0 ]; then
- ensure_args=("${ROOTFS_ARGS[@]}" "${ensure_args[@]}")
-fi
-
-if ! ROOTFS_DIR="$("${ROOT}/tasks/scripts/vm/ensure-vm-rootfs.sh" "${ensure_args[@]}" | tail -n 1 | sed 's/^using openshell-vm rootfs at //')"; then
- echo "ERROR: ensure-vm-rootfs.sh failed — no rootfs available." >&2
- exit 1
-fi
-
-patch_vm_helmchart() {
- local helmchart="$1"
- [ -f "${helmchart}" ] || return 0
-
- sed_in_place() {
- local expr="$1"
- sed -i.bak -E "${expr}" "${helmchart}"
- rm -f "${helmchart}.bak"
- }
-
- # Mirror the build-rootfs patching so the VM keeps using the locally
- # imported openshell/gateway:dev image after incremental rootfs syncs.
- sed_in_place 's|__IMAGE_PULL_POLICY__|IfNotPresent|g'
- sed_in_place 's|__SANDBOX_IMAGE_PULL_POLICY__|"IfNotPresent"|g'
- sed_in_place 's|__DB_URL__|"sqlite:/tmp/openshell.db"|g'
- sed_in_place "s|repository:[[:space:]]*[^[:space:]]+|repository: ${SERVER_IMAGE%:*}|"
- sed_in_place "s|tag:[[:space:]]*\"?[^\"[:space:]]+\"?|tag: \"${IMAGE_TAG}\"|"
- sed_in_place 's|sshGatewayHost: __SSH_GATEWAY_HOST__|sshGatewayHost: ""|g'
- sed_in_place 's|sshGatewayPort: __SSH_GATEWAY_PORT__|sshGatewayPort: 0|g'
- sed_in_place 's|__DISABLE_GATEWAY_AUTH__|false|g'
- sed_in_place 's|__DISABLE_TLS__|false|g'
- sed_in_place 's|hostGatewayIP: __HOST_GATEWAY_IP__|hostGatewayIP: ""|g'
- sed_in_place '/__CHART_CHECKSUM__/d'
-}
-
-if [ ! -d "${ROOTFS_DIR}/srv" ]; then
- # Rootfs doesn't exist yet — nothing to sync. ensure-vm-rootfs.sh
- # or build-rootfs.sh will create it.
- exit 0
-fi
-
-echo "Syncing development artifacts into rootfs..."
-
-# ── Init scripts and utilities ─────────────────────────────────────────
-for script in openshell-vm-init.sh openshell-vm-exec-agent.py check-vm-capabilities.sh; do
- src="${SCRIPT_DIR}/${script}"
- dst="${ROOTFS_DIR}/srv/${script}"
- if [ -f "$src" ]; then
- if ! cmp -s "$src" "$dst" 2>/dev/null; then
- cp "$src" "$dst"
- chmod +x "$dst"
- echo " updated: /srv/${script}"
- fi
- fi
-done
-
-# ── Helm chart ─────────────────────────────────────────────────────────
-HELM_CHART_DIR="${ROOT}/deploy/helm/openshell"
-CHART_STAGING="${ROOTFS_DIR}/opt/openshell/charts"
-if [ -d "${HELM_CHART_DIR}" ]; then
- if ! command -v helm >/dev/null 2>&1; then
- echo " warning: helm not found — skipping chart sync (run: mise install)" >&2
- else
- mkdir -p "${CHART_STAGING}"
- # Package into a temp dir and compare — only update if changed.
- TMP_CHART=$(mktemp -d)
- helm package "${HELM_CHART_DIR}" -d "${TMP_CHART}" >/dev/null 2>&1
- for tgz in "${TMP_CHART}"/*.tgz; do
- [ -f "$tgz" ] || continue
- base=$(basename "$tgz")
- if ! cmp -s "$tgz" "${CHART_STAGING}/${base}" 2>/dev/null; then
- cp "$tgz" "${CHART_STAGING}/${base}"
- echo " updated: /opt/openshell/charts/${base}"
- fi
- done
- rm -rf "${TMP_CHART}"
- fi
-fi
-
-# ── Kubernetes manifests ───────────────────────────────────────────────
-MANIFEST_SRC="${ROOT}/deploy/kube/manifests"
-MANIFEST_DST="${ROOTFS_DIR}/opt/openshell/manifests"
-if [ -d "${MANIFEST_SRC}" ]; then
- mkdir -p "${MANIFEST_DST}"
- for manifest in "${MANIFEST_SRC}"/*.yaml; do
- [ -f "$manifest" ] || continue
- base=$(basename "$manifest")
- if ! cmp -s "$manifest" "${MANIFEST_DST}/${base}" 2>/dev/null; then
- cp "$manifest" "${MANIFEST_DST}/${base}"
- echo " updated: /opt/openshell/manifests/${base}"
- fi
- done
-fi
-
-patch_vm_helmchart "${MANIFEST_DST}/openshell-helmchart.yaml"
-patch_vm_helmchart "${ROOTFS_DIR}/var/lib/rancher/k3s/server/manifests/openshell-helmchart.yaml"
-
-# ── Gateway image tarball ──────────────────────────────────────────────
-# The VM rootfs airgap-imports openshell/gateway:dev from k3s/agent/images/.
-# Keep that tarball in sync with the local Docker image so `mise run e2e:vm`
-# validates the current openshell-server code, not whatever image happened to
-# be baked into the rootfs last time it was rebuilt.
-SERVER_IMAGE_TAR="${ROOTFS_DIR}/var/lib/rancher/k3s/agent/images/openshell-server.tar.zst"
-SERVER_IMAGE_ID_FILE="${ROOTFS_DIR}/opt/openshell/.gateway-image-id"
-if ce image inspect "${SERVER_IMAGE}" >/dev/null 2>&1; then
- current_image_id=$(ce image inspect --format '{{.Id}}' "${SERVER_IMAGE}")
- previous_image_id=""
- if [ -f "${SERVER_IMAGE_ID_FILE}" ]; then
- previous_image_id=$(cat "${SERVER_IMAGE_ID_FILE}")
- fi
-
- if [ "${current_image_id}" != "${previous_image_id}" ] || [ ! -f "${SERVER_IMAGE_TAR}" ]; then
- mkdir -p "$(dirname "${SERVER_IMAGE_TAR}")" "$(dirname "${SERVER_IMAGE_ID_FILE}")"
- tmp_tar=$(mktemp /tmp/openshell-server-image.XXXXXX)
- ce save "${SERVER_IMAGE}" | zstd -f -T0 -3 -o "${tmp_tar}" >/dev/null
- mv "${tmp_tar}" "${SERVER_IMAGE_TAR}"
- printf '%s\n' "${current_image_id}" > "${SERVER_IMAGE_ID_FILE}"
- echo " updated: /var/lib/rancher/k3s/agent/images/openshell-server.tar.zst"
- fi
-fi
-
-# ── Supervisor binary ─────────────────────────────────────────────────
-SUPERVISOR_TARGET="aarch64-unknown-linux-gnu"
-SUPERVISOR_BIN="${ROOT}/target/${SUPERVISOR_TARGET}/release/openshell-sandbox"
-SUPERVISOR_DST="${ROOTFS_DIR}/opt/openshell/bin/openshell-sandbox"
-if [ -f "${SUPERVISOR_BIN}" ]; then
- mkdir -p "$(dirname "${SUPERVISOR_DST}")"
- if ! cmp -s "${SUPERVISOR_BIN}" "${SUPERVISOR_DST}" 2>/dev/null; then
- cp "${SUPERVISOR_BIN}" "${SUPERVISOR_DST}"
- chmod +x "${SUPERVISOR_DST}"
- echo " updated: /opt/openshell/bin/openshell-sandbox"
- fi
-fi
-
-# ── Fix execute permissions on k3s data binaries ──────────────────────
-# docker export and macOS virtio-fs can strip execute bits.
-chmod +x "${ROOTFS_DIR}"/var/lib/rancher/k3s/data/*/bin/* 2>/dev/null || true
-chmod +x "${ROOTFS_DIR}"/var/lib/rancher/k3s/data/*/bin/aux/* 2>/dev/null || true
-
-echo "Sync complete."
diff --git a/tasks/scripts/vm/vm-clean.sh b/tasks/scripts/vm/vm-clean.sh
deleted file mode 100755
index c293348d0..000000000
--- a/tasks/scripts/vm/vm-clean.sh
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-
-# Remove all openshell-vm cached artifacts.
-#
-# Use this when you need a clean slate — after running this, you will need to
-# re-run `mise run vm:setup` before building again.
-#
-# Usage:
-# ./vm-clean.sh # clean VM-specific artifacts
-# ./vm-clean.sh --all # also remove the compiled binary
-
-set -euo pipefail
-
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-source "${SCRIPT_DIR}/_lib.sh"
-ROOT="$(vm_lib_root)"
-
-CLEAN_ALL=0
-while [[ $# -gt 0 ]]; do
- case "$1" in
- --all)
- CLEAN_ALL=1
- shift
- ;;
- --help|-h)
- echo "Usage: $0 [--all]"
- echo ""
- echo "Remove all openshell-vm cached build artifacts."
- echo ""
- echo "Options:"
- echo " --all Also remove compiled binaries (target/debug/openshell-vm)"
- exit 0
- ;;
- *)
- echo "Unknown argument: $1" >&2
- exit 1
- ;;
- esac
-done
-
-echo "==> Cleaning openshell-vm artifacts..."
-
-removed=0
-
-remove_if_exists() {
- local path="$1"
- local label="$2"
- if [ -e "$path" ]; then
- local size
- size="$(du -sh "$path" 2>/dev/null | cut -f1 || echo "?")"
- rm -rf "$path"
- echo " Removed ${label} (${size}): ${path}"
- removed=$((removed + 1))
- fi
-}
-
-# Build artifacts under target/
-remove_if_exists "${ROOT}/target/vm-runtime" "uncompressed staging"
-remove_if_exists "${ROOT}/target/vm-runtime-compressed" "compressed artifacts"
-remove_if_exists "${ROOT}/target/vm-runtime-download" "downloaded tarballs"
-remove_if_exists "${ROOT}/target/vm-runtime-extracted" "extraction temp"
-remove_if_exists "${ROOT}/target/libkrun-build" "libkrun source build"
-remove_if_exists "${ROOT}/target/custom-runtime" "custom libkrunfw"
-remove_if_exists "${ROOT}/target/rootfs-build" "rootfs directory"
-
-# Named instance rootfs directories
-XDG_DATA="${XDG_DATA_HOME:-${HOME}/.local/share}"
-VM_DATA_DIR="${XDG_DATA}/openshell/openshell-vm"
-remove_if_exists "${VM_DATA_DIR}" "named instance rootfs"
-
-# Embedded runtime cache
-VM_RUNTIME_CACHE="${XDG_DATA}/openshell/vm-runtime"
-remove_if_exists "${VM_RUNTIME_CACHE}" "embedded runtime cache"
-
-if [ "$CLEAN_ALL" -eq 1 ]; then
- # Remove compiled binaries and sidecar bundles
- for profile in debug release; do
- remove_if_exists "${ROOT}/target/${profile}/openshell-vm" "${profile} binary"
- remove_if_exists "${ROOT}/target/${profile}/openshell-vm.runtime" "${profile} runtime bundle"
- done
-fi
-
-echo ""
-if [ "$removed" -eq 0 ]; then
- echo " Nothing to clean."
-else
- echo " Removed ${removed} item(s)."
-fi
-echo ""
-echo "Next step: mise run vm:setup"
diff --git a/tasks/scripts/vm/vm-setup.sh b/tasks/scripts/vm/vm-setup.sh
index 7563819b9..5cf87d05a 100755
--- a/tasks/scripts/vm/vm-setup.sh
+++ b/tasks/scripts/vm/vm-setup.sh
@@ -2,12 +2,12 @@
# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
-# One-time setup for the openshell-vm runtime.
+# One-time setup for the openshell-driver-vm runtime.
#
# Downloads pre-built runtime artifacts (libkrun, libkrunfw, gvproxy) from the
-# vm-dev GitHub Release, or builds them from source when --from-source is set.
+# vm-runtime GitHub Release, or builds them from source when --from-source is set.
# After obtaining the runtime, compresses the artifacts for embedding into the
-# openshell-vm binary.
+# openshell-driver-vm binary.
#
# Usage:
# ./vm-setup.sh # download pre-built (default, ~30s)
@@ -34,7 +34,7 @@ while [[ $# -gt 0 ]]; do
--help|-h)
echo "Usage: $0 [--from-source]"
echo ""
- echo "Set up the openshell-vm runtime (libkrun, libkrunfw, gvproxy)."
+ echo "Set up the openshell-driver-vm runtime (libkrun, libkrunfw, gvproxy)."
echo ""
echo "Options:"
echo " --from-source Build runtime from source instead of downloading (~15-45min)"
@@ -52,7 +52,7 @@ while [[ $# -gt 0 ]]; do
done
PLATFORM="$(detect_platform)"
-echo "==> openshell-vm setup"
+echo "==> openshell-driver-vm setup"
echo " Platform: ${PLATFORM}"
echo " Mode: $([ "$FROM_SOURCE" = "1" ] && echo "build from source" || echo "download pre-built")"
echo ""
diff --git a/tasks/test.toml b/tasks/test.toml
index da0fe8cc0..ddb0d6bab 100644
--- a/tasks/test.toml
+++ b/tasks/test.toml
@@ -17,7 +17,7 @@ depends = ["e2e:python:gpu"]
["test:rust"]
description = "Run Rust tests"
-run = "cargo test --workspace --exclude openshell-vm"
+run = "cargo test --workspace"
hide = true
["test:python"]
diff --git a/tasks/vm.toml b/tasks/vm.toml
index e9eb22561..538168c14 100644
--- a/tasks/vm.toml
+++ b/tasks/vm.toml
@@ -1,54 +1,29 @@
# SPDX-FileCopyrightText: Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
-# openshell-vm development tasks
+# VM driver development tasks
#
# Workflow:
# mise run vm:setup # one-time: download pre-built runtime (~30s)
# mise run vm:supervisor # build the bundled sandbox supervisor
# mise run gateway:vm # start openshell-gateway with the VM driver
# # (defined in tasks/gateway.toml)
-# mise run vm # build + run the standalone openshell-vm microVM
-# mise run vm:clean # wipe everything and start over
#
# See tasks/gateway.toml for `gateway:vm`,
-# crates/openshell-driver-vm/README.md for the VM driver workflow, and
-# crates/openshell-vm/README.md for the standalone microVM path.
+# crates/openshell-driver-vm/README.md for the VM driver workflow.
# ═══════════════════════════════════════════════════════════════════════════
# Main Commands
# ═══════════════════════════════════════════════════════════════════════════
-[vm]
-description = "Build and run the standalone openshell-vm microVM"
-depends = ["build:docker:gateway"]
-run = ["mise run vm:build", "tasks/scripts/vm/run-vm.sh"]
-
-["vm:build"]
-description = "Build the openshell-vm binary with embedded runtime"
-run = [
- "tasks/scripts/vm/compress-vm-runtime.sh",
- "OPENSHELL_VM_RUNTIME_COMPRESSED_DIR=$PWD/target/vm-runtime-compressed cargo build -p openshell-vm",
- "tasks/scripts/vm/codesign-openshell-vm.sh",
- "tasks/scripts/vm/bundle-vm-runtime.sh",
-]
-
["vm:setup"]
-description = "One-time setup: download (or build) the VM runtime"
+description = "One-time setup: download (or build) the VM driver runtime"
run = "tasks/scripts/vm/vm-setup.sh"
["vm:supervisor"]
description = "Build the bundled openshell-sandbox supervisor for openshell-driver-vm"
run = "tasks/scripts/vm/build-supervisor-bundle.sh"
-["vm:rootfs"]
-description = "Build the VM rootfs tarball (use -- --base for lightweight)"
-run = "tasks/scripts/vm/build-rootfs-tarball.sh"
-
-["vm:clean"]
-description = "Remove all VM cached artifacts (runtime, rootfs, builds)"
-run = "tasks/scripts/vm/vm-clean.sh"
-
["vm:smoke:orphan-cleanup"]
description = "Smoke test: start gateway+driver, create a sandbox, signal the gateway, assert no orphaned processes survive"
run = "tasks/scripts/vm/smoke-orphan-cleanup.sh"