From 105fb9d3806cd007eae3a06376a6917d92e1077e Mon Sep 17 00:00:00 2001 From: YanLien Date: Thu, 13 Nov 2025 15:19:30 +0800 Subject: [PATCH 1/4] Enhance kernel functionality and add Phytium SDCard driver support --- Cargo.lock | 120 ++- crates/nop/src/lib.rs | 2 +- doc/shell.md | 349 +++++++- kernel/src/hal/mod.rs | 2 +- kernel/src/shell/command/vm.rs | 1277 +++++++++++++++++++++-------- kernel/src/shell/mod.rs | 2 + kernel/src/task.rs | 25 +- kernel/src/vmm/config.rs | 7 +- kernel/src/vmm/mod.rs | 6 +- kernel/src/vmm/vcpus.rs | 107 ++- modules/driver/Cargo.toml | 4 +- modules/driver/src/blk/mod.rs | 3 + modules/driver/src/blk/phytium.rs | 304 +++++++ 13 files changed, 1775 insertions(+), 433 deletions(-) create mode 100644 modules/driver/src/blk/phytium.rs diff --git a/Cargo.lock b/Cargo.lock index 283b3b0f..a4f7ffe0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -227,8 +227,7 @@ dependencies = [ [[package]] name = "arm_vcpu" version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f227e99f64201b2fa5d8a0db32cf1f914b0f04225abeeb80eb10e35076be7e" +source = "git+https://github.com/arceos-hypervisor/arm_vcpu?branch=next#b24cc3635c049302ab8d58d3b54007bb5a053a96" dependencies = [ "aarch64-cpu", "axaddrspace", @@ -240,7 +239,6 @@ dependencies = [ "numeric-enum-macro", "percpu", "spin 0.10.0", - "tock-registers 0.9.0", ] [[package]] @@ -893,7 +891,7 @@ dependencies = [ "axcpu", "axplat 0.2.0 (git+https://github.com/arceos-hypervisor/axplat_crates.git?tag=vmm-v0.3.0)", "bitflags 2.10.0", - "heapless 0.9.1", + "heapless 0.9.2", "int_ratio", "kspin", "lazyinit", @@ -1008,7 +1006,7 @@ dependencies = [ "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.7.0", + "hyper 1.8.0", "hyper-util", "itoa", "matchit", @@ -1076,7 +1074,7 @@ dependencies = [ "axstd", "axvcpu", "axvisor_api", - "axvm", + "axvm 0.1.0 (git+https://github.com/arceos-hypervisor/axvm.git?branch=next)", "bitflags 2.10.0", "byte-unit", "cfg-if", @@ -1152,6 +1150,31 @@ dependencies = [ "x86_vcpu", ] +[[package]] +name = "axvm" +version = "0.1.0" +source = "git+https://github.com/arceos-hypervisor/axvm.git?branch=next#22437d2b8e853bfd576f77f00308380463bd2710" +dependencies = [ + "arm_vcpu", + "arm_vgic 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "axaddrspace", + "axdevice", + "axdevice_base", + "axerrno", + "axvcpu", + "axvmconfig", + "cfg-if", + "cpumask", + "log", + "memory_addr", + "page_table_entry", + "page_table_multiarch", + "percpu", + "riscv_vcpu", + "spin 0.9.8", + "x86_vcpu", +] + [[package]] name = "axvmconfig" version = "0.1.0" @@ -1354,6 +1377,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "bytemuck" +version = "1.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" + [[package]] name = "byteorder" version = "1.5.0" @@ -1435,9 +1464,9 @@ dependencies = [ [[package]] name = "cargo_metadata" -version = "0.23.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "981a6f317983eec002839b90fae7411a85621410ae591a9cab2ecf5cb5744873" +checksum = "ef987d17b0a113becdd19d3d0022d04d7ef41f9efe4f3fb63ac44ba61df3ade9" dependencies = [ "camino", "cargo-platform 0.3.1", @@ -1754,9 +1783,9 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" dependencies = [ "generic-array", "typenum", @@ -2000,6 +2029,12 @@ dependencies = [ "syn 2.0.110", ] +[[package]] +name = "dma-api" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624f4a84cc1031cfceb56780b82570a785f6cfdcee4f34c06c4e8f1fba25c970" + [[package]] name = "dma-api" version = "0.3.1" @@ -2036,6 +2071,7 @@ version = "0.1.0" dependencies = [ "axklib 0.2.0 (git+https://github.com/arceos-hypervisor/axklib)", "log", + "phytium-mci", "rdif-block", "rdif-clk", "rdrive", @@ -2266,7 +2302,7 @@ checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127" [[package]] name = "fitimage" version = "0.1.0" -source = "git+https://github.com/ZR233/ostool.git#aac8b3dedbe47a66a34a9c274f9dfb39dca6b6eb" +source = "git+https://github.com/ZR233/ostool.git#e9f6c62829f8aa232292dc9fe46de4174c734928" dependencies = [ "anyhow", "byteorder", @@ -2425,9 +2461,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.9" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -2567,9 +2603,9 @@ dependencies = [ [[package]] name = "heapless" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1edcd5a338e64688fbdcb7531a846cfd3476a54784dcb918a0844682bc7ada5" +checksum = "2af2455f757db2b292a9b1768c4b70186d443bcb3b316252d6b540aec1cd89ed" dependencies = [ "hash32", "stable_deref_trait", @@ -2687,9 +2723,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" +checksum = "1744436df46f0bde35af3eda22aeaba453aada65d8f1c171cd8a5f59030bd69f" dependencies = [ "atomic-waker", "bytes", @@ -2715,7 +2751,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ "http 1.3.1", - "hyper 1.7.0", + "hyper 1.8.0", "hyper-util", "rustls", "rustls-pki-types", @@ -2746,7 +2782,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.7.0", + "hyper 1.8.0", "hyper-util", "native-tls", "tokio", @@ -2767,7 +2803,7 @@ dependencies = [ "futures-util", "http 1.3.1", "http-body 1.0.1", - "hyper 1.7.0", + "hyper 1.8.0", "ipnet", "libc", "percent-encoding", @@ -2924,9 +2960,9 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.18.2" +version = "0.18.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade6dfcba0dfb62ad59e59e7241ec8912af34fd29e0e743e3db992bd278e8b65" +checksum = "9375e112e4b463ec1b1c6c011953545c65a30164fbab5b581df32b3abf0dcb88" dependencies = [ "console", "portable-atomic", @@ -3036,15 +3072,17 @@ dependencies = [ [[package]] name = "jkconfig" -version = "0.1.3" -source = "git+https://github.com/ZR233/ostool.git#aac8b3dedbe47a66a34a9c274f9dfb39dca6b6eb" +version = "0.1.4" +source = "git+https://github.com/ZR233/ostool.git#e9f6c62829f8aa232292dc9fe46de4174c734928" dependencies = [ "anyhow", "axum", + "cargo_metadata 
0.23.1", "chrono", "clap", "cursive", "log", + "schemars", "serde", "serde_json", "thiserror 2.0.17", @@ -3191,9 +3229,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.22" +version = "1.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" +checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" dependencies = [ "cc", "pkg-config", @@ -3633,14 +3671,15 @@ dependencies = [ [[package]] name = "ostool" version = "0.8.0" -source = "git+https://github.com/ZR233/ostool.git#aac8b3dedbe47a66a34a9c274f9dfb39dca6b6eb" +source = "git+https://github.com/ZR233/ostool.git#e9f6c62829f8aa232292dc9fe46de4174c734928" dependencies = [ "anyhow", "byte-unit", - "cargo_metadata 0.23.0", + "cargo_metadata 0.23.1", "clap", "colored", "crossterm 0.29.0", + "cursive", "env_logger", "fitimage", "indicatif", @@ -3786,6 +3825,23 @@ dependencies = [ "syn 2.0.110", ] +[[package]] +name = "phytium-mci" +version = "0.1.0" +source = "git+https://github.com/YanQD/phytium-mci.git?rev=99c9ee5#99c9ee5f66490fbf41370be3b3d7a42cf4406b0c" +dependencies = [ + "bare-test-macros", + "bitflags 2.10.0", + "bytemuck", + "dma-api 0.2.2", + "lazy_static", + "log", + "nb", + "rlsf", + "spin 0.10.0", + "tock-registers 0.9.0", +] + [[package]] name = "pie-boot-if" version = "0.8.0" @@ -4371,7 +4427,7 @@ dependencies = [ "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.7.0", + "hyper 1.8.0", "hyper-rustls", "hyper-tls 0.6.0", "hyper-util", @@ -5738,8 +5794,8 @@ dependencies = [ [[package]] name = "uboot-shell" -version = "0.1.10" -source = "git+https://github.com/ZR233/ostool.git#aac8b3dedbe47a66a34a9c274f9dfb39dca6b6eb" +version = "0.2.0" +source = "git+https://github.com/ZR233/ostool.git#e9f6c62829f8aa232292dc9fe46de4174c734928" dependencies = [ "colored", "log", @@ -6542,7 +6598,7 @@ version = "0.1.0" dependencies = [ "anyhow", "axvmconfig", - "cargo_metadata 0.23.0", + "cargo_metadata 0.23.1", "chrono", "clap", "colored", diff --git a/crates/nop/src/lib.rs b/crates/nop/src/lib.rs index 75b39cf4..93c687df 100644 --- a/crates/nop/src/lib.rs +++ b/crates/nop/src/lib.rs @@ -1,3 +1,3 @@ //! A crate that does nothing. Used for workspace member inclusion. 
-#![no_std] \ No newline at end of file +#![no_std] diff --git a/doc/shell.md b/doc/shell.md index 0d06f79d..e0c4188f 100644 --- a/doc/shell.md +++ b/doc/shell.md @@ -87,6 +87,22 @@ pub struct CommandNode { - **选项解析**: 支持短选项(-x)和长选项(--option) - **参数验证**: 自动验证必需选项和参数格式 - **错误处理**: 详细的错误信息和使用提示 +- **灵活格式**: 支持 `--option=value` 和 `--option value` 两种格式 + +#### 分词示例 + +```rust +// src/shell/command/mod.rs:186-215 +fn tokenize(input: &str) -> Vec { + // 支持引号包围的参数 + // 例: echo "hello world" -> ["echo", "hello world"] + + // 支持转义字符 + // 例: echo \"quoted\" -> ["echo", "\"quoted\""] + + // 自动处理空白符分隔 +} +``` #### 解析错误类型 ```rust @@ -148,27 +164,37 @@ fn file_type_to_char(ty: FileType) -> char { - **vm start**: 启动虚拟机 - 不带参数:启动所有虚拟机 - 指定VM ID:启动特定虚拟机 - - 支持 `--detach` 后台模式运行(**有计划实现**) + - 支持 `--detach` 后台模式运行 + - 支持 `--console` 连接到控制台(计划实现) - **vm stop**: 停止虚拟机 - 必须指定VM ID - 支持 `--force` 强制停止 -- **vm restart**: 重启虚拟机,必须指定VM ID + - 支持 `--graceful` 优雅关闭 +- **vm suspend**: 暂停(挂起)运行中的虚拟机 (功能不完善) + - 必须指定VM ID + - 所有VCpu将在下次VMExit时进入等待队列 + - VM状态转换为Suspended +- **vm resume**: 恢复已暂停的虚拟机 (功能不完善) + - 必须指定VM ID + - 唤醒所有VCpu任务,恢复执行 + - VM状态从Suspended转换回Running +- **vm restart**: 重启虚拟机,必须指定VM ID (功能不完善) + - 支持 `--force` 强制重启 + - 自动等待VM完全停止后再启动 - **vm delete**: 删除虚拟机 - 必须指定VM ID - 需要 `--force` 确认删除 - 支持 `--keep-data` 保留数据选项 - **vm list**: 列出虚拟机 - - 默认只显示运行中的虚拟机 - - `--all` 显示所有虚拟机(包括已停止) + - 显示所有已创建的虚拟机 - `--format json` 支持JSON格式输出 + - 表格模式显示:ID、名称、状态、VCPU列表、内存、VCPU状态汇总 - **vm show**: 显示虚拟机详细信息 - 必须指定VM ID - - `--config` 显示配置信息 - - `--stats` 显示统计信息 -- **vm status**: 显示虚拟机状态 - - 不带参数:显示所有VM的状态概览 - - 指定VM ID:显示特定VM的详细状态 - - 支持 `--watch` 实时监控(计划实现) + - 默认模式:显示基本信息和摘要 + - `--full` / `-f`: 显示完整详细信息(内存区域、设备、配置等) + - `--config` / `-c`: 显示配置信息(入口点、中断模式、直通设备等) + - `--stats` / `-s`: 显示统计信息(EPT、内存区域、设备数量等) #### 功能特性 ``` rust @@ -199,23 +225,33 @@ let state = if vm.running() { - 虚拟机整体状态 (运行中/停止中/已停止) #### 支持的选项和标志 -- `--all` / `-a`: 显示所有虚拟机(包括已停止的) -- `--format json`: JSON格式输出 -- `--config` / `-c`: 显示配置信息 -- `--stats` / `-s`: 显示统计信息 -- `--force` / `-f`: 强制操作(无需确认) -- `--detach` / `-d`: 后台运行 -- `--watch` / `-w`: 实时监控(计划实现) -- `--keep-data`: 保留VM数据 +- `--all` / `-a`: (vm list) 显示所有虚拟机(默认已包含所有VM) +- `--format json`: (vm list) JSON格式输出 +- `--full` / `-f`: (vm show) 显示完整详细信息 +- `--config` / `-c`: (vm show) 显示配置信息 +- `--stats` / `-s`: (vm show) 显示统计信息 +- `--force` / `-f`: (vm stop/delete/restart) 强制操作(无需确认) +- `--graceful` / `-g`: (vm stop) 优雅关闭 +- `--console` / `-c`: (vm start) 连接到控制台(计划实现) +- `--watch` / `-w`: (vm status) 实时监控(已移除,功能未实现) +- `--keep-data`: (vm delete) 保留VM数据(功能未实现) #### 输出格式示例 **Table格式** (默认): ``` +VM ID NAME STATUS VCPU MEMORY VCPU STATE +------ --------------- ------------ --------------- ---------- -------------------- +0 linux-vm Running 0,1 512MB Run:2 +1 test-vm Stopped 0 256MB Free:1 +``` + +**简化表格** (vm list 输出): +``` ID NAME STATE VCPU MEMORY ---- ----------- ------- ---- ------ -0 linux-vm 🟢 running 2 512MB -1 test-vm 🔴 stopped 1 256MB +0 linux-vm Running 2 512MB +1 test-vm Stopped 1 256MB ``` **JSON格式** (`--format json`): @@ -284,6 +320,33 @@ pub fn clear_line_and_redraw( - **clear**: 清屏 (发送ANSI清屏序列 `\x1b[2J\x1b[H`) - **exit/quit**: 退出shell +### VM 管理命令列表 + +执行 `help vm` 可以看到完整的 VM 命令列表: + +``` +VM - virtual machine management + +Most commonly used vm commands: + create Create a new virtual machine + start Start a virtual machine + stop Stop a virtual machine + suspend Suspend (pause) a running virtual machine + resume Resume a suspended virtual machine + restart Restart a virtual machine + delete 
Delete a virtual machine + +Information commands: + list Show table of all VMs + show Show VM details (requires VM_ID) + - Default: basic information + - --full: complete detailed information + - --config: show configuration + - --stats: show statistics + +Use 'vm --help' for more information on a specific command. +``` + ### 错误处理 Shell会对命令解析和执行错误提供友好的提示信息: ```bash @@ -307,6 +370,240 @@ Error: No VM specified Usage: vm stop [OPTIONS] ``` +## VM 生命周期和状态管理 + +### VM 状态机 + +AxVisor 的 VM 状态遵循严格的状态机模型: + +``` + ┌──────────┐ + │ Loading │ (VM 正在创建/加载) + └────┬─────┘ + │ create complete + ▼ + ┌──────────┐ + ┌─────▶│ Loaded │ (VM 已加载,未启动) + │ └────┬─────┘ + │ │ start + │ ▼ + │ ┌──────────┐ + │ ┌───┤ Running │ (VM 正在运行) + │ │ └────┬─────┘ + │ │ │ + │ │ ├─── suspend ────▶ ┌───────────┐ + │ │ │ │ Suspended │ (VM 已暂停) + │ │ │ └─────┬─────┘ + │ │ │ │ resume + │ │ │ ◀──────────────────────┘ + │ │ │ + │ │ │ shutdown/stop + │ │ ▼ + │ │ ┌──────────┐ + │ │ │ Stopping │ (VM 正在关闭) + │ │ └────┬─────┘ + │ │ │ all vcpus exited + │ │ ▼ + │ │ ┌──────────┐ + │ └──▶│ Stopped │ (VM 已停止) + │ └────┬─────┘ + │ │ delete + │ ▼ + │ [Resources Freed] + │ │ + └───────────┘ restart +``` + +### VM 状态定义 + +```rust +pub enum VMStatus { + Loading, // VM 正在创建/加载 + Loaded, // VM 已加载但未启动 + Running, // VM 正在运行 + Suspended, // VM 已暂停(可恢复) + Stopping, // VM 正在关闭中 + Stopped, // VM 已完全停止 +} +``` + +#### 状态转换规则 + +| 当前状态 | 可执行操作 | 目标状态 | 说明 | +|---------|-----------|---------|------| +| Loading | - | Loaded | 创建完成后自动转换 | +| Loaded | `vm start` | Running | 启动 VCpu 任务开始执行 | +| Loaded | `vm delete` | Stopped | 直接删除未启动的 VM | +| Running | `vm stop` | Stopping | 发送关闭信号给所有 VCpu | +| Running | `vm suspend` | Suspended | 暂停所有 VCpu 执行 | +| Suspended | `vm resume` | Running | 恢复 VCpu 执行 | +| Suspended | `vm stop` | Stopping | 从暂停状态直接关闭 | +| Stopping | - | Stopped | 所有 VCpu 退出后自动转换 | +| Stopped | `vm delete` | [释放资源] | 清理并释放 VM 资源 | +| Stopped | `vm start` | Running | 重新启动已停止的 VM | + +### VCpu 生命周期 + +每个 VM 包含一个或多个 VCpu(虚拟 CPU),它们的生命周期与 VM 状态紧密关联: + +``` +VM Start + │ + ├─▶ 创建 VCpu 任务 (alloc_vcpu_task) + │ │ + │ ├─ 设置 CPU 亲和性 + │ ├─ 初始化 TaskExt (Weak 引用 VM) + │ └─ spawn_task 到调度器 + │ + ├─▶ VCpu 任务运行 (vcpu_run) + │ │ + │ ├─ 等待 VM Running 状态 + │ ├─ mark_vcpu_running() + │ └─ 进入运行循环 + │ │ + │ ├─ vm.run_vcpu() - 执行 Guest 代码 + │ ├─ 处理 VM Exit (hypercall, interrupt, halt...) + │ ├─ 检查 VM 暂停状态 + │ └─ 检查 VM 关闭状态 ──┐ + │ │ + │ ▼ vm.stopping() == true + ├─▶ VCpu 任务退出 │ + │ │◀───────────────────────┘ + │ ├─ mark_vcpu_exiting() - 递减运行计数 + │ ├─ 最后一个 VCpu 设置 VM 为 Stopped + │ └─ 任务函数返回,进入 Exited 状态 + │ + └─▶ VCpu 清理 (cleanup_vm_vcpus) + │ + ├─ 遍历所有 VCpu 任务 + ├─ 调用 task.join() 等待退出 + ├─ 释放 VM 的 Arc 引用 + └─ 清理等待队列资源 +``` + +#### VCpu 任务特性 + +1. **Weak 引用**:VCpu 任务通过 `TaskExt` 持有 VM 的 `Weak` 引用,避免循环引用 +2. **CPU 亲和性**:可配置 VCpu 绑定到特定物理 CPU +3. **协作式退出**:VCpu 检测到 `vm.stopping()` 后主动退出 +4. **引用计数管理**:退出前释放所有对 VM 的引用 + +#### VCpu 任务生命周期扩展 + +``` +VM Running + │ + ├─▶ VCpu 任务运行循环 + │ │ + │ ├─ vm.run_vcpu() - 执行 Guest 代码 + │ ├─ 处理 VM Exit + │ ├─ 检查 VM 状态 + │ │ │ + │ │ ├─ vm.stopping() == true ──▶ 退出循环 + │ │ │ + │ │ └─ vm.vm_status() == Suspended ──▶ 进入等待队列 + │ │ │ + │ │ │ wait for notify + │ │ │ + │ │ ▼ + │ │ 被唤醒 (resume) + │ │ │ + │ │ ◀────────────────────────────────────┘ + │ │ + │ └─ 继续执行 +``` + +### VM 删除流程详解 + +`vm delete` 命令执行完整的资源清理流程,确保没有资源泄漏: + +#### 删除流程步骤 + +``` +1. 状态检查和关闭信号 + ├─ 检查 VM 当前状态 + ├─ 如果 Running/Suspended/Stopping + │ ├─ 设置状态为 Stopping + │ └─ 调用 vm.shutdown() 通知 Guest + └─ 如果 Loaded + └─ 直接设置为 Stopped + +2. 
从全局列表移除 + ├─ 调用 vm_list::remove_vm(vm_id) + ├─ 获得 VM 的 Arc 引用 + └─ 打印当前 Arc 引用计数 (调试信息) + +3. VCpu 任务清理 ⭐ (核心步骤) + ├─ 调用 cleanup_vm_vcpus(vm_id) + │ ├─ 从全局队列移除 VM 的 VCpu 列表 + │ ├─ 遍历所有 VCpu 任务 + │ │ ├─ task.join() - 阻塞等待任务退出 + │ │ └─ 释放 VCpu 持有的 VM Arc 引用 + │ └─ 清理等待队列资源 + └─ 打印清理后的 Arc 引用计数 + +4. 验证引用计数 + ├─ 期望:Arc count == 1 (仅剩当前函数持有) + ├─ 实际:检查并打印 Arc::strong_count(&vm) + └─ 如果 count > 1:警告可能的引用泄漏 + +5. 资源释放 + ├─ 函数返回时 vm (Arc) 被 drop + ├─ 如果 count == 1,触发 AxVM::drop() + │ ├─ 释放 EPT 页表 + │ ├─ 释放内存区域 + │ └─ 释放设备资源 + └─ VM 对象完全销毁 +``` + +#### 关键实现代码片段 + +```rust +// src/vmm/vcpus.rs:241-260 +pub(crate) fn cleanup_vm_vcpus(vm_id: usize) { + if let Some(vm_vcpus) = VM_VCPU_TASK_WAIT_QUEUE.remove(&vm_id) { + let task_count = vm_vcpus.vcpu_task_list.len(); + + info!("VM[{}] Joining {} VCpu tasks...", vm_id, task_count); + + // ⭐ 关键:真正 join 所有 VCpu 任务 + for (idx, task) in vm_vcpus.vcpu_task_list.iter().enumerate() { + debug!("VM[{}] Joining VCpu task[{}]: {}", vm_id, idx, task.id_name()); + if let Some(exit_code) = task.join() { + debug!("VM[{}] VCpu task[{}] exited with code: {}", vm_id, idx, exit_code); + } + } + + info!("VM[{}] VCpu resources cleaned up, {} VCpu tasks joined successfully", + vm_id, task_count); + } +} +``` + +#### 删除示例输出 + +```bash +$ vm delete 2 +Deleting stopped VM[2]... + [Debug] VM Arc strong_count: 2 +✓ VM[2] removed from VM list + Waiting for vCPU threads to exit... + [Debug] VM Arc count before cleanup: 1 + Cleaning up VCpu resources... +[ 67.812092 0:2 axvisor::vmm::vcpus:243] VM[2] Joining 1 VCpu tasks... +[ 67.819730 0:2 axvisor::vmm::vcpus:253] VM[2] VCpu resources cleaned up, 1 VCpu tasks joined successfully + [Debug] VM Arc count after final wait: 1 +✓ VM[2] deleted completely + [Debug] VM Arc strong_count: 1 + ✓ Perfect! VM will be freed immediately when function returns + VM[2] will be freed now +[ 67.848026 0:2 axvm::vm:884] Dropping VM[2] +[ 67.853407 0:2 axvm::vm:775] Cleaning up VM[2] resources... 
+[ 67.860698 0:2 axvm::vm:878] VM[2] resources cleanup completed +[ 67.867209 0:2 axvm::vm:889] VM[2] dropped +✓ VM[2] deletion completed +``` + ### 命令提示符 ```rust pub fn print_prompt() { @@ -730,8 +1027,7 @@ touch file.txt # 创建空文件 ### 虚拟机管理 ```bash -vm list # 列出运行中的虚拟机 -vm list -a # 列出所有虚拟机(包括已停止) +vm list # 列出所有虚拟机 vm list --format json # JSON格式输出 vm create config.toml # 创建虚拟机 vm create vm1.toml vm2.toml # 批量创建虚拟机 @@ -739,10 +1035,15 @@ vm start # 启动所有虚拟机 vm start 1 # 启动VM(ID=1) vm start -d 1 # 后台启动VM vm stop -f 1 # 强制停止VM +vm suspend 1 # 暂停VM(ID=1) +vm resume 1 # 恢复暂停的VM vm restart 1 # 重启VM +vm restart -f 1 # 强制重启VM vm delete -f 1 # 删除VM(需要确认) -vm status # 显示所有VM状态概览 -vm status 1 # 查看特定VM状态 +vm status # 显示所有VM状态概览(已移除) +vm status 1 # 查看特定VM状态(已移除) +vm show 1 # 查看VM基本信息 +vm show -f 1 # 查看VM完整详细信息 vm show -c 1 # 查看VM配置 vm show -s 1 # 查看VM统计信息 vm show -c -s 1 # 查看VM配置和统计信息 diff --git a/kernel/src/hal/mod.rs b/kernel/src/hal/mod.rs index 47942c5e..acfbab68 100644 --- a/kernel/src/hal/mod.rs +++ b/kernel/src/hal/mod.rs @@ -48,7 +48,7 @@ impl AxVMHal for AxVMHalImpl { } fn current_vm_id() -> usize { - axtask::current().task_ext().vm.id() + axtask::current().task_ext().vm().id() } fn current_vcpu_id() -> usize { diff --git a/kernel/src/shell/command/vm.rs b/kernel/src/shell/command/vm.rs index 9b64914e..b9b8911f 100644 --- a/kernel/src/shell/command/vm.rs +++ b/kernel/src/shell/command/vm.rs @@ -5,25 +5,104 @@ use std::{ vec::Vec, }; +use axvm::VMStatus; #[cfg(feature = "fs")] use std::fs::read_to_string; use crate::{ shell::command::{CommandNode, FlagDef, OptionDef, ParsedCommand}, - vmm::{add_running_vm_count, get_running_vm_count, vcpus, vm_list, with_vm}, + vmm::{add_running_vm_count, config::init_guest_vm, vcpus, vm_list, with_vm}, }; +/// Check if a VM can transition to Running state. +/// Returns Ok(()) if the transition is valid, Err with a message otherwise. +fn can_start_vm(status: VMStatus) -> Result<(), &'static str> { + match status { + VMStatus::Loaded | VMStatus::Stopped => Ok(()), + VMStatus::Running => Err("VM is already running"), + VMStatus::Suspended => Err("VM is suspended, use 'vm resume' instead"), + VMStatus::Stopping => Err("VM is stopping, wait for it to fully stop"), + VMStatus::Loading => Err("VM is still loading"), + } +} + +/// Check if a VM can transition to Stopping state. +/// Returns Ok(()) if the transition is valid, Err with a message otherwise. +fn can_stop_vm(status: VMStatus, force: bool) -> Result<(), &'static str> { + match status { + VMStatus::Running | VMStatus::Suspended => Ok(()), + VMStatus::Stopping => { + if force { + Ok(()) + } else { + Err("VM is already stopping") + } + } + VMStatus::Stopped => Err("VM is already stopped"), + VMStatus::Loading | VMStatus::Loaded => Ok(()), // Allow stopping VMs in these states + } +} + +/// Check if a VM can be suspended. +fn can_suspend_vm(status: VMStatus) -> Result<(), &'static str> { + match status { + VMStatus::Running => Ok(()), + VMStatus::Suspended => Err("VM is already suspended"), + VMStatus::Stopped => Err("VM is stopped, cannot suspend"), + VMStatus::Stopping => Err("VM is stopping, cannot suspend"), + VMStatus::Loading => Err("VM is loading, cannot suspend"), + VMStatus::Loaded => Err("VM is not running, cannot suspend"), + } +} + +/// Check if a VM can be resumed. 
+fn can_resume_vm(status: VMStatus) -> Result<(), &'static str> { + match status { + VMStatus::Suspended => Ok(()), + VMStatus::Running => Err("VM is already running"), + VMStatus::Stopped => Err("VM is stopped, use 'vm start' instead"), + VMStatus::Stopping => Err("VM is stopping, cannot resume"), + VMStatus::Loading => Err("VM is loading, cannot resume"), + VMStatus::Loaded => Err("VM is not started yet, use 'vm start' instead"), + } +} + +/// Format memory size in a human-readable way. +fn format_memory_size(bytes: usize) -> String { + if bytes < 1024 { + format!("{}B", bytes) + } else if bytes < 1024 * 1024 { + format!("{}KB", bytes / 1024) + } else if bytes < 1024 * 1024 * 1024 { + format!("{}MB", bytes / (1024 * 1024)) + } else { + format!("{}GB", bytes / (1024 * 1024 * 1024)) + } +} + +// ============================================================================ +// Command Handlers +// ============================================================================ + fn vm_help(_cmd: &ParsedCommand) { println!("VM - virtual machine management"); + println!(); println!("Most commonly used vm commands:"); println!(" create Create a new virtual machine"); println!(" start Start a virtual machine"); println!(" stop Stop a virtual machine"); + println!(" suspend Suspend (pause) a running virtual machine"); + println!(" resume Resume a suspended virtual machine"); println!(" restart Restart a virtual machine"); println!(" delete Delete a virtual machine"); - println!(" list Show virtual machine lists"); - println!(" show Show virtual machine details"); - println!(" status Show virtual machine status"); + println!(); + println!("Information commands:"); + println!(" list Show table of all VMs"); + println!(" show Show VM details (requires VM_ID)"); + println!(" - Default: basic information"); + println!(" - --full: complete detailed information"); + println!(" - --config: show configuration"); + println!(" - --stats: show statistics"); println!(); println!("Use 'vm --help' for more information on a specific command."); } @@ -42,17 +121,16 @@ fn vm_create(cmd: &ParsedCommand) { let initial_vm_count = vm_list::get_vm_list().len(); - let mut processed_count = 0; for config_path in args.iter() { - use crate::vmm::config::init_guest_vm; - println!("Creating VM from config: {}", config_path); match read_to_string(config_path) { Ok(raw_cfg) => match init_guest_vm(&raw_cfg) { - Ok(_) => { - println!("✓ Successfully created VM from config: {}", config_path); - processed_count += 1; + Ok(vm_id) => { + println!( + "✓ Successfully created VM[{}] from config: {}", + vm_id, config_path + ); } Err(_) => { println!( @@ -73,11 +151,9 @@ fn vm_create(cmd: &ParsedCommand) { if created_count > 0 { println!("Successfully created {} VM(s)", created_count); - } else if processed_count > 0 { - println!( - "Processed {} config file(s) but no VMs were actually created", - processed_count - ); + println!("Use 'vm start ' to start the created VMs."); + } else { + println!("No VMs were created."); } } @@ -92,19 +168,23 @@ fn vm_start(cmd: &ParsedCommand) { let mut started_count = 0; for vm in vm_list::get_vm_list() { - // Set up primary virtual CPU before starting - vcpus::setup_vm_primary_vcpu(vm.clone()); - - match vm.boot() { - Ok(_) => { - vcpus::notify_primary_vcpu(vm.id()); - add_running_vm_count(1); - println!("✓ VM[{}] started successfully", vm.id()); - started_count += 1; - } - Err(err) => { - println!("✗ VM[{}] failed to start: {:?}", vm.id(), err); - } + // Check current status before starting + let status: 
VMStatus = vm.vm_status(); + if status == VMStatus::Running { + println!("⚠ VM[{}] is already running, skipping", vm.id()); + continue; + } + + if status != VMStatus::Loaded && status != VMStatus::Stopped { + println!("⚠ VM[{}] is in {:?} state, cannot start", vm.id(), status); + continue; + } + + if let Err(e) = start_single_vm(vm.clone()) { + println!("✗ VM[{}] failed to start: {:?}", vm.id(), e); + } else { + println!("✓ VM[{}] started successfully", vm.id()); + started_count += 1; } } println!("Started {} VM(s)", started_count); @@ -127,19 +207,43 @@ fn vm_start(cmd: &ParsedCommand) { } } -fn start_vm_by_id(vm_id: usize) { +/// Start a single VM by setting up vCPUs and calling boot. +/// Returns Ok(()) if successful, Err otherwise. +fn start_single_vm(vm: crate::vmm::VMRef) -> Result<(), &'static str> { + let vm_id = vm.id(); + let status = vm.vm_status(); + + // Validate state transition using helper function + can_start_vm(status)?; + // Set up primary virtual CPU before starting - match with_vm(vm_id, |vm| { - vcpus::setup_vm_primary_vcpu(vm.clone()); - vm.boot() - }) { - Some(Ok(_)) => { + vcpus::setup_vm_primary_vcpu(vm.clone()); + + // Boot the VM + match vm.boot() { + Ok(_) => { + // Transition to Running state and notify the primary VCpu + // Note: Since the VCpu task is created directly in the wait queue (blocked state), + // we can immediately notify it without waiting for it to be scheduled first. vcpus::notify_primary_vcpu(vm_id); add_running_vm_count(1); + Ok(()) + } + Err(err) => { + // Revert status on failure + error!("Failed to boot VM[{}]: {:?}", vm_id, err); + Err("Failed to boot VM") + } + } +} + +fn start_vm_by_id(vm_id: usize) { + match with_vm(vm_id, |vm| start_single_vm(vm.clone())) { + Some(Ok(_)) => { println!("✓ VM[{}] started successfully", vm_id); } Some(Err(err)) => { - println!("✗ VM[{}] failed to start: {:?}", vm_id, err); + println!("✗ VM[{}] failed to start: {}", vm_id, err); } None => { println!("✗ VM[{}] not found", vm_id); @@ -168,17 +272,49 @@ fn vm_stop(cmd: &ParsedCommand) { fn stop_vm_by_id(vm_id: usize, force: bool) { match with_vm(vm_id, |vm| { - if force { - println!("Force stopping VM[{}]...", vm_id); - // Force shutdown, directly call shutdown - vm.shutdown() - } else { - println!("Stopping VM[{}]...", vm_id); - vm.shutdown() + let status = vm.vm_status(); + + // Validate state transition using helper function + if let Err(err) = can_stop_vm(status, force) { + println!("⚠ VM[{}] {}", vm_id, err); + return Err(err); + } + + // Print appropriate message based on status + match status { + VMStatus::Stopping if force => { + println!("Force stopping VM[{}]...", vm_id); + } + VMStatus::Running => { + if force { + println!("Force stopping VM[{}]...", vm_id); + } else { + println!("Gracefully stopping VM[{}]...", vm_id); + } + } + VMStatus::Loading | VMStatus::Loaded => { + println!( + "⚠ VM[{}] is in {:?} state, stopping anyway...", + vm_id, status + ); + } + _ => {} + } + + // Call shutdown + match vm.shutdown() { + Ok(_) => Ok(()), + Err(_err) => { + // Revert status on failure + Err("Failed to shutdown VM") + } } }) { Some(Ok(_)) => { - println!("✓ VM[{}] stopped successfully", vm_id); + println!("✓ VM[{}] stop signal sent successfully", vm_id); + println!( + " Note: vCPU threads will exit gracefully, VM status will transition to Stopped" + ); } Some(Err(err)) => { println!("✗ Failed to stop VM[{}]: {:?}", vm_id, err); @@ -189,6 +325,7 @@ fn stop_vm_by_id(vm_id: usize, force: bool) { } } +/// Restart a VM by stopping it (if running) and then 
starting it again.(functionality incomplete) fn vm_restart(cmd: &ParsedCommand) { let args = &cmd.positional_args; let force = cmd.flags.get("force").unwrap_or(&false); @@ -211,14 +348,242 @@ fn vm_restart(cmd: &ParsedCommand) { fn restart_vm_by_id(vm_id: usize, force: bool) { println!("Restarting VM[{}]...", vm_id); - // First stop the virtual machine - stop_vm_by_id(vm_id, force); + // Check current status + let current_status = with_vm(vm_id, |vm| vm.vm_status()); + if current_status.is_none() { + println!("✗ VM[{}] not found", vm_id); + return; + } + + let status = current_status.unwrap(); + match status { + VMStatus::Stopped | VMStatus::Loaded => { + // VM is already stopped, just start it + println!("VM[{}] is already stopped, starting...", vm_id); + start_vm_by_id(vm_id); + } + VMStatus::Suspended | VMStatus::Running => { + // Stop the VM (this will wake up suspended VCpus automatically) + println!("Stopping VM[{}]...", vm_id); + stop_vm_by_id(vm_id, force); + + // Wait for VM to fully stop + println!("Waiting for VM[{}] to stop completely...", vm_id); + let max_wait_iterations = 50; // 5 seconds timeout (50 * 100ms) + let mut iterations = 0; + + loop { + if let Some(vm_status) = with_vm(vm_id, |vm| vm.vm_status()) { + match vm_status { + VMStatus::Stopped => { + println!("✓ VM[{}] stopped successfully", vm_id); + break; + } + VMStatus::Stopping => { + // Still stopping, wait a bit + iterations += 1; + if iterations >= max_wait_iterations { + println!( + "⚠ VM[{}] stop timeout, it may still be shutting down", + vm_id + ); + println!(" Use 'vm status {}' to check status manually", vm_id); + return; + } + // Sleep for 100ms + std::os::arceos::modules::axhal::time::busy_wait( + core::time::Duration::from_millis(100), + ); + } + _ => { + println!("⚠ VM[{}] in unexpected state: {:?}", vm_id, vm_status); + return; + } + } + } else { + println!("✗ VM[{}] no longer exists", vm_id); + return; + } + } + + // Now restart the VM + println!("Starting VM[{}]...", vm_id); + start_vm_by_id(vm_id); + } + VMStatus::Stopping => { + if force { + println!( + "⚠ VM[{}] is currently stopping, waiting for shutdown to complete...", + vm_id + ); + // Could implement similar wait logic here if needed + } else { + println!("⚠ VM[{}] is currently stopping", vm_id); + println!( + " Wait for shutdown to complete, then use 'vm start {}'", + vm_id + ); + println!(" Or use --force to wait and then restart"); + } + } + VMStatus::Loading => { + println!("✗ VM[{}] is still loading, cannot restart", vm_id); + } + } +} + +/// Suspend a running VM (functionality incomplete) +fn vm_suspend(cmd: &ParsedCommand) { + let args = &cmd.positional_args; + + if args.is_empty() { + println!("Error: No VM specified"); + println!("Usage: vm suspend ..."); + return; + } + + for vm_name in args { + if let Ok(vm_id) = vm_name.parse::() { + suspend_vm_by_id(vm_id); + } else { + println!("Error: Invalid VM ID: {}", vm_name); + } + } +} + +fn suspend_vm_by_id(vm_id: usize) { + println!("Suspending VM[{}]...", vm_id); + + let result = with_vm(vm_id, |vm| { + let status = vm.vm_status(); + + // Check if VM can be suspended + if let Err(err_msg) = can_suspend_vm(status) { + return Err(err_msg); + } + + // Set VM status to Suspended + vm.set_vm_status(VMStatus::Suspended); + info!("VM[{}] status set to Suspended", vm_id); + + Ok(()) + }); - // Wait for a period to ensure complete shutdown - // In actual implementation, more complex state checking may be needed + match result { + Some(Ok(_)) => { + println!("✓ VM[{}] suspend signal sent", 
vm_id);
+
+            // Get VM to check VCpu count
+            let vcpu_count = with_vm(vm_id, |vm| vm.vcpu_num()).unwrap_or(0);
+            println!(
+                "  Note: {} VCpu task(s) will enter wait queue at next VMExit",
+                vcpu_count
+            );
+
+            // Wait a brief moment for VCpus to enter suspended state
+            println!("  Waiting for VCpus to suspend...");
+            let max_wait_iterations = 10; // 1 second timeout (10 * 100ms)
+            let mut iterations = 0;
+            let mut all_suspended = false;
+
+            while iterations < max_wait_iterations {
+                // Check if all VCpus are in blocked state
+                if let Some(vm) = crate::vmm::vm_list::get_vm_by_id(vm_id) {
+                    let vcpu_states: Vec<_> =
+                        vm.vcpu_list().iter().map(|vcpu| vcpu.state()).collect();
+
+                    let blocked_count = vcpu_states
+                        .iter()
+                        .filter(|s| matches!(s, axvcpu::VCpuState::Blocked))
+                        .count();
+
+                    if blocked_count == vcpu_states.len() {
+                        all_suspended = true;
+                        break;
+                    }
+
+                    // Show progress for the first few iterations
+                    if iterations < 3 {
+                        debug!("  VCpus blocked: {}/{}", blocked_count, vcpu_states.len());
+                    }
+                }
 
+                iterations += 1;
+                std::os::arceos::modules::axhal::time::busy_wait(
+                    core::time::Duration::from_millis(100),
+                );
+            }
+
+            if all_suspended {
+                println!("✓ All VCpu tasks are now suspended");
+            } else {
+                println!("⚠ Some VCpu tasks may still be transitioning to suspended state");
+                println!("  VCpus will suspend at next VMExit (timer interrupt, I/O, etc.)");
+                println!("  This is normal for VMs with low interrupt rates");
+            }
+
+            println!("  Use 'vm resume {}' to resume the VM", vm_id);
+        }
+        Some(Err(err)) => {
+            println!("✗ Failed to suspend VM[{}]: {}", vm_id, err);
+        }
+        None => {
+            println!("✗ VM[{}] not found", vm_id);
+        }
+    }
+}
+
+/// Resume a suspended VM (functionality incomplete)
+fn vm_resume(cmd: &ParsedCommand) {
+    let args = &cmd.positional_args;
+
+    if args.is_empty() {
+        println!("Error: No VM specified");
+        println!("Usage: vm resume <VM_ID> ...");
+        return;
+    }
+
+    for vm_name in args {
+        if let Ok(vm_id) = vm_name.parse::<usize>() {
+            resume_vm_by_id(vm_id);
+        } else {
+            println!("Error: Invalid VM ID: {}", vm_name);
+        }
+    }
+}
+
+fn resume_vm_by_id(vm_id: usize) {
+    println!("Resuming VM[{}]...", vm_id);
+
+    let result = with_vm(vm_id, |vm| {
+        let status = vm.vm_status();
+
+        // Check if VM can be resumed
+        if let Err(err_msg) = can_resume_vm(status) {
+            return Err(err_msg);
+        }
+
+        // Set VM status back to Running
+        vm.set_vm_status(VMStatus::Running);
+
+        // Notify all VCpus to wake up
+        vcpus::notify_all_vcpus(vm_id);
+
+        info!("VM[{}] resumed", vm_id);
+        Ok(())
+    });
+
+    match result {
+        Some(Ok(_)) => {
+            println!("✓ VM[{}] resumed successfully", vm_id);
+        }
+        Some(Err(err)) => {
+            println!("✗ Failed to resume VM[{}]: {}", vm_id, err);
+        }
+        None => {
+            println!("✗ VM[{}] not found", vm_id);
+        }
+    }
+}
 
 fn vm_delete(cmd: &ParsedCommand) {
@@ -235,15 +600,49 @@
 
     let vm_name = &args[0];
     if let Ok(vm_id) = vm_name.parse::<usize>() {
-        if !force {
-            println!(
-                "Are you sure you want to delete VM[{}]? 
(This operation cannot be undone)", - vm_id - ); - println!("Use --force to skip confirmation"); + // Check if VM exists and get its status first + let vm_status = with_vm(vm_id, |vm| vm.vm_status()); + + if vm_status.is_none() { + println!("✗ VM[{}] not found", vm_id); return; } + let status = vm_status.unwrap(); + + // Check if VM is running + match status { + VMStatus::Running => { + if !force { + println!("✗ VM[{}] is currently running", vm_id); + println!( + " Use 'vm stop {}' first, or use '--force' to force delete", + vm_id + ); + return; + } + println!("⚠ Force deleting running VM[{}]...", vm_id); + } + VMStatus::Stopping => { + if !force { + println!("⚠ VM[{}] is currently stopping", vm_id); + println!(" Wait for it to stop completely, or use '--force' to force delete"); + return; + } + println!("⚠ Force deleting stopping VM[{}]...", vm_id); + } + VMStatus::Stopped => { + println!("Deleting stopped VM[{}]...", vm_id); + } + _ => { + println!("⚠ VM[{}] is in {:?} state", vm_id, status); + if !force { + println!("Use --force to force delete"); + return; + } + } + } + delete_vm_by_id(vm_id, *keep_data); } else { println!("Error: Invalid VM ID: {}", vm_name); @@ -251,23 +650,121 @@ fn vm_delete(cmd: &ParsedCommand) { } fn delete_vm_by_id(vm_id: usize, keep_data: bool) { - // First ensure VM is stopped - with_vm(vm_id, |vm| vm.shutdown()).unwrap_or(Ok(())).ok(); + // First check VM status and try to stop it if running + let vm_status = with_vm(vm_id, |vm| { + let status = vm.vm_status(); + + // If VM is running, suspended, or stopping, send shutdown signal + match status { + VMStatus::Running | VMStatus::Suspended | VMStatus::Stopping => { + println!( + " VM[{}] is {:?}, sending shutdown signal...", + vm_id, status + ); + vm.set_vm_status(VMStatus::Stopping); + let _ = vm.shutdown(); + } + VMStatus::Loaded => { + // Transition from Loaded to Stopped + vm.set_vm_status(VMStatus::Stopped); + } + _ => {} + } + + use alloc::sync::Arc; + let count = Arc::strong_count(&vm); + println!(" [Debug] VM Arc strong_count: {}", count); + + status + }); + + if vm_status.is_none() { + println!("✗ VM[{}] not found or already removed", vm_id); + return; + } + + let status = vm_status.unwrap(); // Remove VM from global list + // Note: This drops the reference from the global list, but the VM object + // will only be fully destroyed when all vCPU threads exit and drop their references match crate::vmm::vm_list::remove_vm(vm_id) { - Some(_) => { + Some(vm) => { + println!("✓ VM[{}] removed from VM list", vm_id); + + // Wait for vCPU threads to exit if VM has VCpu tasks + match status { + VMStatus::Running + | VMStatus::Suspended + | VMStatus::Stopping + | VMStatus::Stopped => { + println!(" Waiting for vCPU threads to exit..."); + + // Debug: Check Arc count before cleanup + use alloc::sync::Arc; + println!( + " [Debug] VM Arc count before cleanup: {}", + Arc::strong_count(&vm) + ); + + // Clean up VCpu resources after threads have exited + println!(" Cleaning up VCpu resources..."); + vcpus::cleanup_vm_vcpus(vm_id); + + // Debug: Check Arc count after final wait + println!( + " [Debug] VM Arc count after final wait: {}", + Arc::strong_count(&vm) + ); + } + _ => { + // VM not running, no vCPU threads to wait for + // But still need to clean up VCpu queue entry if it exists + vcpus::cleanup_vm_vcpus(vm_id); + } + } + if keep_data { - println!("✓ VM[{}] deleted (data preserved)", vm_id); + println!("✓ VM[{}] deleted (configuration and data preserved)", vm_id); } else { println!("✓ VM[{}] deleted completely", 
vm_id); - // Here all VM-related data files should be cleaned up + + // Debug: Check Arc count - should be 1 now (only this variable) + // TaskExt uses Weak reference, so it doesn't count + use alloc::sync::Arc; + let count = Arc::strong_count(&vm); + println!(" [Debug] VM Arc strong_count: {}", count); + + if count == 1 { + println!(" ✓ Perfect! VM will be freed immediately when function returns"); + } else { + println!( + " ⚠ Warning: Unexpected Arc count {}, possible reference leak!", + count + ); + } + + // TODO: Clean up VM-related data files + // - Remove disk images + // - Remove configuration files + // - Remove log files } + + // When function returns, the 'vm' variable is dropped + // Since Arc count is 1, AxVM::drop() is called immediately + println!(" VM[{}] will be freed now", vm_id); } None => { - println!("✗ VM[{}] not found", vm_id); + println!( + "✗ Failed to remove VM[{}] from list (may have been removed already)", + vm_id + ); } } + + // When function returns, the 'vm' Arc is dropped + // If all vCPU threads have exited (ref_count was 1), AxVM::drop() is called here + println!("✓ VM[{}] deletion completed", vm_id); } fn vm_list_simple() { @@ -275,61 +772,49 @@ fn vm_list_simple() { println!("ID NAME STATE VCPU MEMORY"); println!("---- ----------- ------- ---- ------"); for vm in vms { - let state = if vm.running() { - "running" - } else if vm.shutting_down() { - "stopping" - } else { - "stopped" - }; + let status = vm.vm_status(); // Calculate total memory size let total_memory: usize = vm.memory_regions().iter().map(|region| region.size()).sum(); println!( - "{:<4} {:<11} {:<7} {:<4} {}MB", + "{:<4} {:<11} {:<7} {:<4} {}", vm.id(), vm.with_config(|cfg| cfg.name()), - state, + status.as_str(), vm.vcpu_num(), - total_memory / (1024 * 1024) // Convert to MB + format_memory_size(total_memory) ); } } fn vm_list(cmd: &ParsedCommand) { - let show_all = cmd.flags.get("all").unwrap_or(&false); let binding = "table".to_string(); let format = cmd.options.get("format").unwrap_or(&binding); - let vms = vm_list::get_vm_list(); + let display_vms = vm_list::get_vm_list(); + + if display_vms.is_empty() { + println!("No virtual machines found."); + return; + } if format == "json" { + // JSON output println!("{{"); println!(" \"vms\": ["); - for (i, vm) in vms.iter().enumerate() { - let state = if vm.running() { - "running" - } else if vm.shutting_down() { - "stopping" - } else { - "stopped" - }; - + for (i, vm) in display_vms.iter().enumerate() { + let status = vm.vm_status(); let total_memory: usize = vm.memory_regions().iter().map(|region| region.size()).sum(); println!(" {{"); println!(" \"id\": {},", vm.id()); println!(" \"name\": \"{}\",", vm.with_config(|cfg| cfg.name())); - println!(" \"state\": \"{}\",", state); + println!(" \"state\": \"{}\",", status.as_str()); println!(" \"vcpu\": {},", vm.vcpu_num()); - println!(" \"memory\": \"{}MB\",", total_memory / (1024 * 1024)); - println!( - " \"interrupt_mode\": \"{:?}\"", - vm.with_config(|cfg| cfg.interrupt_mode()) - ); + println!(" \"memory\": \"{}\"", format_memory_size(total_memory)); - if i < vms.len() - 1 { + if i < display_vms.len() - 1 { println!(" }},"); } else { println!(" }}"); @@ -338,56 +823,57 @@ fn vm_list(cmd: &ParsedCommand) { println!(" ]"); println!("}}"); } else { - println!("Virtual Machines:"); - if vms.is_empty() { - println!("No virtual machines found."); - return; - } - - // Count running VMs before filtering - let running_count = vms.iter().filter(|vm| vm.running()).count(); - let total_count = vms.len(); + 
// Table output (default) + println!( + "{:<6} {:<15} {:<12} {:<15} {:<10} {:<20}", + "VM ID", "NAME", "STATUS", "VCPU", "MEMORY", "VCPU STATE" + ); + println!( + "{:-<6} {:-<15} {:-<12} {:-<15} {:-<10} {:-<20}", + "", "", "", "", "", "" + ); - // Filter displayed VMs - let display_vms: Vec<_> = if *show_all { - vms - } else { - vms.into_iter().filter(|vm| vm.running()).collect() - }; + for vm in display_vms { + let status = vm.vm_status(); + let total_memory: usize = vm.memory_regions().iter().map(|region| region.size()).sum(); - if display_vms.is_empty() && !*show_all { - println!("No running virtual machines found."); - println!("Use --all to show all VMs including stopped ones."); - return; - } + // Get VCpu ID list + let vcpu_ids: Vec = vm + .vcpu_list() + .iter() + .map(|vcpu| vcpu.id().to_string()) + .collect(); + let vcpu_id_list = vcpu_ids.join(","); - println!("ID NAME STATE VCPU MEMORY"); - println!("---- ----------- ------- ---- ------"); - for vm in display_vms { - let state = if vm.running() { - "🟢 running" - } else if vm.shutting_down() { - "🟡 stopping" - } else { - "🔴 stopped" - }; + // Get VCpu state summary + let mut state_counts = std::collections::BTreeMap::new(); + for vcpu in vm.vcpu_list() { + let state = match vcpu.state() { + axvcpu::VCpuState::Free => "Free", + axvcpu::VCpuState::Running => "Run", + axvcpu::VCpuState::Blocked => "Blk", + axvcpu::VCpuState::Invalid => "Inv", + axvcpu::VCpuState::Created => "Cre", + axvcpu::VCpuState::Ready => "Rdy", + }; + *state_counts.entry(state).or_insert(0) += 1; + } - let total_memory: usize = vm.memory_regions().iter().map(|region| region.size()).sum(); + // Format: Run:2,Blk:1 + let summary: Vec = state_counts + .iter() + .map(|(state, count)| format!("{}:{}", state, count)) + .collect(); + let vcpu_state_summary = summary.join(","); println!( - "{:<4} {:<11} {:<9} {:<4} {:<8}", + "{:<6} {:<15} {:<12} {:<15} {:<10} {:<20}", vm.id(), vm.with_config(|cfg| cfg.name()), - state, - vm.vcpu_num(), - format!("{}MB", total_memory / (1024 * 1024)) - ); - } - - if !show_all && running_count < total_count { - println!( - "\nShowing {} running VMs. Use --all to show all {} VMs.", - running_count, total_count + status.as_str(), + vcpu_id_list, + format_memory_size(total_memory), + vcpu_state_summary ); } } @@ -397,108 +883,125 @@ fn vm_show(cmd: &ParsedCommand) { let args = &cmd.positional_args; let show_config = cmd.flags.get("config").unwrap_or(&false); let show_stats = cmd.flags.get("stats").unwrap_or(&false); + let show_full = cmd.flags.get("full").unwrap_or(&false); if args.is_empty() { println!("Error: No VM specified"); println!("Usage: vm show [OPTIONS] "); + println!(); + println!("Options:"); + println!(" -f, --full Show full detailed information"); + println!(" -c, --config Show configuration details"); + println!(" -s, --stats Show statistics"); + println!(); + println!("Use 'vm list' to see all VMs"); return; } + // Show specific VM details let vm_name = &args[0]; if let Ok(vm_id) = vm_name.parse::() { - show_vm_details(vm_id, *show_config, *show_stats); + if *show_full { + show_vm_full_details(vm_id); + } else { + show_vm_basic_details(vm_id, *show_config, *show_stats); + } } else { println!("Error: Invalid VM ID: {}", vm_name); } } -/// Show detailed information about a specific VM. 
-fn show_vm_details(vm_id: usize, show_config: bool, show_stats: bool) { +/// Show basic VM information (default view) +fn show_vm_basic_details(vm_id: usize, show_config: bool, show_stats: bool) { match with_vm(vm_id, |vm| { - let state = if vm.running() { - "🟢 running" - } else if vm.shutting_down() { - "🟡 stopping" - } else { - "🔴 stopped" - }; + let status = vm.vm_status(); println!("VM Details: {}", vm_id); - println!(" ID: {}", vm.id()); - println!(" Name: {}", vm.with_config(|cfg| cfg.name())); - println!(" State: {}", state); - println!(" VCPUs: {}", vm.vcpu_num()); - - // show VCPU information - println!(" VCPU List:"); - for (i, vcpu) in vm.vcpu_list().iter().enumerate() { - if let Some(phys_cpu_set) = vcpu.phys_cpu_set() { - println!(" VCPU[{}]: CPU affinity mask = {:#x}", i, phys_cpu_set); - } else { - println!(" VCPU[{}]: No CPU affinity set", i); + println!(); + + // Basic Information + println!(" VM ID: {}", vm.id()); + println!(" Name: {}", vm.with_config(|cfg| cfg.name())); + println!(" Status: {}", status.as_str_with_icon()); + println!(" VCPUs: {}", vm.vcpu_num()); + + // Calculate total memory + let total_memory: usize = vm.memory_regions().iter().map(|region| region.size()).sum(); + println!(" Memory: {}", format_memory_size(total_memory)); + + // Add state-specific information + match status { + VMStatus::Suspended => { + println!(); + println!(" ℹ VM is paused. Use 'vm resume {}' to continue.", vm_id); + } + VMStatus::Stopped => { + println!(); + println!(" ℹ VM is stopped. Use 'vm delete {}' to clean up.", vm_id); + } + VMStatus::Loaded => { + println!(); + println!(" ℹ VM is ready. Use 'vm start {}' to boot.", vm_id); } + _ => {} } + // VCPU Summary + println!(); + println!("VCPU Summary:"); + let mut state_counts = std::collections::BTreeMap::new(); + for vcpu in vm.vcpu_list() { + let state = match vcpu.state() { + axvcpu::VCpuState::Free => "Free", + axvcpu::VCpuState::Running => "Running", + axvcpu::VCpuState::Blocked => "Blocked", + axvcpu::VCpuState::Invalid => "Invalid", + axvcpu::VCpuState::Created => "Created", + axvcpu::VCpuState::Ready => "Ready", + }; + *state_counts.entry(state).or_insert(0) += 1; + } + + for (state, count) in state_counts { + println!(" {}: {}", state, count); + } + + // Memory Summary + println!(); + println!("Memory Summary:"); + println!(" Total Regions: {}", vm.memory_regions().len()); + println!(" Total Size: {}", format_memory_size(total_memory)); + + // Configuration Summary if show_config { println!(); println!("Configuration:"); vm.with_config(|cfg| { - println!(" BSP Entry: {:#x}", cfg.bsp_entry().as_usize()); - println!(" AP Entry: {:#x}", cfg.ap_entry().as_usize()); + println!(" BSP Entry: {:#x}", cfg.bsp_entry().as_usize()); + println!(" AP Entry: {:#x}", cfg.ap_entry().as_usize()); println!(" Interrupt Mode: {:?}", cfg.interrupt_mode()); - - // show passthrough devices - if !cfg.pass_through_devices().is_empty() { - println!(" Passthrough Devices:"); - for device in cfg.pass_through_devices() { - println!( - " {}: GPA[{:#x}~{:#x}] -> HPA[{:#x}~{:#x}]", - device.name, - device.base_gpa, - device.base_gpa + device.length, - device.base_hpa, - device.base_hpa + device.length - ); - } - } - - // show emulated devices - if !cfg.emu_devices().is_empty() { - println!(" Emulated Devices:"); - for device in cfg.emu_devices() { - println!(" {:?}", device); - } + if let Some(dtb_addr) = cfg.image_config().dtb_load_gpa { + println!(" DTB Address: {:#x}", dtb_addr.as_usize()); } }); } + // Device Summary if show_stats { println!(); - 
println!("Statistics:"); - println!(" EPT Root: {:#x}", vm.ept_root().as_usize()); + println!("Device Summary:"); println!( - " Device Count: {}", + " MMIO Devices: {}", vm.get_devices().iter_mmio_dev().count() ); - - let mut vcpu_states = BTreeMap::new(); - for vcpu in vm.vcpu_list() { - let state_key = match vcpu.state() { - axvcpu::VCpuState::Free => "Free", - axvcpu::VCpuState::Running => "Running", - axvcpu::VCpuState::Blocked => "Blocked", - axvcpu::VCpuState::Invalid => "Invalid", - axvcpu::VCpuState::Created => "Created", - axvcpu::VCpuState::Ready => "Ready", - }; - *vcpu_states.entry(state_key).or_insert(0) += 1; - } - - println!(" VCPU States:"); - for (state, count) in vcpu_states { - println!(" {}: {}", state, count); - } + println!( + " SysReg Devices: {}", + vm.get_devices().iter_sys_reg_dev().count() + ); } + + println!(); + println!("Use 'vm show {} --full' for detailed information", vm_id); }) { Some(_) => {} None => { @@ -507,64 +1010,78 @@ fn show_vm_details(vm_id: usize, show_config: bool, show_stats: bool) { } } -fn vm_status(cmd: &ParsedCommand) { - let args = &cmd.positional_args; - let watch = cmd.flags.get("watch").unwrap_or(&false); - - if args.is_empty() { - // show all VM status - show_all_vm_status(*watch); - return; - } - - let vm_name = &args[0]; - if let Ok(vm_id) = vm_name.parse::() { - show_vm_status(vm_id, *watch); - } else { - println!("Error: Invalid VM ID: {}", vm_name); - } -} - -/// Show status of a specific VM. -fn show_vm_status(vm_id: usize, watch: bool) { - if watch { - println!("Watching VM[{}] status (press Ctrl+C to stop):", vm_id); - // TODO: add real-time status information - } - +/// Show full detailed information about a specific VM (--full flag) +fn show_vm_full_details(vm_id: usize) { match with_vm(vm_id, |vm| { - let state = if vm.running() { - "🟢 running" - } else if vm.shutting_down() { - "🟡 stopping" - } else { - "🔴 stopped" - }; + let status = vm.vm_status(); - println!("Virtual machine status for VM[{}]:", vm_id); - println!(" ID: {}", vm.id()); - println!(" Name: {}", vm.with_config(|cfg| cfg.name())); - println!(" State: {}", state); - println!(" VCPUs: {}", vm.vcpu_num()); + println!("=== VM Details: {} ===", vm_id); + println!(); + + // Basic Information + println!("Basic Information:"); + println!(" VM ID: {}", vm.id()); + println!(" Name: {}", vm.with_config(|cfg| cfg.name())); + println!(" Status: {}", status.as_str_with_icon()); + println!(" VCPUs: {}", vm.vcpu_num()); // Calculate total memory let total_memory: usize = vm.memory_regions().iter().map(|region| region.size()).sum(); + println!(" Memory: {}", format_memory_size(total_memory)); + println!(" EPT Root: {:#x}", vm.ept_root().as_usize()); + + // Add state-specific information + match status { + VMStatus::Suspended => { + println!( + " ℹ VM is paused, VCpu tasks are waiting. Use 'vm resume {}' to continue.", + vm_id + ); + } + VMStatus::Stopping => { + println!(" ℹ VM is shutting down, VCpu tasks are exiting."); + } + VMStatus::Stopped => { + println!( + " ℹ VM is stopped, all VCpu tasks have exited. Use 'vm delete {}' to clean up.", + vm_id + ); + } + VMStatus::Loaded => { + println!( + " ℹ VM is ready to start. 
Use 'vm start {}' to boot.", + vm_id + ); + } + _ => {} + } - println!(" Total Memory: {}MB", total_memory / (1024 * 1024)); + // VCPU Details + println!(); + println!("VCPU Details:"); - // Show memory region details - println!(" Memory Regions:"); - for (i, region) in vm.memory_regions().iter().enumerate() { - println!( - " Region[{}]: GPA[{:#x}~{:#x}] Size={}KB", - i, - region.gpa, - region.gpa + region.size(), - region.size() / 1024 - ); + // Count VCpu states for summary + let mut state_counts = std::collections::BTreeMap::new(); + for vcpu in vm.vcpu_list() { + let state = match vcpu.state() { + axvcpu::VCpuState::Free => "Free", + axvcpu::VCpuState::Running => "Running", + axvcpu::VCpuState::Blocked => "Blocked", + axvcpu::VCpuState::Invalid => "Invalid", + axvcpu::VCpuState::Created => "Created", + axvcpu::VCpuState::Ready => "Ready", + }; + *state_counts.entry(state).or_insert(0) += 1; } - println!(" VCPU Details:"); + // Show summary first + let summary: Vec = state_counts + .iter() + .map(|(state, count)| format!("{}: {}", state, count)) + .collect(); + println!(" Summary: {}", summary.join(", ")); + println!(); + for vcpu in vm.vcpu_list() { let vcpu_state = match vcpu.state() { axvcpu::VCpuState::Free => "Free", @@ -577,124 +1094,165 @@ fn show_vm_status(vm_id: usize, watch: bool) { if let Some(phys_cpu_set) = vcpu.phys_cpu_set() { println!( - " VCPU[{}]: {} (CPU affinity: {:#x})", + " VCPU {}: {} (Affinity: {:#x})", vcpu.id(), vcpu_state, phys_cpu_set ); } else { - println!(" VCPU[{}]: {} (No CPU affinity)", vcpu.id(), vcpu_state); + println!(" VCPU {}: {} (No affinity)", vcpu.id(), vcpu_state); } } - // show device information - let mmio_dev_count = vm.get_devices().iter_mmio_dev().count(); - println!(" Devices: {} MMIO devices", mmio_dev_count); - - // TODO: add more real-time status information - // println!(" Network: connected/disconnected"); - // println!(" Uptime: {} seconds", uptime); - }) { - Some(_) => {} - None => { - println!("✗ VM[{}] not found", vm_id); + // Add note for Suspended VMs + if status == VMStatus::Suspended { + println!(); + println!( + " Note: VCpu tasks are blocked in wait queue and will resume when VM is unpaused." + ); } - } -} - -/// Show status of all VMs in a summary format. 
-fn show_all_vm_status(watch: bool) { - if watch { - println!("Watching all VMs status (press Ctrl+C to stop):"); - } - let vms = vm_list::get_vm_list(); - if vms.is_empty() { - println!("No virtual machines found."); - return; - } - - println!("System Status:"); - println!(" Total VMs: {}", vms.len()); - println!(" Running VMs: {}", get_running_vm_count()); - - let mut running_count = 0; - let mut stopping_count = 0; - let mut stopped_count = 0; - let mut total_vcpus = 0; - let mut total_memory = 0; - - for vm in &vms { - if vm.running() { - running_count += 1; - } else if vm.shutting_down() { - stopping_count += 1; - } else { - stopped_count += 1; + // Memory Regions + println!(); + println!( + "Memory Regions: ({} region(s), {} total)", + vm.memory_regions().len(), + format_memory_size(total_memory) + ); + for (i, region) in vm.memory_regions().iter().enumerate() { + let region_type = if region.needs_dealloc { + "Allocated" + } else { + "Reserved" + }; + let identical = if region.is_identical() { + " [identical]" + } else { + "" + }; + println!( + " Region {}: GPA={:#x} HVA={:#x} Size={} Type={}{}", + i, + region.gpa, + region.hva, + format_memory_size(region.size()), + region_type, + identical + ); } - total_vcpus += vm.vcpu_num(); - total_memory += vm - .memory_regions() - .iter() - .map(|region| region.size()) - .sum::(); - } - - println!(" Total VCPUs: {}", total_vcpus); - println!(" Total Memory: {}MB", total_memory / (1024 * 1024)); - println!(); + // Configuration + println!(); + println!("Configuration:"); + vm.with_config(|cfg| { + println!(" BSP Entry: {:#x}", cfg.bsp_entry().as_usize()); + println!(" AP Entry: {:#x}", cfg.ap_entry().as_usize()); + println!(" Interrupt Mode: {:?}", cfg.interrupt_mode()); - println!("VM Status Overview:"); - println!(" 🟢 Running: {}", running_count); - println!(" 🟡 Stopping: {}", stopping_count); - println!(" 🔴 Stopped: {}", stopped_count); - println!(); + if let Some(dtb_addr) = cfg.image_config().dtb_load_gpa { + println!(" DTB Address: {:#x}", dtb_addr.as_usize()); + } - println!("Individual VM Status:"); - for vm in vms { - let state_icon = if vm.running() { - "🟢" - } else if vm.shutting_down() { - "🟡" - } else { - "🔴" - }; + // Show kernel info + println!( + " Kernel GPA: {:#x}", + cfg.image_config().kernel_load_gpa.as_usize() + ); - let vm_memory: usize = vm.memory_regions().iter().map(|region| region.size()).sum(); + // Show passthrough devices + if !cfg.pass_through_devices().is_empty() { + println!(); + println!( + " Passthrough Devices: ({} device(s))", + cfg.pass_through_devices().len() + ); + for device in cfg.pass_through_devices() { + println!( + " - {}: GPA[{:#x}~{:#x}] -> HPA[{:#x}~{:#x}] ({})", + device.name, + device.base_gpa, + device.base_gpa + device.length, + device.base_hpa, + device.base_hpa + device.length, + format_memory_size(device.length) + ); + } + } - println!( - " {} VM[{}] {} ({} VCPUs, {}MB)", - state_icon, - vm.id(), - vm.with_config(|cfg| cfg.name()), - vm.vcpu_num(), - vm_memory / (1024 * 1024), - ); + // Show passthrough addresses + if !cfg.pass_through_addresses().is_empty() { + println!(); + println!( + " Passthrough Memory Regions: ({} region(s))", + cfg.pass_through_addresses().len() + ); + for pt_addr in cfg.pass_through_addresses() { + println!( + " - GPA[{:#x}~{:#x}] ({})", + pt_addr.base_gpa, + pt_addr.base_gpa + pt_addr.length, + format_memory_size(pt_addr.length) + ); + } + } - if vm.running() { - let mut vcpu_summary = BTreeMap::new(); - for vcpu in vm.vcpu_list() { - let state = match 
vcpu.state() { - axvcpu::VCpuState::Free => "Free", - axvcpu::VCpuState::Running => "Running", - axvcpu::VCpuState::Blocked => "Blocked", - axvcpu::VCpuState::Invalid => "Invalid", - axvcpu::VCpuState::Created => "Created", - axvcpu::VCpuState::Ready => "Ready", - }; - *vcpu_summary.entry(state).or_insert(0) += 1; + // Show passthrough SPIs (ARM specific) + #[cfg(target_arch = "aarch64")] + { + let spis = cfg.pass_through_spis(); + if !spis.is_empty() { + println!(); + println!(" Passthrough SPIs: {:?}", spis); + } } - let summary_str: Vec = vcpu_summary - .into_iter() - .map(|(state, count)| format!("{state}:{count}")) - .collect(); + // Show emulated devices + if !cfg.emu_devices().is_empty() { + println!(); + println!( + " Emulated Devices: ({} device(s))", + cfg.emu_devices().len() + ); + for (idx, device) in cfg.emu_devices().iter().enumerate() { + println!(" {}. {:?}", idx + 1, device); + } + } + }); - if !summary_str.is_empty() { - println!(" VCPUs: {}", summary_str.join(", ")); + // Devices + println!(); + let mmio_dev_count = vm.get_devices().iter_mmio_dev().count(); + let sysreg_dev_count = vm.get_devices().iter_sys_reg_dev().count(); + println!("Devices:"); + println!(" MMIO Devices: {}", mmio_dev_count); + println!(" SysReg Devices: {}", sysreg_dev_count); + + // Additional Statistics + println!(); + println!("Additional Statistics:"); + println!(" Total Memory Regions: {}", vm.memory_regions().len()); + + // Show VCpu affinity details + println!(); + println!(" VCpu Affinity Details:"); + for (vcpu_id, affinity, pcpu_id) in vm.get_vcpu_affinities_pcpu_ids() { + if let Some(aff) = affinity { + println!( + " VCpu {}: Physical CPU mask {:#x}, PCpu ID {}", + vcpu_id, aff, pcpu_id + ); + } else { + println!( + " VCpu {}: No specific affinity, PCpu ID {}", + vcpu_id, pcpu_id + ); } } + }) { + Some(_) => {} + None => { + println!("✗ VM[{}] not found", vm_id); + } } } @@ -763,6 +1321,14 @@ pub fn build_vm_cmd(tree: &mut BTreeMap) { .with_long("force"), ); + let suspend_cmd = CommandNode::new("Suspend (pause) a running virtual machine") + .with_handler(vm_suspend) + .with_usage("vm suspend ..."); + + let resume_cmd = CommandNode::new("Resume a suspended virtual machine") + .with_handler(vm_resume) + .with_usage("vm resume ..."); + let delete_cmd = CommandNode::new("Delete a virtual machine") .with_handler(vm_delete) .with_usage("vm delete [OPTIONS] ") @@ -783,29 +1349,25 @@ pub fn build_vm_cmd(tree: &mut BTreeMap) { ) .with_option(OptionDef::new("format", "Output format (table, json)").with_long("format")); - let show_cmd = CommandNode::new("Show virtual machine details") + let show_cmd = CommandNode::new("Show detailed VM information") .with_handler(vm_show) .with_usage("vm show [OPTIONS] ") .with_flag( - FlagDef::new("config", "Show configuration") + FlagDef::new("full", "Show full detailed information") + .with_short('f') + .with_long("full"), + ) + .with_flag( + FlagDef::new("config", "Show configuration details") .with_short('c') .with_long("config"), ) .with_flag( - FlagDef::new("stats", "Show statistics") + FlagDef::new("stats", "Show device statistics") .with_short('s') .with_long("stats"), ); - let status_cmd = CommandNode::new("Show virtual machine status") - .with_handler(vm_status) - .with_usage("vm status [OPTIONS] [VM_ID]") - .with_flag( - FlagDef::new("watch", "Watch status changes") - .with_short('w') - .with_long("watch"), - ); - // main VM command let mut vm_node = CommandNode::new("Virtual machine management") .with_handler(vm_help) @@ -824,11 +1386,12 @@ pub fn 
build_vm_cmd(tree: &mut BTreeMap) { vm_node = vm_node .add_subcommand("stop", stop_cmd) + .add_subcommand("suspend", suspend_cmd) + .add_subcommand("resume", resume_cmd) .add_subcommand("restart", restart_cmd) .add_subcommand("delete", delete_cmd) .add_subcommand("list", list_cmd) - .add_subcommand("show", show_cmd) - .add_subcommand("status", status_cmd); + .add_subcommand("show", show_cmd); tree.insert("vm".to_string(), vm_node); } diff --git a/kernel/src/shell/mod.rs b/kernel/src/shell/mod.rs index 8af9d4e0..89850240 100644 --- a/kernel/src/shell/mod.rs +++ b/kernel/src/shell/mod.rs @@ -37,6 +37,8 @@ pub fn console_init() { println!("Welcome to AxVisor Shell!"); println!("Type 'help' to see available commands"); println!("Use UP/DOWN arrows to navigate command history"); + #[cfg(not(feature = "fs"))] + println!("Note: Running with limited features (filesystem support disabled)."); println!(); print_prompt(); diff --git a/kernel/src/task.rs b/kernel/src/task.rs index 8aeea74f..ee1f9ece 100644 --- a/kernel/src/task.rs +++ b/kernel/src/task.rs @@ -1,19 +1,34 @@ +use crate::vmm::{VCpuRef, VM, VMRef}; +use alloc::sync::{Arc, Weak}; use std::os::arceos::modules::axtask::def_task_ext; -use crate::vmm::{VCpuRef, VMRef}; - /// Task extended data for the hypervisor. pub struct TaskExt { - /// The VM. - pub vm: VMRef, + /// The VM (Weak reference to avoid keeping VM alive). + pub vm: Weak, /// The virtual CPU. pub vcpu: VCpuRef, } impl TaskExt { - pub const fn new(vm: VMRef, vcpu: VCpuRef) -> Self { + /// Create TaskExt with a Weak reference from a VMRef + pub const fn new(vm: Weak, vcpu: VCpuRef) -> Self { Self { vm, vcpu } } + + /// Get a strong reference to the VM if it's still alive. + /// Returns None if the VM has been dropped. + pub fn vm(&self) -> VMRef { + self.vm.upgrade().expect("VM has been dropped") + } + + /// Helper to create TaskExt from a VMRef by downgrading to Weak. + pub fn from_vm_ref(vm: VMRef, vcpu: VCpuRef) -> Self { + Self { + vm: Arc::downgrade(&vm), + vcpu, + } + } } def_task_ext!(TaskExt); diff --git a/kernel/src/vmm/config.rs b/kernel/src/vmm/config.rs index f4c913fa..99994387 100644 --- a/kernel/src/vmm/config.rs +++ b/kernel/src/vmm/config.rs @@ -155,7 +155,7 @@ pub fn init_guest_vms() { } } -pub fn init_guest_vm(raw_cfg: &str) -> AxResult { +pub fn init_guest_vm(raw_cfg: &str) -> AxResult { let vm_create_config = AxVMCrateConfig::from_toml(raw_cfg).expect("Failed to resolve VM config"); @@ -181,6 +181,7 @@ pub fn init_guest_vm(raw_cfg: &str) -> AxResult { // Create VM. let vm = VM::new(vm_config).expect("Failed to create VM"); + let vm_id = vm.id(); push_vm(vm.clone()); vm_alloc_memorys(&vm_create_config, &vm); @@ -203,7 +204,9 @@ pub fn init_guest_vm(raw_cfg: &str) -> AxResult { panic!("VM[{}] setup failed: {:?}", vm.id(), e); } - Ok(()) + vm.set_vm_status(axvm::VMStatus::Loaded); + + Ok(vm_id) } fn config_guest_address(vm: &VM, main_memory: &VMMemoryRegion) { diff --git a/kernel/src/vmm/mod.rs b/kernel/src/vmm/mod.rs index ab564063..2b686502 100644 --- a/kernel/src/vmm/mod.rs +++ b/kernel/src/vmm/mod.rs @@ -107,7 +107,7 @@ pub fn with_vm_and_vcpu_on_pcpu( // Disables preemption and IRQs to prevent the current task from being preempted or re-scheduled. let guard = kernel_guard::NoPreemptIrqSave::new(); - let current_vm = axtask::current().task_ext().vm.id(); + let current_vm = axtask::current().task_ext().vm().id(); let current_vcpu = axtask::current().task_ext().vcpu.id(); // The target vCPU is the current task, execute the closure directly. 
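For context on the TaskExt change in kernel/src/task.rs above: a minimal, self-contained sketch of the Weak-reference pattern it introduces (std::sync stands in for alloc::sync, and DemoVm/DemoTaskExt are illustrative names, not types from this patch). The point of the change is that dropping the last strong VMRef now actually frees the VM, because vCPU tasks no longer hold strong references through their task extension:

use std::sync::{Arc, Weak};

struct DemoVm {
    id: usize,
}

struct DemoTaskExt {
    vm: Weak<DemoVm>,
}

impl DemoTaskExt {
    // Mirrors TaskExt::from_vm_ref: downgrade so the task extension does not keep the VM alive.
    fn from_vm_ref(vm: Arc<DemoVm>) -> Self {
        Self {
            vm: Arc::downgrade(&vm),
        }
    }

    // Mirrors TaskExt::vm: upgrade on access, panicking if the VM is already gone.
    fn vm(&self) -> Arc<DemoVm> {
        self.vm.upgrade().expect("VM has been dropped")
    }
}

fn main() {
    let vm = Arc::new(DemoVm { id: 0 });
    let ext = DemoTaskExt::from_vm_ref(vm.clone());

    // The task extension adds no strong count, so only `vm` keeps the VM alive.
    assert_eq!(Arc::strong_count(&vm), 1);
    assert_eq!(ext.vm().id, 0);

    // Dropping the last strong reference frees the VM; a later upgrade returns None.
    drop(vm);
    assert!(ext.vm.upgrade().is_none());
}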
@@ -136,3 +136,7 @@ pub fn get_running_vm_count() -> usize { pub fn add_running_vm_count(count: usize) { RUNNING_VM_COUNT.fetch_add(count, Ordering::Release); } + +pub fn sub_running_vm_count(count: usize) { + RUNNING_VM_COUNT.fetch_sub(count, Ordering::Release); +} diff --git a/kernel/src/vmm/vcpus.rs b/kernel/src/vmm/vcpus.rs index 2d33410e..67a83afd 100644 --- a/kernel/src/vmm/vcpus.rs +++ b/kernel/src/vmm/vcpus.rs @@ -18,7 +18,7 @@ use axaddrspace::GuestPhysAddr; use axtask::{AxTaskRef, TaskExtRef, TaskInner, WaitQueue}; use axvcpu::{AxVCpuExitReason, VCpuState}; -use crate::vmm::{VCpuRef, VMRef}; +use crate::vmm::{VCpuRef, VMRef, sub_running_vm_count}; use crate::{hal::arch::inject_interrupt, task::TaskExt}; const KERNEL_STACK_SIZE: usize = 0x40000; // 256 KiB @@ -68,6 +68,11 @@ impl Queue { (*self.0.get()).insert(vm_id, vcpus); } } + + /// Removes the VMVCpus entry for the specified VM ID. + fn remove(&self, vm_id: &usize) -> Option { + unsafe { (*self.0.get()).remove(vm_id) } + } } /// A structure representing the VCpus of a specific VM, including a wait queue @@ -132,10 +137,18 @@ impl VMVCpus { self.wait_queue.wait_until(condition) } + #[allow(dead_code)] fn notify_one(&mut self) { + info!("Current wait queue length: {}", self.wait_queue.len()); self.wait_queue.notify_one(false); } + /// Notify all waiting vCPU threads to wake up. + /// This is useful when shutting down a VM to ensure all vCPUs can check the shutdown flag. + fn notify_all(&mut self) { + self.wait_queue.notify_all(false); + } + /// Increments the count of running or halting VCpus by one. fn mark_vcpu_running(&self) { self.running_halting_vcpu_count @@ -199,6 +212,61 @@ pub(crate) fn notify_primary_vcpu(vm_id: usize) { .notify_one() } +/// Notifies all VCpu tasks associated with the specified VM to wake up. +/// This is useful when shutting down a VM to ensure all waiting vCPUs can check the shutdown flag. +/// +/// # Arguments +/// +/// * `vm_id` - The ID of the VM whose VCpus should be notified. +/// +pub(crate) fn notify_all_vcpus(vm_id: usize) { + if let Some(vm_vcpus) = VM_VCPU_TASK_WAIT_QUEUE.get_mut(&vm_id) { + vm_vcpus.notify_all(); + } +} + +/// Cleans up VCpu resources for a VM that is being deleted. +/// This removes the VM's entry from the global VCpu wait queue. +/// +/// # Arguments +/// +/// * `vm_id` - The ID of the VM whose VCpu resources should be cleaned up. +/// +/// # Note +/// +/// This should be called after all VCpu threads have exited to avoid resource leaks. +/// It will join all VCpu tasks to ensure they are fully cleaned up. +pub(crate) fn cleanup_vm_vcpus(vm_id: usize) { + if let Some(vm_vcpus) = VM_VCPU_TASK_WAIT_QUEUE.remove(&vm_id) { + let task_count = vm_vcpus.vcpu_task_list.len(); + + info!("VM[{}] Joining {} VCpu tasks...", vm_id, task_count); + + // Join all VCpu tasks to ensure they have fully exited and cleaned up + for (idx, task) in vm_vcpus.vcpu_task_list.iter().enumerate() { + debug!( + "VM[{}] Joining VCpu task[{}]: {}", + vm_id, + idx, + task.id_name() + ); + if let Some(exit_code) = task.join() { + debug!( + "VM[{}] VCpu task[{}] exited with code: {}", + vm_id, idx, exit_code + ); + } + } + + info!( + "VM[{}] VCpu resources cleaned up, {} VCpu tasks joined successfully", + vm_id, task_count + ); + } else { + warn!("VM[{}] VCpu resources not found in queue", vm_id); + } +} + /// Marks the VCpu of the specified VM as running. 
fn mark_vcpu_running(vm_id: usize) { VM_VCPU_TASK_WAIT_QUEUE @@ -313,7 +381,8 @@ pub fn with_vcpu_task T>( /// # Note /// /// * The task associated with the VCpu is created with a kernel stack size of 256 KiB. -/// * The task is scheduled on the scheduler of arceos after it is spawned. +/// * The task is created in blocked state and added to the wait queue directly, +/// instead of being added to the ready queue. It will be woken up by notify_primary_vcpu(). fn alloc_vcpu_task(vm: VMRef, vcpu: VCpuRef) -> AxTaskRef { info!("Spawning task for VM[{}] VCpu[{}]", vm.id(), vcpu.id()); let mut vcpu_task = TaskInner::new( @@ -325,7 +394,9 @@ fn alloc_vcpu_task(vm: VMRef, vcpu: VCpuRef) -> AxTaskRef { if let Some(phys_cpu_set) = vcpu.phys_cpu_set() { vcpu_task.set_cpumask(AxCpuMask::from_raw_bits(phys_cpu_set)); } - vcpu_task.init_task_ext(TaskExt::new(vm, vcpu)); + + // Use Weak reference in TaskExt to avoid keeping VM alive + vcpu_task.init_task_ext(TaskExt::from_vm_ref(vm.clone(), vcpu)); info!( "VCpu task {} created {:?}", @@ -343,7 +414,7 @@ fn alloc_vcpu_task(vm: VMRef, vcpu: VCpuRef) -> AxTaskRef { fn vcpu_run() { let curr = axtask::current(); - let vm = curr.task_ext().vm.clone(); + let vm = curr.task_ext().vm(); let vcpu = curr.task_ext().vcpu.clone(); let vm_id = vm.id(); let vcpu_id = vcpu.id(); @@ -473,14 +544,32 @@ fn vcpu_run() { } } - // Check if the VM is shutting down. - if vm.shutting_down() { - warn!("VM[{vm_id}] VCpu[{vcpu_id}] shutting down because of VM shutdown"); + // Check if the VM is suspended + if vm.suspending() { + debug!( + "VM[{}] VCpu[{}] is suspended, waiting for resume...", + vm_id, vcpu_id + ); + wait_for(vm_id, || !vm.suspending()); + info!("VM[{}] VCpu[{}] resumed from suspend", vm_id, vcpu_id); + continue; + } + + // Check if the VM is stopping. 
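+        // A stopping VM differs from a suspended one: suspended vCPUs stay parked on the
+        // wait queue above, while stopping lets each vCPU leave the run loop for good.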
+ if vm.stopping() { + warn!( + "VM[{}] VCpu[{}] stopping because of VM stopping", + vm_id, vcpu_id + ); if mark_vcpu_exiting(vm_id) { info!("VM[{vm_id}] VCpu[{vcpu_id}] last VCpu exiting, decreasing running VM count"); - super::RUNNING_VM_COUNT.fetch_sub(1, Ordering::Release); + // Transition from Stopping to Stopped + vm.set_vm_status(axvm::VMStatus::Stopped); + info!("VM[{}] state changed to Stopped", vm_id); + + sub_running_vm_count(1); ax_wait_queue_wake(&super::VMM, 1); } @@ -488,5 +577,5 @@ fn vcpu_run() { } } - info!("VM[{vm_id}] VCpu[{vcpu_id}] exiting..."); + info!("VM[{}] VCpu[{}] exiting...", vm_id, vcpu_id); } diff --git a/modules/driver/Cargo.toml b/modules/driver/Cargo.toml index 831ac67c..238d71fa 100644 --- a/modules/driver/Cargo.toml +++ b/modules/driver/Cargo.toml @@ -11,6 +11,7 @@ rk3568-clk = ["dep:rk3568_clk"] rk3588-clk = ["dep:rk3588-clk"] sdmmc = ["dep:sdmmc"] rockchip-pm = ["dep:rockchip-pm"] +phytium-blk = ["dep:phytium-mci"] [dependencies] axklib = {git = "https://github.com/arceos-hypervisor/axklib"} @@ -23,4 +24,5 @@ spin.workspace = true rk3568_clk = {git = "https://github.com/drivercraft/rk3568-clk.git", optional = true} sdmmc = {git = "https://github.com/drivercraft/sdmmc.git", default-features = false, features = ["pio"], optional = true} rk3588-clk = {git = "https://github.com/drivercraft/rk3588-clk", optional = true} -rockchip-pm = {git = "https://github.com/drivercraft/rockchip-pm.git", optional = true} \ No newline at end of file +rockchip-pm = {git = "https://github.com/drivercraft/rockchip-pm.git", optional = true} +phytium-mci = { git = "https://github.com/YanQD/phytium-mci.git", rev = "99c9ee5", default-features = false, features = ["pio"], optional = true} diff --git a/modules/driver/src/blk/mod.rs b/modules/driver/src/blk/mod.rs index cc889894..dd47c943 100644 --- a/modules/driver/src/blk/mod.rs +++ b/modules/driver/src/blk/mod.rs @@ -1,2 +1,5 @@ #[cfg(feature = "sdmmc")] mod rockchip; + +#[cfg(feature = "phytium-blk")] +mod phytium; diff --git a/modules/driver/src/blk/phytium.rs b/modules/driver/src/blk/phytium.rs new file mode 100644 index 00000000..1fbc046e --- /dev/null +++ b/modules/driver/src/blk/phytium.rs @@ -0,0 +1,304 @@ +extern crate alloc; + +use axklib::{mem::iomap, time::busy_wait}; + +use core::{ + cmp, + marker::{Send, Sync}, + ptr::NonNull, + time::Duration, +}; + +use log::{debug, info}; +use rdrive::{PlatformDevice, module_driver, probe::OnProbeError, register::FdtInfo}; + +use phytium_mci::sd::SdCard; +use phytium_mci::{IoPad, PAD_ADDRESS, mci_host::err::MCIHostError}; +pub use phytium_mci::{Kernel, set_impl}; + +use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use log::trace; + +use rdif_block::{BlkError, IQueue, Interface, Request, RequestId}; +use rdrive::{DriverGeneric, KError}; + +use spin::Mutex; + +// pub use dma_api::{Direction, Impl as DmaImpl}; +// pub use dma_api::set_impl as set_dma_impl; + +const OFFSET: usize = 0x400_0000; +const BLOCK_SIZE: usize = 512; + +pub struct KernelImpl; + +impl Kernel for KernelImpl { + fn sleep(us: Duration) { + busy_wait(us); + } +} + +set_impl!(KernelImpl); + +module_driver!( + name: "Phytium SdCard", + level: ProbeLevel::PostKernel, + priority: ProbePriority::DEFAULT, + probe_kinds: &[ + ProbeKind::Fdt { + compatibles: &["phytium,mci"], + on_probe: probe_sdcard + } + ], +); + +fn probe_sdcard(info: FdtInfo<'_>, plat_dev: PlatformDevice) -> Result<(), OnProbeError> { + info!("Probing Phytium SDCard..."); + let mci_reg = info + .node + .reg() + .and_then(|mut regs| regs.next()) + 
.ok_or(OnProbeError::other(alloc::format!( + "[{}] has no reg", + info.node.name() + )))?; + + info!( + "MCI reg: addr={:#x}, size={:#x}", + mci_reg.address as usize, + mci_reg.size.unwrap_or(0) + ); + + let mci_reg_base = iomap( + (mci_reg.address as usize).into(), + mci_reg.size.unwrap_or(0x10000), + ) + .expect("Failed to iomap mci reg"); + + let iopad_reg_base = + iomap((PAD_ADDRESS as usize).into(), 0x2000).expect("Failed to iomap iopad reg"); + + info!("MCI reg base mapped at {:#x}", mci_reg_base.as_usize()); + + let mci_reg = + NonNull::new(mci_reg_base.as_usize() as *mut u8).expect("Failed to create NonNull pointer"); + + let iopad_reg = NonNull::new(iopad_reg_base.as_usize() as *mut u8) + .expect("Failed to create NonNull pointer for iopad"); + + let iopad = IoPad::new(iopad_reg); + + info!("MCI reg mapped at {:p}", mci_reg); + + let sdcard = SdCardDriver::new(mci_reg, iopad); + let dev = rdif_block::Block::new(sdcard); + plat_dev.register(dev); + + debug!("phytium block device registered successfully"); + + Ok(()) +} + +pub struct SdCardDriver { + sd_card: Arc>>, +} + +impl SdCardDriver { + pub fn new(sd_addr: NonNull, iopad: IoPad) -> Self { + let sd_card = Arc::new(Mutex::new(Box::new(SdCard::new(sd_addr, iopad)))); + SdCardDriver { sd_card } + } +} + +unsafe impl Send for SdCardDriver {} +unsafe impl Sync for SdCardDriver {} + +unsafe impl Send for SdCardQueue {} +unsafe impl Sync for SdCardQueue {} + +impl DriverGeneric for SdCardDriver { + fn open(&mut self) -> Result<(), KError> { + Ok(()) + } + + fn close(&mut self) -> Result<(), KError> { + Ok(()) + } +} + +impl Interface for SdCardDriver { + fn create_queue(&mut self) -> Option> { + Some(Box::new(SdCardQueue { + sd_card: Arc::clone(&self.sd_card), + })) + } + + fn enable_irq(&mut self) { + todo!() + } + + fn disable_irq(&mut self) { + todo!() + } + + fn is_irq_enabled(&self) -> bool { + false + } + + fn handle_irq(&mut self) -> rdif_block::Event { + rdif_block::Event::none() + } +} + +pub struct SdCardQueue { + sd_card: Arc>>, +} + +impl IQueue for SdCardQueue { + /// Returns the number of blocks on the SD card. + fn num_blocks(&self) -> usize { + self.sd_card.lock().block_count() as usize + } + + /// Returns the block size in bytes. 
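+    /// The value is reported by the card itself; note that the request path below
+    /// independently assumes 512-byte blocks via the BLOCK_SIZE constant.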
+ fn block_size(&self) -> usize { + self.sd_card.lock().block_size() as usize + } + + fn id(&self) -> usize { + 0 + } + + fn buff_config(&self) -> rdif_block::BuffConfig { + rdif_block::BuffConfig { + dma_mask: u64::MAX, + align: 0x1000, + size: self.block_size(), + } + } + + fn submit_request(&mut self, request: Request<'_>) -> Result { + let actual_block_id = request.block_id + OFFSET / 512; + + match request.kind { + rdif_block::RequestKind::Read(mut buffer) => { + trace!("read block {}", actual_block_id); + + Self::validate_buffer(&buffer)?; + + let (_, aligned_buf, _) = unsafe { buffer.align_to_mut::() }; + let mut temp_buf: Vec = Vec::with_capacity(aligned_buf.len()); + + self.sd_card + .lock() + .read_blocks(&mut temp_buf, actual_block_id as u32, 1) + .map_err(|err| map_mci_error_to_blk_error(err))?; + + let copy_len = cmp::min(temp_buf.len(), aligned_buf.len()); + aligned_buf[..copy_len].copy_from_slice(&temp_buf[..copy_len]); + + Ok(RequestId::new(0)) + } + rdif_block::RequestKind::Write(buffer) => { + trace!("write block {}", actual_block_id); + + Self::validate_buffer(&buffer)?; + + let (_, aligned_buf, _) = unsafe { buffer.align_to::() }; + let mut write_buf: Vec = aligned_buf.to_vec(); + + self.sd_card + .lock() + .write_blocks(&mut write_buf, actual_block_id as u32, 1) + .map_err(|err| map_mci_error_to_blk_error(err))?; + + Ok(RequestId::new(0)) + } + } + } + + fn poll_request( + &mut self, + _request: rdif_block::RequestId, + ) -> Result<(), rdif_block::BlkError> { + Ok(()) + } +} + +impl SdCardQueue { + fn validate_buffer(buffer: &[u8]) -> Result<(), BlkError> { + if buffer.len() < BLOCK_SIZE { + return Err(BlkError::Other(Box::new(BufferError::InvalidSize { + expected: BLOCK_SIZE, + actual: buffer.len(), + }))); + } + + let (prefix, _, suffix) = unsafe { buffer.align_to::() }; + if !prefix.is_empty() || !suffix.is_empty() { + return Err(BlkError::Other(Box::new(BufferError::InvalidAlignment))); + } + + Ok(()) + } +} + +#[derive(Debug)] +enum BufferError { + InvalidSize { expected: usize, actual: usize }, + InvalidAlignment, +} + +impl core::fmt::Display for BufferError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + BufferError::InvalidSize { expected, actual } => { + write!( + f, + "Invalid buffer size: expected at least {}, got {}", + expected, actual + ) + } + BufferError::InvalidAlignment => { + write!(f, "Buffer is not properly aligned for u32 access") + } + } + } +} + +impl core::error::Error for BufferError {} + +#[derive(Debug)] +struct MCIErrorWrapper(MCIHostError); + +impl core::fmt::Display for MCIErrorWrapper { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "MCI Host Error: {:?}", self.0) + } +} + +impl core::error::Error for MCIErrorWrapper {} + +// 错误映射函数 +fn map_mci_error_to_blk_error(err: MCIHostError) -> BlkError { + match err { + MCIHostError::Timeout => BlkError::Retry, + + MCIHostError::OutOfRange | MCIHostError::InvalidArgument => { + BlkError::Other(Box::new(MCIErrorWrapper(err))) + } + + MCIHostError::CardDetectFailed | MCIHostError::CardInitFailed => BlkError::NotSupported, + + MCIHostError::InvalidVoltage + | MCIHostError::SwitchVoltageFail + | MCIHostError::SwitchVoltage18VFail33VSuccess => BlkError::NotSupported, + + MCIHostError::TransferFailed + | MCIHostError::StopTransmissionFailed + | MCIHostError::WaitWriteCompleteFailed => BlkError::Retry, + + // 其他所有错误包装为Other + _ => BlkError::Other(Box::new(MCIErrorWrapper(err))), + } +} From 
127a6acafa57f7712e6f48e44071647ab28fcf5c Mon Sep 17 00:00:00 2001 From: YanLien Date: Thu, 13 Nov 2025 15:45:22 +0800 Subject: [PATCH 2/4] refactor: simplify dependencies in Cargo.lock and update source references --- Cargo.lock | 99 ++++++++++-------------------------------------------- 1 file changed, 17 insertions(+), 82 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2ea481c9..4b6440ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -233,24 +233,7 @@ dependencies = [ "axaddrspace", "axdevice_base", "axerrno", - "axvcpu 0.1.2 (git+https://github.com/arceos-hypervisor/axvcpu.git?branch=next)", - "axvisor_api", - "log", - "numeric-enum-macro", - "percpu", - "spin 0.10.0", -] - -[[package]] -name = "arm_vcpu" -version = "0.1.1" -source = "git+https://github.com/arceos-hypervisor/arm_vcpu?branch=next#b24cc3635c049302ab8d58d3b54007bb5a053a96" -dependencies = [ - "aarch64-cpu", - "axaddrspace", - "axdevice_base", - "axerrno", - "axvcpu 0.1.2 (git+https://github.com/arceos-hypervisor/axvcpu.git?branch=next)", + "axvcpu", "axvisor_api", "log", "numeric-enum-macro", @@ -415,22 +398,6 @@ dependencies = [ "x86_64", ] -[[package]] -name = "axdevice" -version = "0.1.0" -dependencies = [ - "arm_vgic 0.1.0 (git+https://github.com/arceos-hypervisor/arm_vgic.git)", - "axaddrspace", - "axdevice_base", - "axerrno", - "axvmconfig", - "cfg-if", - "log", - "memory_addr", - "range-alloc", - "spin 0.9.8", -] - [[package]] name = "axdevice" version = "0.1.0" @@ -1078,17 +1045,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "axvcpu" -version = "0.1.2" -dependencies = [ - "axaddrspace", - "axerrno", - "axvisor_api", - "memory_addr", - "percpu", -] - [[package]] name = "axvcpu" version = "0.1.2" @@ -1110,13 +1066,13 @@ dependencies = [ "arm-gic-driver", "axaddrspace", "axconfig", - "axdevice 0.1.0 (git+https://github.com/arceos-hypervisor/axdevice.git)", + "axdevice", "axdevice_base", "axerrno", "axhvc", "axruntime", "axstd", - "axvcpu 0.1.2 (git+https://github.com/arceos-hypervisor/axvcpu.git?branch=next)", + "axvcpu", "axvisor_api", "axvm 0.1.0 (git+https://github.com/arceos-hypervisor/axvm.git?branch=next)", "bitflags 2.10.0", @@ -1174,38 +1130,13 @@ dependencies = [ name = "axvm" version = "0.1.0" dependencies = [ - "arm_vcpu 0.1.1 (git+https://github.com/arceos-hypervisor/arm_vcpu?branch=next)", - "arm_vgic 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "axaddrspace", - "axdevice 0.1.0 (git+https://github.com/arceos-hypervisor/axdevice.git)", - "axdevice_base", - "axerrno", - "axvcpu 0.1.2 (git+https://github.com/arceos-hypervisor/axvcpu.git?branch=next)", - "axvmconfig", - "cfg-if", - "cpumask", - "log", - "memory_addr", - "page_table_entry", - "page_table_multiarch", - "percpu", - "riscv_vcpu", - "spin 0.9.8", - "x86_vcpu", -] - -[[package]] -name = "axvm" -version = "0.1.0" -source = "git+https://github.com/arceos-hypervisor/axvm.git?branch=next#22437d2b8e853bfd576f77f00308380463bd2710" -dependencies = [ - "arm_vcpu 0.1.1 (git+https://github.com/arceos-hypervisor/arm_vcpu?branch=next)", + "arm_vcpu", "arm_vgic 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "axaddrspace", - "axdevice 0.1.0 (git+https://github.com/arceos-hypervisor/axdevice.git)", + "axdevice", "axdevice_base", "axerrno", - "axvcpu 0.1.2 (git+https://github.com/arceos-hypervisor/axvcpu.git?branch=next)", + "axvcpu", "axvmconfig", "cfg-if", "cpumask", @@ -1222,7 +1153,7 @@ dependencies = [ [[package]] name = "axvm" version = "0.1.0" -source = 
"git+https://github.com/arceos-hypervisor/axvm.git?branch=next#22437d2b8e853bfd576f77f00308380463bd2710" +source = "git+https://github.com/arceos-hypervisor/axvm.git?branch=next#0393f27dea948433e53285a400e356cdfd4c4fa3" dependencies = [ "arm_vcpu", "arm_vgic 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2371,7 +2302,8 @@ checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127" [[package]] name = "fitimage" version = "0.1.0" -source = "git+https://github.com/ZR233/ostool.git#e9f6c62829f8aa232292dc9fe46de4174c734928" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2150e49b6ec3953ad97382c97356d81332a099de888bf822cab2d98660a000f8" dependencies = [ "anyhow", "byteorder", @@ -3142,7 +3074,8 @@ dependencies = [ [[package]] name = "jkconfig" version = "0.1.4" -source = "git+https://github.com/ZR233/ostool.git#e9f6c62829f8aa232292dc9fe46de4174c734928" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2e10a1f061b3e2b23f16bdc16961acf4f5e25534953153f999e28ad70ab7a09" dependencies = [ "anyhow", "axum", @@ -3740,7 +3673,8 @@ dependencies = [ [[package]] name = "ostool" version = "0.8.0" -source = "git+https://github.com/ZR233/ostool.git#e9f6c62829f8aa232292dc9fe46de4174c734928" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1f90223d51db91747d032ef49378e725a6e42a3d53da62c246abc73fe35cb7" dependencies = [ "anyhow", "byte-unit", @@ -4597,7 +4531,7 @@ checksum = "13f38f28fe6c02bb3ced43087c9667b23d18adf729becdc5adf1253f7df83904" dependencies = [ "axaddrspace", "axerrno", - "axvcpu 0.1.2 (git+https://github.com/arceos-hypervisor/axvcpu.git?branch=next)", + "axvcpu", "axvisor_api", "bit_field", "bitflags 2.10.0", @@ -5864,7 +5798,8 @@ dependencies = [ [[package]] name = "uboot-shell" version = "0.2.0" -source = "git+https://github.com/ZR233/ostool.git#e9f6c62829f8aa232292dc9fe46de4174c734928" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faee7e4264cfd306c84e88bebf5e708a11e206ced00d5f833950a9f1ccf30979" dependencies = [ "colored", "log", @@ -6610,7 +6545,7 @@ dependencies = [ "axaddrspace", "axdevice_base", "axerrno", - "axvcpu 0.1.2 (git+https://github.com/arceos-hypervisor/axvcpu.git?branch=next)", + "axvcpu", "axvisor_api", "bit_field", "bitflags 2.10.0", From a2895031dd7465a50c59fbaa6c3c04b28a3cff51 Mon Sep 17 00:00:00 2001 From: YanLien Date: Thu, 13 Nov 2025 17:41:25 +0800 Subject: [PATCH 3/4] feat: add success regex for Phytium Buildroot welcome message --- .github/workflows/uboot.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/uboot.toml b/.github/workflows/uboot.toml index c7f1d0df..5ed6e04b 100644 --- a/.github/workflows/uboot.toml +++ b/.github/workflows/uboot.toml @@ -12,6 +12,7 @@ success_regex = [ "Hello World!", "root@firefly:~#", "root@phytium-Ubuntu:~#", + "Welcome to Phytium Buildroot", ] [net] From 3eba80fbb57dff88b808835bbc5add8da2b89fcb Mon Sep 17 00:00:00 2001 From: YanLien Date: Thu, 13 Nov 2025 17:43:16 +0800 Subject: [PATCH 4/4] refactor: comment out unused roc-rk3568-pc board configurations in CI workflow --- .github/workflows/test.yml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 087e33ad..97181a42 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -19,16 +19,16 @@ jobs: vmconfigs: configs/vms/linux-aarch64-e2000-smp1.toml vmconfigs_name: Linux vmimage_name: 
phytiumpi_linux.tar.gz - - board: roc-rk3568-pc - vmconfigs: configs/vms/arceos-aarch64-rk3568-smp1.toml - vmconfigs_name: ArceOS - # Multiple image archive names separated by commas, for example, roc-rk3568-pc_arceos.tar.gz,roc-rk3568-pc_linux.tar.gz[,...] - vmimage_name: roc-rk3568-pc_arceos.tar.gz - - board: roc-rk3568-pc - vmconfigs: configs/vms/linux-aarch64-rk3568-smp1.toml - vmconfigs_name: Linux - # Multiple image archive names separated by commas, for example, roc-rk3568-pc_arceos.tar.gz,roc-rk3568-pc_linux.tar.gz[,...] - vmimage_name: roc-rk3568-pc_linux.tar.gz + # - board: roc-rk3568-pc + # vmconfigs: configs/vms/arceos-aarch64-rk3568-smp1.toml + # vmconfigs_name: ArceOS + # # Multiple image archive names separated by commas, for example, roc-rk3568-pc_arceos.tar.gz,roc-rk3568-pc_linux.tar.gz[,...] + # vmimage_name: roc-rk3568-pc_arceos.tar.gz + # - board: roc-rk3568-pc + # vmconfigs: configs/vms/linux-aarch64-rk3568-smp1.toml + # vmconfigs_name: Linux + # # Multiple image archive names separated by commas, for example, roc-rk3568-pc_arceos.tar.gz,roc-rk3568-pc_linux.tar.gz[,...] + # vmimage_name: roc-rk3568-pc_linux.tar.gz fail-fast: false runs-on: - self-hosted
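Appendix (editorial, not part of the patches): a rough sketch of how the suspend/resume subcommands added in PATCH 1/4 can drive the VM lifecycle. Only set_vm_status, the Suspended state, vm.suspending(), and notify_all_vcpus appear in the diffs above; the handler names, the Running variant, and the module path used here are assumptions, and the real vm_suspend/vm_resume handlers in kernel/src/shell/command/vm.rs may differ:

fn suspend_vm_sketch(vm: VMRef) {
    // Once the status is Suspended, vm.suspending() turns true and each vCPU parks
    // itself on the VM's wait queue at the top of vcpu_run() (see PATCH 1/4 above).
    vm.set_vm_status(axvm::VMStatus::Suspended);
}

fn resume_vm_sketch(vm: VMRef) {
    // Assumed: a Running variant represents the resumed state.
    vm.set_vm_status(axvm::VMStatus::Running);
    // Wake every parked vCPU so it can re-check vm.suspending() and continue;
    // whether the real handler reuses notify_all_vcpus for this is an assumption.
    crate::vmm::vcpus::notify_all_vcpus(vm.id());
}

On the stop path, by contrast, the patch has the last exiting vCPU itself move the VM to Stopped and decrement the running-VM count via sub_running_vm_count.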