From 9c467c65bf329c0423448879c7f6202354297a6a Mon Sep 17 00:00:00 2001 From: Tolga Karatas Date: Tue, 5 May 2026 15:33:40 +0300 Subject: [PATCH 1/5] fix: code quality, musl compatibility, and cross-platform module gating Source code changes (no CI/infrastructure): - Cross-platform module gating: storage/virtio keep tests portable, Linux-only modules gated with cfg(target_os = "linux") - Shared compat module (IoctlReq, SendPthreadT) for glibc/musl differences - All clippy lints resolved via cargo fix + cargo clippy --fix on Rust 1.95 - musl static build compatibility: SYS_renameat2 raw syscall, platform- correct ioctl types, Send wrapper for pthread_t - Fix _host_offset naming bug in balloon inflate (compile error on Linux) - Platform-conditional cast for libc::S_IFMT (u16 macOS, u32 Linux) - dead_code allow on modules with forward-declared upstream API - rustfmt applied with max_width=120 Verified: 0 clippy errors on Linux (rust:1.95) and macOS, 266+188 tests pass. --- crates/clone-init/Cargo.toml | 1 + crates/clone-init/src/main.rs | 39 +++-- crates/guest-agent/Cargo.toml | 1 + crates/guest-agent/src/main.rs | 213 ++++++++++++++++---------- src/boot/acpi.rs | 124 +++++++-------- src/boot/identity.rs | 23 +-- src/boot/measured.rs | 39 ++--- src/boot/mod.rs | 101 ++++++------ src/boot/template.rs | 85 +++++------ src/compat.rs | 31 ++++ src/control/daemon.rs | 44 +++--- src/control/jailer.rs | 42 ++--- src/control/metrics.rs | 123 +++++++++++---- src/control/mod.rs | 7 +- src/control/protocol.rs | 14 +- src/control/sync_server.rs | 112 +++++++------- src/main.rs | 158 ++++++++++++------- src/memory/balloon.rs | 7 +- src/memory/mod.rs | 44 ++++-- src/memory/overcommit.rs | 21 +-- src/migration/mod.rs | 134 ++++++++-------- src/net/mod.rs | 162 +++++++++++++------- src/pci/mod.rs | 7 +- src/pci/vfio.rs | 91 +++++------ src/rootfs.rs | 32 ++-- src/rootfs_create.rs | 52 +++---- src/storage/mod.rs | 51 +++---- src/storage/qcow2.rs | 99 ++++-------- src/virtio/balloon.rs 
| 53 +++---- src/virtio/block.rs | 48 +++--- src/virtio/fs.rs | 113 +++++++------- src/virtio/mmio.rs | 128 ++++++++-------- src/virtio/mod.rs | 9 +- src/virtio/net.rs | 173 ++++++++++----------- src/virtio/queue.rs | 51 +++---- src/virtio/vsock.rs | 114 +++++++++----- src/vmm/agent_listener.rs | 96 ++++++------ src/vmm/mod.rs | 270 ++++++++++++++++++--------------- src/vmm/serial.rs | 6 +- src/vmm/vcpu.rs | 206 +++++++++++++++---------- 40 files changed, 1664 insertions(+), 1460 deletions(-) create mode 100644 src/compat.rs diff --git a/crates/clone-init/Cargo.toml b/crates/clone-init/Cargo.toml index 2409e83..92dd986 100644 --- a/crates/clone-init/Cargo.toml +++ b/crates/clone-init/Cargo.toml @@ -2,6 +2,7 @@ name = "clone-init" version = "0.1.0" edition = "2021" +rust-version = "1.87" description = "Minimal init binary for Clone guest rootfs boot" [dependencies] diff --git a/crates/clone-init/src/main.rs b/crates/clone-init/src/main.rs index 5aab9ac..a4277a3 100644 --- a/crates/clone-init/src/main.rs +++ b/crates/clone-init/src/main.rs @@ -88,8 +88,7 @@ fn main() { // overlay.ko is always needed. vsock modules are only loaded if the agent // binary is embedded — on CoW fork snapshots, we skip vsock so the forked // VM starts with clean vsock state (no stale connections from the template). 
- let has_agent = Path::new("/usr/local/bin/clone-agent").exists() - || Path::new("/clone-agent").exists(); + let has_agent = Path::new("/usr/local/bin/clone-agent").exists() || Path::new("/clone-agent").exists(); if has_agent { for module in &[ @@ -113,7 +112,9 @@ fn main() { let overlay_mode = parse_param(&cmdline, "clone.overlay").unwrap_or("none"); let rootfs_type = parse_param(&cmdline, "clone.fstype").unwrap_or("auto"); - msg(&format!("[clone-init] rootfs={rootfs_mode} overlay={overlay_mode} fstype={rootfs_type}")); + msg(&format!( + "[clone-init] rootfs={rootfs_mode} overlay={overlay_mode} fstype={rootfs_type}" + )); // Wait for the root block device to appear let root_dev = "/dev/vda"; @@ -123,11 +124,7 @@ fn main() { mkdir("/mnt"); mkdir("/mnt/root"); - let mount_flags: u64 = if rootfs_mode == "ro" { - MS_RDONLY - } else { - 0 - }; + let mount_flags: u64 = if rootfs_mode == "ro" { MS_RDONLY } else { 0 }; if rootfs_type == "auto" { // Try common filesystems @@ -228,15 +225,21 @@ fn main() { let ptr = unsafe { libc::mmap( - std::ptr::null_mut(), 4096, libc::PROT_READ, libc::MAP_SHARED, - f.as_raw_fd(), page_offset as libc::off_t, + std::ptr::null_mut(), + 4096, + libc::PROT_READ, + libc::MAP_SHARED, + f.as_raw_fd(), + page_offset as libc::off_t, ) }; if ptr != libc::MAP_FAILED { let offset_in_page = (id_addr - page_offset) as usize; let data = unsafe { std::slice::from_raw_parts((ptr as *const u8).add(offset_in_page), 256) }; let _ = fs::write("/run/clone-identity", data); - unsafe { libc::munmap(ptr, 4096); } + unsafe { + libc::munmap(ptr, 4096); + } } } } @@ -259,12 +262,14 @@ fn main() { let c_path_env = CString::new("PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin").unwrap(); let c_home = CString::new("HOME=/root").unwrap(); let c_term = CString::new("TERM=linux").unwrap(); - let envp: [*const libc::c_char; 4] = [ - c_path_env.as_ptr(), c_home.as_ptr(), c_term.as_ptr(), std::ptr::null() - ]; + let envp: [*const libc::c_char; 4] = + 
[c_path_env.as_ptr(), c_home.as_ptr(), c_term.as_ptr(), std::ptr::null()]; libc::execve(c_path.as_ptr(), argv.as_ptr(), envp.as_ptr()); // If execve fails, write error marker and exit - let _ = fs::write("/tmp/clone-agent-exec-failed", format!("errno: {}", *libc::__errno_location())); + let _ = fs::write( + "/tmp/clone-agent-exec-failed", + format!("errno: {}", *libc::__errno_location()), + ); libc::_exit(1); } else if pid > 0 { // agent forked successfully @@ -335,7 +340,9 @@ fn do_mount(source: &str, target: &str, fstype: Option<&str>, flags: u64, data: c_target.as_ptr(), c_fstype.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()), flags, - c_data.as_ref().map_or(std::ptr::null(), |s| s.as_ptr() as *const libc::c_void), + c_data + .as_ref() + .map_or(std::ptr::null(), |s| s.as_ptr() as *const libc::c_void), ) }; ret == 0 diff --git a/crates/guest-agent/Cargo.toml b/crates/guest-agent/Cargo.toml index 9f91692..db0b914 100644 --- a/crates/guest-agent/Cargo.toml +++ b/crates/guest-agent/Cargo.toml @@ -2,6 +2,7 @@ name = "clone-agent" version = "0.1.0" edition = "2021" +rust-version = "1.87" description = "Clone guest agent — reports activity and memory pressure to VMM" [dependencies] diff --git a/crates/guest-agent/src/main.rs b/crates/guest-agent/src/main.rs index 06b6a6a..c191de0 100644 --- a/crates/guest-agent/src/main.rs +++ b/crates/guest-agent/src/main.rs @@ -43,10 +43,7 @@ enum AgentMessage { enum VmmMessage { Poll, Shutdown, - Exec { - command: String, - args: Vec, - }, + Exec { command: String, args: Vec }, } /// Runs on every reconnect (fork or not). 
Fixes state that drifts @@ -69,7 +66,8 @@ fn on_reconnect() { fn cleanup_after_fork() { // Restart D-Bus to clear stale sockets let _ = std::process::Command::new("systemctl") - .args(["restart", "dbus"]).output(); + .args(["restart", "dbus"]) + .output(); // Remove stale user session sockets let _ = std::fs::remove_file("/run/nologin"); if let Ok(entries) = std::fs::read_dir("/run/user") { @@ -81,13 +79,18 @@ fn cleanup_after_fork() { // Restart services that use Go runtime (crashes after fork due to // stale goroutine stacks). Latch is the main one. let _ = std::process::Command::new("systemctl") - .args(["restart", "latch"]).output(); + .args(["restart", "latch"]) + .output(); // Restart user latch service if lingering is enabled if let Ok(entries) = std::fs::read_dir("/home") { for entry in entries.flatten() { let user = entry.file_name(); let _ = std::process::Command::new("su") - .args(["-c", "systemctl --user restart latch 2>/dev/null", user.to_str().unwrap_or("shell")]) + .args([ + "-c", + "systemctl --user restart latch 2>/dev/null", + user.to_str().unwrap_or("shell"), + ]) .output(); } } @@ -110,11 +113,15 @@ fn read_identity_ip() -> Option { let iomem = std::fs::read_to_string("/proc/iomem").ok()?; let mut ram_end: u64 = 0; for line in iomem.lines() { - if !line.contains("System RAM") { continue; } + if !line.contains("System RAM") { + continue; + } let range = line.split(':').next()?.trim(); let end_str = range.split('-').nth(1)?.trim(); if let Ok(end) = u64::from_str_radix(end_str, 16) { - if end > ram_end { ram_end = end; } + if end > ram_end { + ram_end = end; + } } } @@ -138,9 +145,8 @@ fn read_identity_ip() -> Option { } // Last resort: /run/clone-identity - read_identity_from_file("/run/clone-identity").and_then(|_| - parse_identity_ip(&std::fs::read("/run/clone-identity").ok()?)) - + read_identity_from_file("/run/clone-identity") + .and_then(|_| parse_identity_ip(&std::fs::read("/run/clone-identity").ok()?)) } fn read_identity_from_file(path: 
&str) -> Option { @@ -157,30 +163,43 @@ fn read_identity_from_devmem(addr: u64) -> Option { let map_size = offset_in_page + 0x100; let ptr = unsafe { libc::mmap( - std::ptr::null_mut(), map_size, - libc::PROT_READ, libc::MAP_SHARED, - f.as_raw_fd(), aligned as libc::off_t, + std::ptr::null_mut(), + map_size, + libc::PROT_READ, + libc::MAP_SHARED, + f.as_raw_fd(), + aligned as libc::off_t, ) }; - if ptr == libc::MAP_FAILED { return None; } + if ptr == libc::MAP_FAILED { + return None; + } let data = unsafe { std::slice::from_raw_parts(ptr.add(offset_in_page) as *const u8, 0x6C) }; let result = parse_identity_ip(data); - unsafe { libc::munmap(ptr, map_size); } + unsafe { + libc::munmap(ptr, map_size); + } result } fn read_identity_mac() -> Option { // Try /run/clone-identity first, then /dev/mem - if let Some(mac) = read_identity_from_file("/run/clone-identity").and_then(|_| parse_identity_mac(&std::fs::read("/run/clone-identity").ok()?)) { + if let Some(mac) = read_identity_from_file("/run/clone-identity") + .and_then(|_| parse_identity_mac(&std::fs::read("/run/clone-identity").ok()?)) + { return Some(mac); } let iomem = std::fs::read_to_string("/proc/iomem").ok()?; for line in iomem.lines() { - if !line.contains("reserved") && !line.contains("Reserved") { continue; } + if !line.contains("reserved") && !line.contains("Reserved") { + continue; + } let range = line.split(':').next()?.trim(); let start_str = range.split('-').next()?.trim(); let start = u64::from_str_radix(start_str, 16).ok()?; - if start < 0x100000 { continue; } + if start < 0x100000 { + continue; + } if let Some(mac) = read_mac_from_devmem(start) { return Some(mac); } @@ -197,35 +216,56 @@ fn read_mac_from_devmem(addr: u64) -> Option { let map_size = offset_in_page + 0x100; let ptr = unsafe { libc::mmap( - std::ptr::null_mut(), map_size, - libc::PROT_READ, libc::MAP_SHARED, - f.as_raw_fd(), aligned as libc::off_t, + std::ptr::null_mut(), + map_size, + libc::PROT_READ, + libc::MAP_SHARED, + 
f.as_raw_fd(), + aligned as libc::off_t, ) }; - if ptr == libc::MAP_FAILED { return None; } + if ptr == libc::MAP_FAILED { + return None; + } let data = unsafe { std::slice::from_raw_parts(ptr.add(offset_in_page) as *const u8, 0x6C) }; let result = parse_identity_mac(data); - unsafe { libc::munmap(ptr, map_size); } + unsafe { + libc::munmap(ptr, map_size); + } result } fn parse_identity_mac(data: &[u8]) -> Option { - if data.len() < 0x66 { return None; } + if data.len() < 0x66 { + return None; + } let magic = u32::from_le_bytes([data[0], data[1], data[2], data[3]]); - if magic != 0x494D564E { return None; } // "NVMI" - // MAC at offset 0x060 (6 bytes) + if magic != 0x494D564E { + return None; + } // "NVMI" + // MAC at offset 0x060 (6 bytes) let mac = &data[0x60..0x66]; - if mac == [0, 0, 0, 0, 0, 0] { return None; } - Some(format!("{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}", - mac[0], mac[1], mac[2], mac[3], mac[4], mac[5])) + if mac == [0, 0, 0, 0, 0, 0] { + return None; + } + Some(format!( + "{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5] + )) } fn parse_identity_ip(data: &[u8]) -> Option { - if data.len() < 0x6C { return None; } + if data.len() < 0x6C { + return None; + } let magic = u32::from_le_bytes([data[0], data[1], data[2], data[3]]); - if magic != 0x494D564E { return None; } // "NVMI" + if magic != 0x494D564E { + return None; + } // "NVMI" let ip = &data[0x68..0x6C]; - if ip == [0, 0, 0, 0] { return None; } + if ip == [0, 0, 0, 0] { + return None; + } Some(format!("{}.{}.{}.{}", ip[0], ip[1], ip[2], ip[3])) } @@ -245,8 +285,7 @@ fn main() { // fork's new IP (injected before vCPUs resume), so reading it here // would make fork detection impossible. 
let cmdline = fs::read_to_string("/proc/cmdline").unwrap_or_default(); - let mut current_ip: Option = parse_cmdline_param(&cmdline, "clone.net_ip") - .or_else(|| read_identity_ip()); + let mut current_ip: Option = parse_cmdline_param(&cmdline, "clone.net_ip").or_else(|| read_identity_ip()); loop { let fd = match connect_vsock(VMADDR_CID_HOST, agent_port) { @@ -281,10 +320,15 @@ fn run_agent(fd: i32) { let heartbeat_interval = Duration::from_secs(2); // Set recv timeout to 50ms for fast exec response - let tv = libc::timeval { tv_sec: 0, tv_usec: 50_000 }; + let tv = libc::timeval { + tv_sec: 0, + tv_usec: 50_000, + }; unsafe { libc::setsockopt( - fd, libc::SOL_SOCKET, libc::SO_RCVTIMEO, + fd, + libc::SOL_SOCKET, + libc::SO_RCVTIMEO, &tv as *const libc::timeval as *const libc::c_void, std::mem::size_of::() as libc::socklen_t, ); @@ -298,14 +342,17 @@ fn run_agent(fd: i32) { let metrics = collect_metrics(); let active = is_active(&metrics, last_load); last_load = metrics.load_avg_1m; - let _ = send_message(fd, &AgentMessage::Heartbeat { - active, - load_avg_1m: metrics.load_avg_1m, - mem_pressure_pct: metrics.mem_pressure_pct, - mem_available_pct: metrics.mem_available_pct, - process_count: metrics.process_count, - uptime_secs: metrics.uptime_secs, - }); + let _ = send_message( + fd, + &AgentMessage::Heartbeat { + active, + load_avg_1m: metrics.load_avg_1m, + mem_pressure_pct: metrics.mem_pressure_pct, + mem_available_pct: metrics.mem_available_pct, + process_count: metrics.process_count, + uptime_secs: metrics.uptime_secs, + }, + ); last_heartbeat = std::time::Instant::now(); continue; } @@ -321,7 +368,10 @@ fn run_agent(fd: i32) { .env("USER", "root") .env("SHELL", "/bin/bash") .env("TERM", "xterm-256color") - .env("PATH", "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin") + .env( + "PATH", + "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin", + ) .stdout(std::process::Stdio::piped()) 
.stderr(std::process::Stdio::piped()) .spawn() @@ -391,14 +441,19 @@ fn run_agent(fd: i32) { let metrics = collect_metrics(); let active = is_active(&metrics, last_load); last_load = metrics.load_avg_1m; - if send_message(fd, &AgentMessage::Heartbeat { - active, - load_avg_1m: metrics.load_avg_1m, - mem_pressure_pct: metrics.mem_pressure_pct, - mem_available_pct: metrics.mem_available_pct, - process_count: metrics.process_count, - uptime_secs: metrics.uptime_secs, - }).is_err() { + if send_message( + fd, + &AgentMessage::Heartbeat { + active, + load_avg_1m: metrics.load_avg_1m, + mem_pressure_pct: metrics.mem_pressure_pct, + mem_available_pct: metrics.mem_available_pct, + process_count: metrics.process_count, + uptime_secs: metrics.uptime_secs, + }, + ) + .is_err() + { return; } last_heartbeat = std::time::Instant::now(); @@ -509,11 +564,19 @@ fn read_mem_available_pct() -> f64 { let mut available_kb: u64 = 0; for line in contents.lines() { if let Some(val) = line.strip_prefix("MemTotal:") { - total_kb = val.trim().split_whitespace().next() - .and_then(|v| v.parse().ok()).unwrap_or(0); + total_kb = val + .trim() + .split_whitespace() + .next() + .and_then(|v| v.parse().ok()) + .unwrap_or(0); } else if let Some(val) = line.strip_prefix("MemAvailable:") { - available_kb = val.trim().split_whitespace().next() - .and_then(|v| v.parse().ok()).unwrap_or(0); + available_kb = val + .trim() + .split_whitespace() + .next() + .and_then(|v| v.parse().ok()) + .unwrap_or(0); } } if total_kb == 0 { @@ -583,10 +646,15 @@ fn connect_vsock(cid: u32, port: u32) -> Option { addr.svm_port = port; // Fast connect timeout (vsock connects in microseconds) - let tv = libc::timeval { tv_sec: 0, tv_usec: 500_000 }; + let tv = libc::timeval { + tv_sec: 0, + tv_usec: 500_000, + }; unsafe { libc::setsockopt( - fd, libc::SOL_SOCKET, libc::SO_SNDTIMEO, + fd, + libc::SOL_SOCKET, + libc::SO_SNDTIMEO, &tv as *const libc::timeval as *const libc::c_void, std::mem::size_of::() as libc::socklen_t, ); @@ 
-620,15 +688,17 @@ fn send_message(fd: i32, msg: &AgentMessage) -> Result<(), ()> { buf.extend_from_slice(&len); buf.extend_from_slice(&json); - let mut pfd = libc::pollfd { fd, events: libc::POLLOUT, revents: 0 }; + let mut pfd = libc::pollfd { + fd, + events: libc::POLLOUT, + revents: 0, + }; let poll_ret = unsafe { libc::poll(&mut pfd, 1, 3000) }; if poll_ret <= 0 || (pfd.revents & (libc::POLLERR | libc::POLLHUP)) != 0 { return Err(()); } - let written = unsafe { - libc::write(fd, buf.as_ptr() as *const libc::c_void, buf.len()) - }; + let written = unsafe { libc::write(fd, buf.as_ptr() as *const libc::c_void, buf.len()) }; if written as usize == buf.len() { Ok(()) @@ -639,9 +709,7 @@ fn send_message(fd: i32, msg: &AgentMessage) -> Result<(), ()> { fn recv_message(fd: i32) -> Option { let mut len_buf = [0u8; 4]; - let n = unsafe { - libc::recv(fd, len_buf.as_mut_ptr() as *mut libc::c_void, 4, 0) - }; + let n = unsafe { libc::recv(fd, len_buf.as_mut_ptr() as *mut libc::c_void, 4, 0) }; if n != 4 { return None; } @@ -654,14 +722,7 @@ fn recv_message(fd: i32) -> Option { let mut body = vec![0u8; len]; let mut read = 0; while read < len { - let n = unsafe { - libc::recv( - fd, - body[read..].as_mut_ptr() as *mut libc::c_void, - len - read, - 0, - ) - }; + let n = unsafe { libc::recv(fd, body[read..].as_mut_ptr() as *mut libc::c_void, len - read, 0) }; if n <= 0 { return None; } diff --git a/src/boot/acpi.rs b/src/boot/acpi.rs index f3641d9..f4ffe4d 100644 --- a/src/boot/acpi.rs +++ b/src/boot/acpi.rs @@ -4,8 +4,8 @@ //! and IOAPIC. Without these, the kernel falls back to "virtual wire mode" //! and timer interrupts don't route properly, stalling the boot. -use anyhow::Result; use crate::memory::GuestMem; +use anyhow::Result; /// RSDP is placed at 0xE0000 (in the EBDA/ROM region the kernel scans). 
const RSDP_ADDR: u64 = 0x000E_0000; @@ -96,10 +96,10 @@ fn build_mcfg() -> Vec { // Allocation entry (16 bytes, starting at offset 44) let ecam_base: u64 = crate::pci::ECAM_BASE; mcfg[44..52].copy_from_slice(&ecam_base.to_le_bytes()); // Base address - mcfg[52..54].copy_from_slice(&0u16.to_le_bytes()); // PCI Segment Group - mcfg[54] = 0; // Start Bus Number - mcfg[55] = 0; // End Bus Number (only bus 0) - // 4 bytes reserved (56-59) — already zero + mcfg[52..54].copy_from_slice(&0u16.to_le_bytes()); // PCI Segment Group + mcfg[54] = 0; // Start Bus Number + mcfg[55] = 0; // End Bus Number (only bus 0) + // 4 bytes reserved (56-59) — already zero // Checksum (byte 9) let cksum: u8 = mcfg.iter().fold(0u8, |a, &b| a.wrapping_add(b)); @@ -109,7 +109,7 @@ fn build_mcfg() -> Vec { } /// Build RSDP v2 (Root System Description Pointer). -fn build_rsdp(xsdt_addr: u64, xsdt_len: u32) -> Vec { +fn build_rsdp(xsdt_addr: u64, _xsdt_len: u32) -> Vec { let mut rsdp = vec![0u8; 36]; // RSDP v2 = 36 bytes // Signature: "RSD PTR " (8 bytes) @@ -142,14 +142,14 @@ fn build_xsdt(table_addrs: &[u64]) -> Vec { let mut xsdt = vec![0u8; total_len as usize]; // Header - xsdt[0..4].copy_from_slice(b"XSDT"); // Signature - xsdt[4..8].copy_from_slice(&total_len.to_le_bytes()); // Length - xsdt[8] = 1; // Revision - xsdt[10..16].copy_from_slice(b"CLONE "); // OEM ID - xsdt[16..24].copy_from_slice(b"CLONE "); // OEM Table ID - xsdt[24..28].copy_from_slice(&1u32.to_le_bytes()); // OEM Revision - xsdt[28..32].copy_from_slice(b"NVM "); // Creator ID - xsdt[32..36].copy_from_slice(&1u32.to_le_bytes()); // Creator Revision + xsdt[0..4].copy_from_slice(b"XSDT"); // Signature + xsdt[4..8].copy_from_slice(&total_len.to_le_bytes()); // Length + xsdt[8] = 1; // Revision + xsdt[10..16].copy_from_slice(b"CLONE "); // OEM ID + xsdt[16..24].copy_from_slice(b"CLONE "); // OEM Table ID + xsdt[24..28].copy_from_slice(&1u32.to_le_bytes()); // OEM Revision + xsdt[28..32].copy_from_slice(b"NVM "); // Creator ID + 
xsdt[32..36].copy_from_slice(&1u32.to_le_bytes()); // Creator Revision // Table pointers for (i, &addr) in table_addrs.iter().enumerate() { @@ -175,14 +175,14 @@ fn build_fadt(dsdt_addr: u64) -> Vec { let mut fadt = vec![0u8; total_len as usize]; // Standard ACPI header (36 bytes) - fadt[0..4].copy_from_slice(b"FACP"); // Signature - fadt[4..8].copy_from_slice(&total_len.to_le_bytes()); // Length - fadt[8] = 6; // Revision (ACPI 6.0) - fadt[10..16].copy_from_slice(b"CLONE "); // OEM ID - fadt[16..24].copy_from_slice(b"CLONE "); // OEM Table ID - fadt[24..28].copy_from_slice(&1u32.to_le_bytes()); // OEM Revision - fadt[28..32].copy_from_slice(b"NVM "); // Creator ID - fadt[32..36].copy_from_slice(&1u32.to_le_bytes()); // Creator Revision + fadt[0..4].copy_from_slice(b"FACP"); // Signature + fadt[4..8].copy_from_slice(&total_len.to_le_bytes()); // Length + fadt[8] = 6; // Revision (ACPI 6.0) + fadt[10..16].copy_from_slice(b"CLONE "); // OEM ID + fadt[16..24].copy_from_slice(b"CLONE "); // OEM Table ID + fadt[24..28].copy_from_slice(&1u32.to_le_bytes()); // OEM Revision + fadt[28..32].copy_from_slice(b"NVM "); // Creator ID + fadt[32..36].copy_from_slice(&1u32.to_le_bytes()); // Creator Revision // DSDT address (legacy 32-bit field at offset 40) fadt[40..44].copy_from_slice(&(dsdt_addr as u32).to_le_bytes()); @@ -247,24 +247,24 @@ fn build_dsdt() -> Vec { // Total AML = 1(ScopeOp) + 1(PkgLen) + 5(body) = 7 bytes let aml: &[u8] = &[ // Scope (\_SB) - 0x10, // ScopeOp - 0x06, // PkgLength = 6 (1-byte encoding) - 0x5C, // RootChar '\' - 0x5F, 0x53, 0x42, 0x5F, // "_SB_" + 0x10, // ScopeOp + 0x06, // PkgLength = 6 (1-byte encoding) + 0x5C, // RootChar '\' + 0x5F, 0x53, 0x42, 0x5F, // "_SB_" ]; let header_len = 36; let total_len = (header_len + aml.len()) as u32; let mut dsdt = vec![0u8; total_len as usize]; - dsdt[0..4].copy_from_slice(b"DSDT"); // Signature - dsdt[4..8].copy_from_slice(&total_len.to_le_bytes()); // Length - dsdt[8] = 2; // Revision - 
dsdt[10..16].copy_from_slice(b"CLONE "); // OEM ID - dsdt[16..24].copy_from_slice(b"CLONE "); // OEM Table ID - dsdt[24..28].copy_from_slice(&1u32.to_le_bytes()); // OEM Revision - dsdt[28..32].copy_from_slice(b"NVM "); // Creator ID - dsdt[32..36].copy_from_slice(&1u32.to_le_bytes()); // Creator Revision + dsdt[0..4].copy_from_slice(b"DSDT"); // Signature + dsdt[4..8].copy_from_slice(&total_len.to_le_bytes()); // Length + dsdt[8] = 2; // Revision + dsdt[10..16].copy_from_slice(b"CLONE "); // OEM ID + dsdt[16..24].copy_from_slice(b"CLONE "); // OEM Table ID + dsdt[24..28].copy_from_slice(&1u32.to_le_bytes()); // OEM Revision + dsdt[28..32].copy_from_slice(b"NVM "); // Creator ID + dsdt[32..36].copy_from_slice(&1u32.to_le_bytes()); // Creator Revision // Copy AML body after header dsdt[header_len..].copy_from_slice(aml); @@ -302,14 +302,14 @@ fn build_madt(num_cpus: u32) -> Vec { let mut madt = vec![0u8; total_len]; // Standard ACPI header - madt[0..4].copy_from_slice(b"APIC"); // Signature + madt[0..4].copy_from_slice(b"APIC"); // Signature madt[4..8].copy_from_slice(&(total_len as u32).to_le_bytes()); // Length - madt[8] = 4; // Revision (ACPI 6.0) - madt[10..16].copy_from_slice(b"CLONE "); // OEM ID - madt[16..24].copy_from_slice(b"CLONE "); // OEM Table ID - madt[24..28].copy_from_slice(&1u32.to_le_bytes()); // OEM Revision - madt[28..32].copy_from_slice(b"NVM "); // Creator ID - madt[32..36].copy_from_slice(&1u32.to_le_bytes()); // Creator Revision + madt[8] = 4; // Revision (ACPI 6.0) + madt[10..16].copy_from_slice(b"CLONE "); // OEM ID + madt[16..24].copy_from_slice(b"CLONE "); // OEM Table ID + madt[24..28].copy_from_slice(&1u32.to_le_bytes()); // OEM Revision + madt[28..32].copy_from_slice(b"NVM "); // Creator ID + madt[32..36].copy_from_slice(&1u32.to_le_bytes()); // Creator Revision // Local Interrupt Controller Address (offset 36) madt[36..40].copy_from_slice(&LAPIC_DEFAULT_ADDR.to_le_bytes()); @@ -320,21 +320,21 @@ fn build_madt(num_cpus: u32) -> Vec { 
// Local APIC entries (type 0, length 8) for i in 0..num_cpus { - madt[offset] = 0; // Type: Processor Local APIC - madt[offset + 1] = 8; // Length + madt[offset] = 0; // Type: Processor Local APIC + madt[offset + 1] = 8; // Length madt[offset + 2] = i as u8; // ACPI Processor UID madt[offset + 3] = i as u8; // APIC ID - // Flags: bit 0 = Enabled + // Flags: bit 0 = Enabled madt[offset + 4..offset + 8].copy_from_slice(&1u32.to_le_bytes()); offset += 8; } // I/O APIC entry (type 1, length 12) - madt[offset] = 1; // Type: I/O APIC - madt[offset + 1] = 12; // Length - madt[offset + 2] = 0; // I/O APIC ID - madt[offset + 3] = 0; // Reserved - // I/O APIC Address + madt[offset] = 1; // Type: I/O APIC + madt[offset + 1] = 12; // Length + madt[offset + 2] = 0; // I/O APIC ID + madt[offset + 3] = 0; // Reserved + // I/O APIC Address madt[offset + 4..offset + 8].copy_from_slice(&IOAPIC_DEFAULT_ADDR.to_le_bytes()); // Global System Interrupt Base madt[offset + 8..offset + 12].copy_from_slice(&0u32.to_le_bytes()); @@ -366,11 +366,11 @@ fn build_madt(num_cpus: u32) -> Vec { // Local APIC NMI entry (type 4, length 6) // All processors, LINT1 = NMI, flags = 0 (conforms) - madt[offset] = 4; // Type: Local APIC NMI - madt[offset + 1] = 6; // Length + madt[offset] = 4; // Type: Local APIC NMI + madt[offset + 1] = 6; // Length madt[offset + 2] = 0xFF; // ACPI Processor UID (0xFF = all processors) madt[offset + 3..offset + 5].copy_from_slice(&0u16.to_le_bytes()); // Flags - madt[offset + 5] = 1; // Local APIC LINT# (1 = LINT1 for NMI) + madt[offset + 5] = 1; // Local APIC LINT# (1 = LINT1 for NMI) // Checksum (byte 9) let cksum: u8 = madt.iter().fold(0u8, |a, &b| a.wrapping_add(b)); @@ -381,10 +381,10 @@ fn build_madt(num_cpus: u32) -> Vec { /// Write an Interrupt Source Override entry at the given offset. 
fn write_iso(madt: &mut [u8], offset: usize, bus: u8, source: u8, gsi: u32, flags: u16) { - madt[offset] = 2; // Type: Interrupt Source Override - madt[offset + 1] = 10; // Length - madt[offset + 2] = bus; // Bus (0 = ISA) - madt[offset + 3] = source; // Source (ISA IRQ) + madt[offset] = 2; // Type: Interrupt Source Override + madt[offset + 1] = 10; // Length + madt[offset + 2] = bus; // Bus (0 = ISA) + madt[offset + 3] = source; // Source (ISA IRQ) madt[offset + 4..offset + 8].copy_from_slice(&gsi.to_le_bytes()); // Global System Interrupt madt[offset + 8..offset + 10].copy_from_slice(&flags.to_le_bytes()); // Flags } @@ -476,9 +476,9 @@ mod tests { // 44 + 4*8 + 12 + 50 + 6 = 144 assert_eq!(madt.len(), 144); // Check CPU 3's LAPIC entry (at offset 44 + 3*8 = 68) - assert_eq!(madt[68], 0); // type - assert_eq!(madt[70], 3); // processor UID - assert_eq!(madt[71], 3); // APIC ID + assert_eq!(madt[68], 0); // type + assert_eq!(madt[70], 3); // processor UID + assert_eq!(madt[71], 3); // APIC ID } #[test] @@ -543,7 +543,11 @@ mod tests { fn test_fadt_no_hw_reduced_flag() { let fadt = build_fadt(0x1000); let flags = u32::from_le_bytes(fadt[112..116].try_into().unwrap()); - assert_eq!(flags & (1 << 20), 0, "HW_REDUCED_ACPI flag must NOT be set (breaks IOAPIC)"); + assert_eq!( + flags & (1 << 20), + 0, + "HW_REDUCED_ACPI flag must NOT be set (breaks IOAPIC)" + ); } #[test] diff --git a/src/boot/identity.rs b/src/boot/identity.rs index 0c84837..1d8c913 100644 --- a/src/boot/identity.rs +++ b/src/boot/identity.rs @@ -87,8 +87,7 @@ impl VmIdentity { let b = &self.vm_id; format!( "{:02x}{:02x}{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}", - b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7], - b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15], + b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7], b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15], ) } @@ -144,8 +143,7 @@ pub fn generate_identity() -> Result { // Read random bytes for 
UUID, MAC, entropy seed, and CID // Total: 16 (uuid) + 5 (mac random bytes) + 32 (entropy) + 8 (cid randomness) = 61 bytes let mut random_bytes = [0u8; 61]; - let mut urandom = std::fs::File::open("/dev/urandom") - .context("Failed to open /dev/urandom")?; + let mut urandom = std::fs::File::open("/dev/urandom").context("Failed to open /dev/urandom")?; urandom .read_exact(&mut random_bytes) .context("Failed to read from /dev/urandom")?; @@ -173,10 +171,7 @@ pub fn generate_identity() -> Result { vsock_cid = (vsock_cid % (u32::MAX as u64 - 3)) + 3; // Hostname derived from first 4 bytes of UUID - let hostname = format!( - "clone-{:02x}{:02x}{:02x}{:02x}", - vm_id[0], vm_id[1], vm_id[2], vm_id[3], - ); + let hostname = format!("clone-{:02x}{:02x}{:02x}{:02x}", vm_id[0], vm_id[1], vm_id[2], vm_id[3],); let identity = VmIdentity { vm_id, @@ -232,8 +227,7 @@ mod tests { fn test_identity_page_layout() { let identity = VmIdentity { vm_id: [ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x47, 0x08, 0x89, 0x0A, 0x0B, 0x0C, 0x0D, - 0x0E, 0x0F, 0x10, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x47, 0x08, 0x89, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, ], hostname: "test-vm".to_string(), vsock_cid: 42, @@ -331,11 +325,7 @@ mod tests { for _ in 0..10 { let id = generate_identity().unwrap(); assert!(id.vsock_cid >= 3, "CID {} is below 3", id.vsock_cid); - assert!( - id.vsock_cid < u32::MAX as u64, - "CID {} exceeds u32::MAX", - id.vsock_cid - ); + assert!(id.vsock_cid < u32::MAX as u64, "CID {} exceeds u32::MAX", id.vsock_cid); } } @@ -393,8 +383,7 @@ mod tests { fn test_vm_id_string_format() { let identity = VmIdentity { vm_id: [ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x47, 0x08, - 0x89, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x47, 0x08, 0x89, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, ], hostname: "test".to_string(), vsock_cid: 3, diff --git a/src/boot/measured.rs b/src/boot/measured.rs index 33c4680..b4ea906 100644 --- a/src/boot/measured.rs +++ 
b/src/boot/measured.rs @@ -7,7 +7,6 @@ use anyhow::{Context, Result}; use sha2::{Digest, Sha256}; use std::collections::HashMap; -use std::path::Path; /// A kernel verifier that checks kernel binaries against expected SHA-256 hashes. #[derive(Debug)] @@ -26,10 +25,7 @@ impl KernelVerifier { pub fn from_hex(hex: &str) -> Result { let bytes = hex_decode(hex).context("Invalid hex string for kernel hash")?; if bytes.len() != 32 { - anyhow::bail!( - "Expected 32-byte SHA-256 hash, got {} bytes", - bytes.len() - ); + anyhow::bail!("Expected 32-byte SHA-256 hash, got {} bytes", bytes.len()); } let mut hash = [0u8; 32]; hash.copy_from_slice(&bytes); @@ -39,8 +35,7 @@ impl KernelVerifier { /// Read the kernel file, compute its SHA-256 hash, and verify it matches /// the expected hash. Returns the kernel bytes on success. pub fn verify_kernel(&self, path: &str) -> Result> { - let kernel_data = std::fs::read(path) - .with_context(|| format!("Failed to read kernel: {path}"))?; + let kernel_data = std::fs::read(path).with_context(|| format!("Failed to read kernel: {path}"))?; let actual_hash = compute_sha256(&kernel_data); @@ -52,11 +47,7 @@ impl KernelVerifier { ); } - tracing::info!( - "Kernel verified: {} (SHA-256: {})", - path, - hex_encode(&actual_hash), - ); + tracing::info!("Kernel verified: {} (SHA-256: {})", path, hex_encode(&actual_hash),); Ok(kernel_data) } @@ -93,11 +84,10 @@ pub struct TrustedManifest { /// The manifest is a JSON file containing a map of kernel names to their /// expected SHA-256 hashes, plus a signature field for future verification. 
pub fn load_trusted_hashes(manifest_path: &str) -> Result { - let data = std::fs::read_to_string(manifest_path) - .with_context(|| format!("Failed to read manifest: {manifest_path}"))?; + let data = + std::fs::read_to_string(manifest_path).with_context(|| format!("Failed to read manifest: {manifest_path}"))?; - let manifest: TrustedManifest = - serde_json::from_str(&data).context("Failed to parse kernel manifest JSON")?; + let manifest: TrustedManifest = serde_json::from_str(&data).context("Failed to parse kernel manifest JSON")?; if manifest.hashes.is_empty() { anyhow::bail!("Kernel manifest contains no hashes"); @@ -144,15 +134,12 @@ fn hex_encode(bytes: &[u8]) -> String { /// Decode a hex string into bytes. fn hex_decode(hex: &str) -> Result> { - if hex.len() % 2 != 0 { + if !hex.len().is_multiple_of(2) { anyhow::bail!("Hex string has odd length"); } (0..hex.len()) .step_by(2) - .map(|i| { - u8::from_str_radix(&hex[i..i + 2], 16) - .with_context(|| format!("Invalid hex at position {i}")) - }) + .map(|i| u8::from_str_radix(&hex[i..i + 2], 16).with_context(|| format!("Invalid hex at position {i}"))) .collect() } @@ -164,10 +151,7 @@ mod tests { fn test_compute_sha256() { let hash = compute_sha256(b"hello world"); let hex = hex_encode(&hash); - assert_eq!( - hex, - "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9" - ); + assert_eq!(hex, "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"); } #[test] @@ -286,10 +270,7 @@ mod tests { let hash = compute_sha256(b""); let hex = hex_encode(&hash); // Known SHA-256 of empty string - assert_eq!( - hex, - "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - ); + assert_eq!(hex, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); } #[test] diff --git a/src/boot/mod.rs b/src/boot/mod.rs index 96fd5ea..9811871 100644 --- a/src/boot/mod.rs +++ b/src/boot/mod.rs @@ -43,14 +43,9 @@ pub fn load_kernel_with_pci( // Load kernel via mmap + MADV_SEQUENTIAL|WILLNEED for 
async readahead. // Pages fault in while we set up ACPI/page tables in parallel, saving ~50ms // vs synchronous read() for a ~14MB kernel. - let kernel_data = mmap_kernel(kernel_path) - .with_context(|| format!("Failed to mmap kernel: {kernel_path}"))?; + let kernel_data = mmap_kernel(kernel_path).with_context(|| format!("Failed to mmap kernel: {kernel_path}"))?; - tracing::info!( - "Loading kernel: {} ({} bytes)", - kernel_path, - kernel_data.len() - ); + tracing::info!("Loading kernel: {} ({} bytes)", kernel_path, kernel_data.len()); // Write kernel command line to guest memory let cmdline_bytes = cmdline.as_bytes(); @@ -104,7 +99,7 @@ const E820_RESERVED: u32 = 2; /// Boot params offsets (struct boot_params from Linux arch/x86/include/uapi/asm/bootparam.h). const BP_E820_ENTRIES: u64 = 0x1E8; // offset of e820_entries count (u8) -const BP_E820_TABLE: u64 = 0x2D0; // offset of e820_table array +const BP_E820_TABLE: u64 = 0x2D0; // offset of e820_table array const BP_HEAP_END_PTR: u64 = 0x224; // heap_end_ptr const BP_CMD_LINE_PTR: u64 = 0x228; // cmd_line_ptr @@ -116,7 +111,10 @@ fn load_bzimage(mem: &GuestMem, data: &[u8], _cmdline: &str, ram_size: u64) -> R let kernel_offset = setup_size; if kernel_offset >= data.len() { - anyhow::bail!("Invalid bzImage: setup_size ({setup_size}) >= file size ({})", data.len()); + anyhow::bail!( + "Invalid bzImage: setup_size ({setup_size}) >= file size ({})", + data.len() + ); } // Zero the boot_params area first, then copy setup header into it. 
@@ -130,10 +128,7 @@ fn load_bzimage(mem: &GuestMem, data: &[u8], _cmdline: &str, ram_size: u64) -> R let header_start = 0x1F1usize; let header_end = setup_size.min(data.len()).min(4096); if header_end > header_start { - mem.write_at( - BOOT_PARAMS_ADDR + header_start as u64, - &data[header_start..header_end], - )?; + mem.write_at(BOOT_PARAMS_ADDR + header_start as u64, &data[header_start..header_end])?; } // Patch boot_params with our cmdline pointer @@ -164,31 +159,67 @@ fn load_bzimage(mem: &GuestMem, data: &[u8], _cmdline: &str, ram_size: u64) -> R let mut e820: Vec = Vec::with_capacity(9); // Usable RAM below 640K - e820.push(E820Entry { addr: 0, size: 0x9FC00, type_: E820_RAM }); + e820.push(E820Entry { + addr: 0, + size: 0x9FC00, + type_: E820_RAM, + }); // Reserved: EBDA - e820.push(E820Entry { addr: 0x9FC00, size: 0x400, type_: E820_RESERVED }); + e820.push(E820Entry { + addr: 0x9FC00, + size: 0x400, + type_: E820_RESERVED, + }); // Reserved: BIOS ROM - e820.push(E820Entry { addr: 0xF0000, size: 0x10000, type_: E820_RESERVED }); + e820.push(E820Entry { + addr: 0xF0000, + size: 0x10000, + type_: E820_RESERVED, + }); if ram_size <= mmio_hole_start { // Small VM: single RAM region let reserved_top: u64 = 0x20000; let ram_end = ram_size - reserved_top; - e820.push(E820Entry { addr: 0x100000, size: ram_end - 0x100000, type_: E820_RAM }); - e820.push(E820Entry { addr: ram_end, size: reserved_top, type_: E820_RESERVED }); + e820.push(E820Entry { + addr: 0x100000, + size: ram_end - 0x100000, + type_: E820_RAM, + }); + e820.push(E820Entry { + addr: ram_end, + size: reserved_top, + type_: E820_RESERVED, + }); } else { // Large VM: split RAM around the MMIO hole // Region below hole: 1MB to 3GB - e820.push(E820Entry { addr: 0x100000, size: mmio_hole_start - 0x100000, type_: E820_RAM }); + e820.push(E820Entry { + addr: 0x100000, + size: mmio_hole_start - 0x100000, + type_: E820_RAM, + }); // Region above hole: 4GB to 4GB + overflow let above_hole = ram_size - 
mmio_hole_start; - e820.push(E820Entry { addr: mmio_hole_end, size: above_hole, type_: E820_RAM }); + e820.push(E820Entry { + addr: mmio_hole_end, + size: above_hole, + type_: E820_RAM, + }); } // Reserved: IOAPIC/LAPIC MMIO - e820.push(E820Entry { addr: 0xFEFFC000, size: 0x4000, type_: E820_RESERVED }); + e820.push(E820Entry { + addr: 0xFEFFC000, + size: 0x4000, + type_: E820_RESERVED, + }); // Reserved: High BIOS ROM - e820.push(E820Entry { addr: 0xFFFC0000, size: 0x40000, type_: E820_RESERVED }); + e820.push(E820Entry { + addr: 0xFFFC0000, + size: 0x40000, + type_: E820_RESERVED, + }); let e820_entries = &e820; @@ -261,28 +292,17 @@ fn load_elf(mem: &GuestMem, data: &[u8]) -> Result { fn mmap_kernel(path: &str) -> Result> { use std::os::unix::io::AsRawFd; - let file = std::fs::File::open(path) - .with_context(|| format!("Failed to open kernel: {path}"))?; + let file = std::fs::File::open(path).with_context(|| format!("Failed to open kernel: {path}"))?; let len = file.metadata()?.len() as usize; if len == 0 { anyhow::bail!("Kernel file is empty: {path}"); } let fd = file.as_raw_fd(); - let ptr = unsafe { - libc::mmap( - std::ptr::null_mut(), - len, - libc::PROT_READ, - libc::MAP_PRIVATE, - fd, - 0, - ) - }; + let ptr = unsafe { libc::mmap(std::ptr::null_mut(), len, libc::PROT_READ, libc::MAP_PRIVATE, fd, 0) }; if ptr == libc::MAP_FAILED { // Fall back to regular read - return std::fs::read(path) - .with_context(|| format!("Failed to read kernel: {path}")); + return std::fs::read(path).with_context(|| format!("Failed to read kernel: {path}")); } // Advise sequential access + willneed — triggers async readahead @@ -303,16 +323,11 @@ fn mmap_kernel(path: &str) -> Result> { /// Load initrd into guest memory at a high address. 
fn load_initrd(mem: &GuestMem, path: &str) -> Result<()> { - let initrd_data = std::fs::read(path) - .with_context(|| format!("Failed to read initrd: {path}"))?; + let initrd_data = std::fs::read(path).with_context(|| format!("Failed to read initrd: {path}"))?; // Place initrd at a high address below the MMIO hole (or below memory top for small VMs). // For large VMs with a hole, place it below 3GB to keep it in the first memory slot. - let initrd_top = if mem.has_hole() { - mem.hole_start() - } else { - mem.size() - }; + let initrd_top = if mem.has_hole() { mem.hole_start() } else { mem.size() }; let initrd_addr = initrd_top - initrd_data.len() as u64; let initrd_addr = initrd_addr & !0xFFF; // page-align down diff --git a/src/boot/template.rs b/src/boot/template.rs index a27c157..a2644ca 100644 --- a/src/boot/template.rs +++ b/src/boot/template.rs @@ -48,17 +48,24 @@ pub struct VcpuState { impl VcpuState { pub fn empty() -> Self { Self { - regs: Vec::new(), sregs: Vec::new(), lapic: Vec::new(), - fpu: Vec::new(), xsave: Vec::new(), xcrs: Vec::new(), - mp_state: Vec::new(), vcpu_events: Vec::new(), - debug_regs: Vec::new(), msrs: Vec::new(), tsc_khz: 0, + regs: Vec::new(), + sregs: Vec::new(), + lapic: Vec::new(), + fpu: Vec::new(), + xsave: Vec::new(), + xcrs: Vec::new(), + mp_state: Vec::new(), + vcpu_events: Vec::new(), + debug_regs: Vec::new(), + msrs: Vec::new(), + tsc_khz: 0, cpuid: Vec::new(), } } } /// Serialized device state for template restoration. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Default)] pub struct DeviceStates { /// Serial port state (if any). pub serial: Option>, @@ -75,18 +82,6 @@ pub struct DeviceStates { pub pit: Vec, } -impl Default for DeviceStates { - fn default() -> Self { - Self { - serial: None, - virtio_configs: HashMap::new(), - transports: Vec::new(), - irqchip: Vec::new(), - pit: Vec::new(), - } - } -} - /// A template snapshot capturing a VM's full state for CoW forking. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct TemplateSnapshot { @@ -128,22 +123,24 @@ impl TemplateSnapshot { // Verify the memory file exists if !snapshot.memory_file.exists() { - anyhow::bail!( - "Template memory file not found: {}", - snapshot.memory_file.display() - ); + anyhow::bail!("Template memory file not found: {}", snapshot.memory_file.display()); } // Verify memory file integrity if verify { - let mem_data = std::fs::read(&snapshot.memory_file) - .with_context(|| format!("Failed to read template memory for verification: {}", snapshot.memory_file.display()))?; + let mem_data = std::fs::read(&snapshot.memory_file).with_context(|| { + format!( + "Failed to read template memory for verification: {}", + snapshot.memory_file.display() + ) + })?; let actual_hash = crate::boot::measured::compute_sha256(&mem_data); let actual_hex: String = actual_hash.iter().map(|b| format!("{b:02x}")).collect(); if actual_hex != snapshot.memory_hash { anyhow::bail!( "Template integrity check failed: expected {}, got {}", - snapshot.memory_hash, actual_hex + snapshot.memory_hash, + actual_hex ); } tracing::info!("Template integrity verified (SHA-256 matches)"); @@ -165,8 +162,7 @@ impl TemplateSnapshot { .with_context(|| format!("Failed to create template dir: {template_dir}"))?; let meta_path = Path::new(template_dir).join(TEMPLATE_METADATA_FILE); - let json = serde_json::to_string_pretty(self) - .context("Failed to serialize template metadata")?; + let json = serde_json::to_string_pretty(self).context("Failed to serialize template metadata")?; std::fs::write(&meta_path, json) .with_context(|| format!("Failed to write template metadata: {}", meta_path.display()))?; @@ -197,8 +193,7 @@ pub fn save_template( let memory_size = guest_mem.size(); let mem_file_path = Path::new(output_dir).join("memory.raw"); - std::fs::create_dir_all(output_dir) - .with_context(|| format!("Failed to create output dir: {output_dir}"))?; + 
std::fs::create_dir_all(output_dir).with_context(|| format!("Failed to create output dir: {output_dir}"))?; // Dump raw guest memory to file let mem_data = guest_mem.read_at(0, memory_size as usize)?; @@ -245,12 +240,8 @@ pub fn save_template( pub fn fork_from_template(template: &TemplateSnapshot) -> Result { use std::os::unix::io::AsRawFd; - let mem_file = std::fs::File::open(&template.memory_file).with_context(|| { - format!( - "Failed to open template memory: {}", - template.memory_file.display() - ) - })?; + let mem_file = std::fs::File::open(&template.memory_file) + .with_context(|| format!("Failed to open template memory: {}", template.memory_file.display()))?; let fd = mem_file.as_raw_fd(); let size = template.memory_size as usize; @@ -294,7 +285,10 @@ pub fn fork_from_template(template: &TemplateSnapshot) -> Result mmio_hole_start { Ok(crate::memory::GuestMem::from_raw_with_hole( - ptr as *mut u8, template.memory_size, mmio_hole_start, mmio_hole_end, + ptr as *mut u8, + template.memory_size, + mmio_hole_start, + mmio_hole_end, )) } else { Ok(crate::memory::GuestMem::from_raw(ptr as *mut u8, template.memory_size)) @@ -326,12 +320,10 @@ const INCREMENTAL_METADATA_FILE: &str = "incremental.json"; impl IncrementalSnapshot { /// Save incremental snapshot metadata. 
pub fn save_metadata(&self, output_dir: &str) -> Result<()> { - std::fs::create_dir_all(output_dir) - .with_context(|| format!("Failed to create output dir: {output_dir}"))?; + std::fs::create_dir_all(output_dir).with_context(|| format!("Failed to create output dir: {output_dir}"))?; let meta_path = Path::new(output_dir).join(INCREMENTAL_METADATA_FILE); - let json = serde_json::to_string_pretty(self) - .context("Failed to serialize incremental snapshot metadata")?; + let json = serde_json::to_string_pretty(self).context("Failed to serialize incremental snapshot metadata")?; std::fs::write(&meta_path, json) .with_context(|| format!("Failed to write metadata: {}", meta_path.display()))?; @@ -344,8 +336,8 @@ impl IncrementalSnapshot { let meta_path = Path::new(snapshot_dir).join(INCREMENTAL_METADATA_FILE); let meta_data = std::fs::read_to_string(&meta_path) .with_context(|| format!("Failed to read incremental metadata: {}", meta_path.display()))?; - let snapshot: IncrementalSnapshot = serde_json::from_str(&meta_data) - .context("Failed to parse incremental snapshot metadata")?; + let snapshot: IncrementalSnapshot = + serde_json::from_str(&meta_data).context("Failed to parse incremental snapshot metadata")?; Ok(snapshot) } } @@ -372,8 +364,7 @@ pub fn save_incremental( let (bitmap, dirty_data) = tracker.collect_dirty_pages(vm_fd, guest_mem.as_ptr() as *const u8, mem_size)?; - std::fs::create_dir_all(output_dir) - .with_context(|| format!("Failed to create output dir: {output_dir}"))?; + std::fs::create_dir_all(output_dir).with_context(|| format!("Failed to create output dir: {output_dir}"))?; let dirty_file = Path::new(output_dir).join("dirty_pages.raw"); std::fs::write(&dirty_file, &dirty_data) @@ -428,12 +419,8 @@ impl TemplatePool { pub fn get_or_load(&mut self, runtime_type: &str) -> Result<&TemplateSnapshot> { if !self.templates.contains_key(runtime_type) { let template_dir = self.base_dir.join(runtime_type); - let template = TemplateSnapshot::load( - template_dir 
- .to_str() - .context("Invalid template directory path")?, - true, - )?; + let template = + TemplateSnapshot::load(template_dir.to_str().context("Invalid template directory path")?, true)?; self.templates.insert(runtime_type.to_string(), template); } Ok(self.templates.get(runtime_type).unwrap()) diff --git a/src/compat.rs b/src/compat.rs new file mode 100644 index 0000000..6b35a4a --- /dev/null +++ b/src/compat.rs @@ -0,0 +1,31 @@ +//! Shared compatibility helpers for cross-libc (glibc vs musl) builds. + +/// Wrapper for `libc::pthread_t` to implement `Send`. +/// +/// On musl, `pthread_t` is `*mut c_void` which doesn't implement `Send`. +/// This wrapper allows safely sending thread IDs through channels. +pub(crate) struct SendPthreadT(pub libc::pthread_t); +unsafe impl Send for SendPthreadT {} + +/// Platform-correct ioctl request type. +/// glibc uses `c_ulong`, musl uses `c_int`. +#[cfg(target_env = "gnu")] +pub(crate) type IoctlReq = libc::c_ulong; +#[cfg(not(target_env = "gnu"))] +pub(crate) type IoctlReq = libc::c_int; + +/// Convert a u32 ioctl number to the platform's IoctlReq type. +/// On musl (c_int), this reinterprets the bits (safe for ioctl numbers +/// where the high bit is set by _IOW/_IOR macros). +#[inline(always)] +#[allow(overflowing_literals)] +pub(crate) fn ioctl_req(n: u32) -> IoctlReq { + #[cfg(target_env = "gnu")] + { + n as IoctlReq + } + #[cfg(not(target_env = "gnu"))] + { + n as i32 + } +} diff --git a/src/control/daemon.rs b/src/control/daemon.rs index eeefa2a..5a6294e 100644 --- a/src/control/daemon.rs +++ b/src/control/daemon.rs @@ -5,12 +5,13 @@ //! - DestroyVm sends Shutdown to the per-VM control socket //! - Monitors child processes for unexpected exits -use std::sync::Arc; use anyhow::Result; +use std::sync::Arc; /// Spawn a new VM as a child process. /// /// Runs `clone run` with the given parameters and returns the child PID. 
+#[allow(clippy::too_many_arguments)] pub fn spawn_vm( kernel: &str, initrd: Option<&str>, @@ -27,15 +28,18 @@ pub fn spawn_vm( jail: Option<&str>, cid: Option, ) -> Result { - let exe = std::env::current_exe() - .unwrap_or_else(|_| std::path::PathBuf::from("clone")); + let exe = std::env::current_exe().unwrap_or_else(|_| std::path::PathBuf::from("clone")); let mut cmd = std::process::Command::new(&exe); cmd.arg("run") - .arg("--kernel").arg(kernel) - .arg("--cmdline").arg(cmdline) - .arg("--mem-mb").arg(mem_mb.to_string()) - .arg("--vcpus").arg(vcpus.to_string()); + .arg("--kernel") + .arg(kernel) + .arg("--cmdline") + .arg(cmdline) + .arg("--mem-mb") + .arg(mem_mb.to_string()) + .arg("--vcpus") + .arg(vcpus.to_string()); if let Some(i) = initrd { cmd.arg("--initrd").arg(i); @@ -73,7 +77,8 @@ pub fn spawn_vm( cmd.stdout(std::process::Stdio::inherit()); cmd.stderr(std::process::Stdio::inherit()); - let child = cmd.spawn() + let child = cmd + .spawn() .map_err(|e| anyhow::anyhow!("Failed to spawn VM process: {e}"))?; let pid = child.id(); @@ -99,12 +104,10 @@ pub fn spawn_fork( vcpus: Option, overlay_size: Option<&str>, ) -> Result { - let exe = std::env::current_exe() - .unwrap_or_else(|_| std::path::PathBuf::from("clone")); + let exe = std::env::current_exe().unwrap_or_else(|_| std::path::PathBuf::from("clone")); let mut cmd = std::process::Command::new(&exe); - cmd.arg("fork") - .arg("--template").arg(template_path); + cmd.arg("fork").arg("--template").arg(template_path); if net { cmd.arg("--net"); @@ -129,7 +132,8 @@ pub fn spawn_fork( cmd.stdout(std::process::Stdio::inherit()); cmd.stderr(std::process::Stdio::inherit()); - let child = cmd.spawn() + let child = cmd + .spawn() .map_err(|e| anyhow::anyhow!("Failed to spawn fork process: {e}"))?; let pid = child.id(); @@ -157,8 +161,7 @@ pub fn snapshot_vm(control_socket: &str, output_path: &str) -> Result Result<()> { let request = crate::control::protocol::Request::Shutdown; 
crate::control::protocol::write_frame_sync(&mut writer, &request)?; - let _response: crate::control::protocol::Response = - crate::control::protocol::read_frame_sync(&mut reader)?; + let _response: crate::control::protocol::Response = crate::control::protocol::read_frame_sync(&mut reader)?; Ok(()) } @@ -197,17 +199,13 @@ pub fn query_vm_status(control_socket: &str) -> Result SockFilter { - SockFilter { - code, - jt: 0, - jf: 0, - k, - } + SockFilter { code, jt: 0, jf: 0, k } } fn bpf_jump(code: u16, k: u32, jt: u8, jf: u8) -> SockFilter { @@ -220,12 +215,7 @@ mod bpf { // jf (no match) should fall through: 0 for (i, &syscall_nr) in allowed.iter().enumerate() { let jt = (n - i) as u8; // jump forward to RET ALLOW - prog.push(bpf_jump( - BPF_JMP | BPF_JEQ | BPF_K, - syscall_nr as u32, - jt, - 0, - )); + prog.push(bpf_jump(BPF_JMP | BPF_JEQ | BPF_K, syscall_nr as u32, jt, 0)); } // Default: kill @@ -275,13 +265,9 @@ mod bpf { pub fn apply_seccomp_filter(policy: &SeccompPolicy) -> Result<()> { #[cfg(target_os = "linux")] { - tracing::info!( - allowed_syscalls = policy.allowed.len(), - "Installing seccomp BPF filter" - ); + tracing::info!(allowed_syscalls = policy.allowed.len(), "Installing seccomp BPF filter"); let filter = bpf::build_filter(&policy.allowed); - bpf::install_filter(&filter) - .map_err(|e| anyhow::anyhow!("Failed to install seccomp filter: {e}"))?; + bpf::install_filter(&filter).map_err(|e| anyhow::anyhow!("Failed to install seccomp filter: {e}"))?; tracing::info!("Seccomp filter installed"); } #[cfg(not(target_os = "linux"))] @@ -319,29 +305,19 @@ fn apply_jail_linux(chroot_dir: &str, policy: &SeccompPolicy) -> Result<()> { let unshare_flags = libc::CLONE_NEWNS | libc::CLONE_NEWPID | libc::CLONE_NEWNET; let ret = unsafe { libc::unshare(unshare_flags) }; if ret != 0 { - return Err(anyhow::anyhow!( - "unshare failed: {}", - std::io::Error::last_os_error() - )); + return Err(anyhow::anyhow!("unshare failed: {}", std::io::Error::last_os_error())); } 
tracing::info!("Created new namespaces (mount, pid, net)"); // 2. Chroot to minimal directory - let c_dir = CString::new(chroot_dir) - .map_err(|e| anyhow::anyhow!("Invalid chroot path: {e}"))?; + let c_dir = CString::new(chroot_dir).map_err(|e| anyhow::anyhow!("Invalid chroot path: {e}"))?; let ret = unsafe { libc::chroot(c_dir.as_ptr()) }; if ret != 0 { - return Err(anyhow::anyhow!( - "chroot failed: {}", - std::io::Error::last_os_error() - )); + return Err(anyhow::anyhow!("chroot failed: {}", std::io::Error::last_os_error())); } - let ret = unsafe { libc::chdir(b"/\0".as_ptr() as *const libc::c_char) }; + let ret = unsafe { libc::chdir(c"/".as_ptr()) }; if ret != 0 { - return Err(anyhow::anyhow!( - "chdir failed: {}", - std::io::Error::last_os_error() - )); + return Err(anyhow::anyhow!("chdir failed: {}", std::io::Error::last_os_error())); } tracing::info!("Chrooted to {chroot_dir}"); diff --git a/src/control/metrics.rs b/src/control/metrics.rs index 7ac598a..bc188a7 100644 --- a/src/control/metrics.rs +++ b/src/control/metrics.rs @@ -118,8 +118,7 @@ fn collect_host_metrics_linux() -> HostMetrics { #[cfg(target_os = "linux")] fn parse_meminfo_kb(s: &str) -> u64 { // Format: " 12345 kB" - s.trim() - .split_whitespace() + s.split_whitespace() .next() .and_then(|v| v.parse::().ok()) .unwrap_or(0) @@ -127,9 +126,7 @@ fn parse_meminfo_kb(s: &str) -> u64 { #[cfg(target_os = "linux")] fn read_sysfs_u64(path: &str) -> Option { - std::fs::read_to_string(path) - .ok() - .and_then(|s| s.trim().parse().ok()) + std::fs::read_to_string(path).ok().and_then(|s| s.trim().parse().ok()) } // --------------------------------------------------------------------------- @@ -267,10 +264,7 @@ impl MetricsCollector { /// Update or insert metrics for a VM. 
pub fn update(&self, vm_id: &str, metrics: VmMetrics) { - self.vm_metrics - .lock() - .unwrap() - .insert(vm_id.to_string(), metrics); + self.vm_metrics.lock().unwrap().insert(vm_id.to_string(), metrics); } /// Remove metrics for a destroyed VM. @@ -304,10 +298,7 @@ pub struct MetricsCollectorHandle { impl MetricsCollectorHandle { pub fn update(&self, vm_id: &str, metrics: VmMetrics) { - self.vm_metrics - .lock() - .unwrap() - .insert(vm_id.to_string(), metrics); + self.vm_metrics.lock().unwrap().insert(vm_id.to_string(), metrics); } pub fn remove(&self, vm_id: &str) { @@ -403,8 +394,20 @@ mod tests { #[test] fn test_metrics_collector_all() { let collector = MetricsCollector::new(); - collector.update("vm-1", VmMetrics { private_rss_bytes: 1, ..Default::default() }); - collector.update("vm-2", VmMetrics { private_rss_bytes: 2, ..Default::default() }); + collector.update( + "vm-1", + VmMetrics { + private_rss_bytes: 1, + ..Default::default() + }, + ); + collector.update( + "vm-2", + VmMetrics { + private_rss_bytes: 2, + ..Default::default() + }, + ); let all = collector.all(); assert_eq!(all.len(), 2); @@ -415,8 +418,20 @@ mod tests { #[test] fn test_metrics_collector_update_overwrites() { let collector = MetricsCollector::new(); - collector.update("vm-1", VmMetrics { private_rss_bytes: 100, ..Default::default() }); - collector.update("vm-1", VmMetrics { private_rss_bytes: 200, ..Default::default() }); + collector.update( + "vm-1", + VmMetrics { + private_rss_bytes: 100, + ..Default::default() + }, + ); + collector.update( + "vm-1", + VmMetrics { + private_rss_bytes: 200, + ..Default::default() + }, + ); assert_eq!(collector.get("vm-1").unwrap().private_rss_bytes, 200); } @@ -426,7 +441,13 @@ mod tests { let collector = MetricsCollector::new(); let handle = collector.handle(); - handle.update("vm-1", VmMetrics { vcpu_time_ns: 999, ..Default::default() }); + handle.update( + "vm-1", + VmMetrics { + vcpu_time_ns: 999, + ..Default::default() + }, + ); 
assert_eq!(collector.get("vm-1").unwrap().vcpu_time_ns, 999); // Handle can also read @@ -457,11 +478,19 @@ mod tests { fn test_event_logger_ring_buffer_overflow() { let logger = EventLogger::new(3); - logger.log(VmEvent::Boot { vm_id: "vm-1".to_string() }); - logger.log(VmEvent::Boot { vm_id: "vm-2".to_string() }); - logger.log(VmEvent::Boot { vm_id: "vm-3".to_string() }); + logger.log(VmEvent::Boot { + vm_id: "vm-1".to_string(), + }); + logger.log(VmEvent::Boot { + vm_id: "vm-2".to_string(), + }); + logger.log(VmEvent::Boot { + vm_id: "vm-3".to_string(), + }); // Buffer is full now (capacity=3) - logger.log(VmEvent::Boot { vm_id: "vm-4".to_string() }); + logger.log(VmEvent::Boot { + vm_id: "vm-4".to_string(), + }); let events = logger.snapshot(); assert_eq!(events.len(), 3); @@ -480,8 +509,12 @@ mod tests { #[test] fn test_event_logger_drain() { let logger = EventLogger::new(100); - logger.log(VmEvent::Boot { vm_id: "vm-1".to_string() }); - logger.log(VmEvent::Shutdown { vm_id: "vm-1".to_string() }); + logger.log(VmEvent::Boot { + vm_id: "vm-1".to_string(), + }); + logger.log(VmEvent::Shutdown { + vm_id: "vm-1".to_string(), + }); let events = logger.drain(); assert_eq!(events.len(), 2); @@ -494,8 +527,12 @@ mod tests { #[test] fn test_event_logger_timestamp_monotonic() { let logger = EventLogger::new(100); - logger.log(VmEvent::Boot { vm_id: "vm-1".to_string() }); - logger.log(VmEvent::Shutdown { vm_id: "vm-1".to_string() }); + logger.log(VmEvent::Boot { + vm_id: "vm-1".to_string(), + }); + logger.log(VmEvent::Shutdown { + vm_id: "vm-1".to_string(), + }); let events = logger.snapshot(); assert!(events[1].timestamp_ms >= events[0].timestamp_ms); @@ -527,13 +564,31 @@ mod tests { let logger = EventLogger::new(100); logger.log(VmEvent::Boot { vm_id: "v".to_string() }); logger.log(VmEvent::Shutdown { vm_id: "v".to_string() }); - logger.log(VmEvent::BalloonInflate { vm_id: "v".to_string(), pages: 10 }); - logger.log(VmEvent::BalloonDeflate { vm_id: "v".to_string(), 
pages: 5 }); + logger.log(VmEvent::BalloonInflate { + vm_id: "v".to_string(), + pages: 10, + }); + logger.log(VmEvent::BalloonDeflate { + vm_id: "v".to_string(), + pages: 5, + }); logger.log(VmEvent::OomKill { vm_id: "v".to_string() }); - logger.log(VmEvent::VcpuPark { vm_id: "v".to_string(), vcpu_id: 0 }); - logger.log(VmEvent::VcpuWake { vm_id: "v".to_string(), vcpu_id: 0 }); - logger.log(VmEvent::TemplateHit { vm_id: "v".to_string(), template: "t".to_string() }); - logger.log(VmEvent::TemplateMiss { vm_id: "v".to_string(), template: "t".to_string() }); + logger.log(VmEvent::VcpuPark { + vm_id: "v".to_string(), + vcpu_id: 0, + }); + logger.log(VmEvent::VcpuWake { + vm_id: "v".to_string(), + vcpu_id: 0, + }); + logger.log(VmEvent::TemplateHit { + vm_id: "v".to_string(), + template: "t".to_string(), + }); + logger.log(VmEvent::TemplateMiss { + vm_id: "v".to_string(), + template: "t".to_string(), + }); assert_eq!(logger.snapshot().len(), 9); } @@ -591,7 +646,9 @@ mod tests { fn test_timestamped_event_serialization() { let ts = TimestampedEvent { timestamp_ms: 12345, - event: VmEvent::Boot { vm_id: "vm-1".to_string() }, + event: VmEvent::Boot { + vm_id: "vm-1".to_string(), + }, }; let json = serde_json::to_string(&ts).unwrap(); assert!(json.contains("12345")); diff --git a/src/control/mod.rs b/src/control/mod.rs index cbce699..b5b8549 100644 --- a/src/control/mod.rs +++ b/src/control/mod.rs @@ -18,8 +18,8 @@ use anyhow::Result; use tokio::net::{UnixListener, UnixStream}; use tokio::sync::Mutex; -use protocol::{read_frame, write_frame, Request, Response, ResponseBody, VmSummary}; use metrics::{EventLogger, MetricsCollector, VmEvent, VmMetrics}; +use protocol::{read_frame, write_frame, Request, Response, ResponseBody, VmSummary}; /// Default socket path for the control plane. 
pub const DEFAULT_SOCKET_PATH: &str = "/run/clone/control.sock"; @@ -148,10 +148,7 @@ impl Drop for ControlServer { } } -async fn handle_connection( - stream: UnixStream, - state: Arc>, -) -> Result<()> { +async fn handle_connection(stream: UnixStream, state: Arc>) -> Result<()> { let (mut reader, mut writer) = stream.into_split(); loop { diff --git a/src/control/protocol.rs b/src/control/protocol.rs index 40e2777..ab529b9 100644 --- a/src/control/protocol.rs +++ b/src/control/protocol.rs @@ -381,10 +381,7 @@ mod tests { let json = serde_json::to_string(&req).unwrap(); let deserialized: Request = serde_json::from_str(&json).unwrap(); match deserialized { - Request::Snapshot { - vm_id, - output_path, - } => { + Request::Snapshot { vm_id, output_path } => { assert_eq!(vm_id, "vm-0001"); assert_eq!(output_path, "/tmp/snap"); } @@ -436,7 +433,9 @@ mod tests { let json = serde_json::to_string(&resp).unwrap(); let deserialized: Response = serde_json::from_str(&json).unwrap(); match deserialized { - Response::Ok { body: ResponseBody::Ack {} } => {} + Response::Ok { + body: ResponseBody::Ack {}, + } => {} _ => panic!("Wrong variant"), } } @@ -523,10 +522,7 @@ mod tests { match deserialized { Request::CreateVm { - kernel, - mem_mb, - vcpus, - .. + kernel, mem_mb, vcpus, .. 
} => { assert_eq!(kernel, "/kernel"); assert_eq!(mem_mb, 128); diff --git a/src/control/sync_server.rs b/src/control/sync_server.rs index 86ca122..9453a5d 100644 --- a/src/control/sync_server.rs +++ b/src/control/sync_server.rs @@ -66,8 +66,7 @@ pub fn start_control_socket(vm_handle: Arc) -> Result { // Remove stale socket let _ = std::fs::remove_file(&path); - let listener = UnixListener::bind(&path) - .with_context(|| format!("Failed to bind control socket: {path}"))?; + let listener = UnixListener::bind(&path).with_context(|| format!("Failed to bind control socket: {path}"))?; // Set a timeout so the accept loop can check for shutdown listener.set_nonblocking(false)?; @@ -88,10 +87,7 @@ pub fn start_control_socket(vm_handle: Arc) -> Result { // Use a short timeout via SO_RCVTIMEO for the accept unsafe { - let tv = libc::timeval { - tv_sec: 1, - tv_usec: 0, - }; + let tv = libc::timeval { tv_sec: 1, tv_usec: 0 }; libc::setsockopt( std::os::unix::io::AsRawFd::as_raw_fd(&listener), libc::SOL_SOCKET, @@ -109,9 +105,7 @@ pub fn start_control_socket(vm_handle: Arc) -> Result { } Err(e) => { // Timeout or interrupted — just loop and check shutdown - if e.kind() != std::io::ErrorKind::WouldBlock - && e.kind() != std::io::ErrorKind::TimedOut - { + if e.kind() != std::io::ErrorKind::WouldBlock && e.kind() != std::io::ErrorKind::TimedOut { // On Linux, SO_RCVTIMEO on accept returns EAGAIN if e.raw_os_error() != Some(libc::EAGAIN) { tracing::error!("Control socket accept error: {e}"); @@ -130,10 +124,7 @@ pub fn start_control_socket(vm_handle: Arc) -> Result { Ok(path) } -fn handle_connection( - stream: std::os::unix::net::UnixStream, - vm_handle: &VmHandle, -) -> Result<()> { +fn handle_connection(stream: std::os::unix::net::UnixStream, vm_handle: &VmHandle) -> Result<()> { use std::io::{BufReader, BufWriter}; let mut reader = BufReader::new(&stream); @@ -160,9 +151,10 @@ fn handle_connection( fn dispatch(req: Request, vm: &VmHandle) -> Response { match req { 
Request::Snapshot { output_path, .. } => handle_snapshot(vm, &output_path), - Request::IncrementalSnapshot { output_path, base_template } => { - handle_incremental_snapshot(vm, &output_path, &base_template) - } + Request::IncrementalSnapshot { + output_path, + base_template, + } => handle_incremental_snapshot(vm, &output_path, &base_template), Request::Pause => handle_pause(vm), Request::Resume => handle_resume(vm), Request::Shutdown => handle_shutdown(vm), @@ -178,24 +170,22 @@ fn dispatch(req: Request, vm: &VmHandle) -> Response { vcpus: vm.num_vcpus, }, }, - Request::Exec { command, args } => { - match &vm.agent_state { - Some(agent_state) => { - match agent_state.send_exec(&command, &args) { - Ok((exit_code, stdout, stderr)) => Response::Ok { - body: ResponseBody::ExecResult { exit_code, stdout, stderr }, - }, - Err(msg) => Response::Error { message: msg }, - } - } - None => Response::Error { - message: "Guest agent not available".to_string(), + Request::Exec { command, args } => match &vm.agent_state { + Some(agent_state) => match agent_state.send_exec(&command, &args) { + Ok((exit_code, stdout, stderr)) => Response::Ok { + body: ResponseBody::ExecResult { + exit_code, + stdout, + stderr, + }, }, - } - } - Request::SetBalloon { target_mb } => { - handle_set_balloon(vm, target_mb) - } + Err(msg) => Response::Error { message: msg }, + }, + None => Response::Error { + message: "Guest agent not available".to_string(), + }, + }, + Request::SetBalloon { target_mb } => handle_set_balloon(vm, target_mb), _ => Response::Error { message: "Unsupported command on per-VM control socket".to_string(), }, @@ -234,11 +224,15 @@ fn pause_vcpus(vm: &VmHandle) -> Result<(), String> { // Kick each vCPU out of KVM_RUN (send twice to handle races) for &tid in &vm.vcpu_threads { - unsafe { libc::pthread_kill(tid, libc::SIGUSR1); } + unsafe { + libc::pthread_kill(tid, libc::SIGUSR1); + } } std::thread::sleep(std::time::Duration::from_millis(50)); for &tid in &vm.vcpu_threads { - 
unsafe { libc::pthread_kill(tid, libc::SIGUSR1); } + unsafe { + libc::pthread_kill(tid, libc::SIGUSR1); + } } // Wait for all vCPUs to park @@ -283,9 +277,7 @@ fn handle_set_balloon(vm: &VmHandle, target_mb: u32) -> Response { let mut bus = vm.mmio_bus.lock().unwrap(); // Balloon is device index 0 (registered first in both boot and fork paths). if let Some(transport) = bus.transport_mut(0) { - if let Some(balloon) = transport.device_mut().as_any_mut() - .downcast_mut::() - { + if let Some(balloon) = transport.device_mut().as_any_mut().downcast_mut::() { // Calculate pages to reclaim: template_mb - target_mb let template_mb = (vm.mem_size / (1024 * 1024)) as u32; let reclaim_pages = if target_mb < template_mb { @@ -295,7 +287,9 @@ fn handle_set_balloon(vm: &VmHandle, target_mb: u32) -> Response { }; balloon.update_target(reclaim_pages); } else { - return Response::Error { message: "balloon device not found".to_string() }; + return Response::Error { + message: "balloon device not found".to_string(), + }; } // Raise config-change interrupt so the guest driver sees the new target. 
transport.raise_config_change_interrupt(); @@ -306,9 +300,13 @@ fn handle_set_balloon(vm: &VmHandle, target_mb: u32) -> Response { let _ = vm_fd.set_irq_line(irq, false); } tracing::info!(target_mb, "balloon target set via control socket"); - return Response::Ok { body: ResponseBody::Ack {} }; + return Response::Ok { + body: ResponseBody::Ack {}, + }; + } + Response::Error { + message: "balloon device not found".to_string(), } - Response::Error { message: "balloon device not found".to_string() } } fn handle_pause(vm: &VmHandle) -> Response { @@ -397,7 +395,8 @@ fn handle_incremental_snapshot(vm: &VmHandle, output_path: &str, base_template: let device_states = { let bus = vm.mmio_bus.lock().unwrap(); let transport_states = bus.snapshot_all(); - let transports: Vec> = transport_states.iter() + let transports: Vec> = transport_states + .iter() .map(|s| serde_json::to_vec(s).unwrap_or_default()) .collect(); DeviceStates { @@ -411,9 +410,7 @@ fn handle_incremental_snapshot(vm: &VmHandle, output_path: &str, base_template: // 4. 
Save incremental snapshot (only dirty pages) let result = { - let guest_mem = unsafe { - crate::memory::GuestMem::borrow_raw(vm.guest_memory, vm.mem_size) - }; + let guest_mem = unsafe { crate::memory::GuestMem::borrow_raw(vm.guest_memory, vm.mem_size) }; crate::boot::template::save_incremental( &guest_mem, vm_fd, @@ -469,7 +466,8 @@ fn handle_snapshot(vm: &VmHandle, output_path: &str) -> Response { let device_states = { let bus = vm.mmio_bus.lock().unwrap(); let transport_states = bus.snapshot_all(); - let transports: Vec> = transport_states.iter() + let transports: Vec> = transport_states + .iter() .map(|s| serde_json::to_vec(s).unwrap_or_default()) .collect(); @@ -477,17 +475,20 @@ fn handle_snapshot(vm: &VmHandle, output_path: &str) -> Response { let mut irqchip_states = Vec::new(); let mut pit_bytes = Vec::new(); if let Some(ref vm_fd) = vm.vm_fd { - use kvm_bindings::{KVM_IRQCHIP_PIC_MASTER, KVM_IRQCHIP_PIC_SLAVE, KVM_IRQCHIP_IOAPIC, kvm_irqchip}; + use kvm_bindings::{kvm_irqchip, KVM_IRQCHIP_IOAPIC, KVM_IRQCHIP_PIC_MASTER, KVM_IRQCHIP_PIC_SLAVE}; for chip_id in [KVM_IRQCHIP_PIC_MASTER, KVM_IRQCHIP_PIC_SLAVE, KVM_IRQCHIP_IOAPIC] { - let mut chip = kvm_irqchip::default(); - chip.chip_id = chip_id; + let mut chip = kvm_irqchip { + chip_id, + ..Default::default() + }; match vm_fd.get_irqchip(&mut chip) { Ok(()) => { let bytes = unsafe { std::slice::from_raw_parts( &chip as *const kvm_irqchip as *const u8, std::mem::size_of::(), - ).to_vec() + ) + .to_vec() }; irqchip_states.push(bytes); } @@ -500,7 +501,8 @@ fn handle_snapshot(vm: &VmHandle, output_path: &str) -> Response { std::slice::from_raw_parts( &pit_state as *const kvm_bindings::kvm_pit_state2 as *const u8, std::mem::size_of::(), - ).to_vec() + ) + .to_vec() }; } Err(e) => tracing::warn!("Failed to save PIT state: {e}"), @@ -517,15 +519,15 @@ fn handle_snapshot(vm: &VmHandle, output_path: &str) -> Response { }; // 3. 
Save kvmclock and template - let clock_ns = vm.vm_fd.as_ref() + let clock_ns = vm + .vm_fd + .as_ref() .and_then(|fd| fd.get_clock().ok()) .map(|c| c.clock) .unwrap_or(0); let result = { - let guest_mem = unsafe { - crate::memory::GuestMem::borrow_raw(vm.guest_memory, vm.mem_size) - }; + let guest_mem = unsafe { crate::memory::GuestMem::borrow_raw(vm.guest_memory, vm.mem_size) }; crate::boot::template::save_template( &guest_mem, vcpu_states, diff --git a/src/main.rs b/src/main.rs index 2626b25..295e0d9 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,21 +1,36 @@ #[cfg(target_os = "linux")] -mod vmm; +#[allow(dead_code)] +mod boot; #[cfg(target_os = "linux")] -mod memory; +mod compat; #[cfg(target_os = "linux")] -mod boot; +#[allow(dead_code)] +mod memory; #[cfg(target_os = "linux")] +#[allow(dead_code)] mod migration; #[cfg(target_os = "linux")] +#[allow(dead_code)] mod pci; +#[cfg(target_os = "linux")] +#[allow(dead_code)] +mod vmm; -mod virtio; -mod net; -mod storage; +#[allow(dead_code)] mod control; +#[allow(dead_code)] +mod net; +#[allow(dead_code)] mod rootfs; +#[allow(dead_code)] mod rootfs_create; +// Platform-agnostic modules (unit tests run on all platforms) +#[allow(dead_code)] +mod storage; +#[allow(dead_code)] +mod virtio; + use anyhow::{Context as _, Result}; use clap::{Parser, Subcommand}; @@ -366,6 +381,7 @@ fn main() -> Result<()> { let cli = Cli::parse(); + #[allow(unused_variables)] match cli.command { Commands::Run { kernel, @@ -391,16 +407,15 @@ fn main() -> Result<()> { { // Verify kernel against manifest if provided if let Some(ref manifest_path) = kernel_manifest { - let manifest = boot::measured::load_trusted_hashes(manifest_path) - .context("Failed to load kernel manifest")?; + let manifest = + boot::measured::load_trusted_hashes(manifest_path).context("Failed to load kernel manifest")?; let kernel_name = std::path::Path::new(&kernel) .file_name() .map(|n| n.to_string_lossy().to_string()) .unwrap_or_else(|| kernel.clone()); let verifier = 
boot::measured::verifier_for_kernel(&manifest, &kernel_name) .context("Kernel not found in manifest")?; - verifier.verify_kernel(&kernel) - .context("Kernel verification failed")?; + verifier.verify_kernel(&kernel).context("Kernel verification failed")?; eprintln!("Kernel verified: {kernel}"); } @@ -456,10 +471,7 @@ fn main() -> Result<()> { let initrd_bytes = rootfs::generate_initrd(&init_binary)?; // Write generated initrd to a temp file - let initrd_path = std::env::temp_dir().join(format!( - "clone-initrd-{}.img", - std::process::id() - )); + let initrd_path = std::env::temp_dir().join(format!("clone-initrd-{}.img", std::process::id())); std::fs::write(&initrd_path, &initrd_bytes)?; effective_initrd = Some(initrd_path.to_string_lossy().to_string()); @@ -490,9 +502,7 @@ fn main() -> Result<()> { // Handle --net (auto-setup) vs --tap (manual) let (effective_tap, pre_opened_tap_fd) = if net { match net::auto_setup_network(std::process::id()) { - Ok((tap_name, tap_fd, _guest_ip)) => { - (Some(tap_name), Some(tap_fd)) - } + Ok((tap_name, tap_fd, _guest_ip)) => (Some(tap_name), Some(tap_fd)), Err(e) => { tracing::warn!("Auto network setup failed: {e}"); (None, None) @@ -505,7 +515,10 @@ fn main() -> Result<()> { tracing::info!("Booting VM: kernel={kernel}, mem={mem_mb}MB, vcpus={vcpus}"); // If passthrough devices are present, remove pci=off from cmdline if !passthrough.is_empty() { - cmdline = cmdline.replace(" pci=off", "").replace("pci=off ", "").replace("pci=off", ""); + cmdline = cmdline + .replace(" pci=off", "") + .replace("pci=off ", "") + .replace("pci=off", ""); } let config = vmm::VmConfig { @@ -532,10 +545,7 @@ fn main() -> Result<()> { // Clean up temp initrd if let Some(ref rootfs_image) = rootfs { let _ = rootfs_image; // suppress unused warning - let initrd_path = std::env::temp_dir().join(format!( - "clone-initrd-{}.img", - std::process::id() - )); + let initrd_path = std::env::temp_dir().join(format!("clone-initrd-{}.img", std::process::id())); let 
_ = std::fs::remove_file(initrd_path); } } @@ -571,7 +581,15 @@ fn main() -> Result<()> { } } } - Commands::Migrate { vm_id, to, remote_path, shutdown_after, dry_run, live, port } => { + Commands::Migrate { + vm_id, + to, + remote_path, + shutdown_after, + dry_run, + live, + port, + } => { let pid = resolve_vm_pid(vm_id)?; let socket_path = format!("/tmp/clone-{pid}.sock"); @@ -579,7 +597,7 @@ fn main() -> Result<()> { if live { // Extract hostname from user@host format let host = if to.contains('@') { - to.split('@').last().unwrap_or(&to).to_string() + to.split('@').next_back().unwrap_or(&to).to_string() } else { to.clone() }; @@ -612,9 +630,7 @@ fn main() -> Result<()> { .args([&to, "clone --version"]) .output()?; if !preflight.status.success() { - anyhow::bail!( - "Pre-flight failed: clone not found on {to}. Install clone on the remote host first." - ); + anyhow::bail!("Pre-flight failed: clone not found on {to}. Install clone on the remote host first."); } let remote_version = String::from_utf8_lossy(&preflight.stdout); eprintln!("Remote clone: {}", remote_version.trim()); @@ -624,10 +640,7 @@ fn main() -> Result<()> { .args([&to, &format!("test -w $(dirname {remote_template})")]) .status()?; if !write_check.success() { - anyhow::bail!( - "Pre-flight failed: no write access to {} on {to}", - remote_template - ); + anyhow::bail!("Pre-flight failed: no write access to {} on {to}", remote_template); } eprintln!("Pre-flight passed."); @@ -651,7 +664,9 @@ fn main() -> Result<()> { eprintln!("Step 2/3: Transferring to {to}:{remote_template}..."); let rsync_status = std::process::Command::new("rsync") .args([ - "-a", "--compress", "--progress", + "-a", + "--compress", + "--progress", &format!("{local_template}/"), &format!("{to}:{remote_template}/"), ]) @@ -710,7 +725,20 @@ fn main() -> Result<()> { eprintln!("Migration complete: VM running on {to}"); } - Commands::Fork { template, skip_verify, shared_dir, net, tap, block, seccomp, jail, cid, mem_mb, vcpus, 
overlay_size } => { + Commands::Fork { + template, + skip_verify, + shared_dir, + net, + tap, + block, + seccomp, + jail, + cid, + mem_mb, + vcpus, + overlay_size, + } => { #[cfg(target_os = "linux")] { // Handle --net (auto-setup) vs --tap (manual) @@ -784,7 +812,21 @@ fn main() -> Result<()> { let rt = tokio::runtime::Runtime::new()?; rt.block_on(control::daemon::run_daemon(&socket))?; } - Commands::Create { socket, kernel, mem_mb, vcpus, initrd, rootfs, overlay, shared_dir, block, net, tap, seccomp, jail } => { + Commands::Create { + socket, + kernel, + mem_mb, + vcpus, + initrd, + rootfs, + overlay, + shared_dir, + block, + net, + tap, + seccomp, + jail, + } => { let rt = tokio::runtime::Runtime::new()?; rt.block_on(async { let client = control::ControlClient::new(&socket); @@ -848,16 +890,19 @@ fn main() -> Result<()> { if let Ok(pid) = pid_str.parse::() { let sock_path = format!("/tmp/clone-{pid}.sock"); // Try to query status - let request = control::protocol::Request::VmStatus { - vm_id: pid.to_string(), - }; + let request = control::protocol::Request::VmStatus { vm_id: pid.to_string() }; match send_control_request(&sock_path, &request) { - Ok(control::protocol::Response::Ok { body }) => { - if let control::protocol::ResponseBody::Status { state, pid: vm_pid, vcpus } = body { - // Try to read RSS from /proc/{pid}/status - let rss_mb = read_vm_rss(vm_pid); - found.push((vm_pid, state, vcpus, rss_mb, sock_path)); - } + Ok(control::protocol::Response::Ok { + body: + control::protocol::ResponseBody::Status { + state, + pid: vm_pid, + vcpus, + }, + }) => { + // Try to read RSS from /proc/{pid}/status + let rss_mb = read_vm_rss(vm_pid); + found.push((vm_pid, state, vcpus, rss_mb, sock_path)); } _ => { // Socket exists but not responding, skip @@ -872,7 +917,7 @@ fn main() -> Result<()> { if found.is_empty() { eprintln!("No VMs running."); } else { - println!("{:<10} {:<10} {:<8} {:<10} {}", "PID", "STATE", "VCPUS", "RSS_MB", "SOCKET"); + println!("{:<10} {:<10} 
{:<8} {:<10} SOCKET", "PID", "STATE", "VCPUS", "RSS_MB"); for (pid, state, vcpus, rss_mb, socket_path) in &found { println!("{:<10} {:<10} {:<8} {:<10} {}", pid, state, vcpus, rss_mb, socket_path); } @@ -932,7 +977,9 @@ fn main() -> Result<()> { } } #[cfg(not(unix))] - { None::<()> } + { + None::<()> + } }; // Bidirectional bridge: stdin -> socket, socket -> stdout @@ -992,15 +1039,17 @@ fn main() -> Result<()> { (command[0].clone(), command[1..].to_vec()) }; - let request = control::protocol::Request::Exec { - command: cmd, - args, - }; + let request = control::protocol::Request::Exec { command: cmd, args }; let response = send_control_request(&socket_path, &request)?; match response { control::protocol::Response::Ok { body } => { - if let control::protocol::ResponseBody::ExecResult { exit_code, stdout, stderr } = body { + if let control::protocol::ResponseBody::ExecResult { + exit_code, + stdout, + stderr, + } = body + { if !stdout.is_empty() { print!("{stdout}"); } @@ -1060,9 +1109,7 @@ fn main() -> Result<()> { } else if let Some(image) = from_docker { rootfs_create::RootfsSource::FromDocker(image) } else { - anyhow::bail!( - "Specify one of: --distro, --from-dir, or --from-docker" - ); + anyhow::bail!("Specify one of: --distro, --from-dir, or --from-docker"); }; rootfs_create::create_rootfs(&source, &size, &output)?; } @@ -1101,9 +1148,7 @@ fn resolve_vm_pid(vm_id: Option) -> Result { match pids.len() { 0 => anyhow::bail!("No running Clone instances found. Use --vm-id to specify a PID."), 1 => Ok(pids[0]), - n => anyhow::bail!( - "Found {n} running Clone instances ({pids:?}). Use --vm-id to specify which one." - ), + n => anyhow::bail!("Found {n} running Clone instances ({pids:?}). 
Use --vm-id to specify which one."), } } @@ -1148,4 +1193,3 @@ fn send_control_request( Ok(response) } - diff --git a/src/memory/balloon.rs b/src/memory/balloon.rs index 8b45904..0e60273 100644 --- a/src/memory/balloon.rs +++ b/src/memory/balloon.rs @@ -158,12 +158,7 @@ impl BalloonPolicy { /// For testing: create a policy with controllable timestamps. #[cfg(test)] - fn new_with_times( - total_pages: u64, - floor_mb: u32, - last_active: Instant, - last_deflate: Instant, - ) -> Self { + fn new_with_times(total_pages: u64, floor_mb: u32, last_active: Instant, last_deflate: Instant) -> Self { let floor_pages = (floor_mb as u64 * 1024 * 1024) / 4096; let max_reclaimable = total_pages.saturating_sub(floor_pages); diff --git a/src/memory/mod.rs b/src/memory/mod.rs index 1a581e0..e616a29 100644 --- a/src/memory/mod.rs +++ b/src/memory/mod.rs @@ -1,8 +1,7 @@ pub mod balloon; pub mod overcommit; -use anyhow::{Context, Result}; -use vm_memory::{GuestAddress, GuestMemoryMmap, MmapRegion}; +use anyhow::Result; /// Guest physical memory layout: /// @@ -13,7 +12,6 @@ use vm_memory::{GuestAddress, GuestMemoryMmap, MmapRegion}; /// 0x0001_0000 - 0x0002_0000 PD page tables (one 4KB PD per GB, up to 64) /// 0x0010_0000 - (kernel end) Kernel image (loaded at 1MB) /// (kernel end) - mem_size Free memory for guest use - const GUEST_MEM_START: u64 = 0; /// Create guest memory backed by anonymous mmap with MAP_NORESERVE. @@ -34,7 +32,12 @@ pub struct GuestMem { impl GuestMem { /// Create a GuestMem from a raw pointer and size (no MMIO hole). pub fn from_raw(ptr: *mut u8, size: u64) -> Self { - Self { ptr, size, hole_start: 0, hole_end: 0 } + Self { + ptr, + size, + hole_start: 0, + hole_end: 0, + } } /// Create a GuestMem with an MMIO hole for large VMs. @@ -42,17 +45,36 @@ impl GuestMem { /// - GPA [0, hole_start) → userspace [ptr, ptr+hole_start) /// - GPA [hole_end, ...) → userspace [ptr+hole_start, ...) 
pub fn from_raw_with_hole(ptr: *mut u8, size: u64, hole_start: u64, hole_end: u64) -> Self { - Self { ptr, size, hole_start, hole_end } + Self { + ptr, + size, + hole_start, + hole_end, + } } /// Create a temporary borrow of existing guest memory. pub unsafe fn borrow_raw(ptr: *mut u8, size: u64) -> BorrowedGuestMem { - BorrowedGuestMem { inner: GuestMem { ptr, size, hole_start: 0, hole_end: 0 } } + BorrowedGuestMem { + inner: GuestMem { + ptr, + size, + hole_start: 0, + hole_end: 0, + }, + } } /// Create a temporary borrow with MMIO hole info. pub unsafe fn borrow_raw_with_hole(ptr: *mut u8, size: u64, hole_start: u64, hole_end: u64) -> BorrowedGuestMem { - BorrowedGuestMem { inner: GuestMem { ptr, size, hole_start, hole_end } } + BorrowedGuestMem { + inner: GuestMem { + ptr, + size, + hole_start, + hole_end, + }, + } } pub fn as_ptr(&self) -> *mut u8 { @@ -97,7 +119,11 @@ impl GuestMem { } Ok(offset as usize) } else { - anyhow::bail!("GPA {gpa:#x} is in the MMIO hole ({:#x}..{:#x})", self.hole_start, self.hole_end); + anyhow::bail!( + "GPA {gpa:#x} is in the MMIO hole ({:#x}..{:#x})", + self.hole_start, + self.hole_end + ); } } @@ -235,7 +261,7 @@ pub fn setup_page_tables(mem: &GuestMem, mem_size: u64) -> Result<()> { let pd_base: u64 = 0x10000; // 64KB — PD tables start here // How many GB to map (at least 1, capped at 64 to fit below kernel at 0x100000) - let num_gb = ((mem_size + (1 << 30) - 1) >> 30).max(1).min(64) as u64; + let num_gb = mem_size.div_ceil(1 << 30).clamp(1, 64); // PML4[0] -> PDPT (present, writable) let pml4_entry: u64 = pdpt_addr | 0x3; diff --git a/src/memory/overcommit.rs b/src/memory/overcommit.rs index b341f61..4010ef0 100644 --- a/src/memory/overcommit.rs +++ b/src/memory/overcommit.rs @@ -5,7 +5,6 @@ /// - Billing: charge only for private pages /// - Monitoring: overcommit ratio per host /// - Dirty page tracking for incremental snapshots - pub struct OvercommitTracker { /// Total guest pages total_pages: u64, @@ -28,11 +27,7 @@ impl 
OvercommitTracker { let mut vec = vec![0u8; page_count]; let resident = unsafe { - let ret = libc::mincore( - mem_ptr as *mut libc::c_void, - mem_size as usize, - vec.as_mut_ptr(), - ); + let ret = libc::mincore(mem_ptr as *mut libc::c_void, mem_size as usize, vec.as_mut_ptr()); if ret != 0 { tracing::warn!("mincore failed: {}", std::io::Error::last_os_error()); return; @@ -90,12 +85,13 @@ impl DirtyPageTracker { /// The bitmap has one bit per page. A set bit means the page was written /// since the last call to get_dirty_bitmap (or since dirty logging was enabled). pub fn get_dirty_bitmap(&self, vm_fd: &kvm_ioctls::VmFd) -> anyhow::Result> { - let bitmap = vm_fd.get_dirty_log(0, self.mem_size as usize) + let bitmap = vm_fd + .get_dirty_log(0, self.mem_size as usize) .map_err(|e| anyhow::anyhow!("get_dirty_log failed: {e}"))?; // Convert the kvm dirty log bitmap to a byte vec // KVM returns a bitmap where each bit represents a page - let bitmap_size = ((self.total_pages + 63) / 64 * 8) as usize; + let bitmap_size = (self.total_pages.div_ceil(64) * 8) as usize; let mut result = vec![0u8; bitmap_size]; // The dirty log is returned as a Vec of atomic bitmap words @@ -148,17 +144,12 @@ impl DirtyPageTracker { if byte_idx < bitmap.len() && (bitmap[byte_idx] & (1 << bit_idx)) != 0 { let offset = page_idx * 4096; - let page_data = unsafe { - std::slice::from_raw_parts(guest_mem.add(offset as usize), 4096) - }; + let page_data = unsafe { std::slice::from_raw_parts(guest_mem.add(offset as usize), 4096) }; dirty_data.extend_from_slice(page_data); } } - tracing::info!( - dirty_data_size = dirty_data.len(), - "Collected dirty page data" - ); + tracing::info!(dirty_data_size = dirty_data.len(), "Collected dirty page data"); Ok((bitmap, dirty_data)) } diff --git a/src/migration/mod.rs b/src/migration/mod.rs index 014e665..fd59cac 100644 --- a/src/migration/mod.rs +++ b/src/migration/mod.rs @@ -138,9 +138,7 @@ fn decode_page_batch(payload: &[u8]) -> Result)>> { if offset + 8 + 
4096 > payload.len() { anyhow::bail!("page batch truncated"); } - let page_offset = u64::from_le_bytes( - payload[offset..offset + 8].try_into().unwrap(), - ); + let page_offset = u64::from_le_bytes(payload[offset..offset + 8].try_into().unwrap()); let data = payload[offset + 8..offset + 8 + 4096].to_vec(); pages.push((page_offset, data)); offset += 8 + 4096; @@ -155,9 +153,7 @@ fn decode_page_batch(payload: &[u8]) -> Result)>> { fn is_zero_page(data: &[u8]) -> bool { // Check in 8-byte chunks for speed let (prefix, aligned, suffix) = unsafe { data.align_to::() }; - prefix.iter().all(|&b| b == 0) - && aligned.iter().all(|&w| w == 0) - && suffix.iter().all(|&b| b == 0) + prefix.iter().all(|&b| b == 0) && aligned.iter().all(|&w| w == 0) && suffix.iter().all(|&b| b == 0) } // --------------------------------------------------------------------------- @@ -165,23 +161,18 @@ fn is_zero_page(data: &[u8]) -> bool { // --------------------------------------------------------------------------- #[cfg(target_os = "linux")] -pub fn run_sender( - vm: &crate::control::sync_server::VmHandle, - config: MigrationSenderConfig, -) -> Result { +pub fn run_sender(vm: &crate::control::sync_server::VmHandle, config: MigrationSenderConfig) -> Result { use crate::memory::overcommit::DirtyPageTracker; let start = Instant::now(); let mut total_pages_sent: u64 = 0; - let vm_fd = vm.vm_fd.as_ref() - .context("VM fd not available for migration")?; + let vm_fd = vm.vm_fd.as_ref().context("VM fd not available for migration")?; // Connect to receiver let addr = format!("{}:{}", config.dest_host, config.dest_port); tracing::info!("Connecting to migration receiver at {addr}"); - let mut stream = TcpStream::connect(&addr) - .with_context(|| format!("Failed to connect to receiver at {addr}"))?; + let mut stream = TcpStream::connect(&addr).with_context(|| format!("Failed to connect to receiver at {addr}"))?; // Set TCP_NODELAY for lower latency on small messages stream.set_nodelay(true)?; @@ -201,8 
+192,12 @@ pub fn run_sender( }; let hello_json = serde_json::to_vec(&hello)?; write_msg(&mut stream, MSG_HELLO, &hello_json)?; - tracing::info!("Sent Hello: {}MB, {} vCPUs, {} devices", - vm.mem_size >> 20, vm.num_vcpus, num_devices); + tracing::info!( + "Sent Hello: {}MB, {} vCPUs, {} devices", + vm.mem_size >> 20, + vm.num_vcpus, + num_devices + ); // 2. Wait for Ready let (msg_type, _) = read_msg(&mut stream)?; @@ -219,16 +214,17 @@ pub fn run_sender( // 4. Send full memory (VM continues running) let total_pages = vm.mem_size / PAGE_SIZE; - tracing::info!("Sending initial memory: {} pages ({} MB)", - total_pages, vm.mem_size >> 20); + tracing::info!( + "Sending initial memory: {} pages ({} MB)", + total_pages, + vm.mem_size >> 20 + ); let mut batch: Vec<(u64, &[u8])> = Vec::with_capacity(BATCH_SIZE); for page_idx in 0..total_pages { let offset = page_idx * PAGE_SIZE; - let page_data = unsafe { - std::slice::from_raw_parts(vm.guest_memory.add(offset as usize), PAGE_SIZE as usize) - }; + let page_data = unsafe { std::slice::from_raw_parts(vm.guest_memory.add(offset as usize), PAGE_SIZE as usize) }; // Skip zero pages (receiver memory is already zeroed) if is_zero_page(page_data) { @@ -293,12 +289,8 @@ pub fn run_sender( let bit_idx = (page_idx % 8) as u8; if byte_idx < bitmap.len() && (bitmap[byte_idx] & (1 << bit_idx)) != 0 { let offset = page_idx * PAGE_SIZE; - let page_data = unsafe { - std::slice::from_raw_parts( - vm.guest_memory.add(offset as usize), - PAGE_SIZE as usize, - ) - }; + let page_data = + unsafe { std::slice::from_raw_parts(vm.guest_memory.add(offset as usize), PAGE_SIZE as usize) }; round_batch.push((offset, page_data)); if round_batch.len() >= BATCH_SIZE { @@ -338,12 +330,8 @@ pub fn run_sender( let bit_idx = (page_idx % 8) as u8; if byte_idx < bitmap.len() && (bitmap[byte_idx] & (1 << bit_idx)) != 0 { let offset = page_idx * PAGE_SIZE; - let page_data = unsafe { - std::slice::from_raw_parts( - vm.guest_memory.add(offset as usize), - PAGE_SIZE 
as usize, - ) - }; + let page_data = + unsafe { std::slice::from_raw_parts(vm.guest_memory.add(offset as usize), PAGE_SIZE as usize) }; final_batch.push((offset, page_data)); final_dirty += 1; @@ -366,12 +354,16 @@ pub fn run_sender( // 7. Send vCPU states let vcpu_states: Vec = { let states = vm.pause_state.captured_states.lock().unwrap(); - states.iter().enumerate().map(|(i, s)| { - s.clone().unwrap_or_else(|| { - tracing::error!("vCPU {i} state not captured"); - VcpuState::empty() + states + .iter() + .enumerate() + .map(|(i, s)| { + s.clone().unwrap_or_else(|| { + tracing::error!("vCPU {i} state not captured"); + VcpuState::empty() + }) }) - }).collect() + .collect() }; for (i, state) in vcpu_states.iter().enumerate() { @@ -388,7 +380,8 @@ pub fn run_sender( let device_states = { let bus = vm.mmio_bus.lock().unwrap(); let transport_states = bus.snapshot_all(); - let transports: Vec> = transport_states.iter() + let transports: Vec> = transport_states + .iter() .map(|s| serde_json::to_vec(s).unwrap_or_default()) .collect(); DeviceStates { @@ -424,7 +417,10 @@ pub fn run_sender( tracing::info!( "Migration complete: {} pages sent, {} rounds, {}ms downtime, {}ms total", - stats.total_pages_sent, stats.rounds, stats.downtime_ms, stats.total_time_ms + stats.total_pages_sent, + stats.rounds, + stats.downtime_ms, + stats.total_time_ms ); // Shut down source VM @@ -433,7 +429,9 @@ pub fn run_sender( // Resume vCPUs so they can exit crate::control::sync_server::resume_vcpus_pub(vm); for &tid in &vm.vcpu_threads { - unsafe { libc::pthread_kill(tid, libc::SIGUSR1); } + unsafe { + libc::pthread_kill(tid, libc::SIGUSR1); + } } Ok(stats) @@ -444,19 +442,12 @@ pub fn run_sender( // --------------------------------------------------------------------------- #[cfg(target_os = "linux")] -pub fn run_receiver( - port: u16, - kernel_path: &str, - mem_mb: u32, -) -> Result<()> { +pub fn run_receiver(port: u16, _kernel_path: &str, _mem_mb: u32) -> Result<()> { use 
std::net::TcpListener; use std::sync::atomic::AtomicBool; use std::sync::Arc; - use kvm_bindings::{ - kvm_pit_config, kvm_userspace_memory_region, - KVM_MEM_LOG_DIRTY_PAGES, KVM_PIT_SPEAKER_DUMMY, - }; + use kvm_bindings::{kvm_pit_config, kvm_userspace_memory_region, KVM_MEM_LOG_DIRTY_PAGES, KVM_PIT_SPEAKER_DUMMY}; use kvm_ioctls::Kvm; let listener = TcpListener::bind(format!("0.0.0.0:{port}")) @@ -475,7 +466,9 @@ pub fn run_receiver( let hello: HelloMsg = serde_json::from_slice(&payload)?; eprintln!( "Migration Hello: {}MB, {} vCPUs, {} devices", - hello.mem_size >> 20, hello.num_vcpus, hello.num_devices + hello.mem_size >> 20, + hello.num_vcpus, + hello.num_devices ); // 2. Pre-allocate guest memory and KVM VM @@ -485,8 +478,7 @@ pub fn run_receiver( let mem_size = hello.mem_size; let guard_size: u64 = 128 << 20; let alloc_size = mem_size + guard_size; - let guest_memory = crate::memory::create_guest_memory(alloc_size) - .context("Failed to allocate guest memory")?; + let guest_memory = crate::memory::create_guest_memory(alloc_size).context("Failed to allocate guest memory")?; let mem_region = kvm_userspace_memory_region { slot: 0, @@ -496,7 +488,8 @@ pub fn run_receiver( flags: KVM_MEM_LOG_DIRTY_PAGES, }; unsafe { - vm_fd.set_user_memory_region(mem_region) + vm_fd + .set_user_memory_region(mem_region) .context("Failed to set KVM memory region")?; } @@ -574,8 +567,12 @@ pub fn run_receiver( bus.register(Box::new(balloon)); match crate::virtio::vsock::VirtioVsock::new(3) { - Ok(vsock) => { bus.register(Box::new(vsock)); } - Err(e) => { tracing::warn!("Failed to create vsock: {e}"); } + Ok(vsock) => { + bus.register(Box::new(vsock)); + } + Err(e) => { + tracing::warn!("Failed to create vsock: {e}"); + } } bus.set_guest_memory(guest_memory.as_ptr(), mem_size); @@ -642,25 +639,29 @@ pub fn run_receiver( // Control socket let shutdown_flag = Arc::new(AtomicBool::new(false)); - let _agent_state = crate::vmm::agent_listener::start_listener(Arc::clone(&shutdown_flag), 
crate::vmm::agent_listener::AGENT_VSOCK_PORT_BASE); + let _agent_state = crate::vmm::agent_listener::start_listener( + Arc::clone(&shutdown_flag), + crate::vmm::agent_listener::AGENT_VSOCK_PORT_BASE, + ); let mut vcpu_threads: Vec = Vec::new(); let mut ap_handles = Vec::new(); - let mut all_vcpus: Vec = vcpus.drain(..).collect(); + let mut all_vcpus: Vec = std::mem::take(&mut vcpus); for vcpu in all_vcpus.drain(1..) { - let (tx, rx) = std::sync::mpsc::channel::(); + use crate::compat::SendPthreadT; + let (tx, rx) = std::sync::mpsc::channel::(); let handle = std::thread::Builder::new() .name(format!("vcpu-{}", vcpu.id())) .spawn(move || { - let _ = tx.send(unsafe { libc::pthread_self() }); + let _ = tx.send(SendPthreadT(unsafe { libc::pthread_self() })); let mut vcpu = vcpu; if let Err(e) = vcpu.run_loop() { tracing::error!("vCPU {} exited with error: {e}", vcpu.id()); } })?; - if let Ok(tid) = rx.recv() { + if let Ok(SendPthreadT(tid)) = rx.recv() { vcpu_threads.push(tid); } ap_handles.push(handle); @@ -736,7 +737,11 @@ fn decode_vcpu_state(payload: &[u8]) -> Result { } let sregs = payload[offset..offset + sregs_len].to_vec(); - Ok(VcpuState { regs, sregs, ..VcpuState::empty() }) + Ok(VcpuState { + regs, + sregs, + ..VcpuState::empty() + }) } #[cfg(test)] @@ -757,10 +762,7 @@ mod tests { fn test_page_batch_roundtrip() { let page1 = vec![0xAA; 4096]; let page2 = vec![0xBB; 4096]; - let pages: Vec<(u64, &[u8])> = vec![ - (0x1000, &page1), - (0x5000, &page2), - ]; + let pages: Vec<(u64, &[u8])> = vec![(0x1000, &page1), (0x5000, &page2)]; let encoded = encode_page_batch(&pages); let decoded = decode_page_batch(&encoded).unwrap(); assert_eq!(decoded.len(), 2); diff --git a/src/net/mod.rs b/src/net/mod.rs index 5cb828d..dd3cc0e 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -11,6 +11,11 @@ use std::os::unix::io::RawFd; +#[cfg(target_os = "linux")] +use crate::compat::IoctlReq; +#[cfg(not(target_os = "linux"))] +type IoctlReq = libc::c_ulong; + /// Network 
configuration for a VM. #[derive(Debug, Clone)] pub struct NetworkConfig { @@ -28,13 +33,7 @@ pub struct NetworkConfig { impl NetworkConfig { /// Create a new network configuration with the given parameters. - pub fn new( - bridge_name: &str, - guest_ip: &str, - gateway_ip: &str, - netmask: &str, - mac_address: [u8; 6], - ) -> Self { + pub fn new(bridge_name: &str, guest_ip: &str, gateway_ip: &str, netmask: &str, mac_address: [u8; 6]) -> Self { Self { bridge_name: bridge_name.to_string(), guest_ip: guest_ip.to_string(), @@ -52,9 +51,7 @@ impl NetworkConfig { 0x02, // locally administered, unicast 0x4E, // 'N' 0x56, // 'V' - bytes[1], - bytes[2], - bytes[3], + bytes[1], bytes[2], bytes[3], ] } } @@ -89,7 +86,7 @@ pub fn create_tap(name: &str) -> anyhow::Result { const IFF_TAP: libc::c_short = 0x0002; const IFF_NO_PI: libc::c_short = 0x1000; // TUNSETIFF = _IOW('T', 202, int) = 0x400454CA - const TUNSETIFF: libc::c_ulong = 0x400454CA; + const TUNSETIFF: IoctlReq = 0x400454CA; let mut ifr = [0u8; 40]; // ifreq is typically 40 bytes @@ -105,7 +102,9 @@ pub fn create_tap(name: &str) -> anyhow::Result { let ret = unsafe { libc::ioctl(fd, TUNSETIFF, ifr.as_ptr()) }; if ret < 0 { let err = std::io::Error::last_os_error(); - unsafe { libc::close(fd); } + unsafe { + libc::close(fd); + } return Err(anyhow::anyhow!("TUNSETIFF failed for {name}: {err}")); } @@ -113,13 +112,17 @@ pub fn create_tap(name: &str) -> anyhow::Result { let flags = unsafe { libc::fcntl(fd, libc::F_GETFL) }; if flags < 0 { let err = std::io::Error::last_os_error(); - unsafe { libc::close(fd); } + unsafe { + libc::close(fd); + } return Err(anyhow::anyhow!("fcntl F_GETFL failed: {err}")); } let ret = unsafe { libc::fcntl(fd, libc::F_SETFL, flags | libc::O_NONBLOCK) }; if ret < 0 { let err = std::io::Error::last_os_error(); - unsafe { libc::close(fd); } + unsafe { + libc::close(fd); + } return Err(anyhow::anyhow!("fcntl F_SETFL O_NONBLOCK failed: {err}")); } @@ -158,22 +161,19 @@ pub fn configure_tap(fd: 
RawFd, ip: &str, netmask: &str) -> anyhow::Result<()> { // Actually, let's just use a utility ioctl to get the ifr_name. // TUNGETIFF to retrieve the interface name. - const TUNGETIFF: libc::c_ulong = 0x800454D2; + const TUNGETIFF: IoctlReq = 0x800454D2u32 as IoctlReq; let mut ifr = [0u8; 40]; let ret = unsafe { libc::ioctl(fd, TUNGETIFF, ifr.as_mut_ptr()) }; if ret < 0 { - unsafe { libc::close(sock); } - return Err(anyhow::anyhow!( - "TUNGETIFF failed: {}", - std::io::Error::last_os_error() - )); + unsafe { + libc::close(sock); + } + return Err(anyhow::anyhow!("TUNGETIFF failed: {}", std::io::Error::last_os_error())); } // Parse the IP address. let ip_addr: Ipv4Addr = ip.parse().map_err(|e| anyhow::anyhow!("Invalid IP: {e}"))?; - let netmask_addr: Ipv4Addr = netmask - .parse() - .map_err(|e| anyhow::anyhow!("Invalid netmask: {e}"))?; + let netmask_addr: Ipv4Addr = netmask.parse().map_err(|e| anyhow::anyhow!("Invalid netmask: {e}"))?; // Helper to build a sockaddr_in and place it in the ifreq union at offset 16. 
fn set_sockaddr_in(ifr: &mut [u8], addr: &Ipv4Addr) { @@ -192,34 +192,40 @@ pub fn configure_tap(fd: RawFd, ip: &str, netmask: &str) -> anyhow::Result<()> { } // SIOCSIFADDR = 0x8916 - const SIOCSIFADDR: libc::c_ulong = 0x8916; + const SIOCSIFADDR: IoctlReq = 0x8916; set_sockaddr_in(&mut ifr, &ip_addr); let ret = unsafe { libc::ioctl(sock, SIOCSIFADDR, ifr.as_ptr()) }; if ret < 0 { let err = std::io::Error::last_os_error(); - unsafe { libc::close(sock); } + unsafe { + libc::close(sock); + } return Err(anyhow::anyhow!("SIOCSIFADDR failed: {err}")); } // SIOCSIFNETMASK = 0x891C - const SIOCSIFNETMASK: libc::c_ulong = 0x891C; + const SIOCSIFNETMASK: IoctlReq = 0x891C; set_sockaddr_in(&mut ifr, &netmask_addr); let ret = unsafe { libc::ioctl(sock, SIOCSIFNETMASK, ifr.as_ptr()) }; if ret < 0 { let err = std::io::Error::last_os_error(); - unsafe { libc::close(sock); } + unsafe { + libc::close(sock); + } return Err(anyhow::anyhow!("SIOCSIFNETMASK failed: {err}")); } // Bring the interface up: SIOCSIFFLAGS with IFF_UP. - const SIOCSIFFLAGS: libc::c_ulong = 0x8914; - const SIOCGIFFLAGS: libc::c_ulong = 0x8913; + const SIOCSIFFLAGS: IoctlReq = 0x8914; + const SIOCGIFFLAGS: IoctlReq = 0x8913; // First get current flags. let ret = unsafe { libc::ioctl(sock, SIOCGIFFLAGS, ifr.as_mut_ptr()) }; if ret < 0 { let err = std::io::Error::last_os_error(); - unsafe { libc::close(sock); } + unsafe { + libc::close(sock); + } return Err(anyhow::anyhow!("SIOCGIFFLAGS failed: {err}")); } // Set IFF_UP (bit 0) in the flags at offset 16 (as i16). 
@@ -230,11 +236,15 @@ pub fn configure_tap(fd: RawFd, ip: &str, netmask: &str) -> anyhow::Result<()> { let ret = unsafe { libc::ioctl(sock, SIOCSIFFLAGS, ifr.as_ptr()) }; if ret < 0 { let err = std::io::Error::last_os_error(); - unsafe { libc::close(sock); } + unsafe { + libc::close(sock); + } return Err(anyhow::anyhow!("SIOCSIFFLAGS (IFF_UP) failed: {err}")); } - unsafe { libc::close(sock); } + unsafe { + libc::close(sock); + } tracing::info!("TAP configured: ip={ip}, netmask={netmask}"); Ok(()) @@ -242,9 +252,7 @@ pub fn configure_tap(fd: RawFd, ip: &str, netmask: &str) -> anyhow::Result<()> { #[cfg(not(target_os = "linux"))] pub fn configure_tap(_fd: RawFd, ip: &str, netmask: &str) -> anyhow::Result<()> { - tracing::warn!( - "TAP configuration not supported on this platform (stub: ip={ip}, netmask={netmask})" - ); + tracing::warn!("TAP configuration not supported on this platform (stub: ip={ip}, netmask={netmask})"); Ok(()) } @@ -269,11 +277,13 @@ pub fn setup_bridge(bridge_name: &str, tap_name: &str) -> anyhow::Result<()> { ifr[..copy_len].copy_from_slice(&name_bytes[..copy_len]); // SIOCGIFINDEX = 0x8933 - const SIOCGIFINDEX: libc::c_ulong = 0x8933; + const SIOCGIFINDEX: IoctlReq = 0x8933; let ret = unsafe { libc::ioctl(sock, SIOCGIFINDEX, ifr.as_mut_ptr()) }; if ret < 0 { let err = std::io::Error::last_os_error(); - unsafe { libc::close(sock); } + unsafe { + libc::close(sock); + } return Err(anyhow::anyhow!("SIOCGIFINDEX for {tap_name} failed: {err}")); } let ifindex = i32::from_ne_bytes([ifr[16], ifr[17], ifr[18], ifr[19]]); @@ -286,26 +296,28 @@ pub fn setup_bridge(bridge_name: &str, tap_name: &str) -> anyhow::Result<()> { br_ifr[16..20].copy_from_slice(&ifindex.to_ne_bytes()); // SIOCBRADDIF = 0x89A2 - const SIOCBRADDIF: libc::c_ulong = 0x89A2; + const SIOCBRADDIF: IoctlReq = 0x89A2; let ret = unsafe { libc::ioctl(sock, SIOCBRADDIF, br_ifr.as_ptr()) }; if ret < 0 { let err = std::io::Error::last_os_error(); - unsafe { libc::close(sock); } + unsafe { + 
libc::close(sock); + } return Err(anyhow::anyhow!( "SIOCBRADDIF (add {tap_name} to {bridge_name}) failed: {err}" )); } - unsafe { libc::close(sock); } + unsafe { + libc::close(sock); + } tracing::info!("Added TAP {tap_name} to bridge {bridge_name}"); Ok(()) } #[cfg(not(target_os = "linux"))] pub fn setup_bridge(bridge_name: &str, tap_name: &str) -> anyhow::Result<()> { - tracing::warn!( - "Bridge setup not supported on this platform (stub: bridge={bridge_name}, tap={tap_name})" - ); + tracing::warn!("Bridge setup not supported on this platform (stub: bridge={bridge_name}, tap={tap_name})"); Ok(()) } @@ -322,8 +334,12 @@ pub fn setup_vm_network(config: &NetworkConfig) -> anyhow::Result { tracing::info!( "Setting up VM network: tap={tap_name}, ip={}, mac={:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}", config.gateway_ip, - config.mac_address[0], config.mac_address[1], config.mac_address[2], - config.mac_address[3], config.mac_address[4], config.mac_address[5] + config.mac_address[0], + config.mac_address[1], + config.mac_address[2], + config.mac_address[3], + config.mac_address[4], + config.mac_address[5] ); // 1. Create the TAP device. @@ -333,7 +349,9 @@ pub fn setup_vm_network(config: &NetworkConfig) -> anyhow::Result { if let Err(e) = configure_tap(tap_fd, &config.gateway_ip, &config.netmask) { // Clean up on failure. 
if tap_fd >= 0 { - unsafe { libc::close(tap_fd); } + unsafe { + libc::close(tap_fd); + } } return Err(e); } @@ -442,7 +460,16 @@ fn ensure_nat() -> anyhow::Result<()> { // Check if the rule already exists let output = Command::new("iptables") - .args(["-t", "nat", "-C", "POSTROUTING", "-s", DEFAULT_BRIDGE_CIDR, "-j", "MASQUERADE"]) + .args([ + "-t", + "nat", + "-C", + "POSTROUTING", + "-s", + DEFAULT_BRIDGE_CIDR, + "-j", + "MASQUERADE", + ]) .output()?; if output.status.success() { @@ -450,25 +477,44 @@ fn ensure_nat() -> anyhow::Result<()> { } let _ = Command::new("iptables") - .args(["-t", "nat", "-A", "POSTROUTING", "-s", DEFAULT_BRIDGE_CIDR, "-j", "MASQUERADE"]) + .args([ + "-t", + "nat", + "-A", + "POSTROUTING", + "-s", + DEFAULT_BRIDGE_CIDR, + "-j", + "MASQUERADE", + ]) .status(); // FORWARD rules let _ = Command::new("iptables") .args(["-C", "FORWARD", "-s", DEFAULT_BRIDGE_CIDR, "-j", "ACCEPT"]) .output() - .and_then(|o| if o.status.success() { Ok(()) } else { - Command::new("iptables") - .args(["-A", "FORWARD", "-s", DEFAULT_BRIDGE_CIDR, "-j", "ACCEPT"]) - .status().map(|_| ()) + .and_then(|o| { + if o.status.success() { + Ok(()) + } else { + Command::new("iptables") + .args(["-A", "FORWARD", "-s", DEFAULT_BRIDGE_CIDR, "-j", "ACCEPT"]) + .status() + .map(|_| ()) + } }); let _ = Command::new("iptables") .args(["-C", "FORWARD", "-d", DEFAULT_BRIDGE_CIDR, "-j", "ACCEPT"]) .output() - .and_then(|o| if o.status.success() { Ok(()) } else { - Command::new("iptables") - .args(["-A", "FORWARD", "-d", DEFAULT_BRIDGE_CIDR, "-j", "ACCEPT"]) - .status().map(|_| ()) + .and_then(|o| { + if o.status.success() { + Ok(()) + } else { + Command::new("iptables") + .args(["-A", "FORWARD", "-d", DEFAULT_BRIDGE_CIDR, "-j", "ACCEPT"]) + .status() + .map(|_| ()) + } }); Ok(()) @@ -488,8 +534,8 @@ fn bring_interface_up(name: &str) -> anyhow::Result<()> { ifr[..copy_len].copy_from_slice(&name_bytes[..copy_len]); // Get current flags - const SIOCGIFFLAGS: libc::c_ulong = 0x8913; - 
const SIOCSIFFLAGS: libc::c_ulong = 0x8914; + const SIOCGIFFLAGS: IoctlReq = 0x8913; + const SIOCSIFFLAGS: IoctlReq = 0x8914; let ret = unsafe { libc::ioctl(sock, SIOCGIFFLAGS, ifr.as_mut_ptr()) }; if ret < 0 { diff --git a/src/pci/mod.rs b/src/pci/mod.rs index 764b336..6fdea4d 100644 --- a/src/pci/mod.rs +++ b/src/pci/mod.rs @@ -8,8 +8,6 @@ pub mod vfio; -use std::collections::HashMap; - /// ECAM base address — below our virtio MMIO region at 0xD000_0000. pub const ECAM_BASE: u64 = 0xB000_0000; @@ -126,7 +124,7 @@ impl PciDevice { } // BAR writes need special handling — mask to BAR size alignment - if off >= 0x10 && off < 0x28 { + if (0x10..0x28).contains(&off) { let bar_index = (off - 0x10) / 4; if bar_index < self.bars.len() { // Guest is probing BAR size: write all 1s, read back aligned size @@ -219,8 +217,7 @@ impl PciBus { // Write BAR value into config space let bar_offset = 0x10 + i * 4; - let bar_val = (guest_addr as u32) | if is_64bit { 0x4 } else { 0x0 } - | if prefetchable { 0x8 } else { 0x0 }; + let bar_val = (guest_addr as u32) | if is_64bit { 0x4 } else { 0x0 } | if prefetchable { 0x8 } else { 0x0 }; config[bar_offset..bar_offset + 4].copy_from_slice(&bar_val.to_le_bytes()); // For 64-bit BARs, write upper 32 bits in next BAR slot diff --git a/src/pci/vfio.rs b/src/pci/vfio.rs index b23b30a..a78151a 100644 --- a/src/pci/vfio.rs +++ b/src/pci/vfio.rs @@ -9,12 +9,13 @@ //! No external crate dependencies — raw ioctl constants from Linux headers. use std::fs; -use std::os::unix::io::{AsRawFd, RawFd}; -use std::path::{Path, PathBuf}; +use std::os::unix::io::RawFd; +use std::path::Path; use anyhow::{Context, Result}; use super::MsixState; +use crate::compat::IoctlReq; // --------------------------------------------------------------------------- // VFIO ioctl constants (from linux/vfio.h) @@ -139,25 +140,20 @@ impl VfioDevice { tracing::info!("VFIO device {bdf}: IOMMU group {iommu_group}"); // 2. 
Open container - let container_fd = open_rw("/dev/vfio/vfio") - .context("Failed to open /dev/vfio/vfio")?; + let container_fd = open_rw("/dev/vfio/vfio").context("Failed to open /dev/vfio/vfio")?; // Check API version - let version = unsafe { libc::ioctl(container_fd, VFIO_GET_API_VERSION as libc::c_ulong) }; - if version != VFIO_API_VERSION as i32 { + let version = unsafe { libc::ioctl(container_fd, VFIO_GET_API_VERSION as IoctlReq) }; + if version != VFIO_API_VERSION { anyhow::bail!("VFIO API version mismatch: got {version}, expected {VFIO_API_VERSION}"); } // Check IOMMU support - let has_type1v2 = unsafe { - libc::ioctl(container_fd, VFIO_CHECK_EXTENSION as libc::c_ulong, VFIO_TYPE1V2_IOMMU) - }; + let has_type1v2 = unsafe { libc::ioctl(container_fd, VFIO_CHECK_EXTENSION as IoctlReq, VFIO_TYPE1V2_IOMMU) }; let iommu_type = if has_type1v2 > 0 { VFIO_TYPE1V2_IOMMU } else { - let has_type1 = unsafe { - libc::ioctl(container_fd, VFIO_CHECK_EXTENSION as libc::c_ulong, VFIO_TYPE1_IOMMU) - }; + let has_type1 = unsafe { libc::ioctl(container_fd, VFIO_CHECK_EXTENSION as IoctlReq, VFIO_TYPE1_IOMMU) }; if has_type1 <= 0 { anyhow::bail!("No supported VFIO IOMMU type found"); } @@ -166,14 +162,11 @@ impl VfioDevice { // 3. Open group let group_path = format!("/dev/vfio/{iommu_group}"); - let group_fd = open_rw(&group_path) - .with_context(|| format!("Failed to open VFIO group: {group_path}"))?; + let group_fd = open_rw(&group_path).with_context(|| format!("Failed to open VFIO group: {group_path}"))?; // Check group is viable let mut status = VfioGroupStatus { argsz: 8, flags: 0 }; - let ret = unsafe { - libc::ioctl(group_fd, VFIO_GROUP_GET_STATUS as libc::c_ulong, &mut status) - }; + let ret = unsafe { libc::ioctl(group_fd, VFIO_GROUP_GET_STATUS as IoctlReq, &mut status) }; if ret < 0 { anyhow::bail!("VFIO_GROUP_GET_STATUS failed: {}", std::io::Error::last_os_error()); } @@ -185,26 +178,20 @@ impl VfioDevice { } // 4. 
Set container for group - let ret = unsafe { - libc::ioctl(group_fd, VFIO_GROUP_SET_CONTAINER as libc::c_ulong, &container_fd) - }; + let ret = unsafe { libc::ioctl(group_fd, VFIO_GROUP_SET_CONTAINER as IoctlReq, &container_fd) }; if ret < 0 { anyhow::bail!("VFIO_GROUP_SET_CONTAINER failed: {}", std::io::Error::last_os_error()); } // 5. Set IOMMU type - let ret = unsafe { - libc::ioctl(container_fd, VFIO_SET_IOMMU as libc::c_ulong, iommu_type) - }; + let ret = unsafe { libc::ioctl(container_fd, VFIO_SET_IOMMU as IoctlReq, iommu_type) }; if ret < 0 { anyhow::bail!("VFIO_SET_IOMMU failed: {}", std::io::Error::last_os_error()); } // 6. Get device fd let bdf_cstr = std::ffi::CString::new(bdf)?; - let device_fd = unsafe { - libc::ioctl(group_fd, VFIO_GROUP_GET_DEVICE_FD as libc::c_ulong, bdf_cstr.as_ptr()) - }; + let device_fd = unsafe { libc::ioctl(group_fd, VFIO_GROUP_GET_DEVICE_FD as IoctlReq, bdf_cstr.as_ptr()) }; if device_fd < 0 { anyhow::bail!( "VFIO_GROUP_GET_DEVICE_FD failed for {bdf}: {}", @@ -219,15 +206,14 @@ impl VfioDevice { num_regions: 0, num_irqs: 0, }; - let ret = unsafe { - libc::ioctl(device_fd, VFIO_DEVICE_GET_INFO as libc::c_ulong, &mut dev_info) - }; + let ret = unsafe { libc::ioctl(device_fd, VFIO_DEVICE_GET_INFO as IoctlReq, &mut dev_info) }; if ret < 0 { anyhow::bail!("VFIO_DEVICE_GET_INFO failed: {}", std::io::Error::last_os_error()); } tracing::info!( "VFIO device {bdf}: {} regions, {} IRQs", - dev_info.num_regions, dev_info.num_irqs + dev_info.num_regions, + dev_info.num_irqs ); // 8. 
Query regions @@ -241,9 +227,7 @@ impl VfioDevice { size: 0, offset: 0, }; - let ret = unsafe { - libc::ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO as libc::c_ulong, &mut region) - }; + let ret = unsafe { libc::ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO as IoctlReq, &mut region) }; if ret < 0 { tracing::warn!("Failed to get region {i} info"); continue; @@ -251,7 +235,9 @@ impl VfioDevice { if region.size > 0 { tracing::info!( " Region {i}: size={:#x}, flags={:#x}, offset={:#x}", - region.size, region.flags, region.offset + region.size, + region.flags, + region.offset ); } regions.push(region); @@ -277,17 +263,16 @@ impl VfioDevice { size: mem_size, }; - let ret = unsafe { - libc::ioctl(self.container_fd, VFIO_IOMMU_MAP_DMA as libc::c_ulong, &dma_map) - }; + let ret = unsafe { libc::ioctl(self.container_fd, VFIO_IOMMU_MAP_DMA as IoctlReq, &dma_map) }; if ret < 0 { - anyhow::bail!( - "VFIO_IOMMU_MAP_DMA failed: {}", - std::io::Error::last_os_error() - ); + anyhow::bail!("VFIO_IOMMU_MAP_DMA failed: {}", std::io::Error::last_os_error()); } - tracing::info!("DMA mapped: IOVA 0x0-{:#x} → vaddr {:#x}", mem_size, guest_mem_ptr as u64); + tracing::info!( + "DMA mapped: IOVA 0x0-{:#x} → vaddr {:#x}", + mem_size, + guest_mem_ptr as u64 + ); Ok(()) } @@ -390,7 +375,10 @@ impl VfioDevice { pub fn map_bar(&mut self, bar_index: usize, guest_addr: u64, size: u64) -> Result<()> { tracing::info!( "VFIO {} BAR{}: mapped at guest {:#x}, size {:#x}", - self.bdf, bar_index, guest_addr, size + self.bdf, + bar_index, + guest_addr, + size ); Ok(()) } @@ -409,9 +397,7 @@ impl VfioDevice { /// Reset the device. 
pub fn reset(&self) -> Result<()> { - let ret = unsafe { - libc::ioctl(self.device_fd, VFIO_DEVICE_RESET as libc::c_ulong) - }; + let ret = unsafe { libc::ioctl(self.device_fd, VFIO_DEVICE_RESET as IoctlReq) }; if ret < 0 { tracing::warn!("VFIO device reset failed: {}", std::io::Error::last_os_error()); } @@ -444,22 +430,19 @@ fn find_iommu_group(bdf: &str) -> Result { let link = fs::read_link(format!("/sys/bus/pci/devices/{bdf}/iommu_group")) .with_context(|| format!("Device {bdf} has no IOMMU group (is iommu enabled in BIOS?)"))?; - let group_name = link.file_name() + let group_name = link + .file_name() .and_then(|n| n.to_str()) .context("Invalid IOMMU group path")?; - group_name.parse::() + group_name + .parse::() .with_context(|| format!("Invalid IOMMU group number: {group_name}")) } /// Open a file read-write and return the raw fd. fn open_rw(path: &str) -> Result { - let fd = unsafe { - libc::open( - std::ffi::CString::new(path)?.as_ptr(), - libc::O_RDWR, - ) - }; + let fd = unsafe { libc::open(std::ffi::CString::new(path)?.as_ptr(), libc::O_RDWR) }; if fd < 0 { anyhow::bail!("Failed to open {path}: {}", std::io::Error::last_os_error()); } diff --git a/src/rootfs.rs b/src/rootfs.rs index 1010a3f..0fc024a 100644 --- a/src/rootfs.rs +++ b/src/rootfs.rs @@ -54,10 +54,7 @@ pub fn find_init_binary() -> Result { } // 3. 
In PATH - if let Ok(output) = std::process::Command::new("which") - .arg("clone-init") - .output() - { + if let Ok(output) = std::process::Command::new("which").arg("clone-init").output() { if output.status.success() { let path = String::from_utf8_lossy(&output.stdout).trim().to_string(); if !path.is_empty() { @@ -92,10 +89,7 @@ pub fn find_agent_binary() -> Option { } } } - if let Ok(output) = std::process::Command::new("which") - .arg("clone-agent") - .output() - { + if let Ok(output) = std::process::Command::new("which").arg("clone-agent").output() { if output.status.success() { let path = String::from_utf8_lossy(&output.stdout).trim().to_string(); if !path.is_empty() { @@ -113,9 +107,7 @@ pub fn find_agent_binary() -> Option { /// can load them before switching root into the real rootfs. fn embed_kernel_modules(cpio: &mut Vec) { let uname = match std::process::Command::new("uname").arg("-r").output() { - Ok(o) if o.status.success() => { - String::from_utf8_lossy(&o.stdout).trim().to_string() - } + Ok(o) if o.status.success() => String::from_utf8_lossy(&o.stdout).trim().to_string(), _ => return, }; @@ -181,13 +173,20 @@ fn embed_kernel_modules(cpio: &mut Vec) { /// /// Returns the initrd contents as a byte vector. 
pub fn generate_initrd(init_binary: &Path) -> Result> { - let init_data = - fs::read(init_binary).with_context(|| format!("Failed to read {}", init_binary.display()))?; + let init_data = fs::read(init_binary).with_context(|| format!("Failed to read {}", init_binary.display()))?; let mut cpio = Vec::new(); // Create directory entries: /dev, /proc, /sys, /mnt - for dir in &["/dev", "/proc", "/sys", "/mnt", "/mnt/root", "/mnt/merged", "/mnt/overlay"] { + for dir in &[ + "/dev", + "/proc", + "/sys", + "/mnt", + "/mnt/root", + "/mnt/merged", + "/mnt/overlay", + ] { cpio_write_entry(&mut cpio, dir, 0o040755, &[]); } @@ -215,10 +214,7 @@ pub fn generate_initrd(init_binary: &Path) -> Result> { match fs::read(&agent_path) { Ok(agent_data) => { cpio_write_entry(&mut cpio, "/clone-agent", 0o100755, &agent_data); - tracing::info!( - "Embedded clone-agent in initrd ({} bytes)", - agent_data.len() - ); + tracing::info!("Embedded clone-agent in initrd ({} bytes)", agent_data.len()); } Err(e) => { tracing::warn!("Failed to read clone-agent at {}: {e}", agent_path.display()); diff --git a/src/rootfs_create.rs b/src/rootfs_create.rs index 9d46086..f0cbc4e 100644 --- a/src/rootfs_create.rs +++ b/src/rootfs_create.rs @@ -42,20 +42,17 @@ pub fn create_rootfs(source: &RootfsSource, size: &str, output: &str) -> Result< println!("Creating rootfs image: {output} ({size})"); // 1. Create sparse image - run_cmd("truncate", &["-s", &size_bytes.to_string(), output]) - .context("Failed to create sparse image")?; + run_cmd("truncate", &["-s", &size_bytes.to_string(), output]).context("Failed to create sparse image")?; // 2. Format with ext4 println!("Formatting with ext4..."); - run_cmd("mkfs.ext4", &["-q", "-F", "-L", "clone-rootfs", output]) - .context("Failed to format image with ext4")?; + run_cmd("mkfs.ext4", &["-q", "-F", "-L", "clone-rootfs", output]).context("Failed to format image with ext4")?; // 3. 
Mount via loop device let mount_dir = format!("/tmp/clone-rootfs-{}", std::process::id()); std::fs::create_dir_all(&mount_dir)?; - run_cmd("mount", &["-o", "loop", output, &mount_dir]) - .context("Failed to mount image (are you root?)")?; + run_cmd("mount", &["-o", "loop", output, &mount_dir]).context("Failed to mount image (are you root?)")?; // From here on, ensure we unmount on error let result = populate_rootfs(source, &mount_dir); @@ -90,8 +87,7 @@ fn populate_rootfs(source: &RootfsSource, mount_dir: &str) -> Result<()> { }, RootfsSource::FromDir(dir) => { println!("Copying from directory: {dir}"); - run_cmd("cp", &["-a", &format!("{dir}/."), mount_dir]) - .context("Failed to copy directory contents")?; + run_cmd("cp", &["-a", &format!("{dir}/."), mount_dir]).context("Failed to copy directory contents")?; Ok(()) } RootfsSource::FromDocker(image) => import_docker(image, mount_dir), @@ -132,8 +128,7 @@ fn bootstrap_alpine(mount_dir: &str, release: Option<&str>) -> Result<()> { // Extract into mount dir println!("Extracting..."); - run_cmd("tar", &["xzf", &tarball_path, "-C", mount_dir]) - .context("Failed to extract Alpine minirootfs")?; + run_cmd("tar", &["xzf", &tarball_path, "-C", mount_dir]).context("Failed to extract Alpine minirootfs")?; let _ = std::fs::remove_file(&tarball_path); @@ -169,8 +164,11 @@ fn bootstrap_ubuntu(mount_dir: &str, suite: &str) -> Result<()> { println!("Bootstrapping {suite} with debootstrap..."); // Check that debootstrap is available - if Command::new("which").arg("debootstrap").output() - .map(|o| !o.status.success()).unwrap_or(true) + if Command::new("which") + .arg("debootstrap") + .output() + .map(|o| !o.status.success()) + .unwrap_or(true) { anyhow::bail!( "debootstrap is not installed. Install it with:\n \ @@ -180,12 +178,10 @@ fn bootstrap_ubuntu(mount_dir: &str, suite: &str) -> Result<()> { // Use DEBOOTSTRAP_MIRROR env var if set, otherwise use a fast US mirror. 
// Operators can set DEBOOTSTRAP_MIRROR for local/regional mirrors. - let mirror = std::env::var("DEBOOTSTRAP_MIRROR") - .unwrap_or_else(|_| "http://archive.ubuntu.com/ubuntu".to_string()); + let mirror = std::env::var("DEBOOTSTRAP_MIRROR").unwrap_or_else(|_| "http://archive.ubuntu.com/ubuntu".to_string()); println!("Using mirror: {mirror}"); - run_cmd("debootstrap", &["--variant=minbase", suite, mount_dir, &mirror]) - .context("debootstrap failed")?; + run_cmd("debootstrap", &["--variant=minbase", suite, mount_dir, &mirror]).context("debootstrap failed")?; Ok(()) } @@ -195,8 +191,7 @@ fn import_docker(image: &str, mount_dir: &str) -> Result<()> { println!("Importing Docker image: {image}"); // Pull the image - run_cmd("docker", &["pull", image]) - .context("Failed to pull Docker image")?; + run_cmd("docker", &["pull", image]).context("Failed to pull Docker image")?; // Create a container (don't start it) let output = Command::new("docker") @@ -212,7 +207,12 @@ fn import_docker(image: &str, mount_dir: &str) -> Result<()> { // Export the container filesystem let tarball = format!("/tmp/clone-docker-{}.tar", std::process::id()); - let export_result = run_cmd_piped("docker", &["export", &container_id], "tar", &["xf", "-", "-C", mount_dir]); + let export_result = run_cmd_piped( + "docker", + &["export", &container_id], + "tar", + &["xf", "-", "-C", mount_dir], + ); // Clean up container let _ = run_cmd("docker", &["rm", &container_id]); @@ -339,10 +339,7 @@ WantedBy=multi-user.target ); // Mask the default serial-getty to avoid the 90s udev wait - let _ = std::os::unix::fs::symlink( - "/dev/null", - format!("{service_dir}/serial-getty@ttyS0.service"), - ); + let _ = std::os::unix::fs::symlink("/dev/null", format!("{service_dir}/serial-getty@ttyS0.service")); Ok(()) } @@ -359,10 +356,7 @@ fn configure_openrc_console(mount_dir: &str) -> Result<()> { // Enable it let runlevel_dir = format!("{mount_dir}/etc/runlevels/default"); let _ = 
std::fs::create_dir_all(&runlevel_dir); - let _ = std::os::unix::fs::symlink( - "/etc/init.d/ttyS0", - format!("{runlevel_dir}/ttyS0"), - ); + let _ = std::os::unix::fs::symlink("/etc/init.d/ttyS0", format!("{runlevel_dir}/ttyS0")); Ok(()) } @@ -380,9 +374,7 @@ fn parse_size(size: &str) -> Result { (size, 1u64) }; - let num: u64 = num_str - .parse() - .with_context(|| format!("Invalid size: {size}"))?; + let num: u64 = num_str.parse().with_context(|| format!("Invalid size: {size}"))?; Ok(num * multiplier) } diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 992aee2..bd9cb1b 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -223,8 +223,8 @@ impl IoUringBlockIo { pub fn new(file: &File, queue_depth: u32) -> anyhow::Result { use std::os::unix::io::AsRawFd; - let ring = io_uring::IoUring::new(queue_depth) - .map_err(|e| anyhow::anyhow!("Failed to create io_uring: {e}"))?; + let ring = + io_uring::IoUring::new(queue_depth).map_err(|e| anyhow::anyhow!("Failed to create io_uring: {e}"))?; let fd = file.as_raw_fd(); tracing::info!(fd, queue_depth, "io_uring block I/O engine initialized"); @@ -236,14 +236,10 @@ impl IoUringBlockIo { /// /// Reads `len` bytes at `offset` into `buf`. pub fn read_at(&mut self, buf: &mut [u8], offset: u64) -> anyhow::Result { - let read_e = io_uring::opcode::Read::new( - io_uring::types::Fd(self.fd), - buf.as_mut_ptr(), - buf.len() as u32, - ) - .offset(offset) - .build() - .user_data(0x01); + let read_e = io_uring::opcode::Read::new(io_uring::types::Fd(self.fd), buf.as_mut_ptr(), buf.len() as u32) + .offset(offset) + .build() + .user_data(0x01); unsafe { self.ring @@ -254,7 +250,10 @@ impl IoUringBlockIo { self.ring.submit_and_wait(1)?; - let cqe = self.ring.completion().next() + let cqe = self + .ring + .completion() + .next() .ok_or_else(|| anyhow::anyhow!("io_uring: no completion entry"))?; let result = cqe.result(); @@ -272,14 +271,10 @@ impl IoUringBlockIo { /// /// Writes `buf` at `offset`. 
pub fn write_at(&mut self, buf: &[u8], offset: u64) -> anyhow::Result { - let write_e = io_uring::opcode::Write::new( - io_uring::types::Fd(self.fd), - buf.as_ptr(), - buf.len() as u32, - ) - .offset(offset) - .build() - .user_data(0x02); + let write_e = io_uring::opcode::Write::new(io_uring::types::Fd(self.fd), buf.as_ptr(), buf.len() as u32) + .offset(offset) + .build() + .user_data(0x02); unsafe { self.ring @@ -290,7 +285,10 @@ impl IoUringBlockIo { self.ring.submit_and_wait(1)?; - let cqe = self.ring.completion().next() + let cqe = self + .ring + .completion() + .next() .ok_or_else(|| anyhow::anyhow!("io_uring: no completion entry"))?; let result = cqe.result(); @@ -306,11 +304,9 @@ impl IoUringBlockIo { /// Submit a fsync operation and wait for completion. pub fn fsync(&mut self) -> anyhow::Result<()> { - let fsync_e = io_uring::opcode::Fsync::new( - io_uring::types::Fd(self.fd), - ) - .build() - .user_data(0x03); + let fsync_e = io_uring::opcode::Fsync::new(io_uring::types::Fd(self.fd)) + .build() + .user_data(0x03); unsafe { self.ring @@ -321,7 +317,10 @@ impl IoUringBlockIo { self.ring.submit_and_wait(1)?; - let cqe = self.ring.completion().next() + let cqe = self + .ring + .completion() + .next() .ok_or_else(|| anyhow::anyhow!("io_uring: no completion entry"))?; let result = cqe.result(); diff --git a/src/storage/qcow2.rs b/src/storage/qcow2.rs index be55882..fc69e22 100644 --- a/src/storage/qcow2.rs +++ b/src/storage/qcow2.rs @@ -90,7 +90,7 @@ impl Qcow2Header { .context("seeking to start of QCOW2 file")?; let mut buf = [0u8; 104]; // v3 header size - // Read at least v2 header bytes. + // Read at least v2 header bytes. 
file.read_exact(&mut buf[..72]) .context("reading QCOW2 header (first 72 bytes)")?; @@ -300,8 +300,8 @@ impl Qcow2File { .open(path) .with_context(|| format!("opening QCOW2 file {}", path.display()))?; - let header = Qcow2Header::read_from(&mut file) - .with_context(|| format!("parsing QCOW2 header in {}", path.display()))?; + let header = + Qcow2Header::read_from(&mut file).with_context(|| format!("parsing QCOW2 header in {}", path.display()))?; tracing::info!( path = %path.display(), @@ -313,14 +313,13 @@ impl Qcow2File { ); // Read L1 table. - let l1_table = Self::read_table(&mut file, header.l1_table_offset, header.l1_size as usize) - .context("reading L1 table")?; + let l1_table = + Self::read_table(&mut file, header.l1_table_offset, header.l1_size as usize).context("reading L1 table")?; // Read refcount table. let rc_entries = (header.refcount_table_clusters as u64 * header.cluster_size()) / 8; - let refcount_table = - Self::read_table(&mut file, header.refcount_table_offset, rc_entries as usize) - .context("reading refcount table")?; + let refcount_table = Self::read_table(&mut file, header.refcount_table_offset, rc_entries as usize) + .context("reading refcount table")?; // Determine next allocation offset: end of the file, rounded up to cluster boundary. let file_len = file.seek(SeekFrom::End(0))?; @@ -360,12 +359,7 @@ impl Qcow2File { /// - `virtual_size`: virtual disk size in bytes. /// - `cluster_bits`: log2 of cluster size (typically 16 for 64 KB). /// - `backing_file`: optional path to a backing file. - pub fn create( - path: &Path, - virtual_size: u64, - cluster_bits: u32, - backing_file: Option<&Path>, - ) -> Result { + pub fn create(path: &Path, virtual_size: u64, cluster_bits: u32, backing_file: Option<&Path>) -> Result { ensure!( (9..=21).contains(&cluster_bits), "cluster_bits {cluster_bits} out of valid range 9..21" @@ -433,8 +427,7 @@ impl Qcow2File { }; // Create the file. 
- let mut file = File::create(path) - .with_context(|| format!("creating QCOW2 file {}", path.display()))?; + let mut file = File::create(path).with_context(|| format!("creating QCOW2 file {}", path.display()))?; // Write backing file path into cluster 0 if specified. if let Some(backing_path) = backing_file { @@ -503,9 +496,7 @@ impl Qcow2File { /// /// Unallocated regions return zeros (or data from the backing file if present). pub fn read_at(&mut self, offset: u64, buf: &mut [u8]) -> Result<()> { - let end = offset - .checked_add(buf.len() as u64) - .context("read offset overflow")?; + let end = offset.checked_add(buf.len() as u64).context("read offset overflow")?; ensure!( end <= self.header.size, "read past end of virtual disk: offset={offset} len={} size={}", @@ -559,9 +550,7 @@ impl Qcow2File { /// /// Allocates new clusters as needed (copy-on-write). pub fn write_at(&mut self, offset: u64, data: &[u8]) -> Result<()> { - let end = offset - .checked_add(data.len() as u64) - .context("write offset overflow")?; + let end = offset.checked_add(data.len() as u64).context("write offset overflow")?; ensure!( end <= self.header.size, "write past end of virtual disk: offset={offset} len={} size={}", @@ -739,12 +728,7 @@ impl Qcow2File { } /// Get an L2 entry, loading the L2 table into the cache if necessary. - fn get_l2_entry( - &mut self, - l1_index: u32, - l2_table_offset: u64, - l2_index: usize, - ) -> Result { + fn get_l2_entry(&mut self, l1_index: u32, l2_table_offset: u64, l2_index: usize) -> Result { self.ensure_l2_cached(l1_index, l2_table_offset)?; let entry = self.l2_cache.get(&l1_index).unwrap(); Ok(entry.entries[l2_index]) @@ -809,9 +793,7 @@ impl Qcow2File { // Read L2 table from disk. 
let n_entries = self.header.l2_entries_per_table() as usize; let entries = Self::read_table(&mut self.file, l2_table_offset, n_entries) - .with_context(|| { - format!("reading L2 table at offset {l2_table_offset} (L1 index {l1_index})") - })?; + .with_context(|| format!("reading L2 table at offset {l2_table_offset} (L1 index {l1_index})"))?; self.access_counter += 1; self.l2_cache.insert( @@ -829,11 +811,7 @@ impl Qcow2File { /// Evict the least-recently-used L2 cache entry. fn evict_l2_entry(&mut self) -> Result<()> { - let victim = self - .l2_cache - .iter() - .min_by_key(|(_, e)| e.last_access) - .map(|(k, _)| *k); + let victim = self.l2_cache.iter().min_by_key(|(_, e)| e.last_access).map(|(k, _)| *k); if let Some(idx) = victim { // Flush if dirty. @@ -943,9 +921,7 @@ impl Qcow2File { #[allow(dead_code)] pub fn refcount_increment(&mut self, cluster_offset: u64) -> Result { let current = self.get_refcount(cluster_offset)?; - let new_rc = current - .checked_add(1) - .context("refcount overflow")?; + let new_rc = current.checked_add(1).context("refcount overflow")?; self.set_refcount(cluster_offset, new_rc)?; Ok(new_rc) } @@ -954,7 +930,10 @@ impl Qcow2File { #[allow(dead_code)] pub fn refcount_decrement(&mut self, cluster_offset: u64) -> Result { let current = self.get_refcount(cluster_offset)?; - ensure!(current > 0, "cannot decrement refcount below 0 for cluster at offset {cluster_offset}"); + ensure!( + current > 0, + "cannot decrement refcount below 0 for cluster at offset {cluster_offset}" + ); let new_rc = current - 1; self.set_refcount(cluster_offset, new_rc)?; Ok(new_rc) @@ -981,8 +960,7 @@ impl Qcow2File { /// Write the L1 table to disk. 
fn write_l1_table(&mut self) -> Result<()> { - self.file - .seek(SeekFrom::Start(self.header.l1_table_offset))?; + self.file.seek(SeekFrom::Start(self.header.l1_table_offset))?; for &entry in &self.l1_table { self.file.write_all(&entry.to_be_bytes())?; } @@ -1015,8 +993,7 @@ impl Qcow2File { self.header.write_to(&mut self.file)?; } - self.file - .seek(SeekFrom::Start(self.header.refcount_table_offset))?; + self.file.seek(SeekFrom::Start(self.header.refcount_table_offset))?; for &entry in &self.refcount_table { self.file.write_all(&entry.to_be_bytes())?; } @@ -1051,7 +1028,7 @@ fn align_up(value: u64, align: u64) -> u64 { /// Integer division, rounding up. fn div_round_up(a: u64, b: u64) -> u64 { - (a + b - 1) / b + a.div_ceil(b) } /// Resolve a backing file path relative to the directory containing the overlay image. @@ -1060,9 +1037,7 @@ fn resolve_backing_path(overlay_path: &Path, backing_name: &str) -> Result Result Result { // Try to detect format by reading magic. - let mut f = File::open(path) - .with_context(|| format!("opening backing file {}", path.display()))?; + let mut f = File::open(path).with_context(|| format!("opening backing file {}", path.display()))?; let mut magic = [0u8; 4]; let is_qcow2 = f.read_exact(&mut magic).is_ok() && magic == *b"QFI\xfb"; drop(f); @@ -1095,10 +1069,7 @@ mod tests { use super::*; /// Helper: create a QCOW2 image in a temp directory and return (Qcow2File, tempdir). - fn create_test_image( - virtual_size: u64, - cluster_bits: u32, - ) -> (Qcow2File, tempfile::TempDir) { + fn create_test_image(virtual_size: u64, cluster_bits: u32) -> (Qcow2File, tempfile::TempDir) { let dir = tempfile::tempdir().unwrap(); let path = dir.path().join("test.qcow2"); let q = Qcow2File::create(&path, virtual_size, cluster_bits, None).unwrap(); @@ -1209,9 +1180,7 @@ mod tests { let (mut q, _dir) = create_test_image(virtual_size, cluster_bits); // Write a large block spanning many clusters. 
- let data: Vec = (0..cluster_size as usize * 10) - .map(|i| (i % 256) as u8) - .collect(); + let data: Vec = (0..cluster_size as usize * 10).map(|i| (i % 256) as u8).collect(); q.write_at(0, &data).unwrap(); let mut buf = vec![0u8; data.len()]; @@ -1312,13 +1281,7 @@ mod tests { let overlay_path = dir.path().join("overlay.qcow2"); let cluster_bits = 12u32; let virtual_size = 8192u64; - let mut q = Qcow2File::create( - &overlay_path, - virtual_size, - cluster_bits, - Some(&backing_path), - ) - .unwrap(); + let mut q = Qcow2File::create(&overlay_path, virtual_size, cluster_bits, Some(&backing_path)).unwrap(); // Reading unallocated clusters should return backing file data. let mut buf = vec![0u8; 100]; @@ -1355,13 +1318,7 @@ mod tests { // Create overlay referencing base. let overlay_path = dir.path().join("overlay.qcow2"); - let mut overlay = Qcow2File::create( - &overlay_path, - virtual_size, - cluster_bits, - Some(&base_path), - ) - .unwrap(); + let mut overlay = Qcow2File::create(&overlay_path, virtual_size, cluster_bits, Some(&base_path)).unwrap(); // Read unallocated from overlay → should get base data. let mut buf = vec![0u8; 16]; diff --git a/src/virtio/balloon.rs b/src/virtio/balloon.rs index 391423b..3510d9f 100644 --- a/src/virtio/balloon.rs +++ b/src/virtio/balloon.rs @@ -116,11 +116,7 @@ impl VirtioBalloon { if old != num_pages { self.config.num_pages.store(num_pages, Ordering::Release); self.config_interrupt_pending = true; - tracing::info!( - old_pages = old, - new_pages = num_pages, - "balloon target updated" - ); + tracing::info!(old_pages = old, new_pages = num_pages, "balloon target updated"); } } @@ -140,16 +136,16 @@ impl VirtioBalloon { continue; } - // For VMs > 3GB, memory is split around the MMIO hole (3-4GB). - // Guest addresses above 4GB map to host mmap offset 3GB+. 
- let host_offset = if guest_addr >= MMIO_HOLE_END { - MMIO_HOLE_START + (guest_addr - MMIO_HOLE_END) - } else { - guest_addr - }; - #[cfg(target_os = "linux")] { + // For VMs > 3GB, memory is split around the MMIO hole (3-4GB). + // Guest addresses above 4GB map to host mmap offset 3GB+. + let host_offset = if guest_addr >= MMIO_HOLE_END { + MMIO_HOLE_START + (guest_addr - MMIO_HOLE_END) + } else { + guest_addr + }; + // SAFETY: host_offset is within the mmap region after MMIO hole translation. let ret = unsafe { libc::madvise( @@ -257,12 +253,8 @@ impl VirtioDevice for VirtioBalloon { // Read current config, overlay the write, store back. let mut config_bytes = [0u8; 8]; - config_bytes[0..4].copy_from_slice( - &self.config.num_pages.load(Ordering::Relaxed).to_le_bytes(), - ); - config_bytes[4..8].copy_from_slice( - &self.config.actual.load(Ordering::Relaxed).to_le_bytes(), - ); + config_bytes[0..4].copy_from_slice(&self.config.num_pages.load(Ordering::Relaxed).to_le_bytes()); + config_bytes[4..8].copy_from_slice(&self.config.actual.load(Ordering::Relaxed).to_le_bytes()); let end = std::cmp::min(offset + data.len(), 8); config_bytes[offset..end].copy_from_slice(&data[..end - offset]); @@ -296,12 +288,7 @@ impl VirtioDevice for VirtioBalloon { Ok(()) } - fn process_descriptor_chain( - &mut self, - queue_index: u16, - chain: &DescriptorChain, - vq: &Virtqueue, - ) -> u32 { + fn process_descriptor_chain(&mut self, queue_index: u16, chain: &DescriptorChain, vq: &Virtqueue) -> u32 { // Balloon descriptors contain arrays of u32 PFNs. // All descriptors in the chain are readable (guest provides PFN data). 
let mut pfns = Vec::new(); @@ -316,9 +303,7 @@ impl VirtioDevice for VirtioBalloon { let count = data.len() / 4; for i in 0..count { let off = i * 4; - let pfn = u32::from_le_bytes([ - data[off], data[off + 1], data[off + 2], data[off + 3], - ]); + let pfn = u32::from_le_bytes([data[off], data[off + 1], data[off + 2], data[off + 3]]); pfns.push(pfn); } } @@ -337,7 +322,9 @@ impl VirtioDevice for VirtioBalloon { 0 // Balloon doesn't write data back to descriptors } - fn as_any_mut(&mut self) -> &mut dyn std::any::Any { self } + fn as_any_mut(&mut self) -> &mut dyn std::any::Any { + self + } fn reset(&mut self) { self.config.num_pages.store(0, Ordering::Release); self.config.actual.store(0, Ordering::Release); @@ -357,10 +344,14 @@ impl VirtioDevice for VirtioBalloon { } fn restore_state(&mut self, data: &[u8]) -> anyhow::Result<()> { - if data.is_empty() { return Ok(()); } + if data.is_empty() { + return Ok(()); + } let state: serde_json::Value = serde_json::from_slice(data)?; if let Some(v) = state.get("num_pages").and_then(|v| v.as_u64()) { - self.config.num_pages.store(v as u32, std::sync::atomic::Ordering::Relaxed); + self.config + .num_pages + .store(v as u32, std::sync::atomic::Ordering::Relaxed); } if let Some(v) = state.get("actual").and_then(|v| v.as_u64()) { self.config.actual.store(v as u32, std::sync::atomic::Ordering::Relaxed); diff --git a/src/virtio/block.rs b/src/virtio/block.rs index 3489bd6..443e20d 100644 --- a/src/virtio/block.rs +++ b/src/virtio/block.rs @@ -79,7 +79,7 @@ enum BlockBackend { /// Raw disk image — direct seek/read/write. Raw(File), /// QCOW2 disk image — translated through L1/L2 tables. - Qcow2(Qcow2File), + Qcow2(Box), } /// A virtio-block device backed by a disk image file. @@ -115,13 +115,7 @@ impl VirtioBlock { /// Create a new virtio-block device from an already-opened raw file. /// /// `capacity_sectors` is the disk size in 512-byte sectors. 
- pub fn new( - file: File, - path: PathBuf, - format: DiskFormat, - readonly: bool, - capacity_sectors: u64, - ) -> Self { + pub fn new(file: File, path: PathBuf, format: DiskFormat, readonly: bool, capacity_sectors: u64) -> Self { Self { backend: BlockBackend::Raw(file), path, @@ -159,7 +153,7 @@ impl VirtioBlock { ); Ok(Self { - backend: BlockBackend::Qcow2(qcow2), + backend: BlockBackend::Qcow2(Box::new(qcow2)), path, format: disk.format, readonly, @@ -192,12 +186,7 @@ impl VirtioBlock { /// In the full MMIO wiring, the transport layer parses the virtio_blk_req /// header from the descriptor chain and calls these methods. We expose /// them publicly so the transport can drive I/O. - pub fn process_request( - &mut self, - request_type: u32, - sector: u64, - data: &mut [u8], - ) -> u8 { + pub fn process_request(&mut self, request_type: u32, sector: u64, data: &mut [u8]) -> u8 { match request_type { VIRTIO_BLK_T_IN => self.do_read(sector, data), VIRTIO_BLK_T_OUT => self.do_write(sector, data), @@ -429,12 +418,7 @@ impl VirtioDevice for VirtioBlock { } } - fn process_descriptor_chain( - &mut self, - _queue_index: u16, - chain: &DescriptorChain, - vq: &Virtqueue, - ) -> u32 { + fn process_descriptor_chain(&mut self, _queue_index: u16, chain: &DescriptorChain, vq: &Virtqueue) -> u32 { // A virtio-block request is: // Descriptor 0: readable — virtio_blk_req header (type: u32, reserved: u32, sector: u64) // Descriptor 1..N-1: data buffer(s) — readable for writes, writable for reads @@ -459,13 +443,17 @@ impl VirtioDevice for VirtioBlock { } }; - let request_type = u32::from_le_bytes([ - header_data[0], header_data[1], header_data[2], header_data[3], - ]); + let request_type = u32::from_le_bytes([header_data[0], header_data[1], header_data[2], header_data[3]]); // bytes 4-7: reserved let sector = u64::from_le_bytes([ - header_data[8], header_data[9], header_data[10], header_data[11], - header_data[12], header_data[13], header_data[14], header_data[15], + header_data[8], 
+ header_data[9], + header_data[10], + header_data[11], + header_data[12], + header_data[13], + header_data[14], + header_data[15], ]); let mut total_written: u32 = 0; @@ -576,7 +564,9 @@ impl VirtioDevice for VirtioBlock { total_written } - fn as_any_mut(&mut self) -> &mut dyn std::any::Any { self } + fn as_any_mut(&mut self) -> &mut dyn std::any::Any { + self + } fn reset(&mut self) { self.acked_features_low = 0; self.acked_features_high = 0; @@ -595,7 +585,9 @@ impl VirtioDevice for VirtioBlock { } fn restore_state(&mut self, data: &[u8]) -> anyhow::Result<()> { - if data.is_empty() { return Ok(()); } + if data.is_empty() { + return Ok(()); + } let state: serde_json::Value = serde_json::from_slice(data)?; if let Some(v) = state.get("acked_features_low").and_then(|v| v.as_u64()) { self.acked_features_low = v as u32; diff --git a/src/virtio/fs.rs b/src/virtio/fs.rs index 197419b..ee50061 100644 --- a/src/virtio/fs.rs +++ b/src/virtio/fs.rs @@ -389,9 +389,9 @@ struct InodeMap { } impl InodeMap { - fn new(root_dir: &PathBuf) -> Self { + fn new(root_dir: &std::path::Path) -> Self { let mut inodes = HashMap::new(); - inodes.insert(FUSE_ROOT_ID, root_dir.clone()); + inodes.insert(FUSE_ROOT_ID, root_dir.to_path_buf()); Self { next_inode: 2, next_fh: 1, @@ -445,7 +445,7 @@ impl InodeMap { } } - fn rename_path(&mut self, old_path: &PathBuf, new_path: &PathBuf) { + fn rename_path(&mut self, old_path: &std::path::Path, new_path: &std::path::Path) { let mut ino_to_update = None; for (&ino, path) in &self.inodes { if path == old_path { @@ -454,7 +454,7 @@ impl InodeMap { } } if let Some(ino) = ino_to_update { - self.inodes.insert(ino, new_path.clone()); + self.inodes.insert(ino, new_path.to_path_buf()); } } @@ -532,9 +532,7 @@ impl VirtioFs { FUSE_RENAME2 => self.handle_rename2(header, body), FUSE_STATFS => self.handle_statfs(header), FUSE_FLUSH | FUSE_FSYNC | FUSE_FSYNCDIR | FUSE_SYNCFS => self.handle_flush(header), - FUSE_FORGET => { - Vec::new() - } + FUSE_FORGET => 
Vec::new(), FUSE_BATCH_FORGET => { // No response needed for batch forget Vec::new() @@ -706,8 +704,16 @@ impl VirtioFs { // Handle chown (lchown so symlinks aren't followed) if setattr.valid & (FATTR_UID | FATTR_GID) != 0 { - let uid = if setattr.valid & FATTR_UID != 0 { setattr.uid } else { u32::MAX }; - let gid = if setattr.valid & FATTR_GID != 0 { setattr.gid } else { u32::MAX }; + let uid = if setattr.valid & FATTR_UID != 0 { + setattr.uid + } else { + u32::MAX + }; + let gid = if setattr.valid & FATTR_GID != 0 { + setattr.gid + } else { + u32::MAX + }; unsafe { let c_path = std::ffi::CString::new(path.to_str().unwrap_or("")).unwrap_or_default(); libc::lchown(c_path.as_ptr(), uid, gid); @@ -954,7 +960,7 @@ impl VirtioFs { buf.extend_from_slice(unsafe { as_bytes(&dirent) }); buf.extend_from_slice(name_bytes); let padding = padded_name_len - name_bytes.len(); - buf.extend(std::iter::repeat(0u8).take(padding)); + buf.extend(std::iter::repeat_n(0u8, padding)); } let hdr_size = std::mem::size_of::(); @@ -1064,10 +1070,7 @@ impl VirtioFs { // Create directory with mode use std::os::unix::fs::DirBuilderExt; - if let Err(e) = std::fs::DirBuilder::new() - .mode(mkdir_in.mode) - .create(&child_path) - { + if let Err(e) = std::fs::DirBuilder::new().mode(mkdir_in.mode).create(&child_path) { return self.make_error_response(header.unique, -errno_from_io(&e)); } @@ -1194,14 +1197,21 @@ impl VirtioFs { _padding: 0, _spare: [0; 6], }; - return self.make_response(header.unique, &statfs_out); + self.make_response(header.unique, &statfs_out) } #[cfg(not(target_os = "linux"))] { let statfs_out = FuseStatfsOut { - blocks: 1024 * 1024, bfree: 512 * 1024, bavail: 512 * 1024, - files: 1000000, ffree: 999000, bsize: 4096, namelen: 255, - frsize: 4096, _padding: 0, _spare: [0; 6], + blocks: 1024 * 1024, + bfree: 512 * 1024, + bavail: 512 * 1024, + files: 1000000, + ffree: 999000, + bsize: 4096, + namelen: 255, + frsize: 4096, + _padding: 0, + _spare: [0; 6], }; 
self.make_response(header.unique, &statfs_out) } @@ -1303,8 +1313,12 @@ impl VirtioFs { let mode = mknod_in.mode; // For regular files (S_IFREG) or mode 0 (default), create with File::create + // Cast needed: libc::S_IFMT type differs by platform (u16 macOS, u32 Linux). + #[allow(clippy::unnecessary_cast)] let file_type = mode & libc::S_IFMT as u32; - if file_type == libc::S_IFREG as u32 || file_type == 0 { + #[allow(clippy::unnecessary_cast)] + let is_regular = file_type == libc::S_IFREG as u32 || file_type == 0; + if is_regular { use std::os::unix::fs::OpenOptionsExt; match std::fs::OpenOptions::new() .write(true) @@ -1323,9 +1337,7 @@ impl VirtioFs { Ok(c) => c, Err(_) => return self.make_error_response(header.unique, -libc::EINVAL), }; - let ret = unsafe { - libc::mknod(c_path.as_ptr(), mode as libc::mode_t, mknod_in.rdev as libc::dev_t) - }; + let ret = unsafe { libc::mknod(c_path.as_ptr(), mode as libc::mode_t, mknod_in.rdev as libc::dev_t) }; if ret < 0 { return self.make_error_response(header.unique, -errno()); } @@ -1438,14 +1450,7 @@ impl VirtioFs { if getxattr_in.size == 0 { // Query the size needed - let ret = unsafe { - libc::lgetxattr( - c_path.as_ptr(), - c_name.as_ptr(), - std::ptr::null_mut(), - 0, - ) - }; + let ret = unsafe { libc::lgetxattr(c_path.as_ptr(), c_name.as_ptr(), std::ptr::null_mut(), 0) }; if ret < 0 { return self.make_error_response(header.unique, -errno()); } @@ -1566,9 +1571,7 @@ impl VirtioFs { if getxattr_in.size == 0 { // Query the size needed - let ret = unsafe { - libc::llistxattr(c_path.as_ptr(), std::ptr::null_mut(), 0) - }; + let ret = unsafe { libc::llistxattr(c_path.as_ptr(), std::ptr::null_mut(), 0) }; if ret < 0 { return self.make_error_response(header.unique, -errno()); } @@ -1580,13 +1583,7 @@ impl VirtioFs { } let mut buf = vec![0u8; getxattr_in.size as usize]; - let ret = unsafe { - libc::llistxattr( - c_path.as_ptr(), - buf.as_mut_ptr() as *mut libc::c_char, - buf.len(), - ) - }; + let ret = unsafe { 
libc::llistxattr(c_path.as_ptr(), buf.as_mut_ptr() as *mut libc::c_char, buf.len()) }; if ret < 0 { return self.make_error_response(header.unique, -errno()); } @@ -1720,16 +1717,12 @@ impl VirtioFs { use std::os::unix::io::AsRawFd; let fd = file.as_raw_fd(); - let ret = unsafe { - libc::lseek(fd, lseek_in.offset as libc::off_t, lseek_in.whence as i32) - }; + let ret = unsafe { libc::lseek(fd, lseek_in.offset as libc::off_t, lseek_in.whence as i32) }; if ret < 0 { return self.make_error_response(header.unique, -errno()); } - let out = FuseLseekOut { - offset: ret as u64, - }; + let out = FuseLseekOut { offset: ret as u64 }; self.make_response(header.unique, &out) } @@ -1776,14 +1769,16 @@ impl VirtioFs { Ok(c) => c, Err(_) => return self.make_error_response(header.unique, -libc::EINVAL), }; + // Use raw syscall for musl compatibility (musl lacks renameat2 wrapper) let ret = unsafe { - libc::renameat2( + libc::syscall( + libc::SYS_renameat2, libc::AT_FDCWD, c_old.as_ptr(), libc::AT_FDCWD, c_new.as_ptr(), flags, - ) + ) as i32 }; if ret < 0 { return self.make_error_response(header.unique, -errno()); @@ -1867,12 +1862,7 @@ impl VirtioDevice for VirtioFs { Ok(()) } - fn process_descriptor_chain( - &mut self, - _queue_index: u16, - chain: &DescriptorChain, - vq: &Virtqueue, - ) -> u32 { + fn process_descriptor_chain(&mut self, _queue_index: u16, chain: &DescriptorChain, vq: &Virtqueue) -> u32 { // Collect all readable data (FUSE request) let mut request_data = Vec::new(); for desc in &chain.descriptors { @@ -1917,7 +1907,9 @@ impl VirtioDevice for VirtioFs { written as u32 } - fn as_any_mut(&mut self) -> &mut dyn std::any::Any { self } + fn as_any_mut(&mut self) -> &mut dyn std::any::Any { + self + } fn reset(&mut self) { self.activated = false; } @@ -1954,7 +1946,9 @@ fn chown_to_caller(path: &std::path::Path, uid: u32, gid: u32) { #[cfg(target_os = "linux")] { if let Ok(c_path) = std::ffi::CString::new(path.to_str().unwrap_or("")) { - unsafe { 
libc::lchown(c_path.as_ptr(), uid, gid); } + unsafe { + libc::lchown(c_path.as_ptr(), uid, gid); + } } } #[cfg(not(target_os = "linux"))] @@ -2034,7 +2028,7 @@ mod tests { fs.read_config(0, &mut buf); assert_eq!(&buf[0..4], b"myfs"); assert_eq!(buf[4], 0); // null-padded - // num_request_queues = 1 + // num_request_queues = 1 let nrq = u32::from_le_bytes([buf[36], buf[37], buf[38], buf[39]]); assert_eq!(nrq, 1); } @@ -2129,10 +2123,7 @@ mod tests { // Open a real file let file_path = root.join("handle_test"); std::fs::write(&file_path, b"data").unwrap(); - let file = std::fs::OpenOptions::new() - .read(true) - .open(&file_path) - .unwrap(); + let file = std::fs::OpenOptions::new().read(true).open(&file_path).unwrap(); let fh = map.open_file(file); assert!(fh >= 1); diff --git a/src/virtio/mmio.rs b/src/virtio/mmio.rs index 7022117..2ef72ad 100644 --- a/src/virtio/mmio.rs +++ b/src/virtio/mmio.rs @@ -4,10 +4,10 @@ //! The guest reads/writes to MMIO offsets and this module translates //! them into calls on the underlying VirtioDevice trait. -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; -use std::sync::Arc; use std::sync::atomic::AtomicU32; +use std::sync::Arc; use crate::virtio::queue::Virtqueue; use crate::virtio::{status, VirtioDevice, MMIO_BASE, MMIO_STRIDE}; @@ -137,12 +137,7 @@ impl MmioTransport { /// /// The guest memory pointer and size are passed to the virtqueues so /// they can read/write descriptor chains in guest memory. - pub fn new_with_mem( - device: Box, - irq: u32, - guest_mem: *mut u8, - guest_mem_size: u64, - ) -> Self { + pub fn new_with_mem(device: Box, irq: u32, guest_mem: *mut u8, guest_mem_size: u64) -> Self { let queue_max_sizes = device.queue_max_sizes().to_vec(); let queues: Vec = queue_max_sizes .iter() @@ -183,7 +178,13 @@ impl MmioTransport { } /// Set guest memory with MMIO hole info for large VMs. 
- pub fn set_guest_memory_with_hole(&mut self, guest_mem: *mut u8, guest_mem_size: u64, hole_start: u64, hole_end: u64) { + pub fn set_guest_memory_with_hole( + &mut self, + guest_mem: *mut u8, + guest_mem_size: u64, + hole_start: u64, + hole_end: u64, + ) { self.guest_mem = guest_mem; self.guest_mem_size = guest_mem_size; if hole_start > 0 { @@ -261,8 +262,7 @@ impl MmioTransport { // All standard registers are 32-bit aligned reads. // Config space can be byte-granularity. if offset >= reg::CONFIG_SPACE { - self.device - .read_config(offset - reg::CONFIG_SPACE, data); + self.device.read_config(offset - reg::CONFIG_SPACE, data); return; } @@ -279,16 +279,8 @@ impl MmioTransport { reg::DEVICE_ID => self.device.device_type() as u32, reg::VENDOR_ID => VENDOR_ID, reg::DEVICE_FEATURES => self.device.features(self.device_features_sel), - reg::QUEUE_NUM_MAX => { - self.current_queue() - .map(|q| q.max_size as u32) - .unwrap_or(0) - } - reg::QUEUE_READY => { - self.current_queue() - .map(|q| u32::from(q.ready)) - .unwrap_or(0) - } + reg::QUEUE_NUM_MAX => self.current_queue().map(|q| q.max_size as u32).unwrap_or(0), + reg::QUEUE_READY => self.current_queue().map(|q| u32::from(q.ready)).unwrap_or(0), reg::INTERRUPT_STATUS => { // Merge transport interrupt_status with vhost interrupt bits. // @@ -323,8 +315,7 @@ impl MmioTransport { pub fn write(&mut self, offset: u64, data: &[u8]) { // Config space writes can be byte-granularity. 
if offset >= reg::CONFIG_SPACE { - self.device - .write_config(offset - reg::CONFIG_SPACE, data); + self.device.write_config(offset - reg::CONFIG_SPACE, data); return; } @@ -341,13 +332,10 @@ impl MmioTransport { self.device_features_sel = val; } reg::DRIVER_FEATURES => { - if self.device_status & status::FEATURES_OK == 0 - && self.device_status & status::DRIVER != 0 - { + if self.device_status & status::FEATURES_OK == 0 && self.device_status & status::DRIVER != 0 { // Driver is setting features before FEATURES_OK self.driver_features[self.driver_features_sel as usize & 1] = val; - self.device - .ack_features(self.driver_features_sel, val); + self.device.ack_features(self.driver_features_sel, val); } } reg::DRIVER_FEATURES_SEL => { @@ -373,9 +361,7 @@ impl MmioTransport { q.ready = val == 1; } // Sync the virtqueue state - if let (Some(qs), Some(vq)) = - (self.queues.get(sel), self.virtqueues.get_mut(sel)) - { + if let (Some(qs), Some(vq)) = (self.queues.get(sel), self.virtqueues.get_mut(sel)) { if val == 1 { vq.configure(qs.desc_addr, qs.avail_addr, qs.used_addr); vq.set_ready(true); @@ -396,7 +382,8 @@ impl MmioTransport { reg::INTERRUPT_ACK => { self.interrupt_status &= !val; // Also clear the bits in the vhost interrupt atomic - self.vhost_interrupt.fetch_and(!val, std::sync::atomic::Ordering::Release); + self.vhost_interrupt + .fetch_and(!val, std::sync::atomic::Ordering::Release); } reg::STATUS => { self.handle_status_write(val); @@ -461,10 +448,7 @@ impl MmioTransport { return; } - let mut vq = std::mem::replace( - &mut self.virtqueues[qi], - Virtqueue::new(0, std::ptr::null_mut(), 0), - ); + let mut vq = std::mem::replace(&mut self.virtqueues[qi], Virtqueue::new(0, std::ptr::null_mut(), 0)); if !vq.is_ready() { self.virtqueues[qi] = vq; @@ -472,7 +456,7 @@ impl MmioTransport { } let mut raised_interrupt = false; - let is_fs = self.device.device_type() == crate::virtio::DeviceType::Fs; + let _is_fs = self.device.device_type() == 
crate::virtio::DeviceType::Fs; while let Some(chain) = vq.pop_avail() { let bytes_written = self.device.process_descriptor_chain(queue_idx, &chain, &vq); @@ -525,7 +509,7 @@ impl MmioTransport { // virtio_net_hdr_v1 (12 bytes, all zeros = no offload) let hdr = [0u8; 12]; - let total_len = hdr.len() + frame.len(); + let _total_len = hdr.len() + frame.len(); let mut written = 0usize; let mut src_offset = 0usize; @@ -595,15 +579,18 @@ impl MmioTransport { // If the device was activated in the snapshot, re-activate it now. // This sets up vhost backends (vsock, net) with the restored queue addresses. if state.activated && !self.activated { - let queue_infos: Vec = self.queues.iter().map(|q| { - crate::virtio::QueueInfo { + let queue_infos: Vec = self + .queues + .iter() + .map(|q| crate::virtio::QueueInfo { size: q.size, desc_addr: q.desc_addr, avail_addr: q.avail_addr, used_addr: q.used_addr, - } - }).collect(); - self.device.prepare_activate(&queue_infos, self.guest_mem, self.guest_mem_size); + }) + .collect(); + self.device + .prepare_activate(&queue_infos, self.guest_mem, self.guest_mem_size); match self.device.activate() { Ok(()) => { self.activated = true; @@ -652,15 +639,18 @@ impl MmioTransport { // If driver sets DRIVER_OK and we haven't activated yet, do so. if new_bits & status::DRIVER_OK != 0 && !self.activated { // Pass queue configuration and guest memory info to the device - let queue_infos: Vec = self.queues.iter().map(|q| { - crate::virtio::QueueInfo { + let queue_infos: Vec = self + .queues + .iter() + .map(|q| crate::virtio::QueueInfo { size: q.size, desc_addr: q.desc_addr, avail_addr: q.avail_addr, used_addr: q.used_addr, - } - }).collect(); - self.device.prepare_activate(&queue_infos, self.guest_mem, self.guest_mem_size); + }) + .collect(); + self.device + .prepare_activate(&queue_infos, self.guest_mem, self.guest_mem_size); match self.device.activate() { Ok(()) => { @@ -723,7 +713,13 @@ impl MmioBus { } /// Set guest memory with MMIO hole info. 
- pub fn set_guest_memory_with_hole(&mut self, guest_mem: *mut u8, guest_mem_size: u64, hole_start: u64, hole_end: u64) { + pub fn set_guest_memory_with_hole( + &mut self, + guest_mem: *mut u8, + guest_mem_size: u64, + hole_start: u64, + hole_end: u64, + ) { self.guest_mem = guest_mem; self.guest_mem_size = guest_mem_size; for transport in &mut self.devices { @@ -738,10 +734,10 @@ impl MmioBus { let base = MMIO_BASE + (index as u64) * MMIO_STRIDE; let mut irq = crate::virtio::IRQ_BASE + index as u32; // Skip IRQ 8 (RTC on x86) to avoid genirq conflict - if irq >= 8 { irq += 1; } - let transport = MmioTransport::new_with_mem( - device, irq, self.guest_mem, self.guest_mem_size, - ); + if irq >= 8 { + irq += 1; + } + let transport = MmioTransport::new_with_mem(device, irq, self.guest_mem, self.guest_mem_size); self.devices.push(transport); tracing::info!( "Registered virtio {:?} at MMIO {base:#x}, IRQ {irq}", @@ -867,9 +863,7 @@ impl MmioBus { } let ring_idx = (used_idx % eq.size) as usize; - let desc_idx: u16 = unsafe { - *(gpa_to_hva(eq.avail_addr + 4 + ring_idx as u64 * 2) as *const u16) - }; + let desc_idx: u16 = unsafe { *(gpa_to_hva(eq.avail_addr + 4 + ring_idx as u64 * 2) as *const u16) }; let desc_gpa = eq.desc_addr + desc_idx as u64 * 16; let buf_gpa: u64 = unsafe { *(gpa_to_hva(desc_gpa) as *const u64) }; @@ -881,7 +875,9 @@ impl MmioBus { } // Write VIRTIO_VSOCK_EVENT_TRANSPORT_RESET (id = 0) - unsafe { *(gpa_to_hva(buf_gpa) as *mut u32) = 0u32.to_le(); } + unsafe { + *(gpa_to_hva(buf_gpa) as *mut u32) = 0u32.to_le(); + } // Update used ring entry let used_entry_gpa = eq.used_addr + 4 + ring_idx as u64 * 8; @@ -896,7 +892,9 @@ impl MmioBus { } // Set interrupt status so guest ISR processes the event - transport.vhost_interrupt.fetch_or(1, std::sync::atomic::Ordering::Release); + transport + .vhost_interrupt + .fetch_or(1, std::sync::atomic::Ordering::Release); tracing::info!("Injected vsock transport reset into event virtqueue"); return; @@ -1019,7 +1017,9 
@@ mod tests { Ok(()) } - fn as_any_mut(&mut self) -> &mut dyn std::any::Any { self } + fn as_any_mut(&mut self) -> &mut dyn std::any::Any { + self + } fn reset(&mut self) { self.reset_count += 1; self.activated = false; @@ -1087,10 +1087,7 @@ mod tests { // ACKNOWLEDGE | DRIVER write_u32(&mut transport, reg::STATUS, status::ACKNOWLEDGE | status::DRIVER); - assert_eq!( - read_u32(&transport, reg::STATUS), - status::ACKNOWLEDGE | status::DRIVER - ); + assert_eq!(read_u32(&transport, reg::STATUS), status::ACKNOWLEDGE | status::DRIVER); // ACKNOWLEDGE | DRIVER | FEATURES_OK write_u32( @@ -1397,10 +1394,7 @@ mod tests { // Verify via read let mut buf = [0u8; 4]; bus.handle_read(MMIO_BASE + reg::STATUS, &mut buf); - assert_eq!( - u32::from_le_bytes(buf), - status::ACKNOWLEDGE | status::DRIVER - ); + assert_eq!(u32::from_le_bytes(buf), status::ACKNOWLEDGE | status::DRIVER); } #[test] diff --git a/src/virtio/mod.rs b/src/virtio/mod.rs index a353df8..a9f66f0 100644 --- a/src/virtio/mod.rs +++ b/src/virtio/mod.rs @@ -128,14 +128,17 @@ pub trait VirtioDevice: Send { /// Downcast to concrete type for device-specific configuration. fn as_any_mut(&mut self) -> &mut dyn std::any::Any; - /// Snapshot the device-specific state as an opaque byte vector. /// /// The default implementation returns an empty vector (no state to save). - fn snapshot_state(&self) -> Vec { Vec::new() } + fn snapshot_state(&self) -> Vec { + Vec::new() + } /// Restore device-specific state from a previously-snapshotted byte vector. /// /// The default implementation accepts any input and does nothing. 
- fn restore_state(&mut self, _data: &[u8]) -> anyhow::Result<()> { Ok(()) } + fn restore_state(&mut self, _data: &[u8]) -> anyhow::Result<()> { + Ok(()) + } } diff --git a/src/virtio/net.rs b/src/virtio/net.rs index b640845..a5cbe69 100644 --- a/src/virtio/net.rs +++ b/src/virtio/net.rs @@ -10,7 +10,7 @@ use std::os::unix::io::RawFd; -use super::queue::{DescriptorChain, Virtqueue, VRING_DESC_F_WRITE, VRING_DESC_F_NEXT}; +use super::queue::{DescriptorChain, Virtqueue, VRING_DESC_F_NEXT, VRING_DESC_F_WRITE}; use super::{DeviceType, QueueInfo, VirtioDevice}; // --- Feature bits (virtio spec 5.1.3) --- @@ -44,16 +44,18 @@ const VIRTIO_NET_HDR_SIZE: usize = 12; // --- vhost ioctl numbers (Linux) --- #[cfg(target_os = "linux")] mod vhost { - pub const SET_OWNER: libc::c_ulong = 0xAF01; - pub const GET_FEATURES: libc::c_ulong = 0x8008_AF00; - pub const SET_FEATURES: libc::c_ulong = 0x4008_AF00; - pub const SET_MEM_TABLE: libc::c_ulong = 0x4008_AF03; - pub const SET_VRING_NUM: libc::c_ulong = 0x4008_AF10; - pub const SET_VRING_ADDR: libc::c_ulong = 0x4028_AF11; - pub const SET_VRING_BASE: libc::c_ulong = 0x4008_AF12; - pub const SET_VRING_KICK: libc::c_ulong = 0x4008_AF20; - pub const SET_VRING_CALL: libc::c_ulong = 0x4008_AF21; - pub const NET_SET_BACKEND: libc::c_ulong = 0x4008_AF30; + use crate::compat::IoctlReq as Ioctl; + + pub const SET_OWNER: Ioctl = 0xAF01u32 as Ioctl; + pub const GET_FEATURES: Ioctl = 0x8008_AF00u32 as Ioctl; + pub const SET_FEATURES: Ioctl = 0x4008_AF00u32 as Ioctl; + pub const SET_MEM_TABLE: Ioctl = 0x4008_AF03u32 as Ioctl; + pub const SET_VRING_NUM: Ioctl = 0x4008_AF10u32 as Ioctl; + pub const SET_VRING_ADDR: Ioctl = 0x4028_AF11u32 as Ioctl; + pub const SET_VRING_BASE: Ioctl = 0x4008_AF12u32 as Ioctl; + pub const SET_VRING_KICK: Ioctl = 0x4008_AF20u32 as Ioctl; + pub const SET_VRING_CALL: Ioctl = 0x4008_AF21u32 as Ioctl; + pub const NET_SET_BACKEND: Ioctl = 0x4008_AF30u32 as Ioctl; #[repr(C)] pub struct VringState { @@ -124,7 +126,6 @@ pub 
struct VirtioNet { activated: bool, // --- vhost-net state --- - /// KVM VM file descriptor (raw, borrowed — not owned). vm_fd: RawFd, /// IRQ number assigned by the MMIO bus. @@ -212,14 +213,24 @@ impl VirtioNet { /// stale TAP for a fresh one and switch to userspace net processing. pub fn set_tap_fd(&mut self, fd: RawFd) { if self.vhost_fd >= 0 { - unsafe { libc::close(self.vhost_fd); } + unsafe { + libc::close(self.vhost_fd); + } self.vhost_fd = -1; } for &kfd in &self.kick_fds { - if kfd >= 0 { unsafe { libc::close(kfd); } } + if kfd >= 0 { + unsafe { + libc::close(kfd); + } + } } for &cfd in &self.call_fds { - if cfd >= 0 { unsafe { libc::close(cfd); } } + if cfd >= 0 { + unsafe { + libc::close(cfd); + } + } } self.kick_fds = [-1; 2]; self.call_fds = [-1; 2]; @@ -272,28 +283,19 @@ impl VirtioNet { // 3. VHOST_GET_FEATURES then SET intersection with driver-negotiated features let mut vhost_features: u64 = 0; - if unsafe { - libc::ioctl( - vhost_fd, - vhost::GET_FEATURES, - &mut vhost_features as *mut u64, - ) - } < 0 - { + if unsafe { libc::ioctl(vhost_fd, vhost::GET_FEATURES, &mut vhost_features as *mut u64) } < 0 { return Err(anyhow::anyhow!( "VHOST_GET_FEATURES failed: {}", std::io::Error::last_os_error() )); } - let driver_features: u64 = - (self.acked_features_low as u64) | ((self.acked_features_high as u64) << 32); + let driver_features: u64 = (self.acked_features_low as u64) | ((self.acked_features_high as u64) << 32); // VHOST_NET_F_VIRTIO_NET_HDR (bit 27) is a vhost-specific feature // (not guest-visible) that tells vhost-net to prepend/strip the // virtio_net_hdr in RX/TX buffers. Always request it if supported. 
const VHOST_NET_F_VIRTIO_NET_HDR: u64 = 1 << 27; - let features = - (driver_features & vhost_features) | (vhost_features & VHOST_NET_F_VIRTIO_NET_HDR); + let features = (driver_features & vhost_features) | (vhost_features & VHOST_NET_F_VIRTIO_NET_HDR); tracing::info!( "vhost-net: features vhost={:#x} driver={:#x} negotiated={:#x}", vhost_features, @@ -341,10 +343,7 @@ impl VirtioNet { flags_padding: 0, }], }; - unsafe { libc::ioctl(vhost_fd, - vhost::SET_MEM_TABLE, - &mem_table as *const vhost::Memory, - ) } + unsafe { libc::ioctl(vhost_fd, vhost::SET_MEM_TABLE, &mem_table as *const vhost::Memory) } }; if ret < 0 { return Err(anyhow::anyhow!( @@ -354,14 +353,13 @@ impl VirtioNet { } // 5. Set up each queue (RX=0, TX=1) - let guest_mem_base = self.guest_mem as u64; + let _guest_mem_base = self.guest_mem as u64; for qi in 0..2u32 { let qc = &self.queue_configs[qi as usize]; // Create kick eventfd (VMM signals vhost when guest kicks queue) - let kick_fd = - unsafe { libc::eventfd(0, libc::EFD_CLOEXEC | libc::EFD_NONBLOCK) }; + let kick_fd = unsafe { libc::eventfd(0, libc::EFD_CLOEXEC | libc::EFD_NONBLOCK) }; if kick_fd < 0 { return Err(anyhow::anyhow!( "eventfd(kick) failed: {}", @@ -423,10 +421,7 @@ impl VirtioNet { } // VHOST_SET_VRING_KICK - let kick = vhost::VringFile { - index: qi, - fd: kick_fd, - }; + let kick = vhost::VringFile { index: qi, fd: kick_fd }; if unsafe { libc::ioctl(vhost_fd, vhost::SET_VRING_KICK, &kick) } < 0 { return Err(anyhow::anyhow!( "VHOST_SET_VRING_KICK(q={qi}) failed: {}", @@ -435,10 +430,7 @@ impl VirtioNet { } // VHOST_SET_VRING_CALL - let call = vhost::VringFile { - index: qi, - fd: call_fd, - }; + let call = vhost::VringFile { index: qi, fd: call_fd }; if unsafe { libc::ioctl(vhost_fd, vhost::SET_VRING_CALL, &call) } < 0 { return Err(anyhow::anyhow!( "VHOST_SET_VRING_CALL(q={qi}) failed: {}", @@ -485,11 +477,7 @@ impl VirtioNet { if qi < 2 && self.kick_fds[qi] >= 0 { let val: u64 = 1; unsafe { - libc::write( - self.kick_fds[qi], - 
&val as *const u64 as *const libc::c_void, - 8, - ); + libc::write(self.kick_fds[qi], &val as *const u64 as *const libc::c_void, 8); } } } @@ -506,14 +494,20 @@ impl VirtioNet { /// Read an available RX descriptor index from the avail ring. fn rx_pop_avail(&self) -> Option<(u16, u16)> { - if self.guest_mem.is_null() || self.queue_configs.is_empty() { return None; } + if self.guest_mem.is_null() || self.queue_configs.is_empty() { + return None; + } let qc = &self.queue_configs[RX_QUEUE as usize]; - if qc.avail_addr == 0 { return None; } + if qc.avail_addr == 0 { + return None; + } let avail_idx: u16 = unsafe { *(self.gpa_to_ptr(qc.avail_addr + 2) as *const u16) }; let last = unsafe { *(self.gpa_to_ptr(qc.used_addr + 2) as *const u16) }; // used_idx = our "last consumed" - // Use a simple approach: use used_idx as our last_avail tracker - // (we always consume and immediately push to used) - if avail_idx == last { return None; } + // Use a simple approach: use used_idx as our last_avail tracker + // (we always consume and immediately push to used) + if avail_idx == last { + return None; + } let ring_idx = (last % qc.size) as u64; let desc_idx: u16 = unsafe { *(self.gpa_to_ptr(qc.avail_addr + 4 + ring_idx * 2) as *const u16) }; Some((desc_idx, last)) @@ -539,27 +533,33 @@ impl VirtioNet { let qc = &self.queue_configs[RX_QUEUE as usize]; let ptr = self.gpa_to_ptr(qc.desc_addr + idx as u64 * 16); unsafe { - (*(ptr as *const u64), *(ptr.add(8) as *const u32), - *(ptr.add(12) as *const u16), *(ptr.add(14) as *const u16)) + ( + *(ptr as *const u64), + *(ptr.add(8) as *const u32), + *(ptr.add(12) as *const u16), + *(ptr.add(14) as *const u16), + ) } } /// Process one incoming TAP packet: read from TAP, write to RX virtqueue. /// Returns true if a packet was delivered. 
pub fn process_rx_from_tap(&mut self) -> bool { - let Some((desc_idx, used_idx)) = self.rx_pop_avail() else { return false }; + let Some((desc_idx, used_idx)) = self.rx_pop_avail() else { + return false; + }; // Read packet from TAP (non-blocking) let mut buf = [0u8; 65535]; - let n = unsafe { - libc::read(self.tap_fd, buf.as_mut_ptr() as *mut libc::c_void, buf.len()) - }; - if n <= 0 { return false; } + let n = unsafe { libc::read(self.tap_fd, buf.as_mut_ptr() as *mut libc::c_void, buf.len()) }; + if n <= 0 { + return false; + } let frame = &buf[..n as usize]; // Build virtio-net header (12 bytes of zeros for basic operation) let hdr = [0u8; VIRTIO_NET_HDR_SIZE]; - let total = hdr.len() + frame.len(); + let _total = hdr.len() + frame.len(); // Walk descriptor chain, write header + frame into writable buffers let mut remaining_hdr = &hdr[..]; @@ -569,9 +569,7 @@ impl VirtioNet { loop { let (addr, len, flags, next) = self.rx_read_desc(idx); if flags & VRING_DESC_F_WRITE != 0 { - let buf = unsafe { - std::slice::from_raw_parts_mut(self.gpa_to_ptr(addr), len as usize) - }; + let buf = unsafe { std::slice::from_raw_parts_mut(self.gpa_to_ptr(addr), len as usize) }; let mut pos = 0; // Write header first if !remaining_hdr.is_empty() { @@ -602,18 +600,9 @@ impl VirtioNet { /// Write a raw frame to the TAP fd (userspace fallback path). 
fn write_tap(&self, frame: &[u8]) -> anyhow::Result { - let n = unsafe { - libc::write( - self.tap_fd, - frame.as_ptr() as *const libc::c_void, - frame.len(), - ) - }; + let n = unsafe { libc::write(self.tap_fd, frame.as_ptr() as *const libc::c_void, frame.len()) }; if n < 0 { - return Err(anyhow::anyhow!( - "TAP write failed: {}", - std::io::Error::last_os_error() - )); + return Err(anyhow::anyhow!("TAP write failed: {}", std::io::Error::last_os_error())); } Ok(n as usize) } @@ -694,10 +683,7 @@ impl VirtioDevice for VirtioNet { } fn write_config(&mut self, offset: u64, data: &[u8]) { - tracing::debug!( - "virtio-net: write_config offset={offset} len={} (ignored)", - data.len() - ); + tracing::debug!("virtio-net: write_config offset={offset} len={} (ignored)", data.len()); } fn prepare_activate(&mut self, queues: &[QueueInfo], guest_mem: *mut u8, mem_size: u64) { @@ -713,9 +699,13 @@ impl VirtioDevice for VirtioNet { // 2. Set avail_event in used ring to match avail_idx (always notify) if !self.is_vhost() && !guest_mem.is_null() { for qc in queues { - if qc.used_addr == 0 || qc.avail_addr == 0 { continue; } + if qc.used_addr == 0 || qc.avail_addr == 0 { + continue; + } // Clear used ring flags - unsafe { *(self.gpa_to_ptr(qc.used_addr) as *mut u16) = 0; } + unsafe { + *(self.gpa_to_ptr(qc.used_addr) as *mut u16) = 0; + } // Read current used_idx and avail_idx let used_idx: u16 = unsafe { *(self.gpa_to_ptr(qc.used_addr + 2) as *const u16) }; let avail_idx: u16 = unsafe { *(self.gpa_to_ptr(qc.avail_addr + 2) as *const u16) }; @@ -723,11 +713,15 @@ impl VirtioDevice for VirtioNet { // so guest thinks we've consumed everything and MUST notify on next // avail ring layout: flags(2) + idx(2) + ring[size](2 each) + used_event(2) let used_event_offset = qc.avail_addr + 4 + qc.size as u64 * 2; - unsafe { *(self.gpa_to_ptr(used_event_offset) as *mut u16) = used_idx; } + unsafe { + *(self.gpa_to_ptr(used_event_offset) as *mut u16) = used_idx; + } // Set avail_event (in used 
ring, after the ring entries) to avail_idx // used ring layout: flags(2) + idx(2) + ring[size](8 each) + avail_event(2) let avail_event_offset = qc.used_addr + 4 + qc.size as u64 * 8; - unsafe { *(self.gpa_to_ptr(avail_event_offset) as *mut u16) = avail_idx; } + unsafe { + *(self.gpa_to_ptr(avail_event_offset) as *mut u16) = avail_idx; + } } } } @@ -789,19 +783,16 @@ impl VirtioDevice for VirtioNet { } } - fn process_descriptor_chain( - &mut self, - queue_index: u16, - chain: &DescriptorChain, - vq: &Virtqueue, - ) -> u32 { + fn process_descriptor_chain(&mut self, queue_index: u16, chain: &DescriptorChain, vq: &Virtqueue) -> u32 { match queue_index { TX_QUEUE => self.process_tx_chain(chain, vq), _ => 0, } } - fn as_any_mut(&mut self) -> &mut dyn std::any::Any { self } + fn as_any_mut(&mut self) -> &mut dyn std::any::Any { + self + } fn reset(&mut self) { self.acked_features_low = 0; self.acked_features_high = 0; @@ -820,7 +811,9 @@ impl VirtioDevice for VirtioNet { } fn restore_state(&mut self, data: &[u8]) -> anyhow::Result<()> { - if data.is_empty() { return Ok(()); } + if data.is_empty() { + return Ok(()); + } let state: serde_json::Value = serde_json::from_slice(data)?; if let Some(v) = state.get("link_up").and_then(|v| v.as_bool()) { self.link_up = v; diff --git a/src/virtio/queue.rs b/src/virtio/queue.rs index 8e0058f..37ef2da 100644 --- a/src/virtio/queue.rs +++ b/src/virtio/queue.rs @@ -105,7 +105,9 @@ impl Virtqueue { /// Mark the queue as ready (or not). /// Sync last_avail_idx from the guest's avail ring (used during restore). 
pub fn sync_avail_from_guest(&mut self) { - if self.guest_mem.is_null() || self.avail_ring == 0 { return; } + if self.guest_mem.is_null() || self.avail_ring == 0 { + return; + } self.last_avail_idx = self.read_avail_idx(); } @@ -142,14 +144,18 @@ impl Virtqueue { fn gpa_to_offset(&self, gpa: u64) -> Option { if self.hole_start == 0 { // No hole — direct mapping - if gpa >= self.guest_mem_size { return None; } + if gpa >= self.guest_mem_size { + return None; + } return Some(gpa as usize); } if gpa < self.hole_start { Some(gpa as usize) } else if gpa >= self.hole_end { let offset = self.hole_start + (gpa - self.hole_end); - if offset >= self.guest_mem_size { return None; } + if offset >= self.guest_mem_size { + return None; + } Some(offset as usize) } else { None // In the MMIO hole — no backing memory @@ -159,27 +165,26 @@ impl Virtqueue { /// Read bytes from guest memory at the given guest physical address. fn guest_read(&self, gpa: u64, len: u64) -> Option<&[u8]> { let offset = self.gpa_to_offset(gpa)?; - if offset + len as usize > self.guest_mem_size as usize { return None; } - if self.guest_mem.is_null() { return None; } - unsafe { - Some(std::slice::from_raw_parts( - self.guest_mem.add(offset), - len as usize, - )) + if offset + len as usize > self.guest_mem_size as usize { + return None; } + if self.guest_mem.is_null() { + return None; + } + unsafe { Some(std::slice::from_raw_parts(self.guest_mem.add(offset), len as usize)) } } /// Get a mutable slice of guest memory at the given GPA. 
+ #[allow(clippy::mut_from_ref)] fn guest_write(&self, gpa: u64, len: u64) -> Option<&mut [u8]> { let offset = self.gpa_to_offset(gpa)?; - if offset + len as usize > self.guest_mem_size as usize { return None; } - if self.guest_mem.is_null() { return None; } - unsafe { - Some(std::slice::from_raw_parts_mut( - self.guest_mem.add(offset), - len as usize, - )) + if offset + len as usize > self.guest_mem_size as usize { + return None; + } + if self.guest_mem.is_null() { + return None; } + unsafe { Some(std::slice::from_raw_parts_mut(self.guest_mem.add(offset), len as usize)) } } fn read_u16(&self, gpa: u64) -> Option { @@ -195,8 +200,7 @@ impl Virtqueue { fn read_u64(&self, gpa: u64) -> Option { let bytes = self.guest_read(gpa, 8)?; Some(u64::from_le_bytes([ - bytes[0], bytes[1], bytes[2], bytes[3], - bytes[4], bytes[5], bytes[6], bytes[7], + bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7], ])) } @@ -243,9 +247,7 @@ impl Virtqueue { // Read the head descriptor index from avail.ring[last_avail_idx % size] let ring_entry_offset = (self.last_avail_idx % self.size) as u64; - let head_idx = self.read_u16( - self.avail_ring + AVAIL_RING_HEADER + ring_entry_offset * 2, - )?; + let head_idx = self.read_u16(self.avail_ring + AVAIL_RING_HEADER + ring_entry_offset * 2)?; // Walk the descriptor chain let mut descriptors = Vec::new(); @@ -400,10 +402,7 @@ mod tests { fn read_u32(&self, offset: u64) -> u32 { let off = offset as usize; - u32::from_le_bytes([ - self.mem[off], self.mem[off + 1], - self.mem[off + 2], self.mem[off + 3], - ]) + u32::from_le_bytes([self.mem[off], self.mem[off + 1], self.mem[off + 2], self.mem[off + 3]]) } /// Write a descriptor at the given index in the descriptor table. 
diff --git a/src/virtio/vsock.rs b/src/virtio/vsock.rs index 5d960f8..c8b0417 100644 --- a/src/virtio/vsock.rs +++ b/src/virtio/vsock.rs @@ -62,12 +62,16 @@ struct VsockHdr { impl VsockHdr { fn read_from(data: &[u8]) -> Option { - if data.len() < HDR_SIZE { return None; } + if data.len() < HDR_SIZE { + return None; + } Some(unsafe { std::ptr::read_unaligned(data.as_ptr() as *const Self) }) } fn write_to(&self, data: &mut [u8]) -> usize { - if data.len() < HDR_SIZE { return 0; } + if data.len() < HDR_SIZE { + return 0; + } unsafe { std::ptr::write_unaligned(data.as_mut_ptr() as *mut Self, *self); } @@ -88,7 +92,8 @@ struct VsockConn { impl VsockConn { fn peer_free(&self) -> u32 { - self.peer_buf_alloc.saturating_sub(self.tx_cnt.wrapping_sub(self.peer_fwd_cnt)) + self.peer_buf_alloc + .saturating_sub(self.tx_cnt.wrapping_sub(self.peer_fwd_cnt)) } } @@ -168,12 +173,20 @@ impl VirtioVsock { libc::fcntl(fds[0], libc::F_SETFL, flags | libc::O_NONBLOCK); // Increase socket buffer to avoid data loss on non-blocking writes let buf_size: libc::c_int = 256 * 1024; - libc::setsockopt(fds[0], libc::SOL_SOCKET, libc::SO_SNDBUF, + libc::setsockopt( + fds[0], + libc::SOL_SOCKET, + libc::SO_SNDBUF, &buf_size as *const libc::c_int as *const libc::c_void, - std::mem::size_of::() as libc::socklen_t); - libc::setsockopt(fds[1], libc::SOL_SOCKET, libc::SO_RCVBUF, + std::mem::size_of::() as libc::socklen_t, + ); + libc::setsockopt( + fds[1], + libc::SOL_SOCKET, + libc::SO_RCVBUF, &buf_size as *const libc::c_int as *const libc::c_void, - std::mem::size_of::() as libc::socklen_t); + std::mem::size_of::() as libc::socklen_t, + ); } #[cfg(target_os = "linux")] @@ -246,6 +259,7 @@ impl VirtioVsock { unsafe { std::slice::from_raw_parts(self.gpa_to_ptr(gpa), len as usize) } } + #[allow(clippy::mut_from_ref)] fn guest_slice_mut(&self, gpa: u64, len: u64) -> &mut [u8] { unsafe { std::slice::from_raw_parts_mut(self.gpa_to_ptr(gpa), len as usize) } } @@ -290,7 +304,9 @@ impl VirtioVsock { fn 
write_used_idx(&self, qi: usize, idx: u16) { let qc = &self.queue_configs[qi]; std::sync::atomic::fence(Ordering::Release); - unsafe { *(self.gpa_to_ptr(qc.used_addr + 2) as *mut u16) = idx; } + unsafe { + *(self.gpa_to_ptr(qc.used_addr + 2) as *mut u16) = idx; + } } // --- Packet building --- @@ -339,7 +355,9 @@ impl VirtioVsock { fn process_tx(&mut self) { let qi = TX_QUEUE as usize; - if qi >= self.queue_configs.len() { return; } + if qi >= self.queue_configs.len() { + return; + } let avail_idx = self.read_avail_idx(qi); while self.last_avail[qi] != avail_idx { @@ -396,8 +414,11 @@ impl VirtioVsock { if self.device_fd >= 0 { let mut drain = [0u8; 4096]; loop { - let n = unsafe { libc::read(self.device_fd, drain.as_mut_ptr() as *mut libc::c_void, drain.len()) }; - if n <= 0 { break; } + let n = + unsafe { libc::read(self.device_fd, drain.as_mut_ptr() as *mut libc::c_void, drain.len()) }; + if n <= 0 { + break; + } } } self.conn = Some(VsockConn { @@ -432,7 +453,9 @@ impl VirtioVsock { payload.len() - written, ) }; - if n <= 0 { break; } + if n <= 0 { + break; + } written += n as usize; } } @@ -455,10 +478,8 @@ impl VirtioVsock { conn.peer_fwd_cnt = u32::from_le(hdr.fwd_cnt); } } - OP_CREDIT_REQUEST => { - if self.conn.is_some() { - self.enqueue_hdr(OP_CREDIT_UPDATE); - } + OP_CREDIT_REQUEST if self.conn.is_some() => { + self.enqueue_hdr(OP_CREDIT_UPDATE); } _ => {} } @@ -468,7 +489,9 @@ impl VirtioVsock { fn process_rx(&mut self) { let qi = RX_QUEUE as usize; - if qi >= self.queue_configs.len() { return; } + if qi >= self.queue_configs.len() { + return; + } // Read any pending data from the agent unix socket. 
self.read_agent_data(); @@ -514,14 +537,10 @@ impl VirtioVsock { let mut buf = [0u8; 4096]; loop { - let n = unsafe { - libc::read( - self.device_fd, - buf.as_mut_ptr() as *mut libc::c_void, - buf.len(), - ) - }; - if n <= 0 { break; } + let n = unsafe { libc::read(self.device_fd, buf.as_mut_ptr() as *mut libc::c_void, buf.len()) }; + if n <= 0 { + break; + } let data = &buf[..n as usize]; let conn = self.conn.as_ref().unwrap(); @@ -557,8 +576,12 @@ impl VirtioVsock { /// Called during snapshot creation to close guest connections. pub fn send_transport_reset(&mut self) { let qi = _EVENT_QUEUE as usize; - if qi >= self.queue_configs.len() { return; } - if self.guest_mem.is_null() { return; } + if qi >= self.queue_configs.len() { + return; + } + if self.guest_mem.is_null() { + return; + } let avail_idx = self.read_avail_idx(qi); if self.last_avail[qi] == avail_idx { @@ -635,12 +658,8 @@ impl VirtioDevice for VirtioVsock { // Initialize queue indices from guest memory. for (qi, qc) in queues.iter().enumerate() { if qi < NUM_QUEUES && !guest_mem.is_null() && qc.avail_addr != 0 { - let avail_idx = unsafe { - *(self.gpa_to_ptr(qc.avail_addr + 2) as *const u16) - }; - let used_idx = unsafe { - *(self.gpa_to_ptr(qc.used_addr + 2) as *const u16) - }; + let avail_idx = unsafe { *(self.gpa_to_ptr(qc.avail_addr + 2) as *const u16) }; + let used_idx = unsafe { *(self.gpa_to_ptr(qc.used_addr + 2) as *const u16) }; self.last_used[qi] = used_idx; if qi == TX_QUEUE as usize { // TX: start from avail_idx to skip already-posted buffers. @@ -665,14 +684,18 @@ impl VirtioDevice for VirtioVsock { } fn process_queue(&mut self, queue_index: u16) -> anyhow::Result<()> { - if !self.activated || self.guest_mem.is_null() { return Ok(()); } + if !self.activated || self.guest_mem.is_null() { + return Ok(()); + } match queue_index { TX_QUEUE => { self.process_tx(); // Also process RX to deliver any responses generated by TX. 
self.process_rx(); - if !self.pending_rx.is_empty() || self.last_used[TX_QUEUE as usize] != self.last_avail[TX_QUEUE as usize] { + if !self.pending_rx.is_empty() + || self.last_used[TX_QUEUE as usize] != self.last_avail[TX_QUEUE as usize] + { self.signal_guest(); } else { // Always signal after TX/RX processing so guest sees used buffers. @@ -695,7 +718,9 @@ impl VirtioDevice for VirtioVsock { false // device handles all queue processing internally } - fn as_any_mut(&mut self) -> &mut dyn std::any::Any { self } + fn as_any_mut(&mut self) -> &mut dyn std::any::Any { + self + } fn reset(&mut self) { self.acked_features_low = 0; self.acked_features_high = 0; @@ -709,11 +734,14 @@ impl VirtioDevice for VirtioVsock { "guest_cid": self.guest_cid, "acked_features_low": self.acked_features_low, "acked_features_high": self.acked_features_high, - })).unwrap_or_default() + })) + .unwrap_or_default() } fn restore_state(&mut self, data: &[u8]) -> anyhow::Result<()> { - if data.is_empty() { return Ok(()); } + if data.is_empty() { + return Ok(()); + } let state: serde_json::Value = serde_json::from_slice(data)?; if let Some(v) = state.get("acked_features_low").and_then(|v| v.as_u64()) { self.acked_features_low = v as u32; @@ -728,7 +756,11 @@ impl VirtioDevice for VirtioVsock { impl Drop for VirtioVsock { fn drop(&mut self) { for fd in [self.device_fd, self.host_fd, self.rx_eventfd] { - if fd >= 0 { unsafe { libc::close(fd); } } + if fd >= 0 { + unsafe { + libc::close(fd); + } + } } } } @@ -776,6 +808,8 @@ mod tests { let fd = dev.take_host_fd(); assert!(fd >= 0); assert_eq!(dev.take_host_fd(), -1); // second call returns -1 - unsafe { libc::close(fd); } + unsafe { + libc::close(fd); + } } } diff --git a/src/vmm/agent_listener.rs b/src/vmm/agent_listener.rs index 1256779..7392bff 100644 --- a/src/vmm/agent_listener.rs +++ b/src/vmm/agent_listener.rs @@ -9,7 +9,7 @@ //! channel receiver, eliminating the 2.5s sleep + drain race. 
use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, Mutex, mpsc}; +use std::sync::{mpsc, Arc, Mutex}; use serde::{Deserialize, Serialize}; @@ -19,7 +19,9 @@ use serde::{Deserialize, Serialize}; /// The guest agent connects to the same port via kernel cmdline param. pub const AGENT_VSOCK_PORT_BASE: u32 = 9999; -fn default_mem_available() -> f64 { 100.0 } +fn default_mem_available() -> f64 { + 100.0 +} /// AF_VSOCK address family. const AF_VSOCK: libc::c_int = 40; @@ -57,10 +59,7 @@ pub enum VmmMessage { Poll, Pong { ping_id: u64 }, Shutdown, - Exec { - command: String, - args: Vec, - }, + Exec { command: String, args: Vec }, } /// Shared state between the listener thread and the balloon tick thread. @@ -106,10 +105,12 @@ impl AgentState { /// Returns (exit_code, stdout, stderr) on success. pub fn send_exec(&self, command: &str, args: &[String]) -> Result<(i32, String, String), String> { // Serialize concurrent exec calls — only one can use the vsock channel at a time. - let _exec_guard = self.exec_lock.lock() - .map_err(|_| "Exec lock poisoned".to_string())?; + let _exec_guard = self.exec_lock.lock().map_err(|_| "Exec lock poisoned".to_string())?; - let fd = self.client_fd.lock().unwrap() + let fd = self + .client_fd + .lock() + .unwrap() .ok_or_else(|| "Guest agent not connected".to_string())?; // Signal the listener to stop processing heartbeats @@ -126,19 +127,21 @@ impl AgentState { } // Send exec command - send_vmm_message(fd, &VmmMessage::Exec { - command: command.to_string(), - args: args.to_vec(), - }).map_err(|_| "Failed to send exec command to agent".to_string())?; + send_vmm_message( + fd, + &VmmMessage::Exec { + command: command.to_string(), + args: args.to_vec(), + }, + ) + .map_err(|_| "Failed to send exec command to agent".to_string())?; // Take the receiver out of the mutex so we don't hold the lock during exec. // This allows the agent listener to replace the channel on disconnect/reconnect. 
- let rx = self.msg_rx.lock().unwrap() - .take() - .ok_or_else(|| { - self.exec_in_progress.store(false, Ordering::Release); - "Channel not available".to_string() - })?; + let rx = self.msg_rx.lock().unwrap().take().ok_or_else(|| { + self.exec_in_progress.store(false, Ordering::Release); + "Channel not available".to_string() + })?; let deadline = std::time::Instant::now() + std::time::Duration::from_secs(120); loop { @@ -149,7 +152,11 @@ impl AgentState { } match rx.recv_timeout(timeout) { - Ok(AgentMessage::ExecResult { exit_code, stdout, stderr }) => { + Ok(AgentMessage::ExecResult { + exit_code, + stdout, + stderr, + }) => { self.exec_in_progress.store(false, Ordering::Release); *self.msg_rx.lock().unwrap() = Some(rx); return Ok((exit_code, stdout, stderr)); @@ -251,11 +258,8 @@ fn listener_thread_fd(state: Arc, shutdown: Arc, fd: i32 let mut msg_buf = vec![0u8; len]; if read_exact(fd, &mut msg_buf) { if let Ok(msg) = serde_json::from_slice::(&msg_buf) { - match &msg { - AgentMessage::Ready => { - tracing::info!("agent-listener: guest agent ready"); - } - _ => {} + if let AgentMessage::Ready = &msg { + tracing::info!("agent-listener: guest agent ready"); } let _ = tx.try_send(msg); } @@ -339,9 +343,7 @@ fn listener_thread(state: Arc, shutdown: Arc, port: u32) // Accept loop — handle one connection at a time (one agent per VM) while !shutdown.load(Ordering::Relaxed) { - let client_fd = unsafe { - libc::accept(listen_fd, std::ptr::null_mut(), std::ptr::null_mut()) - }; + let client_fd = unsafe { libc::accept(listen_fd, std::ptr::null_mut(), std::ptr::null_mut()) }; if client_fd < 0 { continue; // timeout or error, retry } @@ -415,7 +417,16 @@ fn handle_client(state: &AgentState, fd: i32, shutdown: &AtomicBool, tx: &mpsc:: match serde_json::from_slice::(&body) { Ok(msg) => { // Update activity state for heartbeats (always, even during exec) - if let AgentMessage::Heartbeat { active, load_avg_1m, mem_pressure_pct, mem_available_pct, process_count, uptime_secs, 
ping_id } = &msg { + if let AgentMessage::Heartbeat { + active, + load_avg_1m, + mem_pressure_pct, + mem_available_pct, + process_count, + uptime_secs, + ping_id, + } = &msg + { state.active.store(*active, Ordering::Release); *state.mem_available_pct.lock().unwrap() = *mem_available_pct; // Send pong back so agent knows we're alive @@ -452,24 +463,25 @@ fn handle_client(state: &AgentState, fd: i32, shutdown: &AtomicBool, tx: &mpsc:: } } -enum ReadResult { Ok, Timeout, Disconnected } +enum ReadResult { + Ok, + Timeout, + Disconnected, +} fn read_exact_result(fd: i32, buf: &mut [u8]) -> ReadResult { let mut read = 0; while read < buf.len() { - let n = unsafe { - libc::recv( - fd, - buf[read..].as_mut_ptr() as *mut libc::c_void, - buf.len() - read, - 0, - ) - }; + let n = unsafe { libc::recv(fd, buf[read..].as_mut_ptr() as *mut libc::c_void, buf.len() - read, 0) }; if n <= 0 { - if n == 0 { return ReadResult::Disconnected; } + if n == 0 { + return ReadResult::Disconnected; + } let errno = unsafe { *libc::__errno_location() }; if errno == libc::EAGAIN || errno == libc::EWOULDBLOCK { - if read == 0 { return ReadResult::Timeout; } + if read == 0 { + return ReadResult::Timeout; + } continue; // partial read, keep trying } return ReadResult::Disconnected; @@ -491,9 +503,7 @@ fn send_vmm_message(fd: i32, msg: &VmmMessage) -> Result<(), ()> { buf.extend_from_slice(&len); buf.extend_from_slice(&json); - let written = unsafe { - libc::write(fd, buf.as_ptr() as *const libc::c_void, buf.len()) - }; + let written = unsafe { libc::write(fd, buf.as_ptr() as *const libc::c_void, buf.len()) }; if written as usize == buf.len() { Ok(()) diff --git a/src/vmm/mod.rs b/src/vmm/mod.rs index b90171e..663b107 100644 --- a/src/vmm/mod.rs +++ b/src/vmm/mod.rs @@ -6,11 +6,9 @@ use std::sync::{Arc, Mutex}; use anyhow::{Context, Result}; use kvm_bindings::{ - kvm_pit_config, kvm_userspace_memory_region, - kvm_irq_routing, kvm_irq_routing_entry, - KVM_IRQ_ROUTING_IRQCHIP, KVM_IRQCHIP_IOAPIC, - 
KVM_IRQCHIP_PIC_MASTER, KVM_IRQCHIP_PIC_SLAVE, - KVM_PIT_SPEAKER_DUMMY, KVM_MEM_LOG_DIRTY_PAGES, + kvm_irq_routing, kvm_irq_routing_entry, kvm_pit_config, kvm_userspace_memory_region, KVM_IRQCHIP_IOAPIC, + KVM_IRQCHIP_PIC_MASTER, KVM_IRQCHIP_PIC_SLAVE, KVM_IRQ_ROUTING_IRQCHIP, KVM_MEM_LOG_DIRTY_PAGES, + KVM_PIT_SPEAKER_DUMMY, }; use kvm_ioctls::{Kvm, VmFd}; @@ -135,8 +133,7 @@ impl Vm { }; let guest_memory = if mem_size <= mmio_hole_start { - memory::create_guest_memory(alloc_size) - .context("Failed to create guest memory")? + memory::create_guest_memory(alloc_size).context("Failed to create guest memory")? } else { memory::create_guest_memory_with_hole(alloc_size, mmio_hole_start, mmio_hole_end) .context("Failed to create guest memory")? @@ -191,8 +188,10 @@ impl Vm { tracing::info!( "Split memory: slot0=0..{:#x} ({}MB), slot1={:#x}..+{}MB, total={}MB", - mmio_hole_start, mmio_hole_start >> 20, - mmio_hole_end, above_hole >> 20, + mmio_hole_start, + mmio_hole_start >> 20, + mmio_hole_end, + above_hole >> 20, mem_size >> 20, ); } @@ -235,10 +234,7 @@ impl Vm { self.kvm_slot_size = alloc_size; let (base, irq) = mmio_bus.register(Box::new(balloon)); self.balloon_irq = Some(irq); - virtio_cmdline_params.push(format!( - "virtio_mmio.device=0x{:x}@0x{:x}:{}", - MMIO_STRIDE, base, irq - )); + virtio_cmdline_params.push(format!("virtio_mmio.device=0x{:x}@0x{:x}:{}", MMIO_STRIDE, base, irq)); // Register virtio-vsock (userspace backend) match VirtioVsock::new(self.config.cid.unwrap_or(3)) { @@ -250,10 +246,7 @@ impl Vm { self.vsock_irq = Some(predicted_irq); let (base, irq) = mmio_bus.register(Box::new(vsock)); debug_assert_eq!(irq, predicted_irq); - virtio_cmdline_params.push(format!( - "virtio_mmio.device=0x{:x}@0x{:x}:{}", - MMIO_STRIDE, base, irq - )); + virtio_cmdline_params.push(format!("virtio_mmio.device=0x{:x}@0x{:x}:{}", MMIO_STRIDE, base, irq)); } Err(e) => { tracing::warn!("Failed to create virtio-vsock: {e}"); @@ -265,10 +258,8 @@ impl Vm { match 
crate::virtio::block::VirtioBlock::open(block_path, false) { Ok(block) => { let (base, irq) = mmio_bus.register(Box::new(block)); - virtio_cmdline_params.push(format!( - "virtio_mmio.device=0x{:x}@0x{:x}:{}", - MMIO_STRIDE, base, irq - )); + virtio_cmdline_params + .push(format!("virtio_mmio.device=0x{:x}@0x{:x}:{}", MMIO_STRIDE, base, irq)); } Err(e) => { tracing::warn!("Failed to open block device {block_path}: {e}"); @@ -281,10 +272,8 @@ impl Vm { match crate::virtio::block::VirtioBlock::open(overlay_path, false) { Ok(block) => { let (base, irq) = mmio_bus.register(Box::new(block)); - virtio_cmdline_params.push(format!( - "virtio_mmio.device=0x{:x}@0x{:x}:{}", - MMIO_STRIDE, base, irq - )); + virtio_cmdline_params + .push(format!("virtio_mmio.device=0x{:x}@0x{:x}:{}", MMIO_STRIDE, base, irq)); tracing::info!("Overlay block device registered: {overlay_path}"); } Err(e) => { @@ -308,10 +297,7 @@ impl Vm { if root_dir.is_dir() { let fs_dev = crate::virtio::fs::VirtioFs::new(root_dir, tag.clone()); let (base, irq) = mmio_bus.register(Box::new(fs_dev)); - virtio_cmdline_params.push(format!( - "virtio_mmio.device=0x{:x}@0x{:x}:{}", - MMIO_STRIDE, base, irq - )); + virtio_cmdline_params.push(format!("virtio_mmio.device=0x{:x}@0x{:x}:{}", MMIO_STRIDE, base, irq)); tracing::info!("virtio-fs registered: dir={dir_path}, tag={tag}"); } else { tracing::warn!("Shared directory does not exist: {dir_path}"); @@ -355,7 +341,12 @@ impl Vm { // Set guest memory on the MMIO bus for virtqueue descriptor chain processing { let mut mmio_bus = self.mmio_bus.lock().unwrap(); - mmio_bus.set_guest_memory_with_hole(guest_memory.as_ptr(), guest_memory.size(), guest_memory.hole_start(), guest_memory.hole_end()); + mmio_bus.set_guest_memory_with_hole( + guest_memory.as_ptr(), + guest_memory.size(), + guest_memory.hole_start(), + guest_memory.hole_end(), + ); } // Build the final kernel command line with virtio_mmio.device parameters @@ -393,7 +384,7 @@ impl Vm { let mut pci_bus = 
crate::pci::PciBus::new(); for bdf_str in &self.config.passthrough_devices { match crate::pci::vfio::VfioDevice::open(bdf_str) { - Ok(mut vfio_dev) => { + Ok(vfio_dev) => { // Map guest memory for DMA if let Err(e) = vfio_dev.map_dma(guest_memory.as_ptr(), alloc_size) { tracing::error!("Failed to map DMA for {bdf_str}: {e}"); @@ -470,15 +461,21 @@ impl Vm { /// Instead of loading a kernel, this maps the template's memory file /// with MAP_PRIVATE (CoW) and restores vCPU register state. This is the /// primary mechanism for <20ms cold starts. - pub fn fork_boot(&mut self, template_dir: &str, skip_verify: bool, mem_limit_mb: Option, vcpu_limit: Option, overlay_size: Option<&str>) -> Result<()> { - use crate::boot::template::{TemplateSnapshot, fork_from_template}; + pub fn fork_boot( + &mut self, + template_dir: &str, + skip_verify: bool, + mem_limit_mb: Option, + vcpu_limit: Option, + _overlay_size: Option<&str>, + ) -> Result<()> { use crate::boot::identity; + use crate::boot::template::{fork_from_template, TemplateSnapshot}; let template = TemplateSnapshot::load(template_dir, !skip_verify)?; // 1. Fork guest memory from template (CoW mmap) - let guest_memory = fork_from_template(&template) - .context("Failed to fork memory from template")?; + let guest_memory = fork_from_template(&template).context("Failed to fork memory from template")?; let mem_size = template.memory_size; @@ -486,7 +483,7 @@ impl Vm { // KVM_MEM_LOG_DIRTY_PAGES enables dirty page tracking for incremental snapshots. 
let mmio_hole_start: u64 = 0xC000_0000; let mmio_hole_end: u64 = 0x1_0000_0000; - let guard_size: u64 = 128 << 20; + let _guard_size: u64 = 128 << 20; if mem_size <= mmio_hole_start { // Small VM: single KVM slot @@ -537,8 +534,10 @@ impl Vm { tracing::info!( "Split memory: slot0=0..{:#x} ({}MB), slot1={:#x}..+{}MB, total={}MB", - mmio_hole_start, mmio_hole_start >> 20, - mmio_hole_end, above_hole >> 20, + mmio_hole_start, + mmio_hole_start >> 20, + mmio_hole_end, + above_hole >> 20, mem_size >> 20, ); self.kvm_slot_size = mmio_hole_start; // for dirty log @@ -567,8 +566,7 @@ impl Vm { let pit_state = unsafe { std::ptr::read(template.device_states.pit.as_ptr() as *const kvm_bindings::kvm_pit_state2) }; - self.vm_fd.set_pit2(&pit_state) - .context("Failed to restore PIT state")?; + self.vm_fd.set_pit2(&pit_state).context("Failed to restore PIT state")?; tracing::info!("Restored PIT state from template"); } } @@ -586,10 +584,10 @@ impl Vm { let expected_size = std::mem::size_of::(); for (i, name) in ["PIC_MASTER", "PIC_SLAVE", "IOAPIC"].iter().enumerate() { if template.device_states.irqchip[i].len() == expected_size { - let chip = unsafe { - std::ptr::read(template.device_states.irqchip[i].as_ptr() as *const kvm_irqchip) - }; - self.vm_fd.set_irqchip(&chip) + let chip = + unsafe { std::ptr::read(template.device_states.irqchip[i].as_ptr() as *const kvm_irqchip) }; + self.vm_fd + .set_irqchip(&chip) .context(format!("Failed to restore {name}"))?; } } @@ -631,13 +629,18 @@ impl Vm { self.vsock_irq = Some(predicted_irq); mmio_bus.register(Box::new(vsock)); } - Err(e) => { tracing::warn!("Failed to create virtio-vsock: {e}"); } + Err(e) => { + tracing::warn!("Failed to create virtio-vsock: {e}"); + } } // Register virtio-block: use CLI --block, or fall back to template metadata. // The block device MUST be registered if the template had one, otherwise // device indices are misaligned and transport state restore breaks. 
- let block_path = self.config.block_device.clone() + let block_path = self + .config + .block_device + .clone() .or_else(|| template.block_device.clone()); if let Some(ref block_path) = block_path { match crate::virtio::block::VirtioBlock::open(block_path, false) { @@ -692,16 +695,26 @@ impl Vm { let kick_evt = unsafe { libc::eventfd(0, libc::EFD_CLOEXEC | libc::EFD_NONBLOCK) }; if kick_evt >= 0 { let mut ioeventfd = kvm_bindings::kvm_ioeventfd { - datamatch: 1, len: 4, addr: notify_addr, fd: kick_evt, - flags: 1, ..Default::default() + datamatch: 1, + len: 4, + addr: notify_addr, + fd: kick_evt, + flags: 1, + ..Default::default() }; let ret = unsafe { - libc::ioctl(self.vm_fd.as_raw_fd(), 0x4040AE79u64 as libc::c_ulong, &ioeventfd) + libc::ioctl(self.vm_fd.as_raw_fd(), crate::compat::ioctl_req(0x4040AE79), &ioeventfd) }; if ret != 0 { ioeventfd.flags = 0; ioeventfd.datamatch = 0; - unsafe { libc::ioctl(self.vm_fd.as_raw_fd(), 0x4040AE79u64 as libc::c_ulong, &ioeventfd); } + unsafe { + libc::ioctl( + self.vm_fd.as_raw_fd(), + crate::compat::ioctl_req(0x4040AE79), + &ioeventfd, + ); + } } } self.net_tap_info = Some((tap_fd, dev_index, actual_irq, kick_evt)); @@ -715,7 +728,12 @@ impl Vm { // (virtio-fs registered above, before net) - mmio_bus.set_guest_memory_with_hole(guest_memory.as_ptr(), guest_memory.size(), guest_memory.hole_start(), guest_memory.hole_end()); + mmio_bus.set_guest_memory_with_hole( + guest_memory.as_ptr(), + guest_memory.size(), + guest_memory.hole_start(), + guest_memory.hole_end(), + ); // Restore transport state from the template snapshot so devices are // in the same state the guest kernel expects (queue addresses, activated). @@ -729,9 +747,7 @@ impl Vm { // Send transport reset to close stale connections from the snapshot. 
if let Some(dev_idx) = self.vsock_dev_index { if let Some(transport) = mmio_bus.transport_mut(dev_idx) { - if let Some(vsock) = transport.device_mut().as_any_mut() - .downcast_mut::() - { + if let Some(vsock) = transport.device_mut().as_any_mut().downcast_mut::() { vsock.send_transport_reset(); } } @@ -751,13 +767,11 @@ impl Vm { if limit_mb < template_mb { let reclaim_mb = template_mb - limit_mb; let reclaim_pages = reclaim_mb * 256; // 1 MB = 256 x 4KB pages - // Use update_target() via the MMIO bus so config_interrupt_pending - // is set and the guest driver gets notified. + // Use update_target() via the MMIO bus so config_interrupt_pending + // is set and the guest driver gets notified. let mut bus = self.mmio_bus.lock().unwrap(); if let Some(transport) = bus.transport_mut(0) { - if let Some(balloon) = transport.device_mut().as_any_mut() - .downcast_mut::() - { + if let Some(balloon) = transport.device_mut().as_any_mut().downcast_mut::() { balloon.update_target(reclaim_pages); tracing::info!( template_mb, @@ -801,18 +815,16 @@ impl Vm { if let (Some(dev_index), Some(irq)) = (self.vsock_dev_index, self.vsock_irq) { let bus = self.mmio_bus.lock().unwrap(); if let Some(transport) = bus.transport(dev_index) { - transport.vhost_interrupt().fetch_or(1, std::sync::atomic::Ordering::Release); + transport + .vhost_interrupt() + .fetch_or(1, std::sync::atomic::Ordering::Release); } drop(bus); let _ = self.vm_fd.set_irq_line(irq, true); let _ = self.vm_fd.set_irq_line(irq, false); } - tracing::info!( - "Forked VM from template: {}MB, {} vCPUs", - mem_size >> 20, - num_vcpus, - ); + tracing::info!("Forked VM from template: {}MB, {} vCPUs", mem_size >> 20, num_vcpus,); Ok(()) } @@ -886,11 +898,7 @@ impl Vm { // Read to clear the eventfd let mut val: u64 = 0; unsafe { - libc::read( - pfd.fd, - &mut val as *mut u64 as *mut libc::c_void, - 8, - ); + libc::read(pfd.fd, &mut val as *mut u64 as *mut libc::c_void, 8); } need_irq = true; pfd.revents = 0; @@ -945,10 +953,16 @@ impl 
Vm { std::thread::Builder::new() .name("vsock-rx-poll".into()) .spawn(move || { - let mut pfd = libc::pollfd { fd: device_fd, events: libc::POLLIN, revents: 0 }; + let mut pfd = libc::pollfd { + fd: device_fd, + events: libc::POLLIN, + revents: 0, + }; loop { let ret = unsafe { libc::poll(&mut pfd, 1, 500) }; - if ret <= 0 { continue; } + if ret <= 0 { + continue; + } if pfd.revents & libc::POLLIN != 0 { pfd.revents = 0; // Trigger RX processing on the vsock device @@ -956,7 +970,7 @@ impl Vm { if let Some(transport) = bus.transport_mut(dev_index) { let vhost_int = transport.vhost_interrupt(); let _ = transport.device_mut().process_queue(0); // RX_QUEUE - // Signal guest interrupt + // Signal guest interrupt vhost_int.fetch_or(1, std::sync::atomic::Ordering::Release); drop(bus); let _ = vm_fd_clone.set_irq_line(irq, true); @@ -979,23 +993,37 @@ impl Vm { .name("net-io-poll".into()) .spawn(move || { let mut pollfds = [ - libc::pollfd { fd: tap_fd, events: libc::POLLIN, revents: 0 }, - libc::pollfd { fd: kick_evt, events: libc::POLLIN, revents: 0 }, + libc::pollfd { + fd: tap_fd, + events: libc::POLLIN, + revents: 0, + }, + libc::pollfd { + fd: kick_evt, + events: libc::POLLIN, + revents: 0, + }, ]; let nfds = if kick_evt >= 0 { 2 } else { 1 }; loop { let ret = unsafe { libc::poll(pollfds.as_mut_ptr(), nfds, 500) }; - if ret <= 0 { continue; } + if ret <= 0 { + continue; + } let tap_readable = pollfds[0].revents & libc::POLLIN != 0; let tx_kicked = nfds > 1 && pollfds[1].revents & libc::POLLIN != 0; pollfds[0].revents = 0; - if nfds > 1 { pollfds[1].revents = 0; } + if nfds > 1 { + pollfds[1].revents = 0; + } // Drain kick eventfd if tx_kicked { let mut val: u64 = 0; - unsafe { libc::read(kick_evt, &mut val as *mut u64 as *mut libc::c_void, 8); } + unsafe { + libc::read(kick_evt, &mut val as *mut u64 as *mut libc::c_void, 8); + } } let mut need_irq = false; @@ -1005,13 +1033,15 @@ impl Vm { // Process TX if guest kicked if tx_kicked { let _ = 
transport.device_mut().process_queue(1); // TX_QUEUE - // Also process descriptor chains for TX + // Also process descriptor chains for TX transport.process_queue_descriptors_pub(1); } // Process RX from TAP if tap_readable { - if let Some(net) = transport.device_mut().as_any_mut() + if let Some(net) = transport + .device_mut() + .as_any_mut() .downcast_mut::() { while net.process_rx_from_tap() { @@ -1021,7 +1051,8 @@ impl Vm { } if need_irq || tx_kicked { - transport.vhost_interrupt() + transport + .vhost_interrupt() .fetch_or(1, std::sync::atomic::Ordering::Release); need_irq = true; } @@ -1052,9 +1083,7 @@ impl Vm { let shutdown_clone = Arc::clone(&shutdown_flag); let total_pages = self.mem_size / 4096; let mem_size = self.mem_size; - let guest_mem_ptr = self.guest_memory.as_ref() - .map(|m| m.as_ptr() as usize) - .unwrap_or(0); + let guest_mem_ptr = self.guest_memory.as_ref().map(|m| m.as_ptr() as usize).unwrap_or(0); // Floor: retain at least 50% of guest RAM or 512MB, whichever is larger. // Prevents balloon from starving small VMs (e.g. 2GB with Claude Code). let mem_mb = (self.mem_size / (1024 * 1024)) as u32; @@ -1114,7 +1143,7 @@ impl Vm { // Periodically refresh overcommit tracking (every 10 ticks = 10s) static TICK_COUNT: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0); let tick = TICK_COUNT.fetch_add(1, std::sync::atomic::Ordering::Relaxed); - if tick % 10 == 0 && guest_mem_ptr != 0 { + if tick.is_multiple_of(10) && guest_mem_ptr != 0 { overcommit.refresh(guest_mem_ptr as *const u8, mem_size); tracing::debug!( private_pages = overcommit.private_pages(), @@ -1139,12 +1168,12 @@ impl Vm { // Spawn AP threads (vCPU 1, 2, ...) and capture their pthread_t handles for vcpu in all_vcpus.drain(1..) 
{ // Use a channel to get the pthread_t from inside the spawned thread - let (tx, rx) = std::sync::mpsc::channel::(); + let (tx, rx) = std::sync::mpsc::channel::(); let handle = std::thread::Builder::new() .name(format!("vcpu-{}", vcpu.id())) .spawn(move || { // Send our pthread_t to the main thread - let _ = tx.send(unsafe { libc::pthread_self() }); + let _ = tx.send(crate::compat::SendPthreadT(unsafe { libc::pthread_self() })); let mut vcpu = vcpu; if let Err(e) = vcpu.run_loop() { tracing::error!("vCPU {} exited with error: {e}", vcpu.id()); @@ -1153,7 +1182,7 @@ impl Vm { .context("Failed to spawn AP vCPU thread")?; // Receive the pthread_t handle if let Ok(tid) = rx.recv() { - vcpu_threads.push(tid); + vcpu_threads.push(tid.0); } ap_handles.push(handle); } @@ -1164,7 +1193,9 @@ impl Vm { vcpu_threads.insert(0, bsp_tid); // Start the per-VM control socket now that we have all pthread_t handles - let guest_mem_ptr = self.guest_memory.as_ref() + let guest_mem_ptr = self + .guest_memory + .as_ref() .map(|m| m.as_ptr()) .unwrap_or(std::ptr::null_mut()); let vm_handle = Arc::new(crate::control::sync_server::VmHandle { @@ -1240,18 +1271,14 @@ impl Vm { break; } let n = unsafe { - libc::read( - client_fd, - buf.as_mut_ptr() as *mut libc::c_void, - buf.len(), - ) + libc::read(client_fd, buf.as_mut_ptr() as *mut libc::c_void, buf.len()) }; if n <= 0 { break; // client disconnected } - for i in 0..n as usize { + for &byte in &buf[..n as usize] { let mut serial = serial_for_console.lock().unwrap(); - serial.enqueue_input(buf[i]); + serial.enqueue_input(byte); if serial.interrupt_enabled() { drop(serial); let _ = vm_fd_for_console.set_irq_line(4, true); @@ -1316,38 +1343,38 @@ pub fn setup_gsi_routing(vm_fd: &VmFd) -> Result<()> { // IOAPIC entries for all 24 pins for i in 0u32..24 { - let mut entry = kvm_irq_routing_entry::default(); - entry.gsi = i; - entry.type_ = KVM_IRQ_ROUTING_IRQCHIP; - // SAFETY: union access — we know the type_ is IRQCHIP - unsafe { - 
entry.u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC; - entry.u.irqchip.pin = i; - } + let mut entry = kvm_irq_routing_entry { + gsi: i, + type_: KVM_IRQ_ROUTING_IRQCHIP, + ..Default::default() + }; + + entry.u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC; + entry.u.irqchip.pin = i; entries.push(entry); } // PIC master entries for IRQ 0-7 for i in 0u32..8 { - let mut entry = kvm_irq_routing_entry::default(); - entry.gsi = i; - entry.type_ = KVM_IRQ_ROUTING_IRQCHIP; - unsafe { - entry.u.irqchip.irqchip = KVM_IRQCHIP_PIC_MASTER; - entry.u.irqchip.pin = i; - } + let mut entry = kvm_irq_routing_entry { + gsi: i, + type_: KVM_IRQ_ROUTING_IRQCHIP, + ..Default::default() + }; + entry.u.irqchip.irqchip = KVM_IRQCHIP_PIC_MASTER; + entry.u.irqchip.pin = i; entries.push(entry); } // PIC slave entries for IRQ 8-15 for i in 0u32..8 { - let mut entry = kvm_irq_routing_entry::default(); - entry.gsi = i + 8; - entry.type_ = KVM_IRQ_ROUTING_IRQCHIP; - unsafe { - entry.u.irqchip.irqchip = KVM_IRQCHIP_PIC_SLAVE; - entry.u.irqchip.pin = i; - } + let mut entry = kvm_irq_routing_entry { + gsi: i + 8, + type_: KVM_IRQ_ROUTING_IRQCHIP, + ..Default::default() + }; + entry.u.irqchip.irqchip = KVM_IRQCHIP_PIC_SLAVE; + entry.u.irqchip.pin = i; entries.push(entry); } @@ -1356,8 +1383,7 @@ pub fn setup_gsi_routing(vm_fd: &VmFd) -> Result<()> { let header_size = std::mem::size_of::(); let total_size = header_size + entries.len() * entry_size; - let layout = std::alloc::Layout::from_size_align(total_size, 8) - .context("Invalid layout for kvm_irq_routing")?; + let layout = std::alloc::Layout::from_size_align(total_size, 8).context("Invalid layout for kvm_irq_routing")?; // SAFETY: We allocate, zero, fill, pass to ioctl, then dealloc. 
unsafe { @@ -1376,9 +1402,7 @@ pub fn setup_gsi_routing(vm_fd: &VmFd) -> Result<()> { std::ptr::write(entries_ptr.add(i), *entry); } - let result = vm_fd - .set_gsi_routing(routing) - .context("Failed to set GSI routing"); + let result = vm_fd.set_gsi_routing(routing).context("Failed to set GSI routing"); std::alloc::dealloc(ptr, layout); diff --git a/src/vmm/serial.rs b/src/vmm/serial.rs index 264d184..2cc162a 100644 --- a/src/vmm/serial.rs +++ b/src/vmm/serial.rs @@ -331,11 +331,7 @@ impl Drop for RawModeGuard { #[cfg(unix)] { unsafe { - libc::tcsetattr( - libc::STDIN_FILENO, - libc::TCSANOW, - &self.original_termios, - ); + libc::tcsetattr(libc::STDIN_FILENO, libc::TCSANOW, &self.original_termios); } tracing::debug!("Terminal restored from raw mode"); } diff --git a/src/vmm/vcpu.rs b/src/vmm/vcpu.rs index d1de028..585b46f 100644 --- a/src/vmm/vcpu.rs +++ b/src/vmm/vcpu.rs @@ -3,13 +3,13 @@ use std::sync::{Arc, Condvar, Mutex}; use std::time::Duration; use anyhow::{Context, Result}; -use kvm_bindings::{KVM_MAX_CPUID_ENTRIES, Msrs}; +use kvm_bindings::{Msrs, KVM_MAX_CPUID_ENTRIES}; // AsRawFd removed — no longer needed use kvm_ioctls::{Kvm, VcpuExit, VcpuFd, VmFd}; use vm_memory::GuestAddress; -use crate::virtio::mmio::MmioBus; use super::serial::{Serial, COM1_PORT_BASE, COM1_PORT_COUNT}; +use crate::virtio::mmio::MmioBus; /// Global shutdown flag — set by SIGTERM/SIGINT handler. /// The vCPU run loop checks this and exits cleanly so Drop runs. 
@@ -140,9 +140,7 @@ impl Vcpu { mmio_bus: Arc>, serial: Arc>, ) -> Result { - let fd = vm_fd - .create_vcpu(id as u64) - .context("Failed to create vCPU")?; + let fd = vm_fd.create_vcpu(id as u64).context("Failed to create vCPU")?; // Set up CPUID -- pass through host CPUID with KVM filtering let mut cpuid = kvm @@ -172,8 +170,7 @@ impl Vcpu { } } - fd.set_cpuid2(&cpuid) - .context("Failed to set CPUID")?; + fd.set_cpuid2(&cpuid).context("Failed to set CPUID")?; // Set up MSRs — enable MTRRs so the kernel initializes PAT correctly. // Without this, the kernel sees "MTRRs disabled" and skips PAT init, @@ -184,9 +181,9 @@ impl Vcpu { // MTRRdefType (MSR 0x2FF): E=1 (bit 11), FE=1 (bit 10), default type=WB (6) entries[0].index = 0x2FF; entries[0].data = (1 << 11) | (1 << 10) | 6; // 0xC06 - // PAT MSR (0x277): Intel recommended defaults matching QEMU/SeaBIOS - // PA0=WB(06) PA1=WC(01) PA2=UC-(07) PA3=UC(00) - // PA4=WB(06) PA5=WP(05) PA6=UC-(07) PA7=WT(04) + // PAT MSR (0x277): Intel recommended defaults matching QEMU/SeaBIOS + // PA0=WB(06) PA1=WC(01) PA2=UC-(07) PA3=UC(00) + // PA4=WB(06) PA5=WP(05) PA6=UC-(07) PA7=WT(04) entries[1].index = 0x277; entries[1].data = 0x0007_0106_0007_0506_u64.swap_bytes(); // Correct byte layout: PA7..PA0 = 04 07 05 06 00 07 01 06 @@ -205,8 +202,8 @@ impl Vcpu { lapic.regs[sivr_offset + 2] as u8, lapic.regs[sivr_offset + 3] as u8, ]); - sivr |= 1 << 8; // Software enable - sivr |= 0xFF; // Spurious vector = 0xFF + sivr |= 1 << 8; // Software enable + sivr |= 0xFF; // Spurious vector = 0xFF let sivr_bytes = sivr.to_le_bytes(); lapic.regs[sivr_offset] = sivr_bytes[0] as i8; lapic.regs[sivr_offset + 1] = sivr_bytes[1] as i8; @@ -248,12 +245,23 @@ impl Vcpu { let mut regs = fd.get_regs().context("Failed to get regs")?; regs.rip = entry_addr.0; regs.rflags = 0x2; // bit 1 is always set - // Linux boot protocol: rsi = pointer to boot_params (at 0x7000 by convention) + // Linux boot protocol: rsi = pointer to boot_params (at 0x7000 by 
convention) regs.rsi = boot_params_addr(); fd.set_regs(®s).context("Failed to set regs")?; } - Ok(Self { id, fd, vm_fd: Arc::clone(vm_fd), mmio_bus, serial, cmos_index: 0, pm_regs: [0u8; 8], pause_state: None, pci_bus: None, deferred_tsc_deadline: None }) + Ok(Self { + id, + fd, + vm_fd: Arc::clone(vm_fd), + mmio_bus, + serial, + cmos_index: 0, + pm_regs: [0u8; 8], + pause_state: None, + pci_bus: None, + deferred_tsc_deadline: None, + }) } /// Set the shared pause state for snapshot coordination. @@ -277,21 +285,28 @@ impl Vcpu { mmio_bus: Arc>, serial: Arc>, ) -> Result { - let fd = vm_fd - .create_vcpu(id as u64) - .context("Failed to create vCPU")?; + let fd = vm_fd.create_vcpu(id as u64).context("Failed to create vCPU")?; // Use host CPUID (not snapshot) to avoid KVM PV CPUID entries // that can interfere with TSC_DEADLINE MSR writes. // The guest sees slightly different CPUID but all hardware features match. + #[allow(clippy::overly_complex_bool_expr)] if false && !vcpu_state.cpuid.is_empty() { if let Ok(entries) = serde_json::from_slice::>(&vcpu_state.cpuid) { - let cpuid_entries: Vec = entries.iter() - .map(|&(function, index, flags, eax, ebx, ecx, edx)| { - kvm_bindings::kvm_cpuid_entry2 { - function, index, flags, eax, ebx, ecx, edx, padding: [0; 3], - } - }) + let cpuid_entries: Vec = entries + .iter() + .map( + |&(function, index, flags, eax, ebx, ecx, edx)| kvm_bindings::kvm_cpuid_entry2 { + function, + index, + flags, + eax, + ebx, + ecx, + edx, + padding: [0; 3], + }, + ) .collect(); if let Ok(cpuid) = kvm_bindings::CpuId::from_entries(&cpuid_entries) { fd.set_cpuid2(&cpuid).context("Failed to restore CPUID")?; @@ -381,10 +396,13 @@ impl Vcpu { // 7. 
TSC pre-restore if !vcpu_state.msrs.is_empty() { if let Ok(entries) = serde_json::from_slice::>(&vcpu_state.msrs) { - let tsc_entries: Vec = entries.iter() + let tsc_entries: Vec = entries + .iter() .filter(|&&(idx, _)| idx == 0x10) .map(|&(index, data)| kvm_bindings::kvm_msr_entry { - index, data, ..Default::default() + index, + data, + ..Default::default() }) .collect(); if !tsc_entries.is_empty() { @@ -409,7 +427,8 @@ impl Vcpu { if !vcpu_state.msrs.is_empty() { if let Ok(mut entries) = serde_json::from_slice::>(&vcpu_state.msrs) { // If TSC_DEADLINE (0x6E0) is 0, use TSC value so timer fires immediately - let tsc_val = entries.iter() + let tsc_val = entries + .iter() .find(|&&(idx, _)| idx == 0x10) .map(|&(_, v)| v) .unwrap_or(0); @@ -425,12 +444,13 @@ impl Vcpu { entries.retain(|&(idx, _)| idx != 0x6E0); // Filter out zero-value KVM PV MSRs (write-only from guest) - let msr_entries: Vec = entries.iter() - .filter(|&&(idx, data)| { - !(idx >= 0x4b564d00 && idx <= 0x4b564d07 && data == 0) - }) + let msr_entries: Vec = entries + .iter() + .filter(|&&(idx, data)| !((0x4b564d00..=0x4b564d07).contains(&idx) && data == 0)) .map(|&(index, data)| kvm_bindings::kvm_msr_entry { - index, data, ..Default::default() + index, + data, + ..Default::default() }) .collect(); if let Ok(msrs) = Msrs::from_entries(&msr_entries) { @@ -456,17 +476,23 @@ impl Vcpu { tracing::info!("vCPU {} restored from template", id); - Ok(Self { id, fd, vm_fd: Arc::clone(vm_fd), mmio_bus, serial, cmos_index: 0, pm_regs: [0u8; 8], pause_state: None, pci_bus: None, deferred_tsc_deadline }) + Ok(Self { + id, + fd, + vm_fd: Arc::clone(vm_fd), + mmio_bus, + serial, + cmos_index: 0, + pm_regs: [0u8; 8], + pause_state: None, + pci_bus: None, + deferred_tsc_deadline, + }) } /// Serialize a KVM struct to bytes. 
fn to_bytes(val: &T) -> Vec { - unsafe { - std::slice::from_raw_parts( - val as *const T as *const u8, - std::mem::size_of::(), - ).to_vec() - } + unsafe { std::slice::from_raw_parts(val as *const T as *const u8, std::mem::size_of::()).to_vec() } } /// Capture complete vCPU state for snapshotting. @@ -486,13 +512,18 @@ impl Vcpu { let msrs_bytes = self.capture_msrs().unwrap_or_default(); // Save CPUID - let cpuid = self.fd.get_cpuid2(KVM_MAX_CPUID_ENTRIES) + let cpuid = self + .fd + .get_cpuid2(KVM_MAX_CPUID_ENTRIES) .map(|c| { let entries = c.as_slice(); serde_json::to_vec( - &entries.iter().map(|e| (e.function, e.index, e.flags, e.eax, e.ebx, e.ecx, e.edx)) - .collect::>() - ).unwrap_or_default() + &entries + .iter() + .map(|e| (e.function, e.index, e.flags, e.eax, e.ebx, e.ecx, e.edx)) + .collect::>(), + ) + .unwrap_or_default() }) .unwrap_or_default(); @@ -519,7 +550,7 @@ impl Vcpu { // Get the full list of MSRs KVM says need saving let kvm = kvm_ioctls::Kvm::new().context("open /dev/kvm for MSR list")?; let msr_list = kvm.get_msr_index_list().context("get MSR index list")?; - let mut msr_indices: Vec = msr_list.as_slice().iter().map(|&i| i).collect(); + let mut msr_indices: Vec = msr_list.as_slice().to_vec(); // Add MSRs that KVM doesn't include in msrs_to_save but that are // critical for snapshot/restore (like Firecracker's SERIALIZABLE_MSR_RANGES) @@ -532,8 +563,12 @@ impl Vcpu { // Read MSRs in chunks (KVM limits entries per call) let mut all_entries: Vec<(u32, u64)> = Vec::new(); for chunk in msr_indices.chunks(64) { - let entries: Vec = chunk.iter() - .map(|&idx| kvm_bindings::kvm_msr_entry { index: idx, ..Default::default() }) + let entries: Vec = chunk + .iter() + .map(|&idx| kvm_bindings::kvm_msr_entry { + index: idx, + ..Default::default() + }) .collect(); if let Ok(mut msrs) = Msrs::from_entries(&entries) { if let Ok(count) = self.fd.get_msrs(&mut msrs) { @@ -605,8 +640,10 @@ impl Vcpu { if let Ok(mut lapic) = self.fd.get_lapic() { // Force one-shot 
mode if snapshot had TSC-deadline let lvt = u32::from_le_bytes([ - lapic.regs[0x320] as u8, lapic.regs[0x321] as u8, - lapic.regs[0x322] as u8, lapic.regs[0x323] as u8, + lapic.regs[0x320] as u8, + lapic.regs[0x321] as u8, + lapic.regs[0x322] as u8, + lapic.regs[0x323] as u8, ]); let vector = lvt & 0xFF; if vector != 0 && ((lvt >> 17) & 3) == 2 { @@ -631,7 +668,12 @@ impl Vcpu { if let Ok(regs) = self.fd.get_regs() { tracing::info!( "vCPU {} shutting down: RIP={:#x} RSP={:#x} RFLAGS={:#x} exits={} serial={}", - self.id, regs.rip, regs.rsp, regs.rflags, exit_count, serial_bytes + self.id, + regs.rip, + regs.rsp, + regs.rflags, + exit_count, + serial_bytes ); } else { tracing::info!("vCPU {} shutting down (signal received)", self.id); @@ -647,13 +689,17 @@ impl Vcpu { ExitAction::IoOut { port, byte } } VcpuExit::IoIn(port, data) => { - if port >= COM1_PORT_BASE && port < COM1_PORT_BASE + COM1_PORT_COUNT { + if (COM1_PORT_BASE..COM1_PORT_BASE + COM1_PORT_COUNT).contains(&port) { let offset = port - COM1_PORT_BASE; let val = self.serial.lock().unwrap().read(offset); - if let Some(b) = data.first_mut() { *b = val; } + if let Some(b) = data.first_mut() { + *b = val; + } } else if port == CMOS_DATA_PORT { - if let Some(b) = data.first_mut() { *b = cmos_read(self.cmos_index); } - } else if port >= 0x600 && port <= 0x607 { + if let Some(b) = data.first_mut() { + *b = cmos_read(self.cmos_index); + } + } else if (0x600..=0x607).contains(&port) { let offset = (port - 0x600) as usize; if let Some(b) = data.first_mut() { *b = self.pm_regs[offset]; @@ -669,11 +715,7 @@ impl Vcpu { // Try PCI bus first (ECAM + BAR regions) if let Some(ref pci_bus) = self.pci_bus { let bus = pci_bus.lock().unwrap(); - if bus.handle_ecam_read(addr, data) { - handled = true; - } else if bus.handle_bar_read(addr, data) { - handled = true; - } + handled = bus.handle_ecam_read(addr, data) || bus.handle_bar_read(addr, data); } if !handled { let bus = self.mmio_bus.lock().unwrap(); @@ -691,7 +733,10 @@ 
impl Vcpu { data_bytes[..copy_len].copy_from_slice(&data[..copy_len]); ExitAction::MmioWrite { addr, data_bytes, len } } - VcpuExit::Debug(dbg_info) => ExitAction::Debug { pc: dbg_info.pc, dr6: dbg_info.dr6 }, + VcpuExit::Debug(dbg_info) => ExitAction::Debug { + pc: dbg_info.pc, + dr6: dbg_info.dr6, + }, other => ExitAction::Unknown(format!("{:?}", other)), }, Err(e) => { @@ -717,7 +762,13 @@ impl Vcpu { if let Ok(sregs) = self.fd.get_sregs() { tracing::error!( "vCPU {} FATAL: RIP={:#x} RSP={:#x} CR3={:#x} CR2={:#x} RFLAGS={:#x} exit={}", - self.id, regs.rip, regs.rsp, sregs.cr3, sregs.cr2, regs.rflags, exit_count + self.id, + regs.rip, + regs.rsp, + sregs.cr3, + sregs.cr2, + regs.rflags, + exit_count ); } } @@ -733,13 +784,12 @@ impl Vcpu { // Log first 5 exits for debugging fork issues if !first_exits_logged && exit_count <= 5 { tracing::info!("vCPU {} exit #{}: {:?}", self.id, exit_count, action); - if exit_count == 5 { first_exits_logged = true; } + if exit_count == 5 { + first_exits_logged = true; + } } - if exit_count % 100_000 == 0 { - tracing::info!( - "vCPU {} exits={}, serial_bytes={}", - self.id, exit_count, serial_bytes - ); + if exit_count.is_multiple_of(100_000) { + tracing::info!("vCPU {} exits={}, serial_bytes={}", self.id, exit_count, serial_bytes); } match action { @@ -753,7 +803,7 @@ impl Vcpu { break; } ExitAction::IoOut { port, byte } => { - if port >= COM1_PORT_BASE && port < COM1_PORT_BASE + COM1_PORT_COUNT { + if (COM1_PORT_BASE..COM1_PORT_BASE + COM1_PORT_COUNT).contains(&port) { let offset = port - COM1_PORT_BASE; if offset == 0 { serial_bytes += 1; @@ -771,7 +821,7 @@ impl Vcpu { self.cmos_index = byte & 0x7F; } else if port == CMOS_DATA_PORT { // ignore - } else if port >= 0x600 && port <= 0x607 { + } else if (0x600..=0x607).contains(&port) { let offset = (port - 0x600) as usize; self.pm_regs[offset] = byte; } else { @@ -785,11 +835,8 @@ impl Vcpu { // Try PCI bus first (ECAM + BAR regions) if let Some(ref pci_bus) = self.pci_bus { let 
mut bus = pci_bus.lock().unwrap(); - if bus.handle_ecam_write(addr, &data_bytes[..len]) { - pci_handled = true; - } else if bus.handle_bar_write(addr, &data_bytes[..len]) { - pci_handled = true; - } + pci_handled = bus.handle_ecam_write(addr, &data_bytes[..len]) + || bus.handle_bar_write(addr, &data_bytes[..len]); } if !pci_handled { let (handled, irq) = { @@ -819,7 +866,6 @@ impl Vcpu { } } - /// Minimal CMOS/RTC register read. /// /// The kernel reads CMOS during boot for time-of-day and hardware detection. @@ -865,27 +911,27 @@ fn setup_long_mode(sregs: &mut kvm_bindings::kvm_sregs) { sregs.idt.limit = 0; sregs.cr0 = 0x8003_0001; // PG | PE | WP | ET - sregs.cr3 = 0x9000; // PML4 base - sregs.cr4 = 0x20; // PAE - sregs.efer = 0x500; // LME | LMA (long mode enable + active) + sregs.cr3 = 0x9000; // PML4 base + sregs.cr4 = 0x20; // PAE + sregs.efer = 0x500; // LME | LMA (long mode enable + active) // Code segment -- 64-bit mode sregs.cs.base = 0; sregs.cs.limit = 0xFFFF_FFFF; sregs.cs.selector = 0x10; // GDT entry 2 - sregs.cs.type_ = 0xB; // execute/read, accessed + sregs.cs.type_ = 0xB; // execute/read, accessed sregs.cs.present = 1; sregs.cs.dpl = 0; - sregs.cs.db = 0; // must be 0 for 64-bit + sregs.cs.db = 0; // must be 0 for 64-bit sregs.cs.s = 1; - sregs.cs.l = 1; // 64-bit mode + sregs.cs.l = 1; // 64-bit mode sregs.cs.g = 1; // Data segment sregs.ds.base = 0; sregs.ds.limit = 0xFFFF_FFFF; sregs.ds.selector = 0x18; // GDT entry 3 - sregs.ds.type_ = 0x3; // read/write, accessed + sregs.ds.type_ = 0x3; // read/write, accessed sregs.ds.present = 1; sregs.ds.dpl = 0; sregs.ds.db = 1; From 72e832583327e74adae91d419150a223958dc219 Mon Sep 17 00:00:00 2001 From: Tolga Karatas Date: Tue, 5 May 2026 15:34:07 +0300 Subject: [PATCH 2/5] build: optimize release profile and project metadata - profile.release: LTO fat, codegen-units=1, panic=abort, strip=true - Cargo.toml: homepage, repository, keywords, MSRV 1.87 - Workspace members: add rust-version = "1.87" - 
rustfmt.toml: max_width=120 matching original codebase style - .editorconfig: consistent settings across editors - Makefile: add shift-left targets (make ci, make fix, make lint) - .gitignore: add VM artifact patterns (*.img, *.qcow2) --- .editorconfig | 34 ++++++++++++++++++++++++++++++++++ .gitignore | 6 ++++++ Cargo.toml | 10 ++++++++++ Makefile | 21 +++++++++++++++++++++ rustfmt.toml | 1 + 5 files changed, 72 insertions(+) create mode 100644 .editorconfig create mode 100644 rustfmt.toml diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..dc26ec1 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,34 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true +indent_style = space +indent_size = 4 + +[*.rs] +indent_size = 4 +max_line_length = 120 + +[*.toml] +indent_size = 2 + +[*.{yml,yaml}] +indent_size = 2 + +[*.{json,json5}] +indent_size = 2 + +[*.md] +trim_trailing_whitespace = false + +[Makefile] +indent_style = tab + +[*.sh] +indent_size = 4 + +[Dockerfile*] +indent_size = 4 diff --git a/.gitignore b/.gitignore index eb3d61d..c25f961 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,9 @@ .DS_Store /backup backup/ + +# VM artifacts +*.img +*.qcow2 +*.mem +/templates/ diff --git a/Cargo.toml b/Cargo.toml index eb4484e..2cac7ad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,9 +2,13 @@ name = "clone" version = "0.1.0" edition = "2021" +rust-version = "1.87" description = "Minimal VMM for multi-tenant dev shells and serverless" license = "MIT" authors = ["Unix Shells Limited Company"] +homepage = "https://github.com/nicholasgasior/clone" +repository = "https://github.com/nicholasgasior/clone" +keywords = ["vmm", "kvm", "virtualization", "microvm", "serverless"] [dependencies] # async runtime @@ -55,6 +59,12 @@ io_uring = ["io-uring"] [dev-dependencies] tempfile = "3" +[profile.release] +lto = "fat" +codegen-units = 1 +panic = "abort" +strip = true + [[bin]] name = "clone" path = 
"src/main.rs" diff --git a/Makefile b/Makefile index 91c6a54..274ad8c 100644 --- a/Makefile +++ b/Makefile @@ -21,6 +21,7 @@ # CLONE=/path/to/clone Override binary path .PHONY: all build build-debug test check fmt clippy \ + ci lint deny audit \ e2e e2e-quick e2e-boot e2e-snapshot e2e-storage e2e-security \ e2e-migration e2e-devices e2e-multivm \ initrd clean @@ -51,6 +52,26 @@ fmt: clippy: cargo clippy -- -D warnings +# ── Auto-fix ──────────────────────────────────────────────────────────── + +fix: + cargo fmt + cargo fix --allow-dirty --allow-staged + cargo clippy --fix --allow-dirty --allow-staged + cargo fmt + +# ── Shift-left targets ────────────────────────────────────────────────── + +ci: fmt clippy test + +lint: fmt clippy + +deny: + cargo deny check + +audit: + cargo audit + # ── End-to-end tests ──────────────────────────────────────────────────── # All e2e targets require: Linux, KVM (/dev/kvm), root (sudo), busybox-static diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 0000000..7530651 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1 @@ +max_width = 120 From d0ec71349d5f9b64bfe22d40cda74fe94ecd0fbf Mon Sep 17 00:00:00 2001 From: Tolga Karatas Date: Tue, 5 May 2026 15:34:25 +0300 Subject: [PATCH 3/5] ci: add GitHub Actions CI/CD with release automation Workflows: - build.yml: fmt, clippy, musl static build+test, MSRV 1.87 check, cargo-deny, security audit (with smart change detection) - release-please.yml: conventional commits to automated release PRs - release.yml: x86_64+aarch64 musl static binaries, SHA256 checksums, cosign keyless signing, SLSA attestation, SBOM (SPDX) - security-scan.yml: weekly cargo audit, cargo deny, CodeQL Rust - dependabot.yml: weekly cargo+actions updates with semantic grouping - dependabot-auto-merge.yml: auto-squash-merge patch/minor updates Templates: - Issue templates (bug report, feature request) - Pull request template with checklist --- .github/ISSUE_TEMPLATE/bug_report.yml | 48 +++++++ 
.github/ISSUE_TEMPLATE/feature_request.yml | 28 ++++ .github/dependabot.yml | 30 +++++ .github/pull_request_template.md | 19 +++ .github/workflows/build.yml | 127 ++++++++++++++++++ .github/workflows/dependabot-auto-merge.yml | 37 ++++++ .github/workflows/release-please.yml | 44 +++++++ .github/workflows/release.yml | 138 ++++++++++++++++++++ .github/workflows/security-scan.yml | 42 ++++++ 9 files changed, 513 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml create mode 100644 .github/ISSUE_TEMPLATE/feature_request.yml create mode 100644 .github/dependabot.yml create mode 100644 .github/pull_request_template.md create mode 100644 .github/workflows/build.yml create mode 100644 .github/workflows/dependabot-auto-merge.yml create mode 100644 .github/workflows/release-please.yml create mode 100644 .github/workflows/release.yml create mode 100644 .github/workflows/security-scan.yml diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 0000000..1415bf9 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,48 @@ +name: Bug Report +description: Report a bug in Clone +labels: ["bug"] +body: + - type: textarea + id: description + attributes: + label: Description + description: A clear description of the bug. + validations: + required: true + - type: textarea + id: reproduce + attributes: + label: Steps to Reproduce + description: Steps to reproduce the behavior. + placeholder: | + 1. Run `clone run --kernel ... --rootfs ...` + 2. Inside the guest, run `...` + 3. Observe error + validations: + required: true + - type: textarea + id: expected + attributes: + label: Expected Behavior + description: What you expected to happen. + validations: + required: true + - type: textarea + id: environment + attributes: + label: Environment + description: System information. 
+ value: | + - Clone version: + - Host OS/kernel: + - CPU: + - RAM: + - Guest distro: + validations: + required: true + - type: textarea + id: logs + attributes: + label: Logs + description: Relevant log output (run with `RUST_LOG=debug`). + render: shell diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 0000000..00d8fb8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,28 @@ +name: Feature Request +description: Suggest a new feature for Clone +labels: ["enhancement"] +body: + - type: textarea + id: problem + attributes: + label: Problem + description: What problem does this feature solve? + validations: + required: true + - type: textarea + id: solution + attributes: + label: Proposed Solution + description: How should this work? + validations: + required: true + - type: textarea + id: alternatives + attributes: + label: Alternatives Considered + description: Any alternative approaches you've considered. + - type: textarea + id: context + attributes: + label: Additional Context + description: Any other context (use case, benchmarks, links). 
diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..87d40b6 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,30 @@ +version: 2 +updates: + - package-ecosystem: cargo + directory: / + schedule: + interval: weekly + day: monday + groups: + rust-vmm: + patterns: + - "kvm-*" + - "vm-memory" + - "linux-loader" + - "virtio-queue" + async: + patterns: + - "tokio" + - "io-uring" + serialization: + patterns: + - "serde" + - "serde_json" + open-pull-requests-limit: 10 + + - package-ecosystem: github-actions + directory: / + schedule: + interval: weekly + day: monday + open-pull-requests-limit: 5 diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..fd7dcf3 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,19 @@ +## Summary + + + +## Changes + + + +## Test Plan + +- [ ] `cargo fmt -- --check` passes +- [ ] `cargo clippy -- -D warnings` passes +- [ ] `cargo test` passes (all unit tests) +- [ ] E2E tests pass (if applicable) +- [ ] Manual testing done (describe below) + +## Notes + + diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..013b8b4 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,127 @@ +name: CI + +on: + push: + branches: [master] + pull_request: + branches: [master] + workflow_call: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +env: + CARGO_TERM_COLOR: always + +jobs: + changes: + name: Detect changes + runs-on: ubuntu-latest + outputs: + code: ${{ steps.filter.outputs.code }} + steps: + - uses: actions/checkout@v4 + - uses: dorny/paths-filter@v3 + id: filter + with: + filters: | + code: + - 'src/**' + - 'crates/**' + - 'Cargo.toml' + - 'Cargo.lock' + - 'tests/**' + + fmt: + name: Format + runs-on: ubuntu-latest + needs: changes + if: needs.changes.outputs.code == 'true' + 
steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt + - run: cargo fmt -- --check + + clippy: + name: Clippy + runs-on: ubuntu-latest + needs: changes + if: needs.changes.outputs.code == 'true' + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + components: clippy + - uses: Swatinem/rust-cache@v2 + - run: cargo clippy -- -D warnings + + build: + name: Build & Test + runs-on: ubuntu-latest + needs: changes + if: needs.changes.outputs.code == 'true' + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + targets: x86_64-unknown-linux-musl + - uses: Swatinem/rust-cache@v2 + + - name: Install musl tools + run: sudo apt-get update && sudo apt-get install -y musl-tools + + - name: Build (static musl) + run: cargo build --release --target x86_64-unknown-linux-musl + + - name: Unit tests + run: cargo test --target x86_64-unknown-linux-musl + + - name: Smoke test binary + run: | + ./target/x86_64-unknown-linux-musl/release/clone --help + file target/x86_64-unknown-linux-musl/release/clone + ls -lh target/x86_64-unknown-linux-musl/release/clone + + - name: Upload binary + uses: actions/upload-artifact@v4 + with: + name: clone-linux-x86_64 + path: target/x86_64-unknown-linux-musl/release/clone + retention-days: 7 + + msrv: + name: MSRV check + runs-on: ubuntu-latest + needs: changes + if: needs.changes.outputs.code == 'true' + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@1.87 + - uses: Swatinem/rust-cache@v2 + - run: cargo check + + deny: + name: Dependency check + runs-on: ubuntu-latest + needs: changes + if: needs.changes.outputs.code == 'true' + steps: + - uses: actions/checkout@v4 + - uses: EmbarkStudios/cargo-deny-action@v2 + + security: + name: Security audit + runs-on: ubuntu-latest + needs: changes + if: needs.changes.outputs.code == 'true' + steps: + - uses: actions/checkout@v4 + - uses: rustsec/audit-check@v2 + with: + 
token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/dependabot-auto-merge.yml b/.github/workflows/dependabot-auto-merge.yml new file mode 100644 index 0000000..0191311 --- /dev/null +++ b/.github/workflows/dependabot-auto-merge.yml @@ -0,0 +1,37 @@ +name: Dependabot Auto-Merge + +# Triggers on all pull_request events because GitHub Actions does not support +# filtering by actor in the trigger. The `if: github.actor == 'dependabot[bot]'` +# condition on the job ensures only Dependabot PRs are processed. +on: + pull_request: + +permissions: {} + +jobs: + auto-merge: + runs-on: ubuntu-latest + if: github.actor == 'dependabot[bot]' + permissions: + contents: write + pull-requests: write + steps: + - name: Fetch Dependabot metadata + id: metadata + uses: dependabot/fetch-metadata@v2 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Auto-merge patch and minor updates + if: steps.metadata.outputs.update-type == 'version-update:semver-patch' || steps.metadata.outputs.update-type == 'version-update:semver-minor' + run: gh pr merge --auto --squash "$PR_URL" + env: + PR_URL: ${{ github.event.pull_request.html_url }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Comment on major updates + if: steps.metadata.outputs.update-type == 'version-update:semver-major' + run: gh pr comment "$PR_URL" --body "Major version update detected. Manual review required." 
+ env: + PR_URL: ${{ github.event.pull_request.html_url }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml new file mode 100644 index 0000000..a2a57d8 --- /dev/null +++ b/.github/workflows/release-please.yml @@ -0,0 +1,44 @@ +name: Release Please + +on: + push: + branches: [master] + paths-ignore: + - 'docs/**' + - 'CONTRIBUTING.md' + - 'SECURITY.md' + - 'LICENSE' + +permissions: + contents: write + pull-requests: write + +concurrency: + group: release-please + cancel-in-progress: false + +jobs: + validate: + name: Validate + uses: ./.github/workflows/build.yml + + release-please: + name: Create release PR + runs-on: ubuntu-latest + needs: validate + outputs: + release_created: ${{ steps.release.outputs.release_created }} + tag_name: ${{ steps.release.outputs.tag_name }} + steps: + - uses: googleapis/release-please-action@v4 + id: release + with: + token: ${{ secrets.GITHUB_TOKEN }} + + release: + name: Build & publish release + needs: release-please + if: needs.release-please.outputs.release_created + uses: ./.github/workflows/release.yml + with: + tag: ${{ needs.release-please.outputs.tag_name }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..5c5a758 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,138 @@ +name: Release + +on: + workflow_call: + inputs: + tag: + required: true + type: string + workflow_dispatch: + inputs: + tag: + description: 'Tag to release (e.g. 
v0.2.0)' + required: true + type: string + +permissions: + contents: write + id-token: write + attestations: write + +env: + CARGO_TERM_COLOR: always + +jobs: + build: + name: Build ${{ matrix.target }} + runs-on: ubuntu-latest + strategy: + matrix: + include: + - target: x86_64-unknown-linux-musl + artifact: clone-linux-x86_64 + use_cross: false + - target: aarch64-unknown-linux-musl + artifact: clone-linux-aarch64 + use_cross: true + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ inputs.tag }} + + - uses: dtolnay/rust-toolchain@stable + with: + targets: ${{ matrix.target }} + + - name: Install build tools + run: | + sudo apt-get update + sudo apt-get install -y musl-tools + if [ "${{ matrix.use_cross }}" = "true" ]; then + cargo install cross --git https://github.com/cross-rs/cross --tag v0.2.5 + fi + + - uses: Swatinem/rust-cache@v2 + with: + key: ${{ matrix.target }} + + - name: Build static binary + run: | + if [ "${{ matrix.use_cross }}" = "true" ]; then + cross build --release --target ${{ matrix.target }} + else + cargo build --release --target ${{ matrix.target }} + fi + + - name: Prepare artifact + run: | + cd target/${{ matrix.target }}/release + cp clone ${{ matrix.artifact }} + sha256sum ${{ matrix.artifact }} > ${{ matrix.artifact }}.sha256 + + - name: Smoke test + if: matrix.target == 'x86_64-unknown-linux-musl' + run: | + chmod +x target/${{ matrix.target }}/release/clone + target/${{ matrix.target }}/release/clone --help + echo "Binary smoke test passed" + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.artifact }} + path: | + target/${{ matrix.target }}/release/${{ matrix.artifact }} + target/${{ matrix.target }}/release/${{ matrix.artifact }}.sha256 + + publish: + name: Publish GitHub Release + runs-on: ubuntu-latest + needs: build + steps: + - uses: actions/checkout@v4 + + - name: Download all artifacts + uses: actions/download-artifact@v4 + with: + path: artifacts + merge-multiple: true + + - 
name: List artifacts + run: ls -la artifacts/ + + - name: Generate SBOM + uses: anchore/sbom-action@v0 + with: + artifact-name: clone-sbom.spdx.json + output-file: artifacts/clone-sbom.spdx.json + + - name: Install Cosign + uses: sigstore/cosign-installer@v3 + + - name: Sign checksums + run: | + cd artifacts + cat *.sha256 > SHA256SUMS + cosign sign-blob --yes --output-signature SHA256SUMS.sig --output-certificate SHA256SUMS.pem SHA256SUMS + + - name: Attest build provenance + uses: actions/attest-build-provenance@v2 + with: + subject-path: | + artifacts/clone-linux-x86_64 + artifacts/clone-linux-aarch64 + + - name: Create release + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ inputs.tag }} + generate_release_notes: true + files: | + artifacts/clone-linux-x86_64 + artifacts/clone-linux-x86_64.sha256 + artifacts/clone-linux-aarch64 + artifacts/clone-linux-aarch64.sha256 + artifacts/SHA256SUMS + artifacts/SHA256SUMS.sig + artifacts/SHA256SUMS.pem + artifacts/clone-sbom.spdx.json diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml new file mode 100644 index 0000000..4d27aef --- /dev/null +++ b/.github/workflows/security-scan.yml @@ -0,0 +1,42 @@ +name: Security Scan + +on: + schedule: + - cron: '0 6 * * 1' # Monday 6am UTC + workflow_dispatch: + +permissions: + contents: read + security-events: write + +env: + CARGO_TERM_COLOR: always + +jobs: + audit: + name: Cargo Audit + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: rustsec/audit-check@v2 + with: + token: ${{ secrets.GITHUB_TOKEN }} + + deny: + name: Cargo Deny + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: EmbarkStudios/cargo-deny-action@v2 + + codeql: + name: CodeQL + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: github/codeql-action/init@v3 + with: + languages: rust + - uses: github/codeql-action/autobuild@v3 + - uses: github/codeql-action/analyze@v3 + From 
1bc4b37c4aef04a1006e9ba5e6ce2150da03cfdf Mon Sep 17 00:00:00 2001 From: Tolga Karatas Date: Tue, 5 May 2026 15:34:41 +0300 Subject: [PATCH 4/5] docs: add project documentation and README badges - SECURITY.md: vulnerability reporting via GitHub private advisories - CONTRIBUTING.md: setup, shift-left local CI (make ci), pre-commit hooks, conventional commits, code style guide - CHANGELOG.md: initial file for release-please automation - README.md: CI status, license, and MSRV badges --- CHANGELOG.md | 5 +++ CONTRIBUTING.md | 98 +++++++++++++++++++++++++++++++++++++++++++++++++ README.md | 4 ++ SECURITY.md | 35 ++++++++++++++++++ 4 files changed, 142 insertions(+) create mode 100644 CHANGELOG.md create mode 100644 CONTRIBUTING.md create mode 100644 SECURITY.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..ea5aea9 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,5 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +This changelog is automatically generated by [release-please](https://github.com/googleapis/release-please). diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..30b64a7 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,98 @@ +# Contributing to Clone + +Thank you for your interest in contributing to Clone. 
+ +## Getting Started + +### Prerequisites + +- Linux host with KVM (`/dev/kvm`) +- Rust 1.87+ (install via [rustup](https://rustup.rs) or [mise](https://mise.jdx.dev)) +- For e2e tests: root access, kernel 6.5+, busybox-static + +### Setup + +```bash +# Install toolchain and dev tools (if using mise) +mise install +mise run setup # installs cargo-audit, cargo-deny, pre-commit hooks + +# Or manually: +cargo install cargo-audit cargo-deny +pip install pre-commit && pre-commit install && pre-commit install --hook-type pre-push +``` + +### Local CI (mandatory before every push) + +**All local checks MUST pass before pushing.** Do not rely on remote CI +to catch issues -- run them locally first. This is enforced by pre-commit +hooks if you ran `mise run setup`. + +```bash +# REQUIRED before every push: +make ci # fmt + clippy + test + deny + audit + +# Individual checks (for faster iteration during development): +make lint # fmt + clippy only (~5s) +make test # unit tests (~1s) +make deny # license + advisory + dependency checks +make audit # known vulnerability check +``` + +| Check | When | What it catches | +|-------|------|-----------------| +| `cargo fmt --check` | Every commit (pre-commit hook) | Formatting issues | +| `cargo clippy -D warnings` | Every commit (pre-commit hook) | Lint warnings, code smells | +| `cargo test` | Every push (pre-push hook) | Regressions, broken logic | +| `cargo deny check` | Every push (pre-push hook) | License violations, vulnerable deps | +| `cargo audit` | Before PR | Known security advisories | + +If a pre-commit hook fails, the commit/push is rejected. Fix the issue +and retry. Do not bypass hooks with `--no-verify`. + +## Development Workflow + +1. Fork the repository +2. Create a feature branch from `master` +3. Make your changes +4. Run `make ci` to verify all checks pass locally +5. Commit with [Conventional Commits](https://www.conventionalcommits.org/) format +6. 
Open a Pull Request against `master` + +### Commit Message Format + +``` +type(scope): description + +feat: new feature +fix: bug fix +perf: performance improvement +refactor: code change that neither fixes a bug nor adds a feature +docs: documentation only +test: adding or correcting tests +ci: CI/CD changes +chore: maintenance +``` + +### Code Style + +- `rustfmt.toml` enforces `max_width = 120` +- All public APIs should have doc comments +- Prefer safe Rust; `unsafe` blocks require a `// SAFETY:` comment + +## Architecture + +See [README.md](README.md) for the module layout. Key directories: + +- `src/vmm/` -- VM lifecycle, vCPU threads +- `src/virtio/` -- Virtio device implementations +- `src/boot/` -- Kernel loading, ACPI tables +- `src/memory/` -- Guest memory management +- `crates/guest-agent/` -- In-guest vsock agent +- `crates/clone-init/` -- Minimal init for generated initrd + +## Testing + +- **Unit tests**: `cargo test` (runs on any platform) +- **E2E tests**: `sudo make e2e` (requires Linux + KVM + root) +- See `tests/e2e/run_all.sh` for the full e2e suite diff --git a/README.md b/README.md index 9cc4fca..75e60f5 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,9 @@ # Clone +[![CI](https://github.com/nicholasgasior/clone/actions/workflows/build.yml/badge.svg)](https://github.com/nicholasgasior/clone/actions/workflows/build.yml) +[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) +[![MSRV: 1.87](https://img.shields.io/badge/MSRV-1.87-orange.svg)](https://blog.rust-lang.org/2025/05/15/Rust-1.87.0.html) + A lightweight Linux VMM built for multi-tenant shell hosting and high-density VM workloads. 25K lines of Rust, single binary, KVM-based. Clone boots a template VM once, then forks isolated copies via Shadow Clone page mapping. Idle VMs get reclaimed automatically. A host running 100 shells uses memory like it's running 10. 
diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..d8ebf31 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,35 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +|---------|--------------------| +| latest | Yes | +| < latest| No | + +## Reporting a Vulnerability + +If you discover a security vulnerability in Clone, please report it responsibly. + +**Do NOT open a public GitHub issue for security vulnerabilities.** + +To report a vulnerability, use GitHub's private vulnerability reporting: +https://github.com/unixshells/clone/security/advisories/new + +Include: +- Description of the vulnerability +- Steps to reproduce +- Potential impact +- Suggested fix (if any) + +We aim to release a fix within 7 days for critical issues. + +## Scope + +Clone is a VMM that provides KVM hardware isolation. Security issues in the following areas are especially critical: + +- **VM escape** -- guest accessing host memory or resources outside its boundary +- **Privilege escalation** -- unprivileged user gaining root through Clone +- **Memory safety** -- use-after-free, buffer overflow, or other memory corruption +- **Denial of service** -- crashing the VMM or host from within a guest +- **Information disclosure** -- guest reading other guests' or host's memory From f4c9ef80a0ec1f6e8910e9cf030e3e2bccabd1d9 Mon Sep 17 00:00:00 2001 From: Tolga Karatas Date: Tue, 5 May 2026 15:34:58 +0300 Subject: [PATCH 5/5] chore: add development tooling and release configuration - mise: rust + cargo-binstall + pre-commit; setup/ci tasks - pre-commit: cargo autofix on commit, test+deny on push - deny.toml: license allowlist (MIT/Apache/BSD/ISC), advisory checks - release-please: Rust release type, version sync, changelog sections --- .mise.toml | 24 +++++++++++++++++++++ .pre-commit-config.yaml | 39 +++++++++++++++++++++++++++++++++++ .release-please-manifest.json | 3 +++ deny.toml | 25 ++++++++++++++++++++++ release-please-config.json | 25 
++++++++++++++++++++++ 5 files changed, 116 insertions(+) create mode 100644 .mise.toml create mode 100644 .pre-commit-config.yaml create mode 100644 .release-please-manifest.json create mode 100644 deny.toml create mode 100644 release-please-config.json diff --git a/.mise.toml b/.mise.toml new file mode 100644 index 0000000..0efdd5e --- /dev/null +++ b/.mise.toml @@ -0,0 +1,24 @@ +[tools] +rust = "latest" +cargo-binstall = "latest" +pre-commit = "latest" + +[tasks.setup] +description = "Install cargo tools and pre-commit hooks" +run = """ +cargo binstall -y cargo-audit cargo-deny 2>/dev/null || \ + cargo install cargo-audit cargo-deny +pre-commit install +pre-commit install --hook-type pre-push +""" + +[tasks.ci] +description = "Run full local CI (same checks as GitHub Actions)" +run = """ +cargo fmt -- --check +cargo clippy -- -D warnings +cargo test +cargo deny check +cargo audit +echo "All local CI checks passed." +""" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..0b93147 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,39 @@ +repos: + - repo: local + hooks: + - id: cargo-autofix + name: cargo autofix (fmt + fix + clippy --fix) + entry: bash -c 'mise exec -- cargo fmt && mise exec -- cargo fix --allow-dirty --allow-staged 2>/dev/null; mise exec -- cargo clippy --fix --allow-dirty --allow-staged 2>/dev/null; mise exec -- cargo fmt' + language: system + types: [rust] + pass_filenames: false + + - id: cargo-fmt-check + name: cargo fmt check + entry: mise exec -- cargo fmt -- --check + language: system + types: [rust] + pass_filenames: false + + - id: cargo-clippy + name: cargo clippy + entry: mise exec -- cargo clippy -- -D warnings + language: system + types: [rust] + pass_filenames: false + + - id: cargo-test + name: cargo test + entry: mise exec -- cargo test + language: system + types: [rust] + pass_filenames: false + stages: [pre-push] + + - id: cargo-deny + name: cargo deny + entry: mise exec -- 
cargo deny check + language: system + files: Cargo\.(toml|lock) + pass_filenames: false + stages: [pre-push] diff --git a/.release-please-manifest.json b/.release-please-manifest.json new file mode 100644 index 0000000..466df71 --- /dev/null +++ b/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "0.1.0" +} diff --git a/deny.toml b/deny.toml new file mode 100644 index 0000000..312dcb0 --- /dev/null +++ b/deny.toml @@ -0,0 +1,25 @@ +[advisories] +version = 2 +ignore = [] + +[licenses] +version = 2 +allow = [ + "MIT", + "Apache-2.0", + "BSD-2-Clause", + "BSD-3-Clause", + "ISC", + "Unicode-3.0", + "Unicode-DFS-2016", +] + +[bans] +multiple-versions = "warn" +wildcards = "deny" + +[sources] +unknown-registry = "deny" +unknown-git = "deny" +allow-registry = ["https://github.com/rust-lang/crates.io-index"] +allow-git = [] diff --git a/release-please-config.json b/release-please-config.json new file mode 100644 index 0000000..3a87251 --- /dev/null +++ b/release-please-config.json @@ -0,0 +1,25 @@ +{ + "$schema": "https://raw.githubusercontent.com/googleapis/release-please/v16.0.0/schemas/config.json", + "packages": { + ".": { + "release-type": "rust", + "include-component-in-tag": false, + "include-v-in-tag": true, + "bump-minor-pre-major": true, + "bump-patch-for-minor-pre-major": true, + "changelog-sections": [ + {"type": "feat", "section": "Features"}, + {"type": "fix", "section": "Bug Fixes"}, + {"type": "perf", "section": "Performance Improvements"}, + {"type": "refactor", "section": "Code Refactoring"}, + {"type": "deps", "section": "Dependencies"}, + {"type": "revert", "section": "Reverts"}, + {"type": "docs", "section": "Documentation", "hidden": true}, + {"type": "test", "section": "Tests", "hidden": true}, + {"type": "build", "section": "Build System", "hidden": true}, + {"type": "ci", "section": "CI/CD", "hidden": true}, + {"type": "chore", "section": "Miscellaneous", "hidden": true} + ] + } + } +}