diff --git a/Justfile b/Justfile
index c17563f00..b3dda3629 100644
--- a/Justfile
+++ b/Justfile
@@ -101,6 +101,12 @@ like-ci config=default-target hypervisor="kvm":
     @# Ensure up-to-date Cargo.lock
     cargo fetch --locked
 
+    @# typos
+    typos
+
+    @# check license headers
+    just check-license-headers
+
     @# fmt
     just fmt-check
 
diff --git a/dev/check-license-headers.sh b/dev/check-license-headers.sh
index 8fd7749dd..88104b1ff 100755
--- a/dev/check-license-headers.sh
+++ b/dev/check-license-headers.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # This script checks for the presence of the required license header in Rust source files.
 
 # Get the repository root
@@ -32,7 +32,12 @@ MISSING_HEADERS=0
 MISSING_FILES=""
 
 # Find all Rust files, excluding target directory
-while IFS= read -r file; do
+while IFS= read -r -d $'\0' file; do
+    # Skip some files which appear when the guests are built
+    if grep -q '^src/tests/rust_guests/[^/]*/target/' <<< "$file"; then
+        continue
+    fi
+
     # Skip auto-generated files
     if grep -q "@generated" "$file" || grep -q "Automatically generated" "$file"; then
         continue
@@ -44,7 +49,7 @@ while IFS= read -r file; do
         MISSING_FILES="$MISSING_FILES\n $file"
         MISSING_HEADERS=$((MISSING_HEADERS + 1))
     fi
-done < <(find src -name "*.rs" -type f)
+done < <(find src -name "*.rs" -type f -print0)
 
 if [ $MISSING_HEADERS -gt 0 ]; then
     echo "Found $MISSING_HEADERS files with missing or invalid license headers:"
@@ -57,4 +62,4 @@ if [ $MISSING_HEADERS -gt 0 ]; then
 else
     echo "All Rust files have the required license header"
     exit 0
-fi
\ No newline at end of file
+fi
diff --git a/flake.nix b/flake.nix
index 92f8442a1..fd1e67d48 100644
--- a/flake.nix
+++ b/flake.nix
@@ -75,19 +75,19 @@
           # for rustfmt and old toolchains to verify MSRV
           toolchains = lib.mapAttrs (_: customisedRustChannelOf) {
             stable = {
-              date = "2025-09-18";
+              date = "2025-12-11";
               channel = "stable";
-              sha256 = "sha256-SJwZ8g0zF2WrKDVmHrVG3pD2RGoQeo24MEXnNx5FyuI=";
+              sha256 = "sha256-sqSWJDUxc+zaz1nBWMAJKTAGBuGWP25GCftIOlCEAtA=";
             };
             nightly = {
-              date = "2025-07-29";
+              date = "2026-01-19";
               channel = "nightly";
-              sha256 = "sha256-6D2b7glWC3jpbIGCq6Ta59lGCKN9sTexhgixH4Y7Nng=";
+              sha256 = "sha256-Ye65U/qzilPLte800N5oxFOY96shgG8bST8dbrF6Qh0=";
             };
-            "1.88" = {
-              date = "2025-06-26";
+            "1.89" = {
+              date = "2025-08-07";
               channel = "stable";
-              sha256 = "sha256-Qxt8XAuaUR2OMdKbN4u8dBJOhSHxS+uS06Wl9+flVEk=";
+              sha256 = "sha256-+9FmLhAOezBZCOziO0Qct1NOrfpjNsXxc/8I0c7BdKE=";
             };
           };
 
@@ -96,6 +96,47 @@
             rustc = toolchains.stable.rust;
           };
 
+          # Script snippet, used in the cargo/rustc wrappers below,
+          # which creates a number of .cargo/config.toml files in
+          # order to allow using Nix-fetched dependencies (this must
+          # be done for the guests, as well as for the main
+          # workspace). Ideally, we would just use environment
+          # variables or the --config option to Cargo, but
+          # unfortunately that tends not to play well with subcommands
+          # like `cargo clippy` and `cargo hyperlight` (see
+          # https://github.com/rust-lang/cargo/issues/11031).
+          materialiseDeps = deps: let
+            sortedNames = lib.lists.reverseList (builtins.attrNames deps);
+            matchClause = path: '' */${path}) root="''${manifest%${path}}" ;;'';
+            matchClauses = lib.strings.concatStringsSep "\n"
+              (builtins.map matchClause sortedNames);
+            makeClause = manifest: vendor: let
+              dir = builtins.dirOf manifest;
+              gitExclude = builtins.toString (/.
+ "${dir}/.cargo"); + in '' + mkdir -p $root/${dir}/.cargo + cat >$root/${dir}/.cargo/config.toml <> $root/.git/info/exclude + ''; + makeClauses = lib.strings.concatStringsSep "\n" + (lib.mapAttrsToList makeClause deps); + in '' + manifest=$(''${base}/bin/cargo locate-project --message-format plain --workspace) + case "$manifest" in + ${matchClauses} + esac + if [ -f ''${root}/flake.nix ]; then + sed -i '/# vendor dependency configuration generated by nix/{N;d;}' $root/.git/info/exclude + ${makeClauses} + fi + ''; + # Hyperlight scripts use cargo in a bunch of ways that don't # make sense for Nix cargo, including the `rustup +toolchain` # syntax to use a specific toolchain and `cargo install`, so we @@ -103,32 +144,68 @@ # scripts also use `rustup toolchain install` in some cases, in # order to work in CI, so we provide a fake rustup that does # nothing as well. - rustup-like-wrapper = name: pkgs.writeShellScriptBin name + rustup-like-wrapper = name: deps: pkgs.writeShellScriptBin name (let clause = name: toolchain: "+${name}) base=\"${toolchain.rust}\"; shift 1; ;;"; clauses = lib.strings.concatStringsSep "\n" (lib.mapAttrsToList clause toolchains); in '' - base="${toolchains.stable.rust}" - case "$1" in - ${clauses} - install) exit 0; ;; - esac - export PATH="$base/bin:$PATH" - exec "$base/bin/${name}" "$@" - ''); - fake-rustup = pkgs.symlinkJoin { + base="${toolchains.stable.rust}" + ${materialiseDeps deps} + case "$1" in + ${clauses} + install) exit 0; ;; + esac + export PATH="$base/bin:$PATH" + exec "$base/bin/${name}" "$@" + ''); + fake-rustup = deps: pkgs.symlinkJoin { name = "fake-rustup"; paths = [ (pkgs.writeShellScriptBin "rustup" "") - (rustup-like-wrapper "rustc") - (rustup-like-wrapper "cargo") + (rustup-like-wrapper "rustc" deps) + (rustup-like-wrapper "cargo" deps) ]; }; buildRustPackageClang = rust-platform.buildRustPackage.override { stdenv = clangStdenv; }; - in (buildRustPackageClang rec { + + cargo-hyperlight = buildRustPackageClang rec { + pname = "cargo-hyperlight"; + version = "0.1.5"; + src = fetchFromGitHub { + owner = "hyperlight-dev"; + repo = "cargo-hyperlight"; + tag = "v${version}"; + hash = "sha256-xq4/c69N0wG/I8WOYVloo0J0JqoSIKiWWtECdSKrsxo="; + }; + cargoHash = "sha256-muiMVrK1TydQiMitihfo7xYidqUIIQ+Hw3BIeo5rLFw="; + }; + # when building a guest with cargo-hyperlight, we need to + # include any crates.io dependencies of the standard library + # (e.g. 
rustc-literal-escaper) + stdlibLocks = lib.mapAttrsToList (_: toolchain: + "${toolchain.rust}/lib/rustlib/src/rust/library/Cargo.lock" + ) toolchains; + stdlibDeps = builtins.map (lockFile: + rust-platform.importCargoLock { inherit lockFile; }) stdlibLocks; + withStdlibLock = lockFile: + pkgs.symlinkJoin { + name = "cargo-deps"; + paths = stdlibDeps ++ [ + (rust-platform.importCargoLock { + inherit lockFile; + }) + ]; + }; + deps = finalRootVendor: { + "Cargo.toml" = finalRootVendor; + "src/tests/rust_guests/dummyguest/Cargo.toml" = withStdlibLock ./src/tests/rust_guests/dummyguest/Cargo.lock; + "src/tests/rust_guests/simpleguest/Cargo.toml" = withStdlibLock ./src/tests/rust_guests/simpleguest/Cargo.lock; + "src/tests/rust_guests/witguest/Cargo.toml" = withStdlibLock ./src/tests/rust_guests/witguest/Cargo.lock; + }; + in (buildRustPackageClang (mkDerivationAttrs: { pname = "hyperlight"; version = "0.0.0"; src = lib.cleanSource ./.; @@ -150,6 +227,8 @@ jaq gdb zlib + cargo-hyperlight + typos ]; buildInputs = [ pango @@ -167,9 +246,9 @@ # Set this through shellHook rather than nativeBuildInputs to be # really sure that it overrides the real cargo. postHook = '' - export PATH="${fake-rustup}/bin:$PATH" + export PATH="${fake-rustup (deps mkDerivationAttrs.cargoDeps)}/bin:$PATH" ''; - }).overrideAttrs(oA: { + })).overrideAttrs(oA: { hardeningDisable = [ "all" ]; }); }; diff --git a/src/hyperlight_common/src/arch/amd64/layout.rs b/src/hyperlight_common/src/arch/amd64/layout.rs index be4065732..da0ddd0c8 100644 --- a/src/hyperlight_common/src/arch/amd64/layout.rs +++ b/src/hyperlight_common/src/arch/amd64/layout.rs @@ -14,5 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Keep in mind that the minimum upper half GVA is 0xffff_8000_0000_0000 -pub const SNAPSHOT_PT_GVA: usize = 0xffff_ff00_0000_0000; +/// We have this the top of the page below the top of memory in order +/// to make working with start/end ptrs in a few places more +/// convenient (not needing to worry about overflow) +pub const MAX_GVA: usize = 0xffff_ffff_ffff_efff; +pub const SNAPSHOT_PT_GVA: usize = 0xffff_8000_0000_0000; + +/// We assume 36-bit IPAs for now, since every amd64 processor +/// supports at least 36 bits. Almost all of them support at least 40 +/// bits, so we could consider bumping this in the future if we were +/// ever memory-constrained. +pub const MAX_GPA: usize = 0x0000_00ff_ffff_ffff; diff --git a/src/hyperlight_common/src/arch/amd64/vmem.rs b/src/hyperlight_common/src/arch/amd64/vmem.rs index e292a0381..4e104460e 100644 --- a/src/hyperlight_common/src/arch/amd64/vmem.rs +++ b/src/hyperlight_common/src/arch/amd64/vmem.rs @@ -145,8 +145,15 @@ impl Iterator let next_vmin = if self.n == 0 { self.request.vmin } else { - // Align to the next boundary by adding one entry's worth and masking off lower bits - (self.request.vmin + (self.n << LOW_BIT)) & !lower_bits_mask + // Align to the next boundary by adding one entry's worth + // and masking off lower bits. Masking off before adding + // is safe, since n << LOW_BIT must always have zeros in + // these positions. + let aligned_min = self.request.vmin & !lower_bits_mask; + // Use checked_add here because going past the end of the + // address space counts as "the next one would be out of + // range" + aligned_min.checked_add(self.n << LOW_BIT)? 
}; // Check if we've processed the entire requested range diff --git a/src/hyperlight_common/src/layout.rs b/src/hyperlight_common/src/layout.rs index 87d072715..3ba739b73 100644 --- a/src/hyperlight_common/src/layout.rs +++ b/src/hyperlight_common/src/layout.rs @@ -21,4 +21,21 @@ mod arch; // The constraint on the feature is temporary and will be removed when other arch i686 is added #[cfg(feature = "init-paging")] -pub use arch::SNAPSHOT_PT_GVA; +pub use arch::MAX_GPA; +#[cfg(feature = "init-paging")] +pub use arch::{MAX_GVA, SNAPSHOT_PT_GVA}; + +// offsets down from the top of scratch memory for various things +pub const SCRATCH_TOP_SIZE_OFFSET: u64 = 0x08; +pub const SCRATCH_TOP_USED_OFFSET: u64 = 0x10; +pub const SCRATCH_TOP_ALLOCATOR_OFFSET: u64 = 0x18; +pub const SCRATCH_TOP_EXN_STACK_OFFSET: u64 = 0x20; + +#[cfg(feature = "init-paging")] +pub fn scratch_base_gpa(size: usize) -> u64 { + (MAX_GPA - size + 1) as u64 +} +#[cfg(feature = "init-paging")] +pub fn scratch_base_gva(size: usize) -> u64 { + (MAX_GVA - size + 1) as u64 +} diff --git a/src/hyperlight_host/src/hypervisor/crashdump.rs b/src/hyperlight_host/src/hypervisor/crashdump.rs index a5f7c19fc..e42a62619 100644 --- a/src/hyperlight_host/src/hypervisor/crashdump.rs +++ b/src/hyperlight_host/src/hypervisor/crashdump.rs @@ -86,11 +86,11 @@ impl GuestView { let regions = ctx .regions .iter() - .filter(|r| !r.host_region.is_empty()) + .filter(|r| !r.guest_region.is_empty()) .map(|r| VaRegion { begin: r.guest_region.start as u64, end: r.guest_region.end as u64, - offset: r.host_region.start as u64, + offset: <_ as Into>::into(r.host_region.start) as u64, protection: VaProtection { is_private: false, read: r.flags.contains(MemoryRegionFlags::READ), @@ -225,8 +225,8 @@ impl ReadProcessMemory for GuestMemReader { let offset = base - r.guest_region.start; let region_slice = unsafe { std::slice::from_raw_parts( - r.host_region.start as *const u8, - r.host_region.len(), + <_ as Into>::into(r.host_region.start) as *const u8, + r.guest_region.len(), ) }; @@ -463,9 +463,20 @@ mod test { #[test] fn test_crashdump_dummy_core_dump() { let dummy_vec = vec![0; 0x1000]; + use crate::mem::memory_region::{HostGuestMemoryRegion, MemoryRegionKind}; + #[cfg(target_os = "windows")] + let host_base = crate::mem::memory_region::HostRegionBase { + from_handle: windows::Win32::Foundation::INVALID_HANDLE_VALUE.into(), + handle_base: 0, + handle_size: -1isize as usize, + offset: dummy_vec.as_ptr() as usize, + }; + #[cfg(not(target_os = "windows"))] + let host_base = dummy_vec.as_ptr() as usize; + let host_end = ::add(host_base, dummy_vec.len()); let regions = vec![MemoryRegion { guest_region: 0x1000..0x2000, - host_region: dummy_vec.as_ptr() as usize..dummy_vec.as_ptr() as usize + dummy_vec.len(), + host_region: host_base..host_end, flags: MemoryRegionFlags::READ | MemoryRegionFlags::WRITE, region_type: crate::mem::memory_region::MemoryRegionType::Code, }]; diff --git a/src/hyperlight_host/src/hypervisor/gdb/mod.rs b/src/hyperlight_host/src/hypervisor/gdb/mod.rs index 0e4d8ac9e..aa9a8f3fb 100644 --- a/src/hyperlight_host/src/hypervisor/gdb/mod.rs +++ b/src/hyperlight_host/src/hypervisor/gdb/mod.rs @@ -126,8 +126,9 @@ impl DebugMemoryAccess { HyperlightError::TranslateGuestAddress(mem_offset as u64) })?; + let host_start_ptr = <_ as Into>::into(reg.host_region.start); let bytes: &[u8] = unsafe { - slice::from_raw_parts(reg.host_region.start as *const u8, reg.host_region.len()) + slice::from_raw_parts(host_start_ptr as *const u8, reg.guest_region.len()) }; 
data[..read_len].copy_from_slice(&bytes[region_offset..region_offset + read_len]); @@ -191,11 +192,9 @@ impl DebugMemoryAccess { HyperlightError::TranslateGuestAddress(mem_offset as u64) })?; + let host_start_ptr = <_ as Into>::into(reg.host_region.start); let bytes: &mut [u8] = unsafe { - slice::from_raw_parts_mut( - reg.host_region.start as *mut u8, - reg.host_region.len(), - ) + slice::from_raw_parts_mut(host_start_ptr as *mut u8, reg.guest_region.len()) }; bytes[region_offset..region_offset + write_len].copy_from_slice(&data[..write_len]); diff --git a/src/hyperlight_host/src/hypervisor/hyperlight_vm.rs b/src/hyperlight_host/src/hypervisor/hyperlight_vm.rs index c836d3d80..2cdc0b598 100644 --- a/src/hyperlight_host/src/hypervisor/hyperlight_vm.rs +++ b/src/hyperlight_host/src/hypervisor/hyperlight_vm.rs @@ -50,13 +50,11 @@ use crate::hypervisor::virtual_machine::mshv::MshvVm; #[cfg(target_os = "windows")] use crate::hypervisor::virtual_machine::whp::WhpVm; use crate::hypervisor::virtual_machine::{HypervisorType, VmExit, get_available_hypervisor}; -#[cfg(target_os = "windows")] -use crate::hypervisor::wrappers::HandleWrapper; use crate::hypervisor::{InterruptHandle, InterruptHandleImpl, get_max_log_level}; use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags, MemoryRegionType}; use crate::mem::mgr::SandboxMemoryManager; use crate::mem::ptr::{GuestPtr, RawPtr}; -use crate::mem::shared_mem::HostSharedMemory; +use crate::mem::shared_mem::{GuestSharedMemory, HostSharedMemory, SharedMemory}; use crate::metrics::{METRIC_ERRONEOUS_VCPU_KICKS, METRIC_GUEST_CANCELLATION}; use crate::sandbox::SandboxConfiguration; use crate::sandbox::host_funcs::FunctionRegistry; @@ -87,6 +85,10 @@ pub(crate) struct HyperlightVm { mmap_regions: Vec<(u32, MemoryRegion)>, // Later mapped regions (slot number, region) next_slot: u32, // Monotonically increasing slot number freed_slots: Vec, // Reusable slots from unmapped regions + scratch_slot: u32, // The slot number used for the scratch region + // The current scratch region, used to keep it alive as long as it + // is used & when unmapping + scratch_memory: Option, #[cfg(gdb)] gdb_conn: Option>, @@ -108,8 +110,6 @@ impl HyperlightVm { entrypoint: u64, rsp: u64, #[cfg_attr(target_os = "windows", allow(unused_variables))] config: &SandboxConfiguration, - #[cfg(target_os = "windows")] handle: HandleWrapper, - #[cfg(target_os = "windows")] raw_size: usize, #[cfg(gdb)] gdb_conn: Option>, #[cfg(crashdump)] rt_cfg: SandboxRuntimeConfig, #[cfg(feature = "mem_profile")] trace_info: MemTraceInfo, @@ -126,7 +126,7 @@ impl HyperlightVm { #[cfg(mshv3)] Some(HypervisorType::Mshv) => Box::new(MshvVm::new()?), #[cfg(target_os = "windows")] - Some(HypervisorType::Whp) => Box::new(WhpVm::new(handle, raw_size)?), + Some(HypervisorType::Whp) => Box::new(WhpVm::new()?), None => return Err(NoHypervisorFound()), }; @@ -176,6 +176,7 @@ impl HyperlightVm { }), }); + let scratch_slot = mem_regions.len() as u32; #[cfg_attr(not(gdb), allow(unused_mut))] let mut ret = Self { vm, @@ -184,10 +185,12 @@ impl HyperlightVm { interrupt_handle, page_size: 0, // Will be set in `initialise` - next_slot: mem_regions.len() as u32, + next_slot: scratch_slot + 1, sandbox_regions: mem_regions, mmap_regions: Vec::new(), freed_slots: Vec::new(), + scratch_slot, + scratch_memory: None, #[cfg(gdb)] gdb_conn, @@ -266,8 +269,10 @@ impl HyperlightVm { if [ region.guest_region.start, region.guest_region.end, - region.host_region.start, - region.host_region.end, + #[allow(clippy::useless_conversion)] + 
region.host_region.start.into(), + #[allow(clippy::useless_conversion)] + region.host_region.end.into(), ] .iter() .any(|x| x % self.page_size != 0) @@ -312,6 +317,21 @@ impl HyperlightVm { self.mmap_regions.iter().map(|(_, region)| region) } + /// Update the scratch mapping to point to a new GuestSharedMemory + pub(crate) fn update_scratch_mapping(&mut self, scratch: GuestSharedMemory) -> Result<()> { + let guest_base = hyperlight_common::layout::scratch_base_gpa(scratch.mem_size()); + let rgn = scratch.mapping_at(guest_base, MemoryRegionType::Scratch); + + if let Some(old_scratch) = self.scratch_memory.replace(scratch) { + let old_base = hyperlight_common::layout::scratch_base_gpa(old_scratch.mem_size()); + let old_rgn = old_scratch.mapping_at(old_base, MemoryRegionType::Scratch); + self.vm.unmap_memory((self.scratch_slot, &old_rgn))?; + } + unsafe { self.vm.map_memory((self.scratch_slot, &rgn))? }; + + Ok(()) + } + /// Dispatch a call from the host to the guest using the given pointer /// to the dispatch function _in the guest's address space_. /// diff --git a/src/hyperlight_host/src/hypervisor/surrogate_process.rs b/src/hyperlight_host/src/hypervisor/surrogate_process.rs index d026362d2..6ff744709 100644 --- a/src/hyperlight_host/src/hypervisor/surrogate_process.rs +++ b/src/hyperlight_host/src/hypervisor/surrogate_process.rs @@ -15,60 +15,155 @@ limitations under the License. */ use core::ffi::c_void; +use std::collections::HashMap; +use std::collections::hash_map::Entry; +use hyperlight_common::mem::PAGE_SIZE_USIZE; use tracing::{Span, instrument}; use windows::Win32::Foundation::HANDLE; use windows::Win32::System::Memory::{ - MEMORY_MAPPED_VIEW_ADDRESS, UNMAP_VIEW_OF_FILE_FLAGS, UnmapViewOfFile2, + MEMORY_MAPPED_VIEW_ADDRESS, MapViewOfFileNuma2, PAGE_NOACCESS, PAGE_PROTECTION_FLAGS, + PAGE_READWRITE, UNMAP_VIEW_OF_FILE_FLAGS, UnmapViewOfFile2, VirtualProtectEx, }; +use windows::Win32::System::SystemServices::NUMA_NO_PREFERRED_NODE; use super::surrogate_process_manager::get_surrogate_process_manager; use super::wrappers::HandleWrapper; +use crate::HyperlightError::WindowsAPIError; +use crate::{Result, log_then_return}; + +#[derive(Debug)] +pub(crate) struct HandleMapping { + pub(crate) use_count: u64, + pub(crate) surrogate_base: *mut c_void, +} /// Contains details of a surrogate process to be used by a Sandbox for providing memory to a HyperV VM on Windows. /// See surrogate_process_manager for details on why this is needed. #[derive(Debug)] pub(super) struct SurrogateProcess { - /// The address of memory allocated in the surrogate process to be mapped to the VM. - /// This includes the first guard page - pub(crate) allocated_address: *mut c_void, + /// The various mappings between handles in the host and surrogate process + pub(crate) mappings: HashMap, /// The handle to the surrogate process. 
     pub(crate) process_handle: HandleWrapper,
 }
 
 impl SurrogateProcess {
     #[instrument(skip_all, parent = Span::current(), level= "Trace")]
-    pub(super) fn new(allocated_address: *mut c_void, process_handle: HANDLE) -> Self {
+    pub(super) fn new(process_handle: HANDLE) -> Self {
         Self {
-            allocated_address,
+            mappings: HashMap::new(),
             process_handle: HandleWrapper::from(process_handle),
         }
     }
+
+    pub(super) fn map(
+        &mut self,
+        handle: HandleWrapper,
+        host_base: usize,
+        host_size: usize,
+    ) -> Result<*mut c_void> {
+        match self.mappings.entry(host_base) {
+            Entry::Occupied(mut oe) => {
+                oe.get_mut().use_count += 1;
+                Ok(oe.get().surrogate_base)
+            }
+            Entry::Vacant(ve) => {
+                // Use MapViewOfFile2 to map memory into the surrogate process, the MapViewOfFile2 API is implemented as an inline function in a windows header file
+                // (see https://learn.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-mapviewoffile2#remarks) so we use the same API it uses in the header file here instead of
+                // MapViewOfFile2 which does not exist in the rust crate (see https://github.com/microsoft/windows-rs/issues/2595)
+                let surrogate_base = unsafe {
+                    MapViewOfFileNuma2(
+                        handle.into(),
+                        self.process_handle.into(),
+                        0,
+                        None,
+                        host_size,
+                        0,
+                        PAGE_READWRITE.0,
+                        NUMA_NO_PREFERRED_NODE,
+                    )
+                };
+                let mut unused_out_old_prot_flags = PAGE_PROTECTION_FLAGS(0);
+
+                // the first page of the raw_size is the guard page
+                let first_guard_page_start = surrogate_base.Value;
+                if let Err(e) = unsafe {
+                    VirtualProtectEx(
+                        self.process_handle.into(),
+                        first_guard_page_start,
+                        PAGE_SIZE_USIZE,
+                        PAGE_NOACCESS,
+                        &mut unused_out_old_prot_flags,
+                    )
+                } {
+                    log_then_return!(WindowsAPIError(e.clone()));
+                }
+
+                // the last page of the raw_size is the guard page
+                let last_guard_page_start =
+                    unsafe { first_guard_page_start.add(host_size - PAGE_SIZE_USIZE) };
+                if let Err(e) = unsafe {
+                    VirtualProtectEx(
+                        self.process_handle.into(),
+                        last_guard_page_start,
+                        PAGE_SIZE_USIZE,
+                        PAGE_NOACCESS,
+                        &mut unused_out_old_prot_flags,
+                    )
+                } {
+                    log_then_return!(WindowsAPIError(e.clone()));
+                }
+                ve.insert(HandleMapping {
+                    use_count: 1,
+                    surrogate_base: surrogate_base.Value,
+                });
+                Ok(surrogate_base.Value)
+            }
+        }
+    }
+
+    pub(super) fn unmap(&mut self, host_base: usize) {
+        if let Some(entry) = self.mappings.remove(&host_base) {
+            self.unmap_helper(entry.surrogate_base);
+        } else {
+            #[cfg(debug_assertions)]
+            panic!("Attempted to unmap from surrogate a region that was never mapped")
+        }
+    }
+
+    fn unmap_helper(&self, surrogate_base: *mut c_void) {
+        let memory_mapped_view_address = MEMORY_MAPPED_VIEW_ADDRESS {
+            Value: surrogate_base,
+        };
+        let flags = UNMAP_VIEW_OF_FILE_FLAGS(0);
+        if let Err(e) = unsafe {
+            UnmapViewOfFile2(
+                self.process_handle.into(),
+                memory_mapped_view_address,
+                flags,
+            )
+        } {
+            tracing::error!(
+                "Failed to free surrogate process resources (UnmapViewOfFile2 failed): {:?}",
+                e
+            );
+        }
+    }
 }
 
 impl Default for SurrogateProcess {
     #[instrument(skip_all, parent = Span::current(), level= "Trace")]
     fn default() -> Self {
-        let allocated_address = std::ptr::null_mut();
-        Self::new(allocated_address, Default::default())
+        Self::new(Default::default())
     }
 }
 
 impl Drop for SurrogateProcess {
     #[instrument(skip_all, parent = Span::current(), level= "Trace")]
     fn drop(&mut self) {
-        let process_handle: HANDLE = self.process_handle.into();
-        let memory_mapped_view_address = MEMORY_MAPPED_VIEW_ADDRESS {
-            Value: self.allocated_address,
-        };
-        let flags = UNMAP_VIEW_OF_FILE_FLAGS(0);
-        if let
Err(e) = - unsafe { UnmapViewOfFile2(process_handle, memory_mapped_view_address, flags) } - { - tracing::error!( - "Failed to free surrogate process resources (UnmapViewOfFile2 failed): {:?}", - e - ); + for mapping in self.mappings.values() { + self.unmap_helper(mapping.surrogate_base); } // we need to do this take so we can take ownership diff --git a/src/hyperlight_host/src/hypervisor/surrogate_process_manager.rs b/src/hyperlight_host/src/hypervisor/surrogate_process_manager.rs index 80a23e8aa..a1789fa04 100644 --- a/src/hyperlight_host/src/hypervisor/surrogate_process_manager.rs +++ b/src/hyperlight_host/src/hypervisor/surrogate_process_manager.rs @@ -21,7 +21,6 @@ use std::mem::size_of; use std::path::{Path, PathBuf}; use crossbeam_channel::{Receiver, Sender, unbounded}; -use hyperlight_common::mem::PAGE_SIZE_USIZE; use rust_embed::RustEmbed; use tracing::{Span, error, info, instrument}; use windows::Win32::Foundation::HANDLE; @@ -31,10 +30,6 @@ use windows::Win32::System::JobObjects::{ JOBOBJECT_BASIC_LIMIT_INFORMATION, JOBOBJECT_EXTENDED_LIMIT_INFORMATION, JobObjectExtendedLimitInformation, SetInformationJobObject, TerminateJobObject, }; -use windows::Win32::System::Memory::{ - MapViewOfFileNuma2, PAGE_NOACCESS, PAGE_PROTECTION_FLAGS, PAGE_READWRITE, VirtualProtectEx, -}; -use windows::Win32::System::SystemServices::NUMA_NO_PREFERRED_NODE; use windows::Win32::System::Threading::{ CREATE_SUSPENDED, CreateProcessA, PROCESS_INFORMATION, STARTUPINFOA, }; @@ -141,77 +136,9 @@ impl SurrogateProcessManager { /// allocates memory in the process. This should be called when a new /// HyperV on Windows Driver is created. #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")] - pub(super) fn get_surrogate_process( - &self, - raw_size: usize, - mmap_file_handle: HandleWrapper, - ) -> Result { + pub(super) fn get_surrogate_process(&self) -> Result { let surrogate_process_handle: HANDLE = self.process_receiver.recv()?.into(); - let mapping_file_handle: HANDLE = mmap_file_handle.into(); - - // Allocate the memory by creating a view over the memory mapped file - - // Use MapViewOfFile2 to map memory into the surrogate process, the MapViewOfFile2 API is implemented in as an inline function in a windows header file - // (see https://learn.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-mapviewoffile2#remarks) so we use the same API it uses in the header file here instead of - // MapViewOfFile2 which does not exist in the rust crate (see https://github.com/microsoft/windows-rs/issues/2595) - let allocated_address = unsafe { - MapViewOfFileNuma2( - mapping_file_handle, - surrogate_process_handle, - 0, - None, - raw_size, - 0, - PAGE_READWRITE.0, - NUMA_NO_PREFERRED_NODE, - ) - }; - - if allocated_address.Value.is_null() { - // Safety: `MapViewOfFileNuma2` will set the last error code if it fails. 
- let error = unsafe { windows::Win32::Foundation::GetLastError() }; - log_then_return!("MapViewOfFileNuma2 failed with error code: {:?}", error); - } - - // set up guard pages - - // If the following calls to VirtualProtectEx are changed make sure to update the calls to VirtualProtect in shared_mem.rs - - let mut unused_out_old_prot_flags = PAGE_PROTECTION_FLAGS(0); - - // the first page of the raw_size is the guard page - let first_guard_page_start = allocated_address.Value; - if let Err(e) = unsafe { - VirtualProtectEx( - surrogate_process_handle, - first_guard_page_start, - PAGE_SIZE_USIZE, - PAGE_NOACCESS, - &mut unused_out_old_prot_flags, - ) - } { - log_then_return!(WindowsAPIError(e.clone())); - } - - // the last page of the raw_size is the guard page - let last_guard_page_start = - unsafe { first_guard_page_start.add(raw_size - PAGE_SIZE_USIZE) }; - if let Err(e) = unsafe { - VirtualProtectEx( - surrogate_process_handle, - last_guard_page_start, - PAGE_SIZE_USIZE, - PAGE_NOACCESS, - &mut unused_out_old_prot_flags, - ) - } { - log_then_return!(WindowsAPIError(e.clone())); - } - - Ok(SurrogateProcess::new( - allocated_address.Value, - surrogate_process_handle, - )) + Ok(SurrogateProcess::new(surrogate_process_handle)) } /// Returns a surrogate process to the pool of surrogate processes. @@ -415,17 +342,12 @@ mod tests { use std::thread; use std::time::{Duration, Instant}; - use hyperlight_common::mem::PAGE_SIZE_USIZE; use rand::{Rng, rng}; - use windows::Win32::Foundation::{CloseHandle, HANDLE, INVALID_HANDLE_VALUE}; + use windows::Win32::Foundation::HANDLE; use windows::Win32::System::Diagnostics::ToolHelp::{ CreateToolhelp32Snapshot, PROCESSENTRY32, Process32First, Process32Next, TH32CS_SNAPPROCESS, }; use windows::Win32::System::JobObjects::IsProcessInJob; - use windows::Win32::System::Memory::{ - CreateFileMappingA, FILE_MAP_ALL_ACCESS, MapViewOfFile, PAGE_READWRITE, SEC_COMMIT, - UnmapViewOfFile, - }; use windows_result::BOOL; use super::*; @@ -440,7 +362,6 @@ mod tests { let thread_handle = thread::spawn(move || -> Result<()> { let surrogate_process_manager_res = get_surrogate_process_manager(); let mut rng = rng(); - let size = PAGE_SIZE_USIZE * 3; assert!(surrogate_process_manager_res.is_ok()); let surrogate_process_manager = surrogate_process_manager_res.unwrap(); let job_handle = surrogate_process_manager.job_handle; @@ -448,26 +369,9 @@ mod tests { // surrogate process, make sure we actually got one, // then put it back for p in 0..NUMBER_OF_SURROGATE_PROCESSES { - let dwmaximumsizehigh = 0; - let dwmaximumsizelow = (size & 0xFFFFFFFF) as u32; - let handle = unsafe { - CreateFileMappingA( - INVALID_HANDLE_VALUE, // Causes the page file to be used as the backing store - None, - PAGE_READWRITE | SEC_COMMIT, - dwmaximumsizehigh, - dwmaximumsizelow, - PCSTR::null(), - ) - .unwrap() - }; - - let addr = unsafe { MapViewOfFile(handle, FILE_MAP_ALL_ACCESS, 0, 0, 0) }; - let timer = Instant::now(); let surrogate_process = { - let res = surrogate_process_manager - .get_surrogate_process(size, HandleWrapper::from(handle))?; + let res = surrogate_process_manager.get_surrogate_process()?; let elapsed = timer.elapsed(); // Print out the time it took to get the process if its greater than 150ms (this is just to allow us to see that threads are blocking on the process queue) if (elapsed.as_millis() as u64) > 150 { @@ -492,11 +396,6 @@ mod tests { // dropping the surrogate process, as we do in the line // below, will return it to the surrogate process manager drop(surrogate_process); - 
let res = unsafe { UnmapViewOfFile(addr) }; - assert!(res.is_ok(), "Failed to UnmapViewOfFile: {:?}", res.err()); - - let res = unsafe { CloseHandle(handle) }; - assert!(res.is_ok(), "Failed to CloseHandle: {:?}", res.err()); } Ok(()) }); @@ -554,10 +453,12 @@ mod tests { let mgr = get_surrogate_process_manager().unwrap(); let mem = ExclusiveSharedMemory::new(SIZE).unwrap(); - let process = mgr - .get_surrogate_process( - mem.raw_mem_size(), + let mut process = mgr.get_surrogate_process().unwrap(); + let surrogate_address = process + .map( HandleWrapper::from(mem.get_mmap_file_handle()), + mem.raw_ptr() as usize, + mem.raw_mem_size(), ) .unwrap(); @@ -569,7 +470,7 @@ mod tests { // read the first guard page, should fail let success = windows::Win32::System::Diagnostics::Debug::ReadProcessMemory( process_handle, - process.allocated_address, + surrogate_address, buffer.as_ptr() as *mut c_void, SIZE, bytes_read, @@ -579,7 +480,7 @@ mod tests { // read the memory, should be OK let success = windows::Win32::System::Diagnostics::Debug::ReadProcessMemory( process_handle, - process.allocated_address.add(SIZE), + surrogate_address.wrapping_add(SIZE), buffer.as_ptr() as *mut c_void, SIZE, bytes_read, @@ -589,7 +490,7 @@ mod tests { // read the second guard page, should fail let success = windows::Win32::System::Diagnostics::Debug::ReadProcessMemory( process_handle, - process.allocated_address.add(2 * SIZE), + surrogate_address.wrapping_add(2 * SIZE), buffer.as_ptr() as *mut c_void, SIZE, bytes_read, diff --git a/src/hyperlight_host/src/hypervisor/virtual_machine/kvm.rs b/src/hyperlight_host/src/hypervisor/virtual_machine/kvm.rs index 5bf9d0756..11abde56a 100644 --- a/src/hyperlight_host/src/hypervisor/virtual_machine/kvm.rs +++ b/src/hyperlight_host/src/hypervisor/virtual_machine/kvm.rs @@ -78,6 +78,19 @@ impl KvmVm { let vm_fd = hv.create_vm_with_type(0)?; let vcpu_fd = vm_fd.create_vcpu(0)?; + // Set the CPUID leaf for MaxPhysAddr. KVM allows this to + // easily be overridden by the hypervisor and defaults it very + // low, while mshv passes it through from hardware unless an + // intercept is installed. + let mut kvm_cpuid = hv.get_supported_cpuid(kvm_bindings::KVM_MAX_CPUID_ENTRIES)?; + for entry in kvm_cpuid.as_mut_slice().iter_mut() { + if entry.function == 0x8000_0008 { + entry.eax &= !0xff; + entry.eax |= hyperlight_common::layout::MAX_GPA.ilog2() + 1; + } + } + vcpu_fd.set_cpuid2(&kvm_cpuid)?; + Ok(Self { vm_fd, vcpu_fd, diff --git a/src/hyperlight_host/src/hypervisor/virtual_machine/whp.rs b/src/hyperlight_host/src/hypervisor/virtual_machine/whp.rs index 4ada3714d..de8905113 100644 --- a/src/hyperlight_host/src/hypervisor/virtual_machine/whp.rs +++ b/src/hyperlight_host/src/hypervisor/virtual_machine/whp.rs @@ -16,7 +16,6 @@ limitations under the License. 
use std::os::raw::c_void; -use hyperlight_common::mem::PAGE_SIZE_USIZE; use windows::Win32::Foundation::{FreeLibrary, HANDLE}; use windows::Win32::System::Hypervisor::*; use windows::Win32::System::LibraryLoader::*; @@ -33,9 +32,8 @@ use crate::hypervisor::regs::{ use crate::hypervisor::surrogate_process::SurrogateProcess; use crate::hypervisor::surrogate_process_manager::get_surrogate_process_manager; use crate::hypervisor::virtual_machine::{VirtualMachine, VmExit}; -use crate::hypervisor::wrappers::HandleWrapper; use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags}; -use crate::{Result, log_then_return, new_error}; +use crate::{Result, new_error}; #[allow(dead_code)] // Will be used for runtime hypervisor detection pub(crate) fn is_hypervisor_present() -> bool { @@ -64,9 +62,6 @@ pub(crate) struct WhpVm { partition: WHV_PARTITION_HANDLE, // Surrogate process for memory mapping surrogate_process: SurrogateProcess, - // Offset between surrogate process and host process addresses (accounting for guard page) - // Calculated lazily on first map_memory call - surrogate_offset: Option, // Track if initial memory setup is complete. // Used to reject later memory mapping since it's not supported on windows. // TODO remove this flag once memory mapping is supported on windows. @@ -81,7 +76,7 @@ pub(crate) struct WhpVm { unsafe impl Send for WhpVm {} impl WhpVm { - pub(crate) fn new(mmap_file_handle: HandleWrapper, raw_size: usize) -> Result { + pub(crate) fn new() -> Result { const NUM_CPU: u32 = 1; let partition = unsafe { let partition = WHvCreatePartition()?; @@ -98,12 +93,11 @@ impl WhpVm { // Create the surrogate process with the total memory size let mgr = get_surrogate_process_manager()?; - let surrogate_process = mgr.get_surrogate_process(raw_size, mmap_file_handle)?; + let surrogate_process = mgr.get_surrogate_process()?; Ok(WhpVm { partition, surrogate_process, - surrogate_offset: None, initial_memory_setup_done: false, }) } @@ -132,39 +126,13 @@ impl WhpVm { impl VirtualMachine for WhpVm { unsafe fn map_memory(&mut self, (_slot, region): (u32, &MemoryRegion)) -> Result<()> { - // Only allow memory mapping during initial setup (the first batch of regions). - // After the initial setup is complete, subsequent calls should fail, - // since it's not yet implemented. - if self.initial_memory_setup_done { - // Initial setup already completed - reject this mapping - log_then_return!( - "Mapping host memory into the guest not yet supported on this platform" - ); - } - - // Calculate the offset on first call. The offset accounts for the guard page - // at the start of the surrogate process memory. - let offset = if let Some(offset) = self.surrogate_offset { - offset - } else { - // surrogate_address points to the start of the guard page, so add PAGE_SIZE - // to get to the actual shared memory start - let surrogate_address = - self.surrogate_process.allocated_address as usize + PAGE_SIZE_USIZE; - let host_address = region.host_region.start; - let offset = isize::try_from(surrogate_address)? 
- isize::try_from(host_address)?; - self.surrogate_offset = Some(offset); - offset - }; - - let process_handle: HANDLE = self.surrogate_process.process_handle.into(); - - let whvmapgparange2_func = unsafe { - match try_load_whv_map_gpa_range2() { - Ok(func) => func, - Err(e) => return Err(new_error!("Can't find API: {}", e)), - } - }; + // Calculate the surrogate process address for this region + let surrogate_base = self.surrogate_process.map( + region.host_region.start.from_handle, + region.host_region.start.handle_base, + region.host_region.start.handle_size, + )?; + let surrogate_addr = surrogate_base.wrapping_add(region.host_region.start.offset); let flags = region .flags @@ -181,13 +149,16 @@ impl VirtualMachine for WhpVm { .iter() .fold(WHvMapGpaRangeFlagNone, |acc, flag| acc | *flag); - // Calculate the surrogate process address for this region - let surrogate_addr = (isize::try_from(region.host_region.start)? + offset) as *const c_void; - + let whvmapgparange2_func = unsafe { + match try_load_whv_map_gpa_range2() { + Ok(func) => func, + Err(e) => return Err(new_error!("Can't find API: {}", e)), + } + }; let res = unsafe { whvmapgparange2_func( self.partition, - process_handle, + self.surrogate_process.process_handle.into(), surrogate_addr, region.guest_region.start as u64, region.guest_region.len() as u64, @@ -201,8 +172,17 @@ impl VirtualMachine for WhpVm { Ok(()) } - fn unmap_memory(&mut self, (_slot, _region): (u32, &MemoryRegion)) -> Result<()> { - log_then_return!("Mapping host memory into the guest not yet supported on this platform"); + fn unmap_memory(&mut self, (_slot, region): (u32, &MemoryRegion)) -> Result<()> { + unsafe { + WHvUnmapGpaRange( + self.partition, + region.guest_region.start as u64, + region.guest_region.len() as u64, + )?; + } + self.surrogate_process + .unmap(region.host_region.start.handle_base); + Ok(()) } #[expect(non_upper_case_globals, reason = "Windows API constant are lower case")] diff --git a/src/hyperlight_host/src/hypervisor/wrappers.rs b/src/hyperlight_host/src/hypervisor/wrappers.rs index 0d116f812..e0ab91579 100644 --- a/src/hyperlight_host/src/hypervisor/wrappers.rs +++ b/src/hyperlight_host/src/hypervisor/wrappers.rs @@ -57,7 +57,7 @@ impl From<&PSTRWrapper> for PSTR { } /// Wrapper for HANDLE, required since HANDLE is no longer Send. -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct HandleWrapper(HANDLE); impl From for HandleWrapper { diff --git a/src/hyperlight_host/src/mem/layout.rs b/src/hyperlight_host/src/mem/layout.rs index b407d5a45..e353f25b1 100644 --- a/src/hyperlight_host/src/mem/layout.rs +++ b/src/hyperlight_host/src/mem/layout.rs @@ -126,6 +126,10 @@ pub(crate) struct SandboxMemoryLayout { // The offset in the sandbox memory where the code starts guest_code_offset: usize, pub(crate) init_data_permissions: Option, + + // The size of the scratch region in physical memory; note that + // this will appear under the top of physical memory. 
+ scratch_size: usize, } impl Debug for SandboxMemoryLayout { @@ -202,6 +206,10 @@ impl Debug for SandboxMemoryLayout { "Guest Code Offset", &format_args!("{:#x}", self.guest_code_offset), ) + .field( + "Scratch region size", + &format_args!("{:#x}", self.scratch_size), + ) .finish() } } @@ -229,6 +237,7 @@ impl SandboxMemoryLayout { code_size: usize, stack_size: usize, heap_size: usize, + scratch_size: usize, init_data_size: usize, init_data_permissions: Option, ) -> Result { @@ -295,6 +304,7 @@ impl SandboxMemoryLayout { init_data_permissions, pt_offset, pt_size: None, + scratch_size, }) } @@ -324,6 +334,11 @@ impl SandboxMemoryLayout { self.stack_size } + #[instrument(skip_all, parent = Span::current(), level= "Trace")] + pub(super) fn get_scratch_size(&self) -> usize { + self.scratch_size + } + /// Get the offset in guest memory to the output data pointer. #[instrument(skip_all, parent = Span::current(), level= "Trace")] fn get_output_data_pointer_offset(&self) -> usize { @@ -461,7 +476,12 @@ impl SandboxMemoryLayout { } pub fn get_memory_regions(&self, shared_mem: &GuestSharedMemory) -> Result> { - self.get_memory_regions_(shared_mem.base_addr()) + self.get_memory_regions_( + #[cfg(target_os = "windows")] + shared_mem.host_region_base(), + #[cfg(not(target_os = "windows"))] + shared_mem.base_addr(), + ) } /// Returns the memory regions associated with this memory layout, @@ -813,7 +833,7 @@ mod tests { fn test_get_memory_size() { let sbox_cfg = SandboxConfiguration::default(); let sbox_mem_layout = - SandboxMemoryLayout::new(sbox_cfg, 4096, 2048, 4096, 0, None).unwrap(); + SandboxMemoryLayout::new(sbox_cfg, 4096, 2048, 4096, 0x3000, 0, None).unwrap(); assert_eq!( sbox_mem_layout.get_memory_size().unwrap(), get_expected_memory_size(&sbox_mem_layout) diff --git a/src/hyperlight_host/src/mem/memory_region.rs b/src/hyperlight_host/src/mem/memory_region.rs index 4d03d83c4..18b044118 100644 --- a/src/hyperlight_host/src/mem/memory_region.rs +++ b/src/hyperlight_host/src/mem/memory_region.rs @@ -31,6 +31,9 @@ use mshv_bindings::{hv_x64_memory_intercept_message, mshv_user_mem_region}; #[cfg(target_os = "windows")] use windows::Win32::System::Hypervisor::{self, WHV_MEMORY_ACCESS_TYPE}; +#[cfg(target_os = "windows")] +use crate::hypervisor::wrappers::HandleWrapper; + pub(crate) const DEFAULT_GUEST_BLOB_MEM_FLAGS: MemoryRegionFlags = MemoryRegionFlags::READ; bitflags! { @@ -135,6 +138,8 @@ pub enum MemoryRegionType { GuardPage, /// The region contains the Stack Stack, + /// The scratch region + Scratch, } /// A trait that distinguishes between different kinds of memory region representations. @@ -142,7 +147,6 @@ pub enum MemoryRegionType { /// This trait is used to parameterize [`MemoryRegion_`] pub(crate) trait MemoryRegionKind { /// The type used to represent host memory addresses. - /// type HostBaseType: Copy; /// Computes an address by adding a size to a base address. @@ -162,6 +166,7 @@ pub(crate) trait MemoryRegionKind { #[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)] pub(crate) struct HostGuestMemoryRegion {} +#[cfg(not(target_os = "windows"))] impl MemoryRegionKind for HostGuestMemoryRegion { type HostBaseType = usize; @@ -169,6 +174,61 @@ impl MemoryRegionKind for HostGuestMemoryRegion { base + size } } +/// A [`HostRegionBase`] keeps track of not just a pointer, but also a +/// file mapping into which it is pointing. This is used on WHP, +/// where mapping the actual pointer into the VM actually involves +/// first mapping the file into a surrogate process. 
+#[cfg(target_os = "windows")] +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub struct HostRegionBase { + /// The file handle from which the file mapping was created + pub from_handle: HandleWrapper, + /// The base of the file mapping + pub handle_base: usize, + /// The size of the file mapping + pub handle_size: usize, + /// The offset into file mapping region where this + /// [`HostRegionBase`] is pointing. + pub offset: usize, +} +#[cfg(target_os = "windows")] +impl std::hash::Hash for HostRegionBase { + fn hash(&self, state: &mut H) { + // it's safe not to hash the handle (which is not hashable) + // since, for any of these in use at the same time, the handle + // should be uniquely determined by the + // handle_base/handle_size combination. + self.handle_base.hash(state); + self.handle_size.hash(state); + self.offset.hash(state); + } +} +#[cfg(target_os = "windows")] +impl From for usize { + fn from(x: HostRegionBase) -> usize { + x.handle_base + x.offset + } +} +#[cfg(target_os = "windows")] +impl TryFrom for isize { + type Error = >::Error; + fn try_from(x: HostRegionBase) -> Result { + >::try_from(x.into()) + } +} +#[cfg(target_os = "windows")] +impl MemoryRegionKind for HostGuestMemoryRegion { + type HostBaseType = HostRegionBase; + + fn add(base: Self::HostBaseType, size: usize) -> Self::HostBaseType { + HostRegionBase { + from_handle: base.from_handle, + handle_base: base.handle_base, + handle_size: base.handle_size, + offset: base.offset + size, + } + } +} /// Type for memory regions that only track guest addresses. /// diff --git a/src/hyperlight_host/src/mem/mgr.rs b/src/hyperlight_host/src/mem/mgr.rs index e309650cb..0fedab3aa 100644 --- a/src/hyperlight_host/src/mem/mgr.rs +++ b/src/hyperlight_host/src/mem/mgr.rs @@ -45,6 +45,8 @@ pub(crate) const STACK_COOKIE_LEN: usize = 16; pub(crate) struct SandboxMemoryManager { /// Shared memory for the Sandbox pub(crate) shared_mem: S, + /// Scratch memory for the Sandbox + pub(crate) scratch_mem: S, /// The memory layout of the underlying shared memory pub(crate) layout: SandboxMemoryLayout, /// Pointer to where to load memory from @@ -145,6 +147,7 @@ where pub(crate) fn new( layout: SandboxMemoryLayout, shared_mem: S, + scratch_mem: S, load_addr: RawPtr, entrypoint_offset: Option, stack_cookie: [u8; STACK_COOKIE_LEN], @@ -152,6 +155,7 @@ where Self { layout, shared_mem, + scratch_mem, load_addr, entrypoint_offset, mapped_rgns: 0, @@ -191,12 +195,6 @@ where mapped_regions, ) } - - /// This function restores a memory snapshot from a given snapshot. 
- pub(crate) fn restore_snapshot(&mut self, snapshot: &Snapshot) -> Result<()> { - self.shared_mem.restore_from_snapshot(snapshot)?; - Ok(()) - } } impl SandboxMemoryManager { @@ -204,6 +202,7 @@ impl SandboxMemoryManager { let layout = *s.layout(); let mut shared_mem = ExclusiveSharedMemory::new(s.mem_size())?; shared_mem.copy_from_slice(s.memory(), 0)?; + let scratch_mem = ExclusiveSharedMemory::new(s.layout().get_scratch_size())?; let load_addr: RawPtr = RawPtr::try_from(layout.get_guest_code_address())?; let stack_cookie = rand::random::<[u8; STACK_COOKIE_LEN]>(); let entrypoint_gva = s.preinitialise(); @@ -211,6 +210,7 @@ impl SandboxMemoryManager { Ok(Self::new( layout, shared_mem, + scratch_mem, load_addr, entrypoint_offset, stack_cookie, @@ -236,9 +236,11 @@ impl SandboxMemoryManager { SandboxMemoryManager, ) { let (hshm, gshm) = self.shared_mem.build(); + let (hscratch, gscratch) = self.scratch_mem.build(); ( SandboxMemoryManager { shared_mem: hshm, + scratch_mem: hscratch, layout: self.layout, load_addr: self.load_addr.clone(), entrypoint_offset: self.entrypoint_offset, @@ -248,6 +250,7 @@ impl SandboxMemoryManager { }, SandboxMemoryManager { shared_mem: gshm, + scratch_mem: gscratch, layout: self.layout, load_addr: self.load_addr.clone(), entrypoint_offset: self.entrypoint_offset, @@ -382,6 +385,37 @@ impl SandboxMemoryManager { }; } } + + /// This function restores a memory snapshot from a given snapshot. + pub(crate) fn restore_snapshot( + &mut self, + snapshot: &Snapshot, + ) -> Result> { + if self.shared_mem.mem_size() != snapshot.mem_size() { + return Err(new_error!( + "Snapshot size does not match current memory size: {} != {}", + self.shared_mem.raw_mem_size(), + snapshot.mem_size() + )); + } + self.shared_mem.restore_from_snapshot(snapshot)?; + let new_scratch_size = snapshot.layout().get_scratch_size(); + if new_scratch_size == self.scratch_mem.mem_size() { + self.scratch_mem.zero()?; + Ok(None) + } else { + let new_scratch_mem = ExclusiveSharedMemory::new(new_scratch_size)?; + let (hscratch, gscratch) = new_scratch_mem.build(); + // Even though this destroys the reference to the host + // side of the old scratch mapping, the VM should still + // own the reference to the guest side of the old scratch + // mapping, so it won't actually be deallocated until it + // has been unmapped from the VM. 
+ self.scratch_mem = hscratch; + + Ok(Some(gscratch)) + } + } } #[cfg(test)] diff --git a/src/hyperlight_host/src/mem/shared_mem.rs b/src/hyperlight_host/src/mem/shared_mem.rs index 66e10fb72..9630ab040 100644 --- a/src/hyperlight_host/src/mem/shared_mem.rs +++ b/src/hyperlight_host/src/mem/shared_mem.rs @@ -35,6 +35,9 @@ use windows::Win32::System::Memory::{ #[cfg(target_os = "windows")] use windows::core::PCSTR; +use super::memory_region::{ + HostGuestMemoryRegion, MemoryRegion, MemoryRegionFlags, MemoryRegionKind, MemoryRegionType, +}; use crate::HyperlightError::SnapshotSizeMismatch; #[cfg(target_os = "windows")] use crate::HyperlightError::WindowsAPIError; @@ -310,11 +313,12 @@ impl ExclusiveSharedMemory { #[cfg(target_os = "linux")] #[instrument(skip_all, parent = Span::current(), level= "Trace")] pub fn new(min_size_bytes: usize) -> Result { - #[cfg(miri)] - use libc::MAP_PRIVATE; - use libc::{MAP_ANONYMOUS, MAP_FAILED, PROT_READ, PROT_WRITE, c_int, mmap, off_t, size_t}; + use libc::{ + MAP_ANONYMOUS, MAP_FAILED, MAP_PRIVATE, PROT_READ, PROT_WRITE, c_int, mmap, off_t, + size_t, + }; #[cfg(not(miri))] - use libc::{MAP_NORESERVE, MAP_SHARED, PROT_NONE, mprotect}; + use libc::{MAP_NORESERVE, PROT_NONE, mprotect}; if min_size_bytes == 0 { return Err(new_error!("Cannot create shared memory with size 0")); @@ -342,7 +346,7 @@ impl ExclusiveSharedMemory { // allocate the memory #[cfg(not(miri))] - let flags = MAP_ANONYMOUS | MAP_SHARED | MAP_NORESERVE; + let flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE; #[cfg(miri)] let flags = MAP_ANONYMOUS | MAP_PRIVATE; @@ -641,6 +645,43 @@ impl ExclusiveSharedMemory { } } +impl GuestSharedMemory { + /// Create a [`super::memory_region::MemoryRegion`] structure + /// suitable for mapping this region into a VM + pub(crate) fn mapping_at( + &self, + guest_base: u64, + region_type: MemoryRegionType, + ) -> MemoryRegion { + let flags = match region_type { + MemoryRegionType::Scratch => MemoryRegionFlags::READ | MemoryRegionFlags::EXECUTE, + #[allow(clippy::panic)] + // In the future, all the host side knowledge about memory + // region types should collapse down to Snapshot vs + // Scratch, at which time this panicking case will be + // unnecessary. For now, we will panic if one of the + // legacy regions ends up in this function, which very + // much should be impossible (since the only callers of it + // directly pass a literal new-style region type). 
+ _ => panic!( + "GuestSharedMemory::mapping_at should only be used for Scratch or Snapshot regions" + ), + }; + let guest_base = guest_base as usize; + #[cfg(not(windows))] + let host_base = self.base_addr(); + #[cfg(windows)] + let host_base = self.host_region_base(); + let host_end = ::add(host_base, self.mem_size()); + MemoryRegion { + guest_region: guest_base..(guest_base + self.mem_size()), + host_region: host_base..host_end, + region_type, + flags, + } + } +} + /// A trait that abstracts over the particular kind of SharedMemory, /// used when invoking operations from Rust that absolutely must have /// exclusive control over the shared memory for correctness + @@ -684,6 +725,18 @@ pub trait SharedMemory { self.region().size } + /// Extract a base address that can be mapped into a VM for this + /// SharedMemory + #[cfg(target_os = "windows")] + fn host_region_base(&self) -> super::memory_region::HostRegionBase { + super::memory_region::HostRegionBase { + from_handle: self.region().handle.into(), + handle_base: self.region().ptr as usize, + handle_size: self.region().size, + offset: PAGE_SIZE_USIZE, + } + } + /// Run some code with exclusive access to the SharedMemory /// underlying this. If the SharedMemory is not an /// ExclusiveSharedMemory, any concurrent accesses to the relevant @@ -701,6 +754,29 @@ pub trait SharedMemory { } self.with_exclusivity(|e| e.copy_from_slice(snapshot.memory(), 0))? } + + /// Zero a shared memory region + fn zero(&mut self) -> Result<()> { + self.with_exclusivity(|e| { + #[allow(unused_mut)] // unused on some platforms, although not others + let mut do_copy = true; + // TODO: Compare & add heuristic thresholds: mmap, MADV_DONTNEED, MADV_REMOVE, MADV_FREE (?) + #[cfg(target_os = "linux")] + unsafe { + let ret = libc::madvise( + e.region.ptr as *mut libc::c_void, + e.region.size, + libc::MADV_DONTNEED, + ); + if ret == 0 { + do_copy = false; + } + } + if do_copy { + e.as_mut_slice().fill(0); + } + }) + } } impl SharedMemory for ExclusiveSharedMemory { diff --git a/src/hyperlight_host/src/sandbox/config.rs b/src/hyperlight_host/src/sandbox/config.rs index 567247918..75fc36f40 100644 --- a/src/hyperlight_host/src/sandbox/config.rs +++ b/src/hyperlight_host/src/sandbox/config.rs @@ -79,6 +79,8 @@ pub struct SandboxConfiguration { /// Note: Since real-time signals can vary across platforms, ensure that the offset /// results in a signal number that is not already in use by other components of the system. interrupt_vcpu_sigrtmin_offset: u8, + /// How much writable memory to offer the guest + scratch_size: usize, } impl SandboxConfiguration { @@ -98,6 +100,8 @@ impl SandboxConfiguration { pub const DEFAULT_HEAP_SIZE: u64 = 131072; /// The default stack size of a hyperlight sandbox pub const DEFAULT_STACK_SIZE: u64 = 65536; + /// The default size of the scratch region + pub const DEFAULT_SCRATCH_SIZE: usize = 0x40000; #[allow(clippy::too_many_arguments)] /// Create a new configuration for a sandbox with the given sizes. 
@@ -107,6 +111,7 @@ impl SandboxConfiguration { output_data_size: usize, stack_size_override: Option, heap_size_override: Option, + scratch_size: usize, interrupt_retry_delay: Duration, interrupt_vcpu_sigrtmin_offset: u8, #[cfg(gdb)] guest_debug_info: Option, @@ -117,6 +122,7 @@ impl SandboxConfiguration { output_data_size: max(output_data_size, Self::MIN_OUTPUT_SIZE), stack_size_override: stack_size_override.unwrap_or(0), heap_size_override: heap_size_override.unwrap_or(0), + scratch_size, interrupt_retry_delay, interrupt_vcpu_sigrtmin_offset, #[cfg(gdb)] @@ -215,6 +221,11 @@ impl SandboxConfiguration { self.output_data_size } + #[instrument(skip_all, parent = Span::current(), level= "Trace")] + pub(crate) fn get_scratch_size(&self) -> usize { + self.scratch_size + } + #[cfg(crashdump)] #[instrument(skip_all, parent = Span::current(), level= "Trace")] pub(crate) fn get_guest_core_dump(&self) -> bool { @@ -262,6 +273,7 @@ impl Default for SandboxConfiguration { Self::DEFAULT_OUTPUT_SIZE, None, None, + Self::DEFAULT_SCRATCH_SIZE, Self::DEFAULT_INTERRUPT_RETRY_DELAY, Self::INTERRUPT_VCPU_SIGRTMIN_OFFSET, #[cfg(gdb)] @@ -282,11 +294,13 @@ mod tests { const HEAP_SIZE_OVERRIDE: u64 = 0x50000; const INPUT_DATA_SIZE_OVERRIDE: usize = 0x4000; const OUTPUT_DATA_SIZE_OVERRIDE: usize = 0x4001; + const SCRATCH_SIZE_OVERRIDE: usize = 0x60000; let mut cfg = SandboxConfiguration::new( INPUT_DATA_SIZE_OVERRIDE, OUTPUT_DATA_SIZE_OVERRIDE, Some(STACK_SIZE_OVERRIDE), Some(HEAP_SIZE_OVERRIDE), + SCRATCH_SIZE_OVERRIDE, SandboxConfiguration::DEFAULT_INTERRUPT_RETRY_DELAY, SandboxConfiguration::INTERRUPT_VCPU_SIGRTMIN_OFFSET, #[cfg(gdb)] @@ -297,13 +311,17 @@ mod tests { let stack_size = cfg.get_stack_size(); let heap_size = cfg.get_heap_size(); + let scratch_size = cfg.get_scratch_size(); assert_eq!(STACK_SIZE_OVERRIDE, stack_size); assert_eq!(HEAP_SIZE_OVERRIDE, heap_size); + assert_eq!(SCRATCH_SIZE_OVERRIDE, scratch_size); cfg.stack_size_override = 1024; cfg.heap_size_override = 2048; + cfg.scratch_size = 0x40000; assert_eq!(1024, cfg.stack_size_override); assert_eq!(2048, cfg.heap_size_override); + assert_eq!(0x40000, cfg.scratch_size); assert_eq!(INPUT_DATA_SIZE_OVERRIDE, cfg.input_data_size); assert_eq!(OUTPUT_DATA_SIZE_OVERRIDE, cfg.output_data_size); } @@ -315,6 +333,7 @@ mod tests { SandboxConfiguration::MIN_OUTPUT_SIZE - 1, None, None, + SandboxConfiguration::DEFAULT_SCRATCH_SIZE, SandboxConfiguration::DEFAULT_INTERRUPT_RETRY_DELAY, SandboxConfiguration::INTERRUPT_VCPU_SIGRTMIN_OFFSET, #[cfg(gdb)] diff --git a/src/hyperlight_host/src/sandbox/initialized_multi_use.rs b/src/hyperlight_host/src/sandbox/initialized_multi_use.rs index 349536cfc..72fa30f62 100644 --- a/src/hyperlight_host/src/sandbox/initialized_multi_use.rs +++ b/src/hyperlight_host/src/sandbox/initialized_multi_use.rs @@ -265,7 +265,9 @@ impl MultiUseSandbox { return Err(SnapshotSandboxMismatch); } - self.mem_mgr.restore_snapshot(&snapshot)?; + if let Some(gscratch) = self.mem_mgr.restore_snapshot(&snapshot)? 
{ + self.vm.update_scratch_mapping(gscratch)?; + } let current_regions: HashSet<_> = self.vm.get_mapped_regions().cloned().collect(); let snapshot_regions: HashSet<_> = snapshot.regions().iter().cloned().collect(); diff --git a/src/hyperlight_host/src/sandbox/snapshot.rs b/src/hyperlight_host/src/sandbox/snapshot.rs index 1033d639a..6f9bd3a04 100644 --- a/src/hyperlight_host/src/sandbox/snapshot.rs +++ b/src/hyperlight_host/src/sandbox/snapshot.rs @@ -88,8 +88,12 @@ fn hash(memory: &[u8], regions: &[MemoryRegion]) -> Result<[u8; 32]> { for rgn in regions { hasher.update(&usize::to_le_bytes(rgn.guest_region.start)); let guest_len = rgn.guest_region.end - rgn.guest_region.start; - hasher.update(&usize::to_le_bytes(rgn.host_region.start)); - let host_len = rgn.host_region.end - rgn.host_region.start; + #[allow(clippy::useless_conversion)] + let host_start_addr: usize = rgn.host_region.start.into(); + #[allow(clippy::useless_conversion)] + let host_end_addr: usize = rgn.host_region.end.into(); + hasher.update(&usize::to_le_bytes(host_start_addr)); + let host_len = host_end_addr - host_start_addr; if guest_len != host_len { return Err(MemoryRegionSizeMismatch( host_len, @@ -136,6 +140,7 @@ impl Snapshot { exe_info.loaded_size(), usize::try_from(cfg.get_stack_size())?, usize::try_from(cfg.get_heap_size())?, + cfg.get_scratch_size(), guest_blob_size, guest_blob_mem_flags, )?; @@ -155,12 +160,15 @@ impl Snapshot { #[cfg(feature = "init-paging")] { + // Set up page table entries for the snapshot let pt_base_gpa = crate::mem::layout::SandboxMemoryLayout::BASE_ADDRESS + layout.get_pt_offset(); let pt_buf = crate::mem::mgr::GuestPageTableBuffer::new(pt_base_gpa); use hyperlight_common::vmem::{self, BasicMapping, Mapping, MappingKind}; use crate::mem::memory_region::{GuestMemoryRegion, MemoryRegionFlags}; + + // 1. Map the (ideally readonly) pages of snapshot data for rgn in layout.get_memory_regions_::(())?.iter() { let readable = rgn.flags.contains(MemoryRegionFlags::READ); let writable = rgn.flags.contains(MemoryRegionFlags::WRITE) @@ -184,6 +192,23 @@ impl Snapshot { }; unsafe { vmem::map(&pt_buf, mapping) }; } + + // 2. Map the scratch region + let scratch_size = cfg.get_scratch_size(); + let mapping = Mapping { + phys_base: hyperlight_common::layout::scratch_base_gpa(scratch_size), + virt_base: hyperlight_common::layout::scratch_base_gva(scratch_size), + len: scratch_size as u64, + kind: MappingKind::BasicMapping(BasicMapping { + readable: true, + writable: true, + executable: true, + }), + }; + unsafe { vmem::map(&pt_buf, mapping) }; + + // 3. 
diff --git a/src/hyperlight_host/src/sandbox/snapshot.rs b/src/hyperlight_host/src/sandbox/snapshot.rs
index 1033d639a..6f9bd3a04 100644
--- a/src/hyperlight_host/src/sandbox/snapshot.rs
+++ b/src/hyperlight_host/src/sandbox/snapshot.rs
@@ -88,8 +88,12 @@ fn hash(memory: &[u8], regions: &[MemoryRegion]) -> Result<[u8; 32]> {
     for rgn in regions {
         hasher.update(&usize::to_le_bytes(rgn.guest_region.start));
         let guest_len = rgn.guest_region.end - rgn.guest_region.start;
-        hasher.update(&usize::to_le_bytes(rgn.host_region.start));
-        let host_len = rgn.host_region.end - rgn.host_region.start;
+        #[allow(clippy::useless_conversion)]
+        let host_start_addr: usize = rgn.host_region.start.into();
+        #[allow(clippy::useless_conversion)]
+        let host_end_addr: usize = rgn.host_region.end.into();
+        hasher.update(&usize::to_le_bytes(host_start_addr));
+        let host_len = host_end_addr - host_start_addr;
         if guest_len != host_len {
             return Err(MemoryRegionSizeMismatch(
                 host_len,
@@ -136,6 +140,7 @@ impl Snapshot {
             exe_info.loaded_size(),
             usize::try_from(cfg.get_stack_size())?,
             usize::try_from(cfg.get_heap_size())?,
+            cfg.get_scratch_size(),
             guest_blob_size,
             guest_blob_mem_flags,
         )?;
@@ -155,12 +160,15 @@ impl Snapshot {

         #[cfg(feature = "init-paging")]
         {
+            // Set up page table entries for the snapshot
             let pt_base_gpa = crate::mem::layout::SandboxMemoryLayout::BASE_ADDRESS + layout.get_pt_offset();
             let pt_buf = crate::mem::mgr::GuestPageTableBuffer::new(pt_base_gpa);

             use hyperlight_common::vmem::{self, BasicMapping, Mapping, MappingKind};
             use crate::mem::memory_region::{GuestMemoryRegion, MemoryRegionFlags};
+
+            // 1. Map the (ideally readonly) pages of snapshot data
             for rgn in layout.get_memory_regions_::(())?.iter() {
                 let readable = rgn.flags.contains(MemoryRegionFlags::READ);
                 let writable = rgn.flags.contains(MemoryRegionFlags::WRITE)
@@ -184,6 +192,23 @@ impl Snapshot {
                 };
                 unsafe { vmem::map(&pt_buf, mapping) };
             }
+
+            // 2. Map the scratch region
+            let scratch_size = cfg.get_scratch_size();
+            let mapping = Mapping {
+                phys_base: hyperlight_common::layout::scratch_base_gpa(scratch_size),
+                virt_base: hyperlight_common::layout::scratch_base_gva(scratch_size),
+                len: scratch_size as u64,
+                kind: MappingKind::BasicMapping(BasicMapping {
+                    readable: true,
+                    writable: true,
+                    executable: true,
+                }),
+            };
+            unsafe { vmem::map(&pt_buf, mapping) };
+
+            // 3. Map the page tables themselves, in order to allow the
+            // guest to update them easily
             let mut pt_size_mapped = 0;
             while pt_buf.size() > pt_size_mapped {
                 let mapping = Mapping {
@@ -302,7 +327,8 @@ mod tests {

         let cfg = crate::sandbox::SandboxConfiguration::default();
         let layout =
-            crate::mem::layout::SandboxMemoryLayout::new(cfg, 4096, 2048, 4096, 0, None).unwrap();
+            crate::mem::layout::SandboxMemoryLayout::new(cfg, 4096, 2048, 4096, 0x3000, 0, None)
+                .unwrap();

         // Take snapshot of data1
         let snapshot = super::Snapshot::new(
@@ -330,7 +356,8 @@ mod tests {

         let cfg = crate::sandbox::SandboxConfiguration::default();
         let layout =
-            crate::mem::layout::SandboxMemoryLayout::new(cfg, 4096, 2048, 4096, 0, None).unwrap();
+            crate::mem::layout::SandboxMemoryLayout::new(cfg, 4096, 2048, 4096, 0x3000, 0, None)
+                .unwrap();

         let snapshot = super::Snapshot::new(
             &mut gm,
@@ -349,7 +376,8 @@ mod tests {

         let cfg = crate::sandbox::SandboxConfiguration::default();
         let layout =
-            crate::mem::layout::SandboxMemoryLayout::new(cfg, 4096, 2048, 4096, 0, None).unwrap();
+            crate::mem::layout::SandboxMemoryLayout::new(cfg, 4096, 2048, 4096, 0x3000, 0, None)
+                .unwrap();

         // Create first snapshot with pattern A
         let pattern_a = vec![0xAA; PAGE_SIZE_USIZE];
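The scratch region is mapped readable, writable and executable at guest addresses derived from hyperlight_common::layout::scratch_base_gpa / scratch_base_gva, both of which take the configured size. Those helpers are not shown in this diff, so the self-contained sketch below only illustrates the general shape of size-derived placement and of the mapping descriptor being built; the ceiling constants, the "place just below a fixed ceiling" rule, and the struct definitions are invented for the example:

    // Illustrative stand-ins, not the real hyperlight_common::layout code.
    #[derive(Debug)]
    struct BasicMapping { readable: bool, writable: bool, executable: bool }

    #[derive(Debug)]
    struct Mapping { phys_base: u64, virt_base: u64, len: u64, kind: BasicMapping }

    // Assumed placement rule: carve the scratch region out just below a fixed
    // ceiling, so its base depends on the configured size (mirroring the size
    // parameter the real scratch_base_gpa/scratch_base_gva helpers take).
    const SCRATCH_CEILING_GPA: u64 = 0x1_0000_0000;
    const SCRATCH_CEILING_GVA: u64 = 0xffff_8000_0000_0000;

    fn scratch_base_gpa(size: usize) -> u64 { SCRATCH_CEILING_GPA - size as u64 }
    fn scratch_base_gva(size: usize) -> u64 { SCRATCH_CEILING_GVA - size as u64 }

    fn main() {
        let scratch_size: usize = 0x60000;
        let mapping = Mapping {
            phys_base: scratch_base_gpa(scratch_size),
            virt_base: scratch_base_gva(scratch_size),
            len: scratch_size as u64,
            kind: BasicMapping { readable: true, writable: true, executable: true },
        };
        println!("{mapping:?}");
    }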
"tracing", "tracing-core", ] @@ -307,15 +307,6 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -dependencies = [ - "lock_api", -] - [[package]] name = "spin" version = "0.10.0" @@ -338,18 +329,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", diff --git a/src/tests/rust_guests/simpleguest/Cargo.lock b/src/tests/rust_guests/simpleguest/Cargo.lock index 00cc2a942..17e3e12da 100644 --- a/src/tests/rust_guests/simpleguest/Cargo.lock +++ b/src/tests/rust_guests/simpleguest/Cargo.lock @@ -333,18 +333,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", diff --git a/src/tests/rust_guests/witguest/Cargo.lock b/src/tests/rust_guests/witguest/Cargo.lock index 08d4f9a4b..791c313a7 100644 --- a/src/tests/rust_guests/witguest/Cargo.lock +++ b/src/tests/rust_guests/witguest/Cargo.lock @@ -81,11 +81,11 @@ checksum = "34efbcccd345379ca2868b2b2c9d3782e9cc58ba87bc7d79d5b53d9c9ae6f25d" [[package]] name = "buddy_system_allocator" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a0108968a3a2dab95b089c0fc3f1afa7759aa5ebe6f1d86d206d6f7ba726eb" +checksum = "b672b945a3e4f4f40bfd4cd5ee07df9e796a42254ce7cd6d2599ad969244c44a" dependencies = [ - "spin 0.9.8", + "spin", ] [[package]] @@ -146,9 +146,9 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "flatbuffers" -version = "25.9.23" +version = "25.12.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b6620799e7340ebd9968d2e0708eb82cf1971e9a16821e2091b6d6e475eed5" +checksum = "35f6839d7b3b98adde531effaf34f0c2badc6f4735d26fe74709d8e513a96ef3" dependencies = [ "bitflags", "rustc_version", @@ -189,7 +189,7 @@ dependencies = [ "anyhow", "flatbuffers", "log", - "spin 0.10.0", + "spin", "thiserror", ] @@ -247,7 +247,7 @@ dependencies = [ "hyperlight-guest-tracing", "linkme", "log", - "spin 0.10.0", + "spin", "tracing", ] @@ -266,7 +266,7 @@ name = 
"hyperlight-guest-tracing" version = "0.12.0" dependencies = [ "hyperlight-common", - "spin 0.10.0", + "spin", "tracing", "tracing-core", ] @@ -418,18 +418,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.103" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" dependencies = [ "unicode-ident", ] [[package]] name = "quote" -version = "1.0.42" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a" dependencies = [ "proc-macro2", ] @@ -537,15 +537,6 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -dependencies = [ - "lock_api", -] - [[package]] name = "spin" version = "0.10.0" @@ -557,9 +548,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.111" +version = "2.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", "quote", @@ -568,18 +559,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", @@ -766,5 +757,5 @@ dependencies = [ "hyperlight-component-macro", "hyperlight-guest", "hyperlight-guest-bin", - "spin 0.10.0", + "spin", ]