From b443bd89a0250caffdd1c961bfd7ea60721e52f7 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Fri, 10 Apr 2026 16:37:07 +0000 Subject: [PATCH] harden VTL memory protection --- litebox_platform_lvbs/src/lib.rs | 5 +++ litebox_platform_lvbs/src/mshv/error.rs | 6 +++- litebox_platform_lvbs/src/mshv/vsm.rs | 47 +++++++++++++++++++------ 3 files changed, 47 insertions(+), 11 deletions(-) diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index 2487086d7..c7b483886 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -628,6 +628,11 @@ impl LinuxKernel { CPU_MHZ.store(cpu_mhz, core::sync::atomic::Ordering::Relaxed); } + /// Returns the physical frame range belonging to VTL1. + pub fn vtl1_phys_frame_range(&self) -> PhysFrameRange { + self.vtl1_phys_frame_range + } + /// This function maps VTL0 physical page frames containing the physical addresses /// from `phys_start` to `phys_end` to the VTL1 kernel page table. It internally page aligns /// the input addresses to ensure the mapped memory area covers the entire input addresses diff --git a/litebox_platform_lvbs/src/mshv/error.rs b/litebox_platform_lvbs/src/mshv/error.rs index 5d6274be5..2df1a0dba 100644 --- a/litebox_platform_lvbs/src/mshv/error.rs +++ b/litebox_platform_lvbs/src/mshv/error.rs @@ -41,6 +41,9 @@ pub enum VsmError { #[error("invalid physical address")] InvalidPhysicalAddress, + #[error("physical address range overlaps with VTL1 memory")] + Vtl1MemoryOverlap, + // Memory/Data Errors #[error("invalid memory attributes")] MemoryAttributeInvalid, @@ -172,7 +175,8 @@ impl From<VsmError> for Errno { | VsmError::BootSignalWriteFailed | VsmError::CpuOnlineMaskCopyFailed | VsmError::HekiPagesCopyFailed - | VsmError::Vtl0CopyFailed => Errno::EFAULT, + | VsmError::Vtl0CopyFailed + | VsmError::Vtl1MemoryOverlap => Errno::EFAULT, // Not found errors VsmError::SystemCertificatesNotFound diff --git a/litebox_platform_lvbs/src/mshv/vsm.rs
b/litebox_platform_lvbs/src/mshv/vsm.rs index 458fb2f66..baafa74b5 100644 --- a/litebox_platform_lvbs/src/mshv/vsm.rs +++ b/litebox_platform_lvbs/src/mshv/vsm.rs @@ -85,13 +85,10 @@ pub(crate) fn init(is_bsp: bool) { if is_bsp { if let Ok((start, size)) = get_vtl1_memory_info() { debug_serial_println!("VSM: Protect GPAs from {:#x} to {:#x}", start, start + size); - if protect_physical_memory_range( - PhysFrame::range( - PhysFrame::containing_address(PhysAddr::new(start)), - PhysFrame::containing_address(PhysAddr::new(start + size)), - ), - MemAttr::empty(), - ) + if protect_vtl1_physical_memory_range(PhysFrame::range( + PhysFrame::containing_address(PhysAddr::new(start)), + PhysFrame::containing_address(PhysAddr::new(start + size)), + )) .is_err() { panic!("Failed to protect VTL1 memory"); @@ -1306,14 +1303,20 @@ fn copy_heki_pages_from_vtl0(pa: u64, nranges: u64) -> Option> { Some(heki_pages) } -/// This function protects a physical memory range. It is a safe wrapper for `hv_modify_vtl_protection_mask`. -/// `phys_frame_range` specifies the physical frame range to protect -/// `mem_attr` specifies the memory attributes to be applied to the range +/// This function protects a VTL0 physical memory range from potentially compromised VTL0 by +/// restricting its access permissions using VTL protection mask (e.g., kernel code integrity). +/// It is a safe wrapper for `hv_modify_vtl_protection_mask`. 
`phys_frame_range` specifies the +/// physical frame range to protect (must belong to VTL0). `mem_attr` specifies the memory +/// attributes (VTL0's allowed access) to be applied to the range #[inline] pub(crate) fn protect_physical_memory_range( phys_frame_range: PhysFrameRange, mem_attr: MemAttr, ) -> Result<(), VsmError> { + let vtl1_range = crate::platform_low().vtl1_phys_frame_range(); + if phys_frame_range.start < vtl1_range.end && vtl1_range.start < phys_frame_range.end { + return Err(VsmError::Vtl1MemoryOverlap); + } let pa = phys_frame_range.start.start_address().as_u64(); let num_pages = phys_frame_range.count() as u64; if num_pages > 0 { @@ -1323,6 +1326,30 @@ pub(crate) fn protect_physical_memory_range( Ok(()) } +/// This function is a variant of [`protect_physical_memory_range`] to protect a VTL1 physical memory range. +/// Unlike [`protect_physical_memory_range`], this is intended exclusively for securing VTL1's own pages. +/// VTL0 should never access VTL1 memory, so the memory attribute is always empty (no read, write, or execute). +/// +/// Note: this function doesn't check whether `phys_frame_range` belongs to VTL1 because it is called by the BSP +/// before the kernel platform data structure is initialized. As a result, one might call this function with +/// a VTL0 physical memory range, which would only restrict access to that range. +#[inline] +pub(crate) fn protect_vtl1_physical_memory_range( + phys_frame_range: PhysFrameRange, +) -> Result<(), VsmError> { + let pa = phys_frame_range.start.start_address().as_u64(); + let num_pages = phys_frame_range.count() as u64; + if num_pages > 0 { + hv_modify_vtl_protection_mask( + pa, + num_pages, + mem_attr_to_hv_page_prot_flags(MemAttr::empty()), + ) + .map_err(VsmError::HypercallFailed)?; + } + Ok(()) +} + /// Data structure for maintaining the memory content of a kernel module by its sections. Currently, it only maintains /// certain sections like `.text` and `.init.text` which are needed for module validation.
pub struct ModuleMemory {