From 1c50c5ff286f3accc9bca4991891a71d5296f98c Mon Sep 17 00:00:00 2001 From: yvt Date: Sat, 22 Nov 2025 14:38:14 +0900 Subject: [PATCH 01/11] fix(utils): use `UnsafeCell` in temporary reference created by `nonnull_slice_len` > [..] because [..] there might be outstanding mutable references to the > slice. > The exact aliasing rules are not determined yet, but here is an > outline of the general principles: `&T` must point to memory that is > not mutated while they are live (except for data inside an > `UnsafeCell`), --- crates/rlsf/src/utils.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/rlsf/src/utils.rs b/crates/rlsf/src/utils.rs index 275815f..f608ae9 100644 --- a/crates/rlsf/src/utils.rs +++ b/crates/rlsf/src/utils.rs @@ -1,4 +1,4 @@ -use core::{mem::MaybeUninit, ptr::NonNull}; +use core::{cell::UnsafeCell, mem::MaybeUninit, ptr::NonNull}; /// Polyfill for #[inline] @@ -9,11 +9,13 @@ pub fn nonnull_slice_from_raw_parts(ptr: NonNull, len: usize) -> NonNull<[ /// Polyfill for #[inline] pub fn nonnull_slice_len(ptr: NonNull<[T]>) -> usize { + // FIXME: Use `NonNull<[T]>::len` (stabilized in Rust 1.63) // Safety: We are just reading the slice length embedded in the fat // pointer and not dereferencing the pointer. We also convert it - // to `*mut [MaybeUninit]` just in case because the slice - // might be uninitialized. - unsafe { (*(ptr.as_ptr() as *const [MaybeUninit])).len() } + // to `*mut [MaybeUninit>]` just in case because the + // slice might be uninitialized and there might be outstanding + // mutable references to the slice. + unsafe { (&*(ptr.as_ptr() as *const [MaybeUninit>])).len() } } // Polyfill for From f6a554b4f562c0bd41ced41fcdcf83551c664fda Mon Sep 17 00:00:00 2001 From: yvt Date: Sun, 23 Nov 2025 11:57:11 +0900 Subject: [PATCH 02/11] refactor(utils): mark `nonnull_slice_len` as unsafe Because of how this polyfill is implemented, it can only be used on a dereferencable pointer. 
It does not need to be initialized (owing to `MaybeUninit`) or immutable (owing to `UnsafeCell`). --- crates/rlsf/src/flex.rs | 46 ++++++++++++++++++++++++++++------------ crates/rlsf/src/utils.rs | 15 ++++++++++--- 2 files changed, 44 insertions(+), 17 deletions(-) diff --git a/crates/rlsf/src/flex.rs b/crates/rlsf/src/flex.rs index 216b193..37ce251 100644 --- a/crates/rlsf/src/flex.rs +++ b/crates/rlsf/src/flex.rs @@ -222,8 +222,13 @@ const _: () = if core::mem::size_of::() != GRANULARITY / 2 { impl PoolFtr { /// Get a pointer to `PoolFtr` for a given allocation. + /// + /// # Safety + /// + /// `ptr` must be dereferencable. This is a limitation of + /// [`nonnull_slice_end`]. #[inline] - fn get_for_alloc(alloc: NonNull<[u8]>, alloc_align: usize) -> *mut Self { + unsafe fn get_for_alloc(alloc: NonNull<[u8]>, alloc_align: usize) -> *mut Self { let alloc_end = nonnull_slice_end(alloc); let mut ptr = alloc_end.wrapping_sub(core::mem::size_of::()); // If `alloc_end` is not well-aligned, we need to adjust the location @@ -375,17 +380,28 @@ impl< // still uninitialized because this allocation is still in // `self.growable_pool`, so we only have to move // `PoolFtr::prev_alloc_end`. 
- let old_pool_ftr = PoolFtr::get_for_alloc( - nonnull_slice_from_raw_parts( - growable_pool.alloc_start, - growable_pool.alloc_len, - ), - self.source.min_align(), - ); - let new_pool_ftr = PoolFtr::get_for_alloc( - nonnull_slice_from_raw_parts(growable_pool.alloc_start, new_alloc_len), - self.source.min_align(), - ); + + // Safety: The memory range represented by `growable_pool` + // is dereferencable + let old_pool_ftr = unsafe { + PoolFtr::get_for_alloc( + nonnull_slice_from_raw_parts( + growable_pool.alloc_start, + growable_pool.alloc_len, + ), + self.source.min_align(), + ) + }; + + // Safety: The memory range represented by `growable_pool` + // extended to `new_alloc_len` is dereferencable + let new_pool_ftr = unsafe { + PoolFtr::get_for_alloc( + nonnull_slice_from_raw_parts(growable_pool.alloc_start, new_alloc_len), + self.source.min_align(), + ) + }; + // Safety: Both `*new_pool_ftr` and `*old_pool_ftr` // represent pool footers we control unsafe { *new_pool_ftr = *old_pool_ftr }; @@ -480,7 +496,8 @@ impl< if self.source.supports_dealloc() { // Link the new memory pool's `PoolFtr::prev_alloc_end` to the // previous pool (`self.growable_pool`). 
- let pool_ftr = PoolFtr::get_for_alloc(alloc, self.source.min_align()); + // Safety: `alloc` is dereferencable + let pool_ftr = unsafe { PoolFtr::get_for_alloc(alloc, self.source.min_align()) }; let prev_alloc = self .growable_pool .map(|p| nonnull_slice_from_raw_parts(p.alloc_start, p.alloc_len)); @@ -492,7 +509,8 @@ if use_growable_pool { self.growable_pool = Some(Pool { alloc_start: nonnull_slice_start(alloc), - alloc_len: nonnull_slice_len(alloc), + // Safety: `alloc` is dereferencable + alloc_len: unsafe { nonnull_slice_len(alloc) }, pool_len, }); } diff --git a/crates/rlsf/src/utils.rs b/crates/rlsf/src/utils.rs index f608ae9..665fde4 100644 --- a/crates/rlsf/src/utils.rs +++ b/crates/rlsf/src/utils.rs @@ -7,15 +7,19 @@ pub fn nonnull_slice_from_raw_parts(ptr: NonNull, len: usize) -> NonNull<[ } /// Polyfill for +/// +/// # Safety +/// +/// `ptr` must be dereferencable. This is a limitation of the polyfill. #[inline] -pub fn nonnull_slice_len(ptr: NonNull<[T]>) -> usize { +pub unsafe fn nonnull_slice_len(ptr: NonNull<[T]>) -> usize { // FIXME: Use `NonNull<[T]>::len` (stabilized in Rust 1.63) // Safety: We are just reading the slice length embedded in the fat // pointer and not dereferencing the pointer. We also convert it // to `*mut [MaybeUninit>]` just in case because the // slice might be uninitialized and there might be outstanding // mutable references to the slice. - unsafe { (&*(ptr.as_ptr() as *const [MaybeUninit>])).len() } + (&*(ptr.as_ptr() as *const [MaybeUninit>])).len() } // Polyfill for @@ -24,7 +28,12 @@ pub fn nonnull_slice_start(ptr: NonNull<[T]>) -> NonNull { unsafe { NonNull::new_unchecked(ptr.as_ptr() as *mut T) } } +/// Get the one-past-end pointer of a slice pointer. +/// +/// # Safety +/// +/// `ptr` must be dereferencable. This is a limitation of [`nonnull_slice_len`]. 
#[inline] -pub fn nonnull_slice_end(ptr: NonNull<[T]>) -> *mut T { +pub unsafe fn nonnull_slice_end(ptr: NonNull<[T]>) -> *mut T { (ptr.as_ptr() as *mut T).wrapping_add(nonnull_slice_len(ptr)) } From 3425c7a13ca96d8e19f98b5e85601b2e878cfe59 Mon Sep 17 00:00:00 2001 From: yvt Date: Sat, 22 Nov 2025 14:49:59 +0900 Subject: [PATCH 03/11] test(flex): slice pool without borrowing entire pool `&mut self.pool[$expr]` would invalidate the previously returned slice pointer. --- crates/rlsf/src/flex/tests.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/crates/rlsf/src/flex/tests.rs b/crates/rlsf/src/flex/tests.rs index 5c137da..b15602e 100644 --- a/crates/rlsf/src/flex/tests.rs +++ b/crates/rlsf/src/flex/tests.rs @@ -140,7 +140,21 @@ unsafe impl FlexSource for CgFlexSource { .filter(|&x| x <= self.pool.len())?; self.allocated = new_allocated; - Some(NonNull::from(&mut self.pool[allocated..new_allocated])) + + // Slice the allocated of `self.pool`. + // + // - Do not do `&mut self.pool[..]` because mutably borrowing the whole + // `*self.pool` would invalidate the borrows made by previous + // allocations. + // + // - Do not create a mutable reference even to the sliced out part + // because the resulting pointer could not be expanded by + // `realloc_inplace_grow`. + let pool_ptr = self.pool.as_mut_ptr(); + Some(crate::utils::nonnull_slice_from_raw_parts( + NonNull::new(pool_ptr.add(allocated)).unwrap(), + new_allocated - allocated, + )) } unsafe fn realloc_inplace_grow( From 5ddadf003ac63eda5fbdc6244528342d38fd841a Mon Sep 17 00:00:00 2001 From: yvt Date: Sat, 22 Nov 2025 11:47:32 +0900 Subject: [PATCH 04/11] fix(tlsf): take `*const BlockHdr` instead of `&BlockHdr` in `next_phys_block` `&BlockHdr` lacks permission to access the next `BlockHdr`, which `BlockHdr::next_phys_block` returns. 
--- crates/rlsf/src/tlsf.rs | 52 +++++++++++++++++++++++------------------ 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/crates/rlsf/src/tlsf.rs b/crates/rlsf/src/tlsf.rs index 96baa49..e61df6e 100644 --- a/crates/rlsf/src/tlsf.rs +++ b/crates/rlsf/src/tlsf.rs @@ -9,7 +9,7 @@ use core::{ marker::PhantomData, mem::{self, MaybeUninit}, num::NonZeroUsize, - ptr::{addr_of, NonNull}, + ptr::{addr_of, addr_of_mut, NonNull}, }; use crate::{ @@ -136,18 +136,19 @@ impl BlockHdr { /// /// # Safety /// - /// `self` must have a next block (it must not be the sentinel block in a + /// `this.size` must be safe to read. + /// + /// `this` must have a next block (it must not be the sentinel block in a /// pool). #[inline] - unsafe fn next_phys_block(&self) -> NonNull { - debug_assert!( - (self.size & SIZE_SENTINEL) == 0, - "`self` must not be a sentinel" - ); + unsafe fn next_phys_block(this: *const Self) -> NonNull { + let size = (*this).size; + + debug_assert!((size & SIZE_SENTINEL) == 0, "`self` must not be a sentinel"); // Safety: Since `self.size & SIZE_SENTINEL` is not lying, the // next block should exist at a non-null location. 
- NonNull::new_unchecked((self as *const _ as *mut u8).add(self.size & SIZE_SIZE_MASK)).cast() + NonNull::new_unchecked((this as *mut u8).add(size & SIZE_SIZE_MASK)).cast() } } @@ -520,10 +521,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons }; // Cap the end with a sentinel block (a permanently-used block) - let mut sentinel_block = block - .as_ref() - .common - .next_phys_block() + let mut sentinel_block = BlockHdr::next_phys_block(addr_of_mut!(block.as_mut().common)) .cast::(); sentinel_block.as_mut().common = BlockHdr { @@ -791,12 +789,13 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons // Get a free block: `block` let first_free = self.first_free.get_unchecked_mut(fl).get_unchecked_mut(sl); - let block = first_free.unwrap_or_else(|| { + let mut block = first_free.unwrap_or_else(|| { debug_assert!(false, "bitmap outdated"); // Safety: It's unreachable unreachable_unchecked() }); - let mut next_phys_block = block.as_ref().common.next_phys_block(); + let mut next_phys_block = + BlockHdr::next_phys_block(addr_of_mut!(block.as_mut().common)); let size_and_flags = block.as_ref().common.size; let size = size_and_flags /* size_and_flags & SIZE_SIZE_MASK */; debug_assert_eq!(size, size_and_flags & SIZE_SIZE_MASK); @@ -1041,7 +1040,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons // Merge the created hole with the next block if the next block is a // free block // Safety: `block.common` should be fully up-to-date and valid - let next_phys_block = block.as_ref().next_phys_block(); + let next_phys_block = BlockHdr::next_phys_block(block.as_ptr()); let next_phys_block_size_and_flags = next_phys_block.as_ref().size; if (next_phys_block_size_and_flags & SIZE_USED) == 0 { let next_phys_block_size = next_phys_block_size_and_flags; @@ -1055,7 +1054,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons // Safety: `next_phys_block` is a free 
block and therefore is not a // sentinel block - new_next_phys_block = next_phys_block.as_ref().next_phys_block(); + new_next_phys_block = BlockHdr::next_phys_block(next_phys_block.as_ptr()); // Unlink `next_phys_block`. self.unlink_free_block(next_phys_block.cast(), next_phys_block_size); @@ -1092,11 +1091,14 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons block.as_mut().size = size; // Link this free block to the corresponding free list - let block = block.cast::(); + let mut block = block.cast::(); self.link_free_block(block, size); // Link `new_next_phys_block.prev_phys_block` to `block` - debug_assert_eq!(new_next_phys_block, block.as_ref().common.next_phys_block()); + debug_assert_eq!( + new_next_phys_block, + BlockHdr::next_phys_block(addr_of_mut!(block.as_mut().common)) + ); new_next_phys_block.as_mut().prev_phys_block = Some(block.cast()); } @@ -1250,7 +1252,8 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons let mut new_free_block_size = shrink_by; // If the next block is a free block... 
- let mut next_phys_block = block.as_ref().common.next_phys_block(); + let mut next_phys_block = + BlockHdr::next_phys_block(addr_of_mut!(block.as_mut().common)); let next_phys_block_size_and_flags = next_phys_block.as_ref().size; if (next_phys_block_size_and_flags & SIZE_USED) == 0 { let next_phys_block_size = next_phys_block_size_and_flags; @@ -1264,7 +1267,8 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons self.unlink_free_block(next_phys_block.cast(), next_phys_block_size); new_free_block_size += next_phys_block_size; - let mut next_next_phys_block = next_phys_block.as_ref().next_phys_block(); + let mut next_next_phys_block = + BlockHdr::next_phys_block(next_phys_block.as_ptr()); next_next_phys_block.as_mut().prev_phys_block = Some(new_free_block.cast()); } else { // We can't merge a used block (`next_phys_block`) and @@ -1290,7 +1294,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons debug_assert!(new_size > old_size); let grow_by = new_size - old_size; - let next_phys_block = block.as_ref().common.next_phys_block(); + let next_phys_block = BlockHdr::next_phys_block(addr_of_mut!(block.as_mut().common)); // If we removed this block, there would be a continous free space of // `moving_clearance` bytes, which is followed by `moving_clearance_end` @@ -1315,7 +1319,8 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons // Now we know it's really a free block. 
let mut next_phys_block = next_phys_block.cast::(); - let mut next_next_phys_block = next_phys_block.as_ref().common.next_phys_block(); + let mut next_next_phys_block = + BlockHdr::next_phys_block(addr_of_mut!(next_phys_block.as_mut().common)); moving_clearance += next_phys_block_size; moving_clearance_end = next_next_phys_block; @@ -1443,7 +1448,8 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons self.unlink_free_block(moving_clearance_end.cast(), moving_clearance_end_size); new_free_block_size += moving_clearance_end_size_and_flags; - let mut next_next_phys_block = moving_clearance_end.as_ref().next_phys_block(); + let mut next_next_phys_block = + BlockHdr::next_phys_block(moving_clearance_end.as_mut()); next_next_phys_block.as_mut().prev_phys_block = Some(new_free_block.cast()); } else { // We can't merge a used block (`moving_clearance_end`) and From cc74c94b06b89b79d8525a002bb8e628f8d253a5 Mon Sep 17 00:00:00 2001 From: yvt Date: Sat, 22 Nov 2025 15:35:31 +0900 Subject: [PATCH 05/11] fix(tlsf): do not pass borrow to `BlockHdr::next_phys_block` A borrowed `*BlockHdr` lacks permission to access the adjacent blocks. 
--- crates/rlsf/src/lib.rs | 4 +++- crates/rlsf/src/tlsf.rs | 22 ++++++++++------------ crates/rlsf/src/utils.rs | 7 +++++++ 3 files changed, 20 insertions(+), 13 deletions(-) diff --git a/crates/rlsf/src/lib.rs b/crates/rlsf/src/lib.rs index 3ce959a..b36af0f 100644 --- a/crates/rlsf/src/lib.rs +++ b/crates/rlsf/src/lib.rs @@ -6,10 +6,12 @@ #[doc = include_str!("../CHANGELOG.md")] pub mod _changelog_ {} +#[macro_use] +mod utils; + mod flex; pub mod int; mod tlsf; -mod utils; pub use self::{ flex::*, tlsf::{Tlsf, GRANULARITY}, diff --git a/crates/rlsf/src/tlsf.rs b/crates/rlsf/src/tlsf.rs index e61df6e..f36fac6 100644 --- a/crates/rlsf/src/tlsf.rs +++ b/crates/rlsf/src/tlsf.rs @@ -9,7 +9,7 @@ use core::{ marker::PhantomData, mem::{self, MaybeUninit}, num::NonZeroUsize, - ptr::{addr_of, addr_of_mut, NonNull}, + ptr::{addr_of, NonNull}, }; use crate::{ @@ -521,8 +521,8 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons }; // Cap the end with a sentinel block (a permanently-used block) - let mut sentinel_block = BlockHdr::next_phys_block(addr_of_mut!(block.as_mut().common)) - .cast::(); + let mut sentinel_block = + BlockHdr::next_phys_block(nn_field!(block, common)).cast::(); sentinel_block.as_mut().common = BlockHdr { size: GRANULARITY | SIZE_USED | SIZE_SENTINEL, @@ -789,13 +789,12 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons // Get a free block: `block` let first_free = self.first_free.get_unchecked_mut(fl).get_unchecked_mut(sl); - let mut block = first_free.unwrap_or_else(|| { + let block = first_free.unwrap_or_else(|| { debug_assert!(false, "bitmap outdated"); // Safety: It's unreachable unreachable_unchecked() }); - let mut next_phys_block = - BlockHdr::next_phys_block(addr_of_mut!(block.as_mut().common)); + let mut next_phys_block = BlockHdr::next_phys_block(nn_field!(block, common)); let size_and_flags = block.as_ref().common.size; let size = size_and_flags /* size_and_flags & 
SIZE_SIZE_MASK */; debug_assert_eq!(size, size_and_flags & SIZE_SIZE_MASK); @@ -1091,13 +1090,13 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons block.as_mut().size = size; // Link this free block to the corresponding free list - let mut block = block.cast::(); + let block = block.cast::(); self.link_free_block(block, size); // Link `new_next_phys_block.prev_phys_block` to `block` debug_assert_eq!( new_next_phys_block, - BlockHdr::next_phys_block(addr_of_mut!(block.as_mut().common)) + BlockHdr::next_phys_block(nn_field!(block, common)) ); new_next_phys_block.as_mut().prev_phys_block = Some(block.cast()); } @@ -1252,8 +1251,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons let mut new_free_block_size = shrink_by; // If the next block is a free block... - let mut next_phys_block = - BlockHdr::next_phys_block(addr_of_mut!(block.as_mut().common)); + let mut next_phys_block = BlockHdr::next_phys_block(nn_field!(block, common)); let next_phys_block_size_and_flags = next_phys_block.as_ref().size; if (next_phys_block_size_and_flags & SIZE_USED) == 0 { let next_phys_block_size = next_phys_block_size_and_flags; @@ -1294,7 +1292,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons debug_assert!(new_size > old_size); let grow_by = new_size - old_size; - let next_phys_block = BlockHdr::next_phys_block(addr_of_mut!(block.as_mut().common)); + let next_phys_block = BlockHdr::next_phys_block(nn_field!(block, common)); // If we removed this block, there would be a continous free space of // `moving_clearance` bytes, which is followed by `moving_clearance_end` @@ -1320,7 +1318,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons // Now we know it's really a free block. 
let mut next_phys_block = next_phys_block.cast::(); let mut next_next_phys_block = - BlockHdr::next_phys_block(addr_of_mut!(next_phys_block.as_mut().common)); + BlockHdr::next_phys_block(nn_field!(next_phys_block, common)); moving_clearance += next_phys_block_size; moving_clearance_end = next_next_phys_block; diff --git a/crates/rlsf/src/utils.rs b/crates/rlsf/src/utils.rs index 665fde4..5ea23ef 100644 --- a/crates/rlsf/src/utils.rs +++ b/crates/rlsf/src/utils.rs @@ -37,3 +37,10 @@ pub fn nonnull_slice_start(ptr: NonNull<[T]>) -> NonNull { pub unsafe fn nonnull_slice_end(ptr: NonNull<[T]>) -> *mut T { (ptr.as_ptr() as *mut T).wrapping_add(nonnull_slice_len(ptr)) } + +/// Get a pointer to a field in `NonNull`. +macro_rules! nn_field { + ($ptr:expr, $($tt:tt)*) => { + core::ptr::addr_of_mut!((*$ptr.as_ptr()).$($tt)*) + }; +} From 27cb69f350aee3ff04b3faf83cae1c4b0565f23d Mon Sep 17 00:00:00 2001 From: yvt Date: Sat, 22 Nov 2025 15:35:56 +0900 Subject: [PATCH 06/11] fix(tlsf): do not borrow uninited `*BlockHdr` Get a raw pointer to a specific field and read or write to it instead of borrowing a whole `{Free,Used}BlockHdr` unless it is known to be fully initialized. --- crates/rlsf/src/tlsf.rs | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/crates/rlsf/src/tlsf.rs b/crates/rlsf/src/tlsf.rs index f36fac6..aab2b93 100644 --- a/crates/rlsf/src/tlsf.rs +++ b/crates/rlsf/src/tlsf.rs @@ -369,7 +369,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons /// contain `block`. 
/// #[cfg_attr(target_arch = "wasm32", inline(never))] - unsafe fn link_free_block(&mut self, mut block: NonNull, size: usize) { + unsafe fn link_free_block(&mut self, block: NonNull, size: usize) { let (fl, sl) = Self::map_floor(size).unwrap_or_else(|| { debug_assert!(false, "could not map size {}", size); // Safety: It's unreachable @@ -377,8 +377,8 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons }); let first_free = &mut self.first_free[fl][sl]; let next_free = first_free.replace(block); - block.as_mut().next_free = next_free; - block.as_mut().prev_free = None; + *nn_field!(block, next_free) = next_free; + *nn_field!(block, prev_free) = None; if let Some(mut next_free) = next_free { next_free.as_mut().prev_free = Some(block); } @@ -512,19 +512,19 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons // The new free block // Safety: `cursor` is not zero. - let mut block = NonNull::new_unchecked(cursor as *mut FreeBlockHdr); + let block = NonNull::new_unchecked(cursor as *mut FreeBlockHdr); // Initialize the new free block - block.as_mut().common = BlockHdr { + *nn_field!(block, common) = BlockHdr { size: chunk_size - GRANULARITY, prev_phys_block: None, }; // Cap the end with a sentinel block (a permanently-used block) - let mut sentinel_block = + let sentinel_block = BlockHdr::next_phys_block(nn_field!(block, common)).cast::(); - sentinel_block.as_mut().common = BlockHdr { + *nn_field!(sentinel_block, common) = BlockHdr { size: GRANULARITY | SIZE_USED | SIZE_SENTINEL, prev_phys_block: Some(block.cast()), }; @@ -844,7 +844,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons // The allocation partially fills this free block. Create a new // free block header at `block + new_size..block + size` // of length (`new_free_block_size`). 
- let mut new_free_block: NonNull = + let new_free_block: NonNull = NonNull::new_unchecked(block.cast::().as_ptr().add(new_size)).cast(); let new_free_block_size = size - new_size; @@ -855,7 +855,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons next_phys_block.as_mut().prev_phys_block = Some(new_free_block.cast()); // Create the new free block header - new_free_block.as_mut().common = BlockHdr { + *nn_field!(new_free_block, common) = BlockHdr { size: new_free_block_size, prev_phys_block: Some(block.cast()), }; @@ -1246,7 +1246,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons let shrink_by = old_size - new_size; // We will create a new free block at this address - let mut new_free_block: NonNull = + let new_free_block: NonNull = NonNull::new_unchecked(block.cast::().as_ptr().add(new_size)).cast(); let mut new_free_block_size = shrink_by; @@ -1274,7 +1274,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons next_phys_block.as_mut().prev_phys_block = Some(new_free_block.cast()); } - new_free_block.as_mut().common = BlockHdr { + *nn_field!(new_free_block, common) = BlockHdr { size: new_free_block_size, prev_phys_block: Some(block.cast()), }; @@ -1337,7 +1337,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons next_phys_block = NonNull::new_unchecked(block.cast::().as_ptr().add(new_size)).cast(); - next_phys_block.as_mut().common = BlockHdr { + *nn_field!(next_phys_block, common) = BlockHdr { size: next_phys_block_size, prev_phys_block: Some(block.cast()), }; @@ -1428,7 +1428,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons // The allocation partially fills this free block. Create a new // free block header at `new_block + new_size..new_block // + moving_clearance`. 
- let mut new_free_block: NonNull = + let new_free_block: NonNull = NonNull::new_unchecked(new_block.cast::().as_ptr().add(new_size)).cast(); let mut new_free_block_size = moving_clearance - new_size; @@ -1455,7 +1455,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons moving_clearance_end.as_mut().prev_phys_block = Some(new_free_block.cast()); } - new_free_block.as_mut().common = BlockHdr { + *nn_field!(new_free_block, common) = BlockHdr { size: new_free_block_size, prev_phys_block: Some(new_block.cast()), }; From e219d3baa54ea2e7d373874d0420f4c09c8e943a Mon Sep 17 00:00:00 2001 From: yvt Date: Sat, 22 Nov 2025 14:25:51 +0900 Subject: [PATCH 07/11] test(flex): call `ShadowAllocator::remove_pool` before deallocating range `ShadowAllocator::remove_pool` needs a dereferencable slice pointer to get its length because `<*const [T]>::len` is not stable yet in the MSRV. --- crates/rlsf/src/flex/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rlsf/src/flex/tests.rs b/crates/rlsf/src/flex/tests.rs index b15602e..e619d5b 100644 --- a/crates/rlsf/src/flex/tests.rs +++ b/crates/rlsf/src/flex/tests.rs @@ -86,9 +86,9 @@ unsafe impl FlexSource for TrackingFlexSource { unsafe fn dealloc(&mut self, ptr: NonNull<[u8]>) { // TODO: check that `ptr` represents an exact allocation, not just // a part of it + self.sa.remove_pool(ptr.as_ptr()); self.inner.dealloc(ptr); log::trace!("FlexSource::dealloc({:?})", ptr); - self.sa.remove_pool(ptr.as_ptr()); } #[inline] From e84f135c556fa4a66b09336874a95e7b3a7bc083 Mon Sep 17 00:00:00 2001 From: yvt Date: Sat, 22 Nov 2025 14:26:27 +0900 Subject: [PATCH 08/11] test: do not assume immutable and inited slice pointer in `ShadowAllocator::remove_pool` --- crates/rlsf/src/tests.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/crates/rlsf/src/tests.rs b/crates/rlsf/src/tests.rs index 197256a..c87661f 100644 --- a/crates/rlsf/src/tests.rs +++ 
b/crates/rlsf/src/tests.rs @@ -1,4 +1,7 @@ -use std::{alloc::Layout, collections::BTreeMap, ops::Range, prelude::v1::*, ptr::NonNull}; +use std::{ + alloc::Layout, cell::UnsafeCell, collections::BTreeMap, mem::MaybeUninit, ops::Range, + prelude::v1::*, ptr::NonNull, +}; #[derive(Debug)] pub struct ShadowAllocator { @@ -121,7 +124,9 @@ impl ShadowAllocator { pub fn remove_pool(&mut self, range: *const [T]) { let start = range as *const T as usize; - let end = unsafe { &*range }.len() + start; + // FIXME: Use `<*const [T]>::len` (stabilized in Rust 1.79) + // FIXME: Or at least `NonNull<[T]>::len` (stabilized in Rust 1.63) + let end = unsafe { &*(range as *const [MaybeUninit>]) }.len() + start; if start >= end { return; } From e5538d7c7ec969d70a9fab887a5f24aade457e81 Mon Sep 17 00:00:00 2001 From: yvt Date: Sat, 22 Nov 2025 15:43:37 +0900 Subject: [PATCH 09/11] test(tlsf): do not create pool ptr from mut ref to first byte of pool A pointer created from `pool.0[0]` only has permission for `pool.0[0]`, not whole `pool.0`. --- crates/rlsf/src/tlsf/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/rlsf/src/tlsf/tests.rs b/crates/rlsf/src/tlsf/tests.rs index e9f99ef..82c2d7f 100644 --- a/crates/rlsf/src/tlsf/tests.rs +++ b/crates/rlsf/src/tlsf/tests.rs @@ -139,8 +139,8 @@ macro_rules! 
gen_test { let mut tlsf: TheTlsf = Tlsf::new(); - let mut pool = Align([MaybeUninit::uninit(); 512]); - let mut cursor = pool.0[0].as_mut_ptr() as *mut u8; + let mut pool = Align([MaybeUninit::::uninit(); 512]); + let mut cursor = pool.0.as_mut_ptr() as *mut u8; let mut remaining_len = 512; let pool0_len = unsafe { From 9be6e88f56ea20f3e3f419890c899625d0926473 Mon Sep 17 00:00:00 2001 From: yvt Date: Sun, 23 Nov 2025 11:10:32 +0900 Subject: [PATCH 10/11] chore(ci): add miri workflow --- .github/workflows/ci.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c654442..c0fc59c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -77,3 +77,19 @@ jobs: with: command: test args: --target wasm32-wasi -p rlsf nonexistent + + test-miri: + name: Miri + runs-on: ubuntu-24.04 + timeout-minutes: 20 + steps: + - uses: actions/checkout@v2 + + - name: Install nightly toolchain + run: | + echo nightly > rust-toolchain + rustup component add miri rust-src + + - name: cargo miri test (partial) + # Miri is too slow to run all tests + run: cargo miri test -p rlsf --lib -- _u8_u8_8_8 global From 537c407ec734ce7783e95d2cdc05fdc8dd1c0f12 Mon Sep 17 00:00:00 2001 From: yvt Date: Thu, 27 Nov 2025 09:37:23 +0900 Subject: [PATCH 11/11] doc(changelog): add entry about resolving illegal ptr ops --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a1421ff..d18283a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. - `GlobalTlsf` now implements `Default`. +### Fixed + +- Fixed illegal pointer operations. `{Flex,}Tlsf` no longer trigger errors when running in MIRI. + ## [0.2.1] - 2023-02-17 ### Fixed