16 changes: 16 additions & 0 deletions .github/workflows/ci.yml
@@ -77,3 +77,19 @@ jobs:
with:
command: test
args: --target wasm32-wasi -p rlsf nonexistent

test-miri:
name: Miri
runs-on: ubuntu-24.04
timeout-minutes: 20
steps:
- uses: actions/checkout@v2

- name: Install nightly toolchain
run: |
echo nightly > rust-toolchain
rustup component add miri rust-src

- name: cargo miri test (partial)
# Miri is too slow to run all tests
run: cargo miri test -p rlsf --lib -- _u8_u8_8_8 global
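
The new job can be reproduced locally with the same three steps the workflow encodes; the trailing arguments (`_u8_u8_8_8`, `global`) are test-name filters that restrict which tests run, since, as the comment notes, Miri is too slow for the full suite:

```sh
# Mirrors the CI steps above; Miri is a nightly-only component.
echo nightly > rust-toolchain
rustup component add miri rust-src
# The filters keep the Miri run fast by selecting a subset of tests by name.
cargo miri test -p rlsf --lib -- _u8_u8_8_8 global
```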
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -11,6 +11,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.

- `GlobalTlsf` now implements `Default`.

### Fixed

- Fixed illegal pointer operations. `{Flex,}Tlsf` no longer trigger errors when running under Miri.

## [0.2.1] - 2023-02-17

### Fixed
46 changes: 32 additions & 14 deletions crates/rlsf/src/flex.rs
@@ -11,102 +11,102 @@
};

/// The trait for dynamic storage allocators that can back [`FlexTlsf`].
pub unsafe trait FlexSource {
/// Allocate a memory block of the requested minimum size.
///
/// Returns the address range of the allocated memory block.
///
/// # Safety
///
/// `min_size` must be a multiple of [`GRANULARITY`]. `min_size` must not
/// be zero.
#[inline]
unsafe fn alloc(&mut self, min_size: usize) -> Option<NonNull<[u8]>> {
let _ = min_size;
None
}

/// Attempt to grow the specified allocation without moving it. Returns
/// the final allocation size (which must be greater than or equal to
/// `min_new_len`) on success.
///
/// # Safety
///
/// `ptr` must be an existing allocation made by this
/// allocator. `min_new_len` must be greater than or equal to `ptr.len()`.
#[inline]
unsafe fn realloc_inplace_grow(
&mut self,
ptr: NonNull<[u8]>,
min_new_len: usize,
) -> Option<usize> {
let _ = (ptr, min_new_len);
None
}

/// Deallocate a previously allocated memory block.
///
/// # Safety
///
/// `ptr` must denote an existing allocation made by this allocator.
#[inline]
unsafe fn dealloc(&mut self, ptr: NonNull<[u8]>) {
let _ = ptr;
unimplemented!("`supports_dealloc` returned `true`, but `dealloc` is not implemented");
}

/// Check if this allocator implements [`Self::dealloc`].
///
/// If this method returns `false`, [`FlexTlsf`] will not call `dealloc` to
/// release memory blocks, and it can apply some additional optimizations.
///
/// The returned value must be constant for a particular instance of `Self`.
#[inline]
fn supports_dealloc(&self) -> bool {
false
}

/// Check if this allocator implements [`Self::realloc_inplace_grow`].
///
/// If this method returns `false`, [`FlexTlsf`] will not call
/// `realloc_inplace_grow` to attempt to grow memory blocks, and it can
/// apply some additional optimizations.
///
/// The returned value must be constant for a particular instance of `Self`.
#[inline]
fn supports_realloc_inplace_grow(&self) -> bool {
false
}

/// Returns `true` if this allocator is implemented by managing one
/// contiguous region, which is grown every time `alloc` or
/// `realloc_inplace_grow` is called.
///
/// For example, in WebAssembly, there is usually only one contiguous
/// memory region available for data processing, and the only way to acquire
/// more memory is to grow this region by executing `memory.grow`
/// instructions. An implementation of `FlexSource` based on this system
/// would use this instruction to implement both `alloc` and
/// `realloc_inplace_grow` methods. Therefore, it's pointless for
/// [`FlexTlsf`] to call `alloc` when `realloc_inplace_grow` fails. This
/// method can be used to remove such redundant calls to `alloc`.
///
/// The returned value must be constant for a particular instance of `Self`.
#[inline]
fn is_contiguous_growable(&self) -> bool {
false
}

/// Get the minimum alignment of allocations made by this allocator.
/// [`FlexTlsf`] may be less efficient if this method returns a value
/// less than [`GRANULARITY`].
///
/// The returned value must be constant for a particular instance of `Self`.
#[inline]
fn min_align(&self) -> usize {
1
}
}

Check warning on line 109 in crates/rlsf/src/flex.rs (GitHub Actions / clippy):

warning: docs for unsafe trait missing `# Safety` section
  --> crates/rlsf/src/flex.rs:14:1
   = note: `#[warn(clippy::missing_safety_doc)]` on by default
   = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#missing_safety_doc
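
Since `FlexSource` is the trait whose contracts this PR tightens, a minimal implementation sketch may help orient review. The `SystemSource` type below is hypothetical (not part of rlsf) and backs the trait with the std system allocator:

```rust
use core::ptr::NonNull;
use std::alloc::Layout;

use rlsf::{FlexSource, GRANULARITY};

/// Hypothetical `FlexSource` backed by the system allocator
/// (illustration only; not one of rlsf's own sources).
struct SystemSource;

unsafe impl FlexSource for SystemSource {
    unsafe fn alloc(&mut self, min_size: usize) -> Option<NonNull<[u8]>> {
        // The caller guarantees `min_size` is a non-zero multiple of
        // `GRANULARITY`, so this layout is always valid.
        let layout = Layout::from_size_align(min_size, GRANULARITY).ok()?;
        let ptr = NonNull::new(std::alloc::alloc(layout))?;
        Some(NonNull::slice_from_raw_parts(ptr, min_size))
    }

    unsafe fn dealloc(&mut self, ptr: NonNull<[u8]>) {
        // `ptr` is exactly what `alloc` returned, so the layout matches.
        let layout = Layout::from_size_align_unchecked(ptr.len(), GRANULARITY);
        std::alloc::dealloc(ptr.as_ptr().cast(), layout);
    }

    fn supports_dealloc(&self) -> bool {
        true // `FlexTlsf` may now call `dealloc` to release pools
    }

    fn min_align(&self) -> usize {
        GRANULARITY
    }
}
```

The remaining methods keep their defaults (`supports_realloc_inplace_grow` and `is_contiguous_growable` return `false`), which is the correct answer for a source that cannot grow allocations in place.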

trait FlexSourceExt: FlexSource {
#[inline]
@@ -222,8 +222,13 @@

impl PoolFtr {
/// Get a pointer to `PoolFtr` for a given allocation.
///
/// # Safety
///
/// `ptr` must be dereferenceable. This is a limitation of
/// [`nonnull_slice_end`].
#[inline]
fn get_for_alloc(alloc: NonNull<[u8]>, alloc_align: usize) -> *mut Self {
unsafe fn get_for_alloc(alloc: NonNull<[u8]>, alloc_align: usize) -> *mut Self {
let alloc_end = nonnull_slice_end(alloc);
let mut ptr = alloc_end.wrapping_sub(core::mem::size_of::<Self>());
// If `alloc_end` is not well-aligned, we need to adjust the location
@@ -375,17 +380,28 @@
// still uninitialized because this allocation is still in
// `self.growable_pool`, so we only have to move
// `PoolFtr::prev_alloc_end`.
let old_pool_ftr = PoolFtr::get_for_alloc(
nonnull_slice_from_raw_parts(
growable_pool.alloc_start,
growable_pool.alloc_len,
),
self.source.min_align(),
);
let new_pool_ftr = PoolFtr::get_for_alloc(
nonnull_slice_from_raw_parts(growable_pool.alloc_start, new_alloc_len),
self.source.min_align(),
);

// Safety: The memory range represented by `growable_pool`
// is dereferenceable
let old_pool_ftr = unsafe {
PoolFtr::get_for_alloc(
nonnull_slice_from_raw_parts(
growable_pool.alloc_start,
growable_pool.alloc_len,
),
self.source.min_align(),
)
};

// Safety: The memory range represented by `growable_pool`
// extended to `new_alloc_len` is dereferenceable
let new_pool_ftr = unsafe {
PoolFtr::get_for_alloc(
nonnull_slice_from_raw_parts(growable_pool.alloc_start, new_alloc_len),
self.source.min_align(),
)
};

// Safety: Both `*new_pool_ftr` and `*old_pool_ftr`
// represent pool footers we control
unsafe { *new_pool_ftr = *old_pool_ftr };
@@ -480,7 +496,8 @@
if self.source.supports_dealloc() {
// Link the new memory pool's `PoolFtr::prev_alloc_end` to the
// previous pool (`self.growable_pool`).
let pool_ftr = PoolFtr::get_for_alloc(alloc, self.source.min_align());
// Safety: `alloc` is dereferenceable
let pool_ftr = unsafe { PoolFtr::get_for_alloc(alloc, self.source.min_align()) };
let prev_alloc = self
.growable_pool
.map(|p| nonnull_slice_from_raw_parts(p.alloc_start, p.alloc_len));
@@ -492,7 +509,8 @@
if use_growable_pool {
self.growable_pool = Some(Pool {
alloc_start: nonnull_slice_start(alloc),
alloc_len: nonnull_slice_len(alloc),
// Safety: `alloc` is dereferenceable
alloc_len: unsafe { nonnull_slice_len(alloc) },
pool_len,
});
}
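The new `# Safety` requirement on `get_for_alloc` exists because `nonnull_slice_end` computes a one-past-the-end pointer with a non-wrapping `add`. The helper itself is outside this diff; a plausible reconstruction (the real one lives in `crates/rlsf/src/utils.rs` and may differ) shows why the contract is forced:

```rust
use core::ptr::NonNull;

/// Hypothetical sketch of `nonnull_slice_end`.
///
/// # Safety
///
/// `slice` must be dereferenceable: `<*mut u8>::add` requires the whole
/// range, including the one-past-the-end address, to lie within a single
/// allocation.
unsafe fn nonnull_slice_end(slice: NonNull<[u8]>) -> *mut u8 {
    slice.as_ptr().cast::<u8>().add(slice.len())
}
```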
18 changes: 16 additions & 2 deletions crates/rlsf/src/flex/tests.rs
@@ -86,9 +86,9 @@ unsafe impl<T: FlexSource> FlexSource for TrackingFlexSource<T> {
unsafe fn dealloc(&mut self, ptr: NonNull<[u8]>) {
// TODO: check that `ptr` represents an exact allocation, not just
// a part of it
self.sa.remove_pool(ptr.as_ptr());
self.inner.dealloc(ptr);
log::trace!("FlexSource::dealloc({:?})", ptr);
self.sa.remove_pool(ptr.as_ptr());
}

#[inline]
@@ -140,7 +140,21 @@ unsafe impl FlexSource for CgFlexSource {
.filter(|&x| x <= self.pool.len())?;

self.allocated = new_allocated;
Some(NonNull::from(&mut self.pool[allocated..new_allocated]))

// Slice the newly allocated part of `self.pool`.
//
// - Do not do `&mut self.pool[..]` because mutably borrowing the whole
// `*self.pool` would invalidate the borrows made by previous
// allocations.
//
// - Do not create a mutable reference even to the sliced out part
// because the resulting pointer could not be expanded by
// `realloc_inplace_grow`.
let pool_ptr = self.pool.as_mut_ptr();
Some(crate::utils::nonnull_slice_from_raw_parts(
NonNull::new(pool_ptr.add(allocated)).unwrap(),
new_allocated - allocated,
))
}

unsafe fn realloc_inplace_grow(
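The comment in `CgFlexSource::alloc` above is the crux of several fixes in this PR. A stand-alone sketch of the hazard under the Stacked Borrows rules Miri enforces (hypothetical, not from the PR):

```rust
fn main() {
    let mut pool = [0u8; 64];

    // First "allocation": a pointer derived from a borrow of part of `pool`.
    let first: *mut [u8] = &mut pool[..32];

    // Second "allocation" taken `&mut self.pool[..]`-style: reborrowing the
    // whole buffer invalidates every pointer derived from earlier borrows.
    let _second: *mut [u8] = &mut pool[..];

    // Miri therefore flags this write, even though nothing ever reads or
    // writes these bytes through `_second`.
    unsafe { (*first)[0] = 1 };
}
```

Deriving every returned slice from a single raw pointer (`self.pool.as_mut_ptr()`), as the new code does, keeps all of them valid simultaneously.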
4 changes: 3 additions & 1 deletion crates/rlsf/src/lib.rs
@@ -6,10 +6,12 @@
#[doc = include_str!("../CHANGELOG.md")]
pub mod _changelog_ {}

#[macro_use]
mod utils;

mod flex;
pub mod int;
mod tlsf;
mod utils;
pub use self::{
flex::*,
tlsf::{Tlsf, GRANULARITY},
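The `#[macro_use]` attribute is what makes the new `nn_field!` macro, used throughout `tlsf.rs` below, visible crate-wide. Its definition in `utils.rs` is not shown in this diff; judging by the call sites, a plausible reconstruction is:

```rust
/// Hypothetical reconstruction of `nn_field!` (the actual definition in
/// `utils.rs` may differ). It takes a `NonNull<T>` and a field name and
/// yields a raw `*mut` pointer to that field via `addr_of_mut!`, so no
/// intermediate `&`/`&mut`, and thus no Stacked Borrows retag, is created.
macro_rules! nn_field {
    ($nn:expr, $field:ident) => {
        core::ptr::addr_of_mut!((*$nn.as_ptr()).$field)
    };
}
```

That shape is consistent with every use site in this diff: `*nn_field!(block, common) = BlockHdr { .. }` writes through the returned pointer, and `BlockHdr::next_phys_block(nn_field!(block, common))` passes it as `*const BlockHdr`.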
9 changes: 7 additions & 2 deletions crates/rlsf/src/tests.rs
@@ -1,4 +1,7 @@
use std::{alloc::Layout, collections::BTreeMap, ops::Range, prelude::v1::*, ptr::NonNull};
use std::{
alloc::Layout, cell::UnsafeCell, collections::BTreeMap, mem::MaybeUninit, ops::Range,
prelude::v1::*, ptr::NonNull,
};

#[derive(Debug)]
pub struct ShadowAllocator {
@@ -121,7 +124,9 @@ impl ShadowAllocator {

pub fn remove_pool<T>(&mut self, range: *const [T]) {
let start = range as *const T as usize;
let end = unsafe { &*range }.len() + start;
// FIXME: Use `<*const [T]>::len` (stabilized in Rust 1.79)
// FIXME: Or at least `NonNull<[T]>::len` (stabilized in Rust 1.63)
// Casting to `MaybeUninit<UnsafeCell<T>>` first lets `len` be read without
// asserting that the elements are initialized or free from aliasing.
let end = unsafe { &*(range as *const [MaybeUninit<UnsafeCell<T>>]) }.len() + start;
if start >= end {
return;
}
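For reference, this is roughly what the two FIXME alternatives in `remove_pool` would look like once the MSRV allows (a sketch; assumes the surrounding code is otherwise unchanged):

```rust
// Rust >= 1.79: raw slice pointers expose `len` directly.
let end = range.len() + start;

// Rust >= 1.63: go through `NonNull<[T]>` instead.
let end = NonNull::new(range as *mut [T]).unwrap().len() + start;
```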
72 changes: 38 additions & 34 deletions crates/rlsf/src/tlsf.rs
@@ -136,18 +136,19 @@ impl BlockHdr {
///
/// # Safety
///
/// `self` must have a next block (it must not be the sentinel block in a
/// `this.size` must be safe to read.
///
/// `this` must have a next block (it must not be the sentinel block in a
/// pool).
#[inline]
unsafe fn next_phys_block(&self) -> NonNull<BlockHdr> {
debug_assert!(
(self.size & SIZE_SENTINEL) == 0,
"`self` must not be a sentinel"
);
unsafe fn next_phys_block(this: *const Self) -> NonNull<BlockHdr> {
let size = (*this).size;

debug_assert!((size & SIZE_SENTINEL) == 0, "`self` must not be a sentinel");

// Safety: Since `self.size & SIZE_SENTINEL` is not lying, the
// next block should exist at a non-null location.
NonNull::new_unchecked((self as *const _ as *mut u8).add(self.size & SIZE_SIZE_MASK)).cast()
NonNull::new_unchecked((this as *mut u8).add(size & SIZE_SIZE_MASK)).cast()
}
}
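
The receiver change from `&self` to `this: *const Self` is the heart of the fix in this file. Under the rules Miri checks, a raw pointer re-derived from a reference only carries provenance for that reference's pointee, so the old code's offset past the header's own bytes was undefined behavior even though the computed address was right. A minimal sketch of the contrast (hypothetical helper names):

```rust
// Old shape: a raw pointer re-derived from `&self` carries provenance for
// this `BlockHdr` only, so `add(size)` steps outside of it; Miri flags this
// even though the address is correct.
unsafe fn next_by_ref(hdr: &BlockHdr) -> *mut u8 {
    (hdr as *const BlockHdr as *mut u8).add(hdr.size & SIZE_SIZE_MASK)
}

// New shape: `this` keeps whatever provenance the caller's pointer had
// (typically the whole pool), so the same offset stays in bounds.
unsafe fn next_by_ptr(this: *const BlockHdr) -> *mut u8 {
    (this as *mut u8).add((*this).size & SIZE_SIZE_MASK)
}
```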

@@ -368,16 +369,16 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons
/// contain `block`.
///
#[cfg_attr(target_arch = "wasm32", inline(never))]
unsafe fn link_free_block(&mut self, mut block: NonNull<FreeBlockHdr>, size: usize) {
unsafe fn link_free_block(&mut self, block: NonNull<FreeBlockHdr>, size: usize) {
let (fl, sl) = Self::map_floor(size).unwrap_or_else(|| {
debug_assert!(false, "could not map size {}", size);
// Safety: It's unreachable
unreachable_unchecked()
});
let first_free = &mut self.first_free[fl][sl];
let next_free = first_free.replace(block);
block.as_mut().next_free = next_free;
block.as_mut().prev_free = None;
*nn_field!(block, next_free) = next_free;
*nn_field!(block, prev_free) = None;
if let Some(mut next_free) = next_free {
next_free.as_mut().prev_free = Some(block);
}
@@ -511,22 +512,19 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons

// The new free block
// Safety: `cursor` is not zero.
let mut block = NonNull::new_unchecked(cursor as *mut FreeBlockHdr);
let block = NonNull::new_unchecked(cursor as *mut FreeBlockHdr);

// Initialize the new free block
block.as_mut().common = BlockHdr {
*nn_field!(block, common) = BlockHdr {
size: chunk_size - GRANULARITY,
prev_phys_block: None,
};

// Cap the end with a sentinel block (a permanently-used block)
let mut sentinel_block = block
.as_ref()
.common
.next_phys_block()
.cast::<UsedBlockHdr>();
let sentinel_block =
BlockHdr::next_phys_block(nn_field!(block, common)).cast::<UsedBlockHdr>();

sentinel_block.as_mut().common = BlockHdr {
*nn_field!(sentinel_block, common) = BlockHdr {
size: GRANULARITY | SIZE_USED | SIZE_SENTINEL,
prev_phys_block: Some(block.cast()),
};
@@ -796,7 +794,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons
// Safety: It's unreachable
unreachable_unchecked()
});
let mut next_phys_block = block.as_ref().common.next_phys_block();
let mut next_phys_block = BlockHdr::next_phys_block(nn_field!(block, common));
let size_and_flags = block.as_ref().common.size;
let size = size_and_flags /* size_and_flags & SIZE_SIZE_MASK */;
debug_assert_eq!(size, size_and_flags & SIZE_SIZE_MASK);
@@ -846,7 +844,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons
// The allocation partially fills this free block. Create a new
// free block header at `block + new_size..block + size`
// of length (`new_free_block_size`).
let mut new_free_block: NonNull<FreeBlockHdr> =
let new_free_block: NonNull<FreeBlockHdr> =
NonNull::new_unchecked(block.cast::<u8>().as_ptr().add(new_size)).cast();
let new_free_block_size = size - new_size;

@@ -857,7 +855,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons
next_phys_block.as_mut().prev_phys_block = Some(new_free_block.cast());

// Create the new free block header
new_free_block.as_mut().common = BlockHdr {
*nn_field!(new_free_block, common) = BlockHdr {
size: new_free_block_size,
prev_phys_block: Some(block.cast()),
};
@@ -1041,7 +1039,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons
// Merge the created hole with the next block if the next block is a
// free block
// Safety: `block.common` should be fully up-to-date and valid
let next_phys_block = block.as_ref().next_phys_block();
let next_phys_block = BlockHdr::next_phys_block(block.as_ptr());
let next_phys_block_size_and_flags = next_phys_block.as_ref().size;
if (next_phys_block_size_and_flags & SIZE_USED) == 0 {
let next_phys_block_size = next_phys_block_size_and_flags;
@@ -1055,7 +1053,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons

// Safety: `next_phys_block` is a free block and therefore is not a
// sentinel block
new_next_phys_block = next_phys_block.as_ref().next_phys_block();
new_next_phys_block = BlockHdr::next_phys_block(next_phys_block.as_ptr());

// Unlink `next_phys_block`.
self.unlink_free_block(next_phys_block.cast(), next_phys_block_size);
@@ -1096,7 +1094,10 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons
self.link_free_block(block, size);

// Link `new_next_phys_block.prev_phys_block` to `block`
debug_assert_eq!(new_next_phys_block, block.as_ref().common.next_phys_block());
debug_assert_eq!(
new_next_phys_block,
BlockHdr::next_phys_block(nn_field!(block, common))
);
new_next_phys_block.as_mut().prev_phys_block = Some(block.cast());
}

@@ -1245,12 +1246,12 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons
let shrink_by = old_size - new_size;

// We will create a new free block at this address
let mut new_free_block: NonNull<FreeBlockHdr> =
let new_free_block: NonNull<FreeBlockHdr> =
NonNull::new_unchecked(block.cast::<u8>().as_ptr().add(new_size)).cast();
let mut new_free_block_size = shrink_by;

// If the next block is a free block...
let mut next_phys_block = block.as_ref().common.next_phys_block();
let mut next_phys_block = BlockHdr::next_phys_block(nn_field!(block, common));
let next_phys_block_size_and_flags = next_phys_block.as_ref().size;
if (next_phys_block_size_and_flags & SIZE_USED) == 0 {
let next_phys_block_size = next_phys_block_size_and_flags;
@@ -1264,15 +1265,16 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons
self.unlink_free_block(next_phys_block.cast(), next_phys_block_size);
new_free_block_size += next_phys_block_size;

let mut next_next_phys_block = next_phys_block.as_ref().next_phys_block();
let mut next_next_phys_block =
BlockHdr::next_phys_block(next_phys_block.as_ptr());
next_next_phys_block.as_mut().prev_phys_block = Some(new_free_block.cast());
} else {
// We can't merge a used block (`next_phys_block`) and
// a free block (`new_free_block`).
next_phys_block.as_mut().prev_phys_block = Some(new_free_block.cast());
}

new_free_block.as_mut().common = BlockHdr {
*nn_field!(new_free_block, common) = BlockHdr {
size: new_free_block_size,
prev_phys_block: Some(block.cast()),
};
@@ -1290,7 +1292,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons
debug_assert!(new_size > old_size);

let grow_by = new_size - old_size;
let next_phys_block = block.as_ref().common.next_phys_block();
let next_phys_block = BlockHdr::next_phys_block(nn_field!(block, common));

// If we removed this block, there would be a contiguous free space of
// `moving_clearance` bytes, which is followed by `moving_clearance_end`
@@ -1315,7 +1317,8 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons

// Now we know it's really a free block.
let mut next_phys_block = next_phys_block.cast::<FreeBlockHdr>();
let mut next_next_phys_block = next_phys_block.as_ref().common.next_phys_block();
let mut next_next_phys_block =
BlockHdr::next_phys_block(nn_field!(next_phys_block, common));

moving_clearance += next_phys_block_size;
moving_clearance_end = next_next_phys_block;
@@ -1334,7 +1337,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons

next_phys_block =
NonNull::new_unchecked(block.cast::<u8>().as_ptr().add(new_size)).cast();
next_phys_block.as_mut().common = BlockHdr {
*nn_field!(next_phys_block, common) = BlockHdr {
size: next_phys_block_size,
prev_phys_block: Some(block.cast()),
};
@@ -1425,7 +1428,7 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons
// The allocation partially fills this free block. Create a new
// free block header at `new_block + new_size..new_block
// + moving_clearance`.
let mut new_free_block: NonNull<FreeBlockHdr> =
let new_free_block: NonNull<FreeBlockHdr> =
NonNull::new_unchecked(new_block.cast::<u8>().as_ptr().add(new_size)).cast();
let mut new_free_block_size = moving_clearance - new_size;

@@ -1443,15 +1446,16 @@ impl<'pool, FLBitmap: BinInteger, SLBitmap: BinInteger, const FLLEN: usize, cons
self.unlink_free_block(moving_clearance_end.cast(), moving_clearance_end_size);
new_free_block_size += moving_clearance_end_size_and_flags;

let mut next_next_phys_block = moving_clearance_end.as_ref().next_phys_block();
let mut next_next_phys_block =
BlockHdr::next_phys_block(moving_clearance_end.as_mut());
next_next_phys_block.as_mut().prev_phys_block = Some(new_free_block.cast());
} else {
// We can't merge a used block (`moving_clearance_end`) and
// a free block (`new_free_block`).
moving_clearance_end.as_mut().prev_phys_block = Some(new_free_block.cast());
}

new_free_block.as_mut().common = BlockHdr {
*nn_field!(new_free_block, common) = BlockHdr {
size: new_free_block_size,
prev_phys_block: Some(new_block.cast()),
};