diff --git a/Cargo.lock b/Cargo.lock
index 1020a07318..78bb46b817 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -100,6 +100,7 @@ dependencies = [
  "memory",
  "no_drop",
  "page_attribute_table",
+ "per_cpu",
  "scheduler",
  "spawn",
  "stack",
@@ -386,6 +387,7 @@ dependencies = [
  "no_drop",
  "ota_update_client",
  "page_attribute_table",
+ "per_cpu",
  "scheduler",
  "simd_personality",
  "spawn",
@@ -624,6 +626,19 @@ dependencies = [
  "tock-registers",
 ]
 
+[[package]]
+name = "cpu_local"
+version = "0.1.0"
+dependencies = [
+ "crossbeam-utils",
+ "irq_safety",
+ "log",
+ "memory",
+ "preemption",
+ "spin 0.9.4",
+ "x86_64",
+]
+
 [[package]]
 name = "cranelift-entity"
 version = "0.77.0"
@@ -1947,6 +1962,15 @@ dependencies = [
  "autocfg",
 ]
 
+[[package]]
+name = "memoffset"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1"
+dependencies = [
+ "autocfg",
+]
+
 [[package]]
 name = "memory"
 version = "0.1.0"
@@ -2595,6 +2619,18 @@ dependencies = [
  "spin 0.9.4",
 ]
 
+[[package]]
+name = "per_cpu"
+version = "0.1.0"
+dependencies = [
+ "cpu",
+ "cpu_local",
+ "log",
+ "memoffset 0.8.0",
+ "preemption",
+ "task",
+]
+
 [[package]]
 name = "percent-encoding"
 version = "1.0.2"
@@ -3687,6 +3723,7 @@ version = "0.1.0"
 dependencies = [
  "context_switch",
  "cpu",
+ "cpu_local",
  "crossbeam-utils",
  "environment",
  "irq_safety",
diff --git a/kernel/ap_start/Cargo.toml b/kernel/ap_start/Cargo.toml
index c4364e6429..b8e4a9ab42 100644
--- a/kernel/ap_start/Cargo.toml
+++ b/kernel/ap_start/Cargo.toml
@@ -16,6 +16,7 @@ scheduler = { path = "../scheduler" }
 spawn = { path = "../spawn" }
 kernel_config = { path = "../kernel_config" }
 cpu = { path = "../cpu" }
+per_cpu = { path = "../per_cpu" }
 no_drop = { path = "../no_drop" }
 early_tls = { path = "../early_tls" }
 
diff --git a/kernel/ap_start/src/lib.rs b/kernel/ap_start/src/lib.rs
index 7be4657c4a..911be4f597 100644
--- a/kernel/ap_start/src/lib.rs
+++ b/kernel/ap_start/src/lib.rs
@@ -106,7 +106,11 @@ pub fn kstart_ap(
     }
 
     // Now that the Local APIC has been initialized for this CPU, we can initialize the
-    // task management subsystem and create the idle task for this CPU.
+    // per-CPU storage, tasking, and create the idle task for this CPU.
+
+    #[cfg(target_arch = "x86_64")] // not yet supported on aarch64
+    per_cpu::init(cpu_id).unwrap();
+
     let bootstrap_task = spawn::init(kernel_mmi_ref.clone(), cpu_id, this_ap_stack).unwrap();
     spawn::create_idle_task().unwrap();
 
diff --git a/kernel/captain/Cargo.toml b/kernel/captain/Cargo.toml
index 269f5823bb..17cffb52f7 100644
--- a/kernel/captain/Cargo.toml
+++ b/kernel/captain/Cargo.toml
@@ -23,6 +23,7 @@ spawn = { path = "../spawn" }
 stack = { path = "../stack" }
 task = { path = "../task" }
 cpu = { path = "../cpu" }
+per_cpu = { path = "../per_cpu" }
 
 [target.'cfg(target_arch = "x86_64")'.dependencies]
 logger_x86_64 = { path = "../logger_x86_64" }
diff --git a/kernel/captain/src/lib.rs b/kernel/captain/src/lib.rs
index 19533fc2da..5bab4f58e2 100644
--- a/kernel/captain/src/lib.rs
+++ b/kernel/captain/src/lib.rs
@@ -121,6 +121,8 @@ pub fn init(
 
     // get BSP's CPU ID
     let bsp_id = cpu::bootstrap_cpu().ok_or("captain::init(): couldn't get ID of bootstrap CPU!")?;
+    #[cfg(target_arch = "x86_64")] // not yet supported on aarch64
+    per_cpu::init(bsp_id)?;
 
     // Initialize the scheduler and create the initial `Task`,
     // which is bootstrapped from this current execution context.
diff --git a/kernel/cpu_local/Cargo.toml b/kernel/cpu_local/Cargo.toml
new file mode 100644
index 0000000000..3734b87c50
--- /dev/null
+++ b/kernel/cpu_local/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+authors = ["Kevin Boos "]
+name = "cpu_local"
+description = "Support for accessing CPU-local storage via per-CPU variables"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+crossbeam-utils = { version = "0.8.12", default-features = false }
+log = "0.4.8"
+spin = "0.9.0"
+
+irq_safety = { git = "https://github.com/theseus-os/irq_safety" }
+memory = { path = "../memory" }
+preemption = { path = "../preemption" }
+
+[target.'cfg(target_arch = "x86_64")'.dependencies]
+x86_64 = "0.14.8"
diff --git a/kernel/cpu_local/src/lib.rs b/kernel/cpu_local/src/lib.rs
new file mode 100644
index 0000000000..a60de4bde3
--- /dev/null
+++ b/kernel/cpu_local/src/lib.rs
@@ -0,0 +1,305 @@
+//! Offers types and macros to declare and access CPU-local storage (per-CPU variables).
+//!
+//! CPU-local variables cannot be used until after a given CPU has been initialized,
+//! i.e., its Local APIC (on x86_64) has been discovered and properly configured.
+//! Currently, the [`init()`] routine in this crate should be invoked by
+//! another init routine from the `per_cpu` crate.
+//!
+//! Note that Rust offers the `#[thread_local]` attribute for thread-local storage (TLS),
+//! but there is no equivalent for CPU-local storage.
+//! On x86_64, TLS areas use the `fs` segment register for the TLS base,
+//! and this crate uses the `gs` segment register for the CPU-local base.
+
+#![no_std]
+#![feature(thread_local)]
+
+extern crate alloc;
+
+use core::marker::PhantomData;
+use alloc::collections::{BTreeMap, btree_map::Entry};
+use memory::{MappedPages, PteFlags};
+use preemption::{hold_preemption, PreemptionGuard};
+use spin::Mutex;
+
+#[cfg(target_arch = "x86_64")]
+use x86_64::{registers::model_specific::GsBase, VirtAddr};
+
+
+/// The available CPU-local variables, i.e., the fields in the `per_cpu::PerCpuData` struct.
+//
+// NOTE: These fields and their offsets must be kept in sync with `per_cpu::PerCpuData`.
+//
+#[derive(PartialEq, Eq)]
+pub enum PerCpuField {
+    CpuId,
+    PreemptionCount,
+    TaskSwitchPreemptionGuard,
+    DropAfterTaskSwitch,
+}
+impl PerCpuField {
+    /// Returns the offset of this field in the `per_cpu::PerCpuData` struct.
+    pub const fn offset(&self) -> usize {
+        match self {
+            Self::CpuId => 8,
+            Self::PreemptionCount => 12,
+            Self::TaskSwitchPreemptionGuard => 16,
+            Self::DropAfterTaskSwitch => 24,
+        }
+    }
+}
+
+
+/// This trait must be implemented for each field in `per_cpu::PerCpuData`.
+///
+/// ## Safety
+/// This is marked `unsafe` because the implementor must guarantee
+/// that the associated `FIELD` constant is correctly specified.
+/// * For example, the implementation of this trait for `CpuId` must specify
+///   the `FIELD` const as [`PerCpuField::CpuId`],
+///   but we cannot verify that here due to cyclic dependency issues.
+pub unsafe trait CpuLocalField: Sized {
+    const FIELD: PerCpuField;
+
+    // In the future, we will add a `DeadlockPrevention` parameter here
+    // to allow each field to dictate what needs to be temporarily disabled
+    // while accessing this field immutably or mutably.
+    // For example, disabling preemption, interrupts, or nothing.
+}
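To make the trait's contract concrete, here is the pairing this diff itself establishes for the `CpuId` field (the full impl appears in `kernel/per_cpu/src/lib.rs` further below): the enum variant named by the `FIELD` constant must agree with where the newtype actually lives in `PerCpuData`.

```rust
use cpu_local::{CpuLocalField, PerCpuField};

// The newtype that `per_cpu` defines for the `cpu_id` field (see below).
pub struct CpuLocalCpuId(cpu::CpuId);

// SAFETY: `CpuLocalCpuId` must actually reside at `PerCpuField::CpuId.offset()`
// (offset 8) within `PerCpuData`; `per_cpu` verifies this with a const assertion.
unsafe impl CpuLocalField for CpuLocalCpuId {
    const FIELD: PerCpuField = PerCpuField::CpuId;
}
```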
+
+
+/// A reference to a CPU-local variable.
+///
+/// ## Usage Notes
+/// * This does not currently permit or handle usage of `CpuLocal::with_mut()`
+///   from within an interrupt handler context.
+///   * Interrupt handler contexts should only access a `CpuLocal` *immutably*.
+///   * If you need to mutate/modify a CPU-local variable from within an
+///     interrupt handler, please file an issue to alert the Theseus developers.
+/// * This struct does not contain an instance of the type `T`.
+///   Thus, dropping it has no effect.
+pub struct CpuLocal<T: CpuLocalField>(PhantomData<T>);
+impl<T: CpuLocalField> CpuLocal<T> {
+    /// Creates a new reference to a predefined CPU-local variable.
+    ///
+    /// ## Arguments
+    /// * `field`: the field in the `per_cpu::PerCpuData` struct that
+    ///   you wish to access via the returned `CpuLocal` object.
+    ///
+    /// The type `T: CpuLocalField` must be specified with the turbofish operator:
+    /// ```rust,no_run
+    /// static CPU_ID: CpuLocal<CpuLocalCpuId> = CpuLocal::new(PerCpuField::CpuId);
+    /// ```
+    pub const fn new(field: PerCpuField) -> Self {
+        assert!(field.offset() == T::FIELD.offset());
+        Self(PhantomData)
+    }
+
+    /// Invokes the given `func` with an immutable reference to this `CpuLocal` variable.
+    ///
+    /// Preemption will be disabled for the duration of this function
+    /// in order to ensure that this task cannot be switched away from
+    /// or migrated to another CPU.
+    ///
+    /// If the caller has already disabled preemption, it is more efficient to
+    /// use the [`with_preempt()`] function, which allows the caller to pass in
+    /// an existing preemption guard to prove that preemption is already disabled.
+    pub fn with<F, R>(&self, func: F) -> R
+    where
+        F: FnOnce(&T) -> R,
+    {
+        let guard = hold_preemption();
+        self.with_preempt(&guard, func)
+    }
+
+    /// Invokes the given `func` with an immutable reference to this `CpuLocal` variable.
+    ///
+    /// This function accepts an existing preemption guard, which efficiently proves
+    /// that preemption has already been disabled.
+    pub fn with_preempt<F, R>(&self, _guard: &PreemptionGuard, func: F) -> R
+    where
+        F: FnOnce(&T) -> R,
+    {
+        let ptr_to_cpu_local = self.self_ptr() + T::FIELD.offset();
+        let local_ref = unsafe { &*(ptr_to_cpu_local as *const T) };
+        func(local_ref)
+    }
+
+    /// Invokes the given `func` with a mutable reference to this `CpuLocal` variable.
+    ///
+    /// Interrupts will be disabled for the duration of this function
+    /// in order to ensure atomicity while this per-CPU state is being modified.
+    pub fn with_mut<F, R>(&self, func: F) -> R
+    where
+        F: FnOnce(&mut T) -> R,
+    {
+        let _held_interrupts = irq_safety::hold_interrupts();
+        let ptr_to_cpu_local = self.self_ptr() + T::FIELD.offset();
+        let local_ref_mut = unsafe { &mut *(ptr_to_cpu_local as *mut T) };
+        func(local_ref_mut)
+    }
+
+    /// Returns the value of the self pointer, which points to this CPU's `PerCpuData`.
+    #[cfg_attr(not(target_arch = "x86_64"), allow(unreachable_code, unused))]
+    fn self_ptr(&self) -> usize {
+        let self_ptr: usize;
+        #[cfg(target_arch = "x86_64")]
+        unsafe {
+            core::arch::asm!(
+                "mov {}, gs:[0]", // the self ptr offset is 0
+                lateout(reg) self_ptr,
+                options(nostack, preserves_flags, readonly, pure)
+            );
+        }
+
+        #[cfg(not(target_arch = "x86_64"))]
+        todo!("CPU Locals are not yet supported on non-x86_64 platforms");
+
+        self_ptr
+    }
+}
+
+impl<T> CpuLocal<T>
+where
+    T: Copy + CpuLocalField,
+{
+    /// Returns a copy of this `CpuLocal`'s inner value of type `T`.
+    ///
+    /// This is a convenience function only available for types where `T: Copy`.
+    pub fn get(&self) -> T {
+        self.with(|v| *v)
+    }
+}
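For reference, a minimal usage sketch of the `CpuLocal` API defined above, mirroring the doc example; it assumes a crate that can name the `CpuLocalCpuId` field type from `per_cpu` (shown later in this diff).

```rust
use cpu_local::{CpuLocal, PerCpuField};
use per_cpu::CpuLocalCpuId;

// Ties this static to the `CpuId` field; `CpuLocal::new` const-asserts that
// the chosen `PerCpuField` variant matches `T::FIELD`.
static CPU_ID: CpuLocal<CpuLocalCpuId> = CpuLocal::new(PerCpuField::CpuId);

fn print_current_cpu() {
    // Immutable access; preemption is disabled for the closure's duration,
    // so the task cannot migrate to another CPU mid-read.
    let id = CPU_ID.with(|id| id.value());
    log::info!("currently executing on CPU {id}");
}
```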
+
+
+/// The underlying memory region for each CPU's per-CPU data.
+#[derive(Debug)]
+struct CpuLocalDataRegion(MappedPages);
+impl CpuLocalDataRegion {
+    /// Allocates a new CPU-local data image.
+    fn new(size_of_per_cpu_data: usize) -> Result<CpuLocalDataRegion, &'static str> {
+        let mp = memory::create_mapping(
+            size_of_per_cpu_data,
+            PteFlags::new().writable(true).valid(true),
+        )?;
+        Ok(CpuLocalDataRegion(mp))
+    }
+
+    /// Sets this CPU's base register (e.g., GsBase on x86_64) to the address
+    /// of this CPU-local data image, making it "currently active" and accessible.
+    fn set_as_current_cpu_local_base(&self) {
+        let self_ptr_value = self.0.start_address().value();
+
+        #[cfg(target_arch = "x86_64")] {
+            let gsbase_val = VirtAddr::new_truncate(self_ptr_value as u64);
+            GsBase::write(gsbase_val);
+        }
+
+        #[cfg(not(target_arch = "x86_64"))] {
+            let _ = self_ptr_value; // TODO
+            todo!("CPU-local variable access is not yet implemented on this architecture")
+        }
+    }
+}
+
+
+/// Initializes the CPU-local data region for this CPU.
+///
+/// Note: this is invoked by the `per_cpu` crate;
+/// other crates do not need to invoke this.
+pub fn init<P: Sized>(
+    cpu_id: u32,
+    size_of_per_cpu_data: usize,
+    per_cpu_data_initializer: impl FnOnce(usize) -> P
+) -> Result<(), &'static str> {
+    /// The global set of all per-CPU data regions.
+    static CPU_LOCAL_DATA_REGIONS: Mutex<BTreeMap<u32, CpuLocalDataRegion>> = Mutex::new(BTreeMap::new());
+
+    let mut regions = CPU_LOCAL_DATA_REGIONS.lock();
+    let entry = regions.entry(cpu_id);
+    let data_region = match entry {
+        Entry::Vacant(v) => v.insert(CpuLocalDataRegion::new(size_of_per_cpu_data)?),
+        Entry::Occupied(_) => return Err("BUG: cannot init CPU-local data more than once"),
+    };
+
+    // Run the given initializer function to initialize the per-CPU data region.
+    {
+        let self_ptr = data_region.0.start_address().value();
+        let initial_value = per_cpu_data_initializer(self_ptr);
+        // SAFETY:
+        // ✅ We just allocated memory for the self ptr above, it is only accessible by us.
+        // ✅ That memory is mutable (writable) and is aligned to a page boundary.
+        // ✅ The memory is at least as large as `size_of::<P>()`.
+        unsafe { core::ptr::write(self_ptr as *mut P, initial_value) };
+    }
+
+    // Set the new CPU-local data region as active and ready to be used on this CPU.
+    data_region.set_as_current_cpu_local_base();
+    Ok(())
+}
+
+
+// NOTE:
+// we don't currently use this because we always load a pointer to the CpuLocal
+// instead of loading or storing the value directly.
+// If/when we wish to support these direct loads/stores of values from/to a
+// GS-based offset, then we can uncomment this module.
+/*
+mod load_store_direct {
+
+    mod sealed {
+        pub(crate) trait SingleMovGs {
+            unsafe fn load(offset: usize) -> Self;
+            unsafe fn store(offset: usize, val: Self);
+        }
+    }
+    pub(crate) use sealed::SingleMovGs;
+
+    macro_rules! impl_single_mov_gs {
+        ($type:ty, $reg:ident, $reg_str:literal) => {
+            impl SingleMovGs for [u8; size_of::<$type>()] {
+                #[inline]
+                unsafe fn load(offset: usize) -> Self {
+                    let val: $type;
+                    asm!(
+                        concat!("mov ", $reg_str, ", gs:[{}]"),
+                        lateout($reg) val, in(reg) offset,
+                        options(nostack, preserves_flags, readonly, pure)
+                    );
+                    val.to_ne_bytes()
+                }
+                #[inline]
+                unsafe fn store(offset: usize, val: Self) {
+                    asm!(
+                        concat!("mov gs:[{}], ", $reg_str),
+                        in(reg) offset, in($reg) <$type>::from_ne_bytes(val),
+                        options(nostack, preserves_flags)
+                    );
+                }
+            }
+        };
+    }
+
+    impl_single_mov_gs!(u64, reg, "{}");
+    impl_single_mov_gs!(u32, reg, "{:e}");
+    impl_single_mov_gs!(u16, reg, "{:x}");
+    impl_single_mov_gs!(u8, reg_byte, "{}");
+
+    /// Load `SIZE` bytes from the offset relative to the GsBase segment register.
+    #[inline]
+    unsafe fn load<const SIZE: usize>(offset: usize) -> [u8; SIZE]
+    where
+        [u8; SIZE]: SingleMovGs,
+    {
+        SingleMovGs::load(offset)
+    }
+
+    /// Store `val` at the offset relative to the GsBase segment register.
+    #[inline]
+    unsafe fn store<const SIZE: usize>(offset: usize, val: [u8; SIZE])
+    where
+        [u8; SIZE]: SingleMovGs,
+    {
+        SingleMovGs::store(offset, val)
+    }
+}
+*/
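The key contract of `init()` above is that the initializer closure receives the start address of the freshly mapped region and must return a value whose first field records that same address. A hypothetical, minimal caller is sketched below; the real caller is `per_cpu::init`, added in the next file.

```rust
// Hypothetical illustration of the `cpu_local::init` contract;
// the real per-CPU struct and caller live in the `per_cpu` crate below.
#[repr(C)]
struct ExamplePerCpu {
    self_ptr: usize, // must be first, at offset 0, so `mov {}, gs:[0]` finds it
    cpu_id: u32,
}

fn init_example(cpu_id: u32) -> Result<(), &'static str> {
    cpu_local::init(
        cpu_id,
        core::mem::size_of::<ExamplePerCpu>(),
        // The closure receives the region's start address, which becomes both
        // the struct's own location and the value of its `self_ptr` field.
        |self_ptr| ExamplePerCpu { self_ptr, cpu_id },
    )
}
```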
diff --git a/kernel/per_cpu/Cargo.toml b/kernel/per_cpu/Cargo.toml
new file mode 100644
index 0000000000..7afa28de1f
--- /dev/null
+++ b/kernel/per_cpu/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+authors = ["Kevin Boos "]
+name = "per_cpu"
+description = "Support for defining per-CPU data types"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+log = "0.4.8"
+memoffset = "0.8.0"
+
+cpu = { path = "../cpu" }
+cpu_local = { path = "../cpu_local" }
+preemption = { path = "../preemption" }
+task = { path = "../task" }
diff --git a/kernel/per_cpu/src/lib.rs b/kernel/per_cpu/src/lib.rs
new file mode 100644
index 0000000000..174048c574
--- /dev/null
+++ b/kernel/per_cpu/src/lib.rs
@@ -0,0 +1,141 @@
+//! Contains [`PerCpuData`], the data stored on a per-CPU basis in Theseus.
+//!
+//! Each CPU has its own instance of `PerCpuData`, and each CPU's instance
+//! can only be accessed by itself.
+//!
+//! ## This `per_cpu` crate vs. the `cpu_local` crate
+//! These two crates exist to solve a circular dependency problem:
+//! the crate that defines the per-CPU data structure (this `per_cpu` crate)
+//! must depend on all the foreign crates that define the types used for
+//! each field in the per-CPU data structure.
+//! However, those foreign crates also want to *access* these per-CPU states,
+//! which would require depending on this `per_cpu` crate.
+//! This would create a cyclic dependency, so we break it into two crates.
+//!
+//! 1. This crate `per_cpu` directly depends on many other kernel crates,
+//!    specifically the ones that define the types needed for each field of [`PerCpuData`].
+//!    * If you want to add another piece of per-CPU data, you can do that here
+//!      by modifying the [`PerCpuData`] struct, and then updating the const definitions
+//!      of offsets and other metadata in `cpu_local::PerCpuField`.
+//!    * To actually access per-CPU data, do not use this crate;
+//!      use `cpu_local::CpuLocal` instead.
+//!
+//! 2. The `cpu_local` crate is the "top-level" crate that is depended upon
+//!    by each of the crates that needs to access per-CPU data.
+//!    * `cpu_local` is a mostly standalone crate that does not depend
+//!      on any of the specific types from other Theseus crates,
+//!      which allows other Theseus crates to depend upon it.
+//!    * `cpu_local` effectively decouples the definitions of per-CPU data
+//!      from the code that accesses that data.
+//!    * This `per_cpu` crate also depends on `cpu_local` in order to initialize itself
+//!      for each CPU right after that CPU has booted.
+//!
+
+#![no_std]
+#![feature(const_refs_to_cell)]
+
+use core::ops::Deref;
+use cpu::CpuId;
+use cpu_local::PerCpuField;
+use task::{DropAfterTaskSwitch, TaskSwitchPreemptionGuard};
+use preemption::PreemptionCount;
+
+/// The data stored on a per-CPU basis in Theseus.
+///
+/// Currently, we do not support additional arbitrary per-CPU states, e.g.,
+/// dynamically adding or removing states, or defining per-CPU states
+/// outside this struct.
+///
+/// This struct is not directly accessible; per-CPU states are accessible
+/// by other crates using the functions in the [`cpu_local`] crate.
+///
+/// ## Required traits
+/// Each field in this struct must implement [`cpu_local::CpuLocalField`],
+/// which in turn mandates that each field have a unique type distinct from the type
+/// of every other field.
+/// Currently, we achieve this with newtype wrappers.
+#[allow(dead_code)] // These fields are accessed via `cpu_local` functions.
+#[repr(C)]
+//
+// IMPORTANT NOTE:
+// * These fields must be kept in sync with `cpu_local::PerCpuField`.
+// * The same applies for the `const_assertions` module at the end of this file.
+//
+pub struct PerCpuData {
+    /// A pointer to the start of this struct in memory, similar to a TLS self pointer.
+    /// This has a different initial value for each CPU's data image, of course.
+    ///
+    /// We use this to allow writes to this entire structure (for initialization),
+    /// and also to allow faster access to large fields herein, as they don't need to be
+    /// loaded in full before accessing a single sub-field. See this for more:
+    /// .
+    self_ptr: usize,
+    /// The unique ID of this CPU. This is immutable.
+    cpu_id: CpuLocalCpuId,
+    /// The current preemption count of this CPU, which is used to determine
+    /// whether task switching can occur or not.
+    ///
+    /// TODO: we must merge `cpu_local` and `preemption` in order to use something like
+    /// `CpuLocal<PreemptionCount>` in the `preemption` crate.
+    preemption_count: PreemptionCount,
+    /// A preemption guard used during task switching to ensure that one task switch
+    /// cannot interrupt (preempt) another task switch already in progress.
+    task_switch_preemption_guard: TaskSwitchPreemptionGuard,
+    /// Data that should be dropped after switching away from a task that has exited.
+    /// Currently, this contains the previous task's `TaskRef` that was removed
+    /// from its TLS area during the last task switch away from it.
+    drop_after_task_switch: DropAfterTaskSwitch,
+}
+
+impl PerCpuData {
+    /// Defines the initial values of each per-CPU state.
+    fn new(self_ptr: usize, cpu_id: cpu::CpuId) -> Self {
+        Self {
+            self_ptr,
+            cpu_id: CpuLocalCpuId(cpu_id),
+            preemption_count: PreemptionCount::new(),
+            task_switch_preemption_guard: TaskSwitchPreemptionGuard::new(),
+            drop_after_task_switch: DropAfterTaskSwitch::new(),
+        }
+    }
+}
+
+
+/// An immutable type wrapper for this CPU's unique ID, kept in CPU-local storage.
+///
+/// Derefs into a [`CpuId`].
+pub struct CpuLocalCpuId(CpuId);
+impl Deref for CpuLocalCpuId {
+    type Target = CpuId;
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+// SAFETY: The `CpuLocalCpuId` type corresponds to a field in `PerCpuData`
+// with the offset specified by `PerCpuField::CpuId`.
+unsafe impl cpu_local::CpuLocalField for CpuLocalCpuId {
+    const FIELD: PerCpuField = PerCpuField::CpuId;
+}
+
+
+/// Initializes the current CPU's `PerCpuData`.
+///
+/// This must be invoked from (run on) the actual CPU with the given `cpu_id`;
+/// the main bootstrap CPU cannot run this for all CPUs itself.
+pub fn init(cpu_id: cpu::CpuId) -> Result<(), &'static str> {
+    cpu_local::init(
+        cpu_id.value(),
+        core::mem::size_of::<PerCpuData>(),
+        |self_ptr| PerCpuData::new(self_ptr, cpu_id),
+    )
+}
+
+mod const_assertions {
+    use memoffset::offset_of;
+    use super::*;
+
+    const _: () = assert!(0 == offset_of!(PerCpuData, self_ptr));
+    const _: () = assert!(PerCpuField::CpuId.offset() == offset_of!(PerCpuData, cpu_id));
+    const _: () = assert!(PerCpuField::PreemptionCount.offset() == offset_of!(PerCpuData, preemption_count));
+    const _: () = assert!(PerCpuField::TaskSwitchPreemptionGuard.offset() == offset_of!(PerCpuData, task_switch_preemption_guard));
+    const _: () = assert!(PerCpuField::DropAfterTaskSwitch.offset() == offset_of!(PerCpuData, drop_after_task_switch));
+}
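The `const_assertions` module above is what keeps the hand-written offsets in `cpu_local::PerCpuField` honest: any drift between the struct layout and the offset constants becomes a compile error. A self-contained toy version of the same technique, assuming the same nightly setup this diff relies on for using `memoffset::offset_of!` in const context:

```rust
use memoffset::offset_of;

// Toy analog of the checks above: if `b`'s real offset ever drifts from the
// hand-written constant, compilation fails instead of silently corrupting
// per-CPU accesses that compute addresses from these offsets.
#[repr(C)]
struct Toy {
    a: usize, // offset 0
    b: u32,   // offset 8 on x86_64
}

const B_OFFSET: usize = 8;
const _: () = assert!(offset_of!(Toy, b) == B_OFFSET);
```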
diff --git a/kernel/preemption/src/lib.rs b/kernel/preemption/src/lib.rs
index e5f86f551f..9fb7350dcb 100644
--- a/kernel/preemption/src/lib.rs
+++ b/kernel/preemption/src/lib.rs
@@ -27,6 +27,12 @@ const ATOMIC_U8_ZERO: AtomicU8 = AtomicU8::new(0);
 /// If a CPU's preemption count is greater than `0`, preemption is disabled.
 static PREEMPTION_COUNT: [AtomicU8; MAX_CPU_CORES] = [ATOMIC_U8_ZERO; MAX_CPU_CORES];
 
+pub struct PreemptionCount(AtomicU8);
+impl PreemptionCount {
+    pub const fn new() -> Self {
+        PreemptionCount(ATOMIC_U8_ZERO)
+    }
+}
 
 /// Prevents preemption (preemptive task switching) from occurring
 /// until the returned guard object is dropped.
diff --git a/kernel/task/Cargo.toml b/kernel/task/Cargo.toml
index 5eb0936c10..7c570d88f8 100644
--- a/kernel/task/Cargo.toml
+++ b/kernel/task/Cargo.toml
@@ -15,6 +15,7 @@ irq_safety = { git = "https://github.com/theseus-os/irq_safety" }
 
 context_switch = { path = "../context_switch" }
 cpu = { path = "../cpu" }
+cpu_local = { path = "../cpu_local" }
 environment = { path = "../environment" }
 memory = { path = "../memory" }
 mod_mgmt = { path = "../mod_mgmt" }
diff --git a/kernel/task/src/lib.rs b/kernel/task/src/lib.rs
index 7cd36cabde..fe13527a39 100755
--- a/kernel/task/src/lib.rs
+++ b/kernel/task/src/lib.rs
@@ -44,6 +44,7 @@ use core::{
     task::Waker,
 };
 use cpu::CpuId;
+use cpu_local::{CpuLocalField, PerCpuField};
 use crossbeam_utils::atomic::AtomicCell;
 use irq_safety::{hold_interrupts, MutexIrqSafe};
 use log::error;
@@ -91,6 +92,34 @@ pub fn all_tasks() -> Vec<(usize, WeakTaskRef)> {
 
 pub type FailureCleanupFunction = fn(ExitableTaskRef, KillReason) -> !;
 
+
+/// A type wrapper used to hold a CPU-local `PreemptionGuard`
+/// on the current CPU during a task switch operation.
+#[derive(Default)]
+pub struct TaskSwitchPreemptionGuard(Option<PreemptionGuard>);
+impl TaskSwitchPreemptionGuard {
+    pub const fn new() -> Self { Self(None) }
+}
+// SAFETY: The `TaskSwitchPreemptionGuard` type corresponds to a field in `PerCpuData`
+// with the offset specified by `PerCpuField::TaskSwitchPreemptionGuard`.
+unsafe impl CpuLocalField for TaskSwitchPreemptionGuard {
+    const FIELD: PerCpuField = PerCpuField::TaskSwitchPreemptionGuard;
+}
+
+
+/// A type wrapper used to hold CPU-local data that should be dropped
+/// after switching away from a task that has exited.
+#[derive(Default)]
+pub struct DropAfterTaskSwitch(Option<TaskRef>);
+impl DropAfterTaskSwitch {
+    pub const fn new() -> Self { Self(None) }
+}
+// SAFETY: The `DropAfterTaskSwitch` type corresponds to a field in `PerCpuData`
+// with the offset specified by `PerCpuField::DropAfterTaskSwitch`.
+unsafe impl CpuLocalField for DropAfterTaskSwitch {
+    const FIELD: PerCpuField = PerCpuField::DropAfterTaskSwitch;
+}
+
+
 /// A shareable, cloneable reference to a `Task` that exposes more methods
 /// for task management and auto-derefs into an immutable `&Task` reference.
 ///
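This diff does not include the task-switching code that consumes these two wrappers, but their intended use inside this `task` crate looks roughly like the following hypothetical sketch; the statics and the function here are illustrative, not part of the patch.

```rust
use cpu_local::{CpuLocal, PerCpuField};
use preemption::PreemptionGuard;

static TASK_SWITCH_PREEMPTION_GUARD: CpuLocal<TaskSwitchPreemptionGuard> =
    CpuLocal::new(PerCpuField::TaskSwitchPreemptionGuard);
static DROP_AFTER_TASK_SWITCH: CpuLocal<DropAfterTaskSwitch> =
    CpuLocal::new(PerCpuField::DropAfterTaskSwitch);

fn stash_task_switch_state(guard: PreemptionGuard, exited_task: TaskRef) {
    // Hold the guard in CPU-local storage so a nested task switch cannot
    // begin on this CPU until the current one completes.
    TASK_SWITCH_PREEMPTION_GUARD.with_mut(|g| g.0 = Some(guard));
    // Defer dropping the exited task's `TaskRef` until after the switch,
    // since dropping it mid-switch could tear down state still in use.
    DROP_AFTER_TASK_SWITCH.with_mut(|d| d.0 = Some(exited_task));
}
```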