diff --git a/Cargo.lock b/Cargo.lock index 4121f86a1a..c8457831e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -474,6 +474,11 @@ name = "cls" version = "0.1.0" dependencies = [ "cls_macros", + "cortex-a", + "irq_safety", + "preemption", + "tock-registers", + "x86_64", ] [[package]] @@ -2599,6 +2604,7 @@ dependencies = [ "log", "memoffset 0.8.0", "no_drop", + "preemption", "task", ] @@ -2735,7 +2741,7 @@ name = "preemption" version = "0.1.0" dependencies = [ "apic", - "cls", + "cls_macros", "cpu", ] @@ -3698,6 +3704,7 @@ source = "git+https://github.com/theseus-os/target-lexicon?branch=theseus#75d36c name = "task" version = "0.1.0" dependencies = [ + "cls", "context_switch", "cpu", "cpu_local", diff --git a/kernel/cls/Cargo.toml b/kernel/cls/Cargo.toml index 7bbd4ff784..965ed5bbda 100644 --- a/kernel/cls/Cargo.toml +++ b/kernel/cls/Cargo.toml @@ -7,3 +7,12 @@ edition = "2021" [dependencies] cls_macros = { path = "cls_macros" } +irq_safety = { git = "https://github.com/theseus-os/irq_safety" } +preemption = { path = "../preemption" } + +[target.'cfg(target_arch = "x86_64")'.dependencies] +x86_64 = "0.14.8" + +[target.'cfg(target_arch = "aarch64")'.dependencies] +tock-registers = "0.7.0" +cortex-a = "7.5.0" diff --git a/kernel/cls/cls_macros/src/int.rs b/kernel/cls/cls_macros/src/int.rs new file mode 100644 index 0000000000..5bfa56a2ea --- /dev/null +++ b/kernel/cls/cls_macros/src/int.rs @@ -0,0 +1,120 @@ +use proc_macro2::TokenStream; +use quote::{quote, ToTokens}; +use syn::{LitInt, Type}; + +pub(crate) fn int_functions(ty: Type, offset: LitInt) -> Option { + let ((x64_asm_width, x64_reg_class), (aarch64_reg_modifier, aarch64_instr_width)) = + match ty.to_token_stream().to_string().as_ref() { + "u8" => (("byte", quote! { reg_byte }), (":w", "b")), + "u16" => (("word", quote! { reg }), (":w", "w")), + "u32" => (("dword", quote! { reg }), (":w", "")), + "u64" => (("qword", quote! 
{ reg }), ("", "")), + _ => { + return None; + } + }; + let x64_width_modifier = format!("{x64_asm_width} ptr "); + let x64_cls_location = format!("gs:[{offset}]"); + + Some(quote! { + #[inline] + pub fn load(&self) -> #ty { + #[cfg(target_arch = "x86_64")] + { + let ret; + unsafe { + ::core::arch::asm!( + ::core::concat!("mov {}, ", #x64_cls_location), + out(#x64_reg_class) ret, + options(preserves_flags, nostack), + ) + }; + ret + } + #[cfg(target_arch = "aarch64")] + { + let ret; + unsafe { + ::core::arch::asm!( + "2:", + // Load value. + "mrs {tp_1}, tpidr_el1", + concat!( + "ldr", #aarch64_instr_width, + " {ret", #aarch64_reg_modifier,"},", + " [{tp_1},#", stringify!(#offset), "]", + ), + + // Make sure task wasn't migrated between mrs and ldr. + "mrs {tp_2}, tpidr_el1", + "cmp {tp_1}, {tp_2}", + "b.ne 2b", + + tp_1 = out(reg) _, + ret = out(reg) ret, + tp_2 = out(reg) _, + + options(nostack), + ) + }; + ret + } + } + + #[inline] + pub fn fetch_add(&self, mut operand: #ty) -> #ty { + #[cfg(target_arch = "x86_64")] + { + unsafe { + ::core::arch::asm!( + ::core::concat!("xadd ", #x64_width_modifier, #x64_cls_location, ", {}"), + inout(#x64_reg_class) operand, + options(nostack), + ) + }; + operand + } + #[cfg(target_arch = "aarch64")] + { + let ret; + unsafe { + ::core::arch::asm!( + "2:", + // Load value. + "mrs {tp_1}, tpidr_el1", + concat!("add {ptr}, {tp_1}, ", stringify!(#offset)), + concat!("ldxr", #aarch64_instr_width, " {value", #aarch64_reg_modifier,"}, [{ptr}]"), + + // Make sure task wasn't migrated between mrs and ldxr. + "mrs {tp_2}, tpidr_el1", + "cmp {tp_1}, {tp_2}", + "b.ne 2b", + + // Compute and store value (reuse tp_1 register). + "add {tp_1}, {value}, {operand}", + concat!("stxr", #aarch64_instr_width, " {cond:w}, {tp_1", #aarch64_reg_modifier,"}, [{ptr}]"), + + // Make sure task wasn't migrated between ldxr and stxr. 
+ "cbnz {cond}, 2b", + + tp_1 = out(reg) ret, + ptr = out(reg) _, + value = out(reg) ret, + tp_2 = out(reg) _, + operand = in(reg) operand, + cond = out(reg) _, + + options(nostack), + ) + }; + ret + } + } + + #[inline] + pub fn fetch_sub(&self, mut operand: #ty) -> #ty { + operand = operand.overflowing_neg().0; + self.fetch_add(operand) + } + }) +} diff --git a/kernel/cls/cls_macros/src/lib.rs b/kernel/cls/cls_macros/src/lib.rs index d528a5743d..9fcd625367 100644 --- a/kernel/cls/cls_macros/src/lib.rs +++ b/kernel/cls/cls_macros/src/lib.rs @@ -2,6 +2,8 @@ #![feature(proc_macro_diagnostic, proc_macro_span, let_chains)] +mod int; + use convert_case::{Case, Casing}; use proc_macro::{Diagnostic, Level, Span, TokenStream}; use quote::{quote, ToTokens}; @@ -9,7 +11,7 @@ use syn::{ parse::{Parse, ParseStream}, parse_macro_input, spanned::Spanned, - Attribute, Expr, Ident, LitInt, Token, Type, Visibility, + Attribute, Expr, Ident, LitBool, LitInt, Path, Token, Type, Visibility, }; struct CpuLocal { @@ -41,35 +43,94 @@ impl Parse for CpuLocal { } } +struct Args { + offset: LitInt, + cls_dependency: bool, + stores_guard: Option, +} + +impl Parse for Args { + fn parse(input: ParseStream) -> syn::Result { + let offset = input.parse()?; + let mut cls_dependency = true; + let mut stores_guard = None; + + while input.parse::().is_ok() { + let name = input.parse::()?; + input.parse::()?; + + if name.is_ident("cls_dep") { + cls_dependency = input.parse::()?.value(); + } else if name.is_ident("stores_guard") { + stores_guard = Some((name, input.parse::()?)); + } else { + Diagnostic::spanned( + name.span().unwrap(), + Level::Error, + format!("invalid argument `{}`", name.to_token_stream()), + ) + .help("valid arguments are: `cls_dep`, `stores_guard`") + .emit(); + return Err(syn::Error::new(name.span(), "")); + } + } + + if let Some((ref name, ref value)) = stores_guard && !cls_dependency { + let span = name.span().join(value.span()).unwrap().unwrap(); + Diagnostic::spanned( + span, 
+ Level::Error, + "`stores_guard` requires `cls_dep`", + ) + .emit(); + return Err(syn::Error::new(name.span(), "")); + } + + Ok(Self { + offset, + cls_dependency, + stores_guard: stores_guard.map(|tuple| tuple.1), + }) + } +} + /// A macro for declaring CPU-local variables. /// /// Variables must be an unsigned integer, bar `u128`. /// /// The initialisation expression has no effect; to set the initial value, /// `per_cpu::PerCpuData::new` must be modified. +/// +/// # Arguments +/// +/// The macro supports additional named arguments defined after the offset (e.g. +/// `#[cpu_local(0, cls_dep = false)]`): +/// - `cls_dep`: Whether to define methods that depend on `cls` indirectly +/// adding a dependency on `preemption` and `irq_safety`. This is only really +/// useful for CPU locals defined in `preemption` to avoid a circular +/// dependency. Defaults to true. +/// - `stores_guard`: If defined, must be set to either `HeldInterrupts` or +/// `PreemptionGuard` and signifies that the CPU local has the type +/// `Option`. This option defines special methods that use the guard +/// being switched into the CPU local, rather than an additional guard +/// parameter, as proof that the CPU local can be safely accessed. 
#[proc_macro_attribute] pub fn cpu_local(args: TokenStream, input: TokenStream) -> TokenStream { - // if !args.is_empty() { - // Diagnostic::spanned( - // Span::call_site(), - // Level::Error, - // "malformed `cpu_local` attribute input", - // ) - // .help("must be of the form `#[cpu_local]`") - // .emit(); - // return TokenStream::new(); - // } - - let offset = if let Ok(i) = syn::parse::(args.clone()) { - i - } else { - let span = args - .into_iter() - .map(|tt| tt.span()) - .reduce(|a, b| a.join(b).unwrap()) - .unwrap_or_else(Span::call_site); - Diagnostic::spanned(span, Level::Error, "invalid offset").emit(); - return TokenStream::new(); + let Args { + offset, + cls_dependency, + stores_guard, + } = match syn::parse(args) { + Ok(args) => args, + Err(error) => { + if error.to_string() == "" { + // We've already emitted an error diagnostic. + return TokenStream::new(); + } else { + Diagnostic::spanned(error.span().unwrap(), Level::Error, error.to_string()).emit(); + return TokenStream::new(); + } + } }; let CpuLocal { @@ -87,22 +148,104 @@ pub fn cpu_local(args: TokenStream, input: TokenStream) -> TokenStream { Span::call_site().into(), ); - let ((x64_asm_width, x64_reg_class), (aarch64_reg_modifier, aarch64_instr_width)) = - match ty.to_token_stream().to_string().as_ref() { - "u8" => (("byte", quote! { reg_byte }), (":w", "b")), - "u16" => (("word", quote! { reg }), (":w", "w")), - "u32" => (("dword", quote! { reg }), (":w", "")), - "u64" => (("qword", quote! { reg }), ("", "")), - _ => { - Diagnostic::spanned(ty.span().unwrap(), Level::Error, "invalid type") - .help("CPU locals only support these types: `u8`, `u16`, `u32`, `u64`") - .emit(); - return TokenStream::new(); + let ref_expr = quote! 
{ + { + #[cfg(target_arch = "x86_64")] + let mut ptr = { + use cls::__private::x86_64::registers::segmentation::{GS, Segment64}; + GS::read_base().as_u64() + }; + #[cfg(target_arch = "aarch64")] + let mut ptr = { + use cls::__private::tock_registers::interfaces::Readable; + cls::__private::cortex_a::registers::TPIDR_EL1.get() + }; + ptr += #offset; + unsafe { &mut*(ptr as *mut #ty) } + } + }; + + let cls_dep_functions = if cls_dependency { + let guarded_functions = quote! { + #[inline] + pub fn replace_guarded(&self, mut value: #ty, guard: &G) -> #ty + where + G: ::cls::CpuAtomicGuard, + { + let rref = #ref_expr; + ::core::mem::swap(rref, &mut value); + value + } + + #[inline] + pub fn set_guarded(&self, mut value: #ty, guard: &G) + where + G: ::cls::CpuAtomicGuard, + { + self.replace_guarded(value, guard); } }; - let x64_width_modifier = format!("{x64_asm_width} ptr "); - let x64_cls_location = format!("gs:[{offset}]"); + if let Some(guard_type) = stores_guard { + quote! { + #guarded_functions + + #[inline] + pub fn replace(&self, guard: #guard_type) -> #ty { + // Check that the guard type matches the type of the static. + // https://github.com/rust-lang/rust/issues/20041#issuecomment-820911297 + trait TyEq {} + impl TyEq for (T, T) {} + fn ty_eq() + where + (A, B): TyEq + {} + ty_eq::<::core::option::Option<#guard_type>, #ty>(); + + // Check that the guard type implements cls::Guard. + fn implements_guard_trait() + where + T: ::cls::CpuAtomicGuard + {} + implements_guard_trait::<#guard_type>(); + + let rref = #ref_expr; + let mut guard = Some(guard); + ::core::mem::swap(rref, &mut guard); + + guard + } + + #[inline] + pub fn set(&self, mut guard: #guard_type) { + self.replace(guard); + } + } + } else { + quote! { + #guarded_functions + + #[inline] + pub fn replace(&self, value: #ty) -> #ty { + // TODO: Should this ever be `disable_interrupts` instead? 
+ let guard = ::cls::__private::preemption::hold_preemption(); + self.replace_guarded(value, &guard) + } + + #[inline] + pub fn set(&self, value: #ty) { + // TODO: Should this ever be `disable_interrupts` instead? + let guard = ::cls::__private::preemption::hold_preemption(); + self.set_guarded(value, &guard); + } + + } + } + } else { + proc_macro2::TokenStream::new() + }; + + let int_functions = int::int_functions(ty, offset); quote! { #(#attributes)* @@ -115,105 +258,8 @@ pub fn cpu_local(args: TokenStream, input: TokenStream) -> TokenStream { #visibility struct #struct_name; impl #struct_name { - #[inline] - pub fn load(&self) -> #ty { - #[cfg(target_arch = "x86_64")] - { - let ret; - unsafe { - ::core::arch::asm!( - ::core::concat!("mov {}, ", #x64_cls_location), - out(#x64_reg_class) ret, - options(preserves_flags, nostack), - ) - }; - ret - } - #[cfg(target_arch = "aarch64")] - { - let ret; - unsafe { - ::core::arch::asm!( - "2:", - // Load value. - "mrs {tp_1}, tpidr_el1", - concat!( - "ldr", #aarch64_instr_width, - " {ret", #aarch64_reg_modifier,"},", - " [{tp_1},#", stringify!(#offset), "]", - ), - - // Make sure task wasn't migrated between mrs and ldr. - "mrs {tp_2}, tpidr_el1", - "cmp {tp_1}, {tp_2}", - "b.ne 2b", - - tp_1 = out(reg) _, - ret = out(reg) ret, - tp_2 = out(reg) _, - - options(nostack), - ) - }; - ret - } - } - - #[inline] - pub fn fetch_add(&self, mut operand: #ty) -> #ty { - #[cfg(target_arch = "x86_64")] - { - unsafe { - ::core::arch::asm!( - ::core::concat!("xadd ", #x64_width_modifier, #x64_cls_location, ", {}"), - inout(#x64_reg_class) operand, - options(nostack), - ) - }; - operand - } - #[cfg(target_arch = "aarch64")] - { - let ret; - unsafe { - ::core::arch::asm!( - "2:", - // Load value. - "mrs {tp_1}, tpidr_el1", - concat!("add {ptr}, {tp_1}, ", stringify!(#offset)), - concat!("ldxr", #aarch64_instr_width, " {value", #aarch64_reg_modifier,"}, [{ptr}]"), - - // Make sure task wasn't migrated between msr and ldxr. 
- "mrs {tp_2}, tpidr_el1", - "cmp {tp_1}, {tp_2}", - "b.ne 2b", - - // Compute and store value (reuse tp_1 register). - "add {tp_1}, {value}, {operand}", - concat!("stxr", #aarch64_instr_width, " {cond:w}, {tp_1", #aarch64_reg_modifier,"}, [{ptr}]"), - - // Make sure task wasn't migrated between ldxr and stxr. - "cbnz {cond}, 2b", - - tp_1 = out(reg) ret, - ptr = out(reg) _, - value = out(reg) ret, - tp_2 = out(reg) _, - operand = in(reg) operand, - cond = out(reg) _, - - options(nostack), - ) - }; - ret - } - } - - #[inline] - pub fn fetch_sub(&self, mut operand: #ty) -> #ty { - operand = operand.overflowing_neg().0; - self.fetch_add(operand) - } + #int_functions + #cls_dep_functions } } .into() diff --git a/kernel/cls/src/lib.rs b/kernel/cls/src/lib.rs index 8343398c4f..895c7bb2e2 100644 --- a/kernel/cls/src/lib.rs +++ b/kernel/cls/src/lib.rs @@ -7,3 +7,33 @@ extern crate alloc; pub use cls_macros::cpu_local; + +/// A trait abstracting over guards that ensure atomicity with respect to the +/// current CPU. +/// +/// This trait is "sealed" and cannot be implemented by anything outside this +/// crate. 
+pub trait CpuAtomicGuard: sealed::Sealed {} + +impl sealed::Sealed for irq_safety::HeldInterrupts {} +impl CpuAtomicGuard for irq_safety::HeldInterrupts {} + +impl sealed::Sealed for preemption::PreemptionGuard {} +impl CpuAtomicGuard for preemption::PreemptionGuard {} + +mod sealed { + pub trait Sealed {} +} + +#[doc(hidden)] +pub mod __private { + pub use preemption; + + #[cfg(target_arch = "x86_64")] + pub use x86_64; + + #[cfg(target_arch = "aarch64")] + pub use tock_registers; + #[cfg(target_arch = "aarch64")] + pub use cortex_a; +} diff --git a/kernel/cpu_local/src/lib.rs b/kernel/cpu_local/src/lib.rs index bf48a7dedb..f3edd289a3 100644 --- a/kernel/cpu_local/src/lib.rs +++ b/kernel/cpu_local/src/lib.rs @@ -21,7 +21,7 @@ use preemption::{hold_preemption, PreemptionGuard}; use sync_spin::SpinMutex; #[cfg(target_arch = "x86_64")] -use x86_64::{registers::model_specific::GsBase, VirtAddr}; +use x86_64::{registers::segmentation::{GS, Segment64}, VirtAddr}; #[cfg(target_arch = "aarch64")] use { @@ -200,7 +200,7 @@ impl CpuLocalDataRegion { #[cfg(target_arch = "x86_64")] { let gsbase_val = VirtAddr::new_truncate(self_ptr_value as u64); - GsBase::write(gsbase_val); + unsafe { GS::write_base(gsbase_val) }; } #[cfg(target_arch = "aarch64")] { @@ -219,6 +219,13 @@ pub fn init

( size_of_per_cpu_data: usize, per_cpu_data_initializer: impl FnOnce(usize) -> P ) -> Result<(), &'static str> { + // Enable FSGSBASE instructions + #[cfg(target_arch = "x86_64")] + { + use x86_64::registers::control::{Cr4, Cr4Flags}; + unsafe { Cr4::update(|flags| flags.insert(Cr4Flags::FSGSBASE)) }; + } + /// The global set of all per-CPU data regions. static CPU_LOCAL_DATA_REGIONS: SpinMutex> = SpinMutex::new(BTreeMap::new()); diff --git a/kernel/per_cpu/Cargo.toml b/kernel/per_cpu/Cargo.toml index 33f42fc401..db9d356a66 100644 --- a/kernel/per_cpu/Cargo.toml +++ b/kernel/per_cpu/Cargo.toml @@ -12,4 +12,5 @@ memoffset = "0.8.0" cpu = { path = "../cpu" } cpu_local = { path = "../cpu_local" } no_drop = { path = "../no_drop" } +preemption = { path = "../preemption" } task = { path = "../task" } diff --git a/kernel/per_cpu/src/lib.rs b/kernel/per_cpu/src/lib.rs index 0c4a4bbedf..4554294013 100644 --- a/kernel/per_cpu/src/lib.rs +++ b/kernel/per_cpu/src/lib.rs @@ -36,7 +36,8 @@ use core::ops::Deref; use cpu::CpuId; use cpu_local::PerCpuField; -use task::{DropAfterTaskSwitch, TaskSwitchPreemptionGuard}; +use preemption::PreemptionGuard; +use task::TaskRef; /// The data stored on a per-CPU basis in Theseus. /// @@ -75,11 +76,11 @@ pub struct PerCpuData { preemption_count: u8, /// A preemption guard used during task switching to ensure that one task switch /// cannot interrupt (preempt) another task switch already in progress. - task_switch_preemption_guard: TaskSwitchPreemptionGuard, + task_switch_preemption_guard: Option, /// Data that should be dropped after switching away from a task that has exited. /// Currently, this contains the previous task's `TaskRef` that was removed /// from its TLS area during the last task switch away from it. 
- drop_after_task_switch: DropAfterTaskSwitch, + drop_after_task_switch: Option, } impl PerCpuData { @@ -89,8 +90,8 @@ impl PerCpuData { self_ptr, cpu_id: CpuLocalCpuId(cpu_id), preemption_count: 0, - task_switch_preemption_guard: TaskSwitchPreemptionGuard::new(), - drop_after_task_switch: DropAfterTaskSwitch::new(), + task_switch_preemption_guard: None, + drop_after_task_switch: None, } } } diff --git a/kernel/preemption/Cargo.toml b/kernel/preemption/Cargo.toml index 2e160a4eb6..78e22981f3 100644 --- a/kernel/preemption/Cargo.toml +++ b/kernel/preemption/Cargo.toml @@ -6,7 +6,7 @@ description = "Handles enabling and disabling preemption for each CPU core" edition = "2021" [dependencies] -cls = { path = "../cls" } +cls_macros = { path = "../cls/cls_macros" } cpu = { path = "../cpu" } [target.'cfg(target_arch = "x86_64")'.dependencies] diff --git a/kernel/preemption/src/lib.rs b/kernel/preemption/src/lib.rs index 204c1e4962..45cb7b664e 100644 --- a/kernel/preemption/src/lib.rs +++ b/kernel/preemption/src/lib.rs @@ -10,7 +10,7 @@ use cpu::CpuId; /// A reference to the preemption counter for the current CPU (in CPU-local storage). // NOTE: This offset must be kept in sync with `cpu_local::PerCpuField`. 
-#[cls::cpu_local(12)] +#[cls_macros::cpu_local(12, cls_dep = false)] static PREEMPTION_COUNT: u8 = 0; /// Prevents preemption (preemptive task switching) from occurring diff --git a/kernel/task/Cargo.toml b/kernel/task/Cargo.toml index b239940f26..9c7198c06f 100644 --- a/kernel/task/Cargo.toml +++ b/kernel/task/Cargo.toml @@ -14,6 +14,7 @@ crossbeam-utils = { version = "0.8.2", default-features = false } irq_safety = { git = "https://github.com/theseus-os/irq_safety" } context_switch = { path = "../context_switch" } +cls = { path = "../cls" } cpu = { path = "../cpu" } cpu_local = { path = "../cpu_local" } environment = { path = "../environment" } diff --git a/kernel/task/src/lib.rs b/kernel/task/src/lib.rs index 3cd44d2566..9afe27a8c5 100755 --- a/kernel/task/src/lib.rs +++ b/kernel/task/src/lib.rs @@ -912,7 +912,7 @@ fn task_switch_inner( if curr_task_has_exited { // log::trace!("[CPU {}] task_switch(): deiniting current task TLS for: {:?}, next: {}", cpu_id, curr_task_tls_slot.as_deref(), next.deref()); let prev_taskref = curr_task_tls_slot.take(); - DROP_AFTER_TASK_SWITCH.with_mut(|d| d.0 = prev_taskref); + DROP_AFTER_TASK_SWITCH.set_guarded(prev_taskref, &preemption_guard); } // Now we are done touching the current task's TLS slot, so proactively drop it now @@ -937,7 +937,7 @@ fn task_switch_inner( // Move the preemption guard into CPU-local storage such that we can retrieve it // after the actual context switch operation has completed. - TASK_SWITCH_PREEMPTION_GUARD.with_mut(|p| p.0 = Some(preemption_guard)); + TASK_SWITCH_PREEMPTION_GUARD.set(preemption_guard); #[cfg(not(simd_personality))] return Ok((prev_task_saved_sp, next_task_saved_sp)); @@ -953,70 +953,32 @@ fn task_switch_inner( /// 2. Obtains the preemption guard such that preemption can be re-enabled /// when it is appropriate to do so. 
fn post_context_switch_action() -> PreemptionGuard { - // Step 1: drop data from previously running task - { - let prev_task_data_to_drop = DROP_AFTER_TASK_SWITCH.with_mut(|d| d.0.take()); - drop(prev_task_data_to_drop); - } - - // Step 2: retake ownership of preemption guard in order to re-enable preemption. - { - TASK_SWITCH_PREEMPTION_GUARD.with_mut(|p| p.0.take()) - .expect("BUG: post_context_switch_action: no PreemptionGuard existed") - } + let guard_1 = preemption::hold_preemption(); + let guard_2 = TASK_SWITCH_PREEMPTION_GUARD + .replace_guarded(None, &guard_1) + .expect("BUG: post_context_switch_action: no PreemptionGuard existed"); + // Doesn't really matter which guard we use. + DROP_AFTER_TASK_SWITCH.set_guarded(None, &guard_2); + guard_2 } -pub use cpu_local_task_switch::*; -/// CPU-local data related to task switching. -mod cpu_local_task_switch { - use cpu_local::{CpuLocal, CpuLocalField, PerCpuField}; - use preemption::PreemptionGuard; - - /// The preemption guard that was used for safe task switching on each CPU. - /// - /// The `PreemptionGuard` is stored here right before a context switch begins - /// and then retrieved from here right after the context switch ends. - /// It is stored in a CPU-local variable because it's only related to - /// a task switching operation on a particular CPU. - pub(crate) static TASK_SWITCH_PREEMPTION_GUARD: CpuLocal = - CpuLocal::new(PerCpuField::TaskSwitchPreemptionGuard); - - /// Data that should be dropped after switching away from a task that has exited. - /// - /// Currently, this contains the previous Task's `TaskRef` removed from its TLS area; - /// it is stored in a CPU-local variable because it's only related to - /// a task switching operation on a particular CPU. - pub(crate) static DROP_AFTER_TASK_SWITCH: CpuLocal = - CpuLocal::new(PerCpuField::DropAfterTaskSwitch); - - /// A type wrapper used to hold a CPU-local `PreemptionGuard` - /// on the current CPU during a task switch operation. 
- #[derive(Default)] - pub struct TaskSwitchPreemptionGuard(pub(crate) Option); - impl TaskSwitchPreemptionGuard { - pub const fn new() -> Self { Self(None) } - } - // SAFETY: The `TaskSwitchPreemptionGuard` type corresponds to a field in `PerCpuData` - // with the offset specified by `PerCpuField::TaskSwitchPreemptionGuard`. - unsafe impl CpuLocalField for TaskSwitchPreemptionGuard { - const FIELD: PerCpuField = PerCpuField::TaskSwitchPreemptionGuard; - } - - /// A type wrapper used to hold CPU-local data that should be dropped - /// after switching away from a task that has exited. - #[derive(Default)] - pub struct DropAfterTaskSwitch(pub(crate) Option); - impl DropAfterTaskSwitch { - pub const fn new() -> Self { Self(None) } - } - // SAFETY: The `DropAfterTaskSwitch` type corresponds to a field in `PerCpuData` - // with the offset specified by `PerCpuField::DropAfterTaskSwitch`. - unsafe impl CpuLocalField for DropAfterTaskSwitch { - const FIELD: PerCpuField = PerCpuField::DropAfterTaskSwitch; - } -} - +/// The preemption guard that was used for safe task switching on each CPU. +/// +/// The `PreemptionGuard` is stored here right before a context switch begins +/// and then retrieved from here right after the context switch ends. +/// It is stored in a CPU-local variable because it's only related to +/// a task switching operation on a particular CPU. +#[cls::cpu_local(16, stores_guard = PreemptionGuard)] +static TASK_SWITCH_PREEMPTION_GUARD: Option = None; + +/// Data that should be dropped after switching away from a task that has exited. +/// +/// Currently, this contains the previous Task's `TaskRef` removed from its TLS area; +/// it is stored in a CPU-local variable because it's only related to +/// a task switching operation on a particular CPU. +#[cls::cpu_local(24)] +static DROP_AFTER_TASK_SWITCH: Option = None; pub use tls_current_task::*; /// A private module to ensure the below TLS variables aren't modified directly.