From c0dc73c842143250af4b0a42c1b40ea84fe614b5 Mon Sep 17 00:00:00 2001 From: Zac Harrold Date: Mon, 2 Jun 2025 11:48:45 +1000 Subject: [PATCH 1/5] MVP `no_std` for `wgpu-core` --- .github/workflows/ci.yml | 2 + Cargo.lock | 3 + Cargo.toml | 2 + wgpu-core/Cargo.toml | 26 ++- wgpu-core/src/lock/observing.rs | 27 +-- wgpu-core/src/lock/ranked.rs | 27 +-- wgpu-core/src/lock/vanilla.rs | 168 ++++++++++++++++--- wgpu-core/src/pool.rs | 50 +++++- wgpu-core/src/timestamp_normalization/mod.rs | 3 + 9 files changed, 249 insertions(+), 59 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c9449f0a118..310ffc10a49 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -303,11 +303,13 @@ jobs: cargo clippy --target ${{ matrix.target }} ${{ matrix.extra-flags }} -p wgpu-types --no-default-features cargo clippy --target ${{ matrix.target }} ${{ matrix.extra-flags }} -p naga --no-default-features cargo clippy --target ${{ matrix.target }} ${{ matrix.extra-flags }} -p wgpu-hal --no-default-features + cargo clippy --target ${{ matrix.target }} ${{ matrix.extra-flags }} -p wgpu-core --no-default-features # Check with all compatible features cargo clippy --target ${{ matrix.target }} ${{ matrix.extra-flags }} -p wgpu-types --no-default-features --features strict_asserts,fragile-send-sync-non-atomic-wasm,serde,counters cargo clippy --target ${{ matrix.target }} ${{ matrix.extra-flags }} -p naga --no-default-features --features dot-out,compact cargo clippy --target ${{ matrix.target }} ${{ matrix.extra-flags }} -p wgpu-hal --no-default-features --features fragile-send-sync-non-atomic-wasm + cargo clippy --target ${{ matrix.target }} ${{ matrix.extra-flags }} -p wgpu-core --no-default-features --features spin,once_cell,once_cell/critical-section,api_log_info,resource_log_info,strict_asserts,serde,replay,raw-window-handle,counters # Building for native platforms with standard tests. 
- name: Check native diff --git a/Cargo.lock b/Cargo.lock index 43d19fb4c98..2ff55d66431 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4805,12 +4805,14 @@ dependencies = [ "bit-vec 0.8.0", "bitflags 2.9.1", "bytemuck", + "cfg-if", "cfg_aliases 0.2.1", "document-features", "hashbrown", "indexmap", "log", "naga", + "num-traits", "once_cell", "parking_lot", "portable-atomic", @@ -4820,6 +4822,7 @@ dependencies = [ "rustc-hash", "serde", "smallvec", + "spin", "thiserror 2.0.12", "wgpu-core-deps-apple", "wgpu-core-deps-emscripten", diff --git a/Cargo.toml b/Cargo.toml index ddf412c74b0..79cc36ec5d0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -178,6 +178,8 @@ serde_json = "1.0.118" serde = { version = "1.0.219", default-features = false } shell-words = "1" smallvec = "1.9" +# NOTE: `crossbeam-deque` currently relies on this version of spin +spin = { version = "0.9", default-features = false } spirv = "0.3" static_assertions = "1.1" strum = { version = "0.27", default-features = false, features = ["derive"] } diff --git a/wgpu-core/Cargo.toml b/wgpu-core/Cargo.toml index ee131dc9f82..5748b87b2d2 100644 --- a/wgpu-core/Cargo.toml +++ b/wgpu-core/Cargo.toml @@ -40,7 +40,7 @@ unexpected_cfgs = { level = "warn", check-cfg = ['cfg(wgpu_validate_locks)'] } # TODO(https://github.com/gfx-rs/wgpu/issues/6826): "std" is a default feature for # compatibility with prior behavior only, and should be removed once we know how # wgpu-core’s dependents want to handle no_std. -default = ["std"] +default = ["std", "parking_lot", "once_cell"] #! ### Logging Configuration # -------------------------------------------------------------------- @@ -117,7 +117,7 @@ fragile-send-sync-non-atomic-wasm = [ ## Enable certain items to be `Send` and `Sync` when they would not otherwise be. ## Also enables backtraces in some error cases when also under cfg(debug_assertions). -std = [] +std = ["once_cell?/std"] #! 
### External libraries # -------------------------------------------------------------------- @@ -129,6 +129,15 @@ static-dxc = ["wgpu-hal/static-dxc"] ## Enable portable atomics on platforms that do not support 64bit atomics. portable-atomic = ["dep:portable-atomic", "wgpu-hal/portable-atomic"] +## Enables the `parking_lot` set of locking primitives. +parking_lot = ["dep:parking_lot"] + +## Enables the `spin` set of locking primitives. +spin = ["dep:spin"] + +## Enables `once_cell` initialization primitives. +once_cell = ["dep:once_cell"] + #! ### Target Conditional Features # -------------------------------------------------------------------- # Look to wgpu-hal's Cargo.toml for explaination how these features and the wgpu-core @@ -181,18 +190,25 @@ bit-vec.workspace = true bit-set.workspace = true bitflags.workspace = true bytemuck = { workspace = true, optional = true } +cfg-if.workspace = true document-features.workspace = true hashbrown.workspace = true indexmap.workspace = true log.workspace = true -once_cell = { workspace = true, features = ["std"] } -parking_lot.workspace = true +once_cell = { workspace = true, optional = true } +num-traits = { workspace = true } +parking_lot = { workspace = true, optional = true } profiling = { workspace = true, default-features = false } raw-window-handle = { workspace = true, optional = true } ron = { workspace = true, optional = true } rustc-hash.workspace = true -serde = { workspace = true, features = ["default", "derive"], optional = true } +serde = { workspace = true, features = ["derive"], optional = true } smallvec.workspace = true +spin = { workspace = true, features = [ + "rwlock", + "mutex", + "spin_mutex", +], optional = true } thiserror.workspace = true [target.'cfg(not(target_has_atomic = "64"))'.dependencies] diff --git a/wgpu-core/src/lock/observing.rs b/wgpu-core/src/lock/observing.rs index 108bdbb69e3..c05c598ff32 100644 --- a/wgpu-core/src/lock/observing.rs +++ b/wgpu-core/src/lock/observing.rs @@ 
-36,34 +36,35 @@ use std::{ }; use super::rank::{LockRank, LockRankSet}; +use super::vanilla; use crate::FastHashSet; /// A `Mutex` instrumented for lock acquisition order observation. /// -/// This is just a wrapper around a [`parking_lot::Mutex`], along with +/// This is just a wrapper around a [`vanilla::Mutex`], along with /// its rank in the `wgpu_core` lock ordering. /// /// For details, see [the module documentation][self]. pub struct Mutex { - inner: parking_lot::Mutex, + inner: vanilla::Mutex, rank: LockRank, } /// A guard produced by locking [`Mutex`]. /// -/// This is just a wrapper around a [`parking_lot::MutexGuard`], along +/// This is just a wrapper around a [`vanilla::MutexGuard`], along /// with the state needed to track lock acquisition. /// /// For details, see [the module documentation][self]. pub struct MutexGuard<'a, T> { - inner: parking_lot::MutexGuard<'a, T>, + inner: vanilla::MutexGuard<'a, T>, _state: LockStateGuard, } impl Mutex { pub fn new(rank: LockRank, value: T) -> Mutex { Mutex { - inner: parking_lot::Mutex::new(value), + inner: vanilla::Mutex::new(rank, value), rank, } } @@ -104,41 +105,41 @@ impl core::fmt::Debug for Mutex { /// An `RwLock` instrumented for lock acquisition order observation. /// -/// This is just a wrapper around a [`parking_lot::RwLock`], along with +/// This is just a wrapper around a [`vanilla::RwLock`], along with /// its rank in the `wgpu_core` lock ordering. /// /// For details, see [the module documentation][self]. pub struct RwLock { - inner: parking_lot::RwLock, + inner: vanilla::RwLock, rank: LockRank, } /// A read guard produced by locking [`RwLock`] for reading. /// -/// This is just a wrapper around a [`parking_lot::RwLockReadGuard`], along with +/// This is just a wrapper around a [`vanilla::RwLockReadGuard`], along with /// the state needed to track lock acquisition. /// /// For details, see [the module documentation][self]. 
pub struct RwLockReadGuard<'a, T> { - inner: parking_lot::RwLockReadGuard<'a, T>, + inner: vanilla::RwLockReadGuard<'a, T>, _state: LockStateGuard, } /// A write guard produced by locking [`RwLock`] for writing. /// -/// This is just a wrapper around a [`parking_lot::RwLockWriteGuard`], along +/// This is just a wrapper around a [`vanilla::RwLockWriteGuard`], along /// with the state needed to track lock acquisition. /// /// For details, see [the module documentation][self]. pub struct RwLockWriteGuard<'a, T> { - inner: parking_lot::RwLockWriteGuard<'a, T>, + inner: vanilla::RwLockWriteGuard<'a, T>, _state: LockStateGuard, } impl RwLock { pub fn new(rank: LockRank, value: T) -> RwLock { RwLock { - inner: parking_lot::RwLock::new(value), + inner: vanilla::RwLock::new(rank, value), rank, } } @@ -165,7 +166,7 @@ impl RwLock { impl<'a, T> RwLockWriteGuard<'a, T> { pub fn downgrade(this: Self) -> RwLockReadGuard<'a, T> { RwLockReadGuard { - inner: parking_lot::RwLockWriteGuard::downgrade(this.inner), + inner: vanilla::RwLockWriteGuard::downgrade(this.inner), _state: this._state, } } diff --git a/wgpu-core/src/lock/ranked.rs b/wgpu-core/src/lock/ranked.rs index 6f368c8f348..86853dd68a2 100644 --- a/wgpu-core/src/lock/ranked.rs +++ b/wgpu-core/src/lock/ranked.rs @@ -58,26 +58,27 @@ use core::{cell::Cell, fmt, ops, panic::Location}; use super::rank::LockRank; +use super::vanilla; /// A `Mutex` instrumented for deadlock prevention. /// -/// This is just a wrapper around a [`parking_lot::Mutex`], along with +/// This is just a wrapper around a [`vanilla::Mutex`], along with /// its rank in the `wgpu_core` lock ordering. /// /// For details, see [the module documentation][self]. pub struct Mutex { - inner: parking_lot::Mutex, + inner: vanilla::Mutex, rank: LockRank, } /// A guard produced by locking [`Mutex`]. 
/// -/// This is just a wrapper around a [`parking_lot::MutexGuard`], along +/// This is just a wrapper around a [`vanilla::MutexGuard`], along /// with the state needed to track lock acquisition. /// /// For details, see [the module documentation][self]. pub struct MutexGuard<'a, T> { - inner: parking_lot::MutexGuard<'a, T>, + inner: vanilla::MutexGuard<'a, T>, saved: LockStateGuard, } @@ -177,7 +178,7 @@ fn release(saved: LockState) { impl Mutex { pub fn new(rank: LockRank, value: T) -> Mutex { Mutex { - inner: parking_lot::Mutex::new(value), + inner: vanilla::Mutex::new(rank, value), rank, } } @@ -214,41 +215,41 @@ impl fmt::Debug for Mutex { /// An `RwLock` instrumented for deadlock prevention. /// -/// This is just a wrapper around a [`parking_lot::RwLock`], along with +/// This is just a wrapper around a [`vanilla::RwLock`], along with /// its rank in the `wgpu_core` lock ordering. /// /// For details, see [the module documentation][self]. pub struct RwLock { - inner: parking_lot::RwLock, + inner: vanilla::RwLock, rank: LockRank, } /// A read guard produced by locking [`RwLock`] for reading. /// -/// This is just a wrapper around a [`parking_lot::RwLockReadGuard`], along with +/// This is just a wrapper around a [`vanilla::RwLockReadGuard`], along with /// the state needed to track lock acquisition. /// /// For details, see [the module documentation][self]. pub struct RwLockReadGuard<'a, T> { - inner: parking_lot::RwLockReadGuard<'a, T>, + inner: vanilla::RwLockReadGuard<'a, T>, saved: LockStateGuard, } /// A write guard produced by locking [`RwLock`] for writing. /// -/// This is just a wrapper around a [`parking_lot::RwLockWriteGuard`], along +/// This is just a wrapper around a [`vanilla::RwLockWriteGuard`], along /// with the state needed to track lock acquisition. /// /// For details, see [the module documentation][self]. 
pub struct RwLockWriteGuard<'a, T> { - inner: parking_lot::RwLockWriteGuard<'a, T>, + inner: vanilla::RwLockWriteGuard<'a, T>, saved: LockStateGuard, } impl RwLock { pub fn new(rank: LockRank, value: T) -> RwLock { RwLock { - inner: parking_lot::RwLock::new(value), + inner: vanilla::RwLock::new(rank, value), rank, } } @@ -275,7 +276,7 @@ impl RwLock { impl<'a, T> RwLockWriteGuard<'a, T> { pub fn downgrade(this: Self) -> RwLockReadGuard<'a, T> { RwLockReadGuard { - inner: parking_lot::RwLockWriteGuard::downgrade(this.inner), + inner: vanilla::RwLockWriteGuard::downgrade(this.inner), saved: this.saved, } } diff --git a/wgpu-core/src/lock/vanilla.rs b/wgpu-core/src/lock/vanilla.rs index c085d0f9532..2ae3d39757e 100644 --- a/wgpu-core/src/lock/vanilla.rs +++ b/wgpu-core/src/lock/vanilla.rs @@ -1,13 +1,54 @@ -//! Plain, uninstrumented wrappers around [`parking_lot`] lock types. -//! +//! Plain, uninstrumented wrappers around a particular implementation of lock types. //! These definitions are used when no particular lock instrumentation //! Cargo feature is selected. +//! The exact implementation used depends on the following features: +//! +//! 1. [`parking_lot`] (default) +//! 2. [`std`] +//! 3. [`spin`] +//! 4. [`RefCell`](core::cell::RefCell) (fallback) +//! +//! These are ordered by priority. +//! For example if `parking_lot` and `std` are both enabled, `parking_lot` will +//! be used as the implementation. +//! +//! Generally you should use `parking_lot` for the optimal performance, at the +//! expense of reduced target compatibility. +//! In contrast, `spin` provides the best compatibility (e.g., `no_std`) in exchange +//! for potentially worse performance. +//! If no implementation is chosen, [`RefCell`](core::cell::RefCell) will be used +//! as a fallback. +//! Note that the fallback implementation is _not_ [`Sync`] and will [spin](core::hint::spin_loop) +//! when a lock is contested. +//! +//! [`parking_lot`]: https://docs.rs/parking_lot/ +//! 
[`std`]: https://docs.rs/std/ +//! [`spin`]: https://docs.rs/spin/ use core::{fmt, ops}; -/// A plain wrapper around [`parking_lot::Mutex`]. +cfg_if::cfg_if! { + if #[cfg(feature = "parking_lot")] { + use parking_lot as implementation; + } else if #[cfg(feature = "std")] { + use std::sync as implementation; + } else if #[cfg(feature = "spin")] { + use spin as implementation; + } else { + mod implementation { + pub(super) use core::cell::RefCell as Mutex; + pub(super) use core::cell::RefMut as MutexGuard; + + pub(super) use core::cell::RefCell as RwLock; + pub(super) use core::cell::Ref as RwLockReadGuard; + pub(super) use core::cell::RefMut as RwLockWriteGuard; + } + } +} + +/// A plain wrapper around [`implementation::Mutex`]. /// -/// This is just like [`parking_lot::Mutex`], except that our [`new`] +/// This is just like [`implementation::Mutex`], except that our [`new`] /// method takes a rank, indicating where the new mutex should sit in /// `wgpu-core`'s lock ordering. The rank is ignored. /// @@ -15,24 +56,46 @@ use core::{fmt, ops}; /// /// [`new`]: Mutex::new /// [`lock`]: crate::lock -pub struct Mutex<T>(parking_lot::Mutex<T>); +pub struct Mutex<T>(implementation::Mutex<T>); /// A guard produced by locking [`Mutex`]. /// -/// This is just a wrapper around a [`parking_lot::MutexGuard`]. -pub struct MutexGuard<'a, T>(parking_lot::MutexGuard<'a, T>); +/// This is just a wrapper around a [`implementation::MutexGuard`]. +pub struct MutexGuard<'a, T>(implementation::MutexGuard<'a, T>); impl<T> Mutex<T> { pub fn new(_rank: super::rank::LockRank, value: T) -> Mutex<T> { - Mutex(parking_lot::Mutex::new(value)) + Mutex(implementation::Mutex::new(value)) } pub fn lock(&self) -> MutexGuard<T> { - MutexGuard(self.0.lock()) + cfg_if::cfg_if!
{ + if #[cfg(feature = "parking_lot")] { + let lock = self.0.lock(); + } else if #[cfg(feature = "std")] { + let lock = self.0.lock().unwrap_or_else(std::sync::PoisonError::into_inner); + } else if #[cfg(feature = "spin")] { + let lock = self.0.lock(); + } else { + let lock = loop { + if let Ok(lock) = self.0.try_borrow_mut() { + break lock; + } + core::hint::spin_loop(); + }; + } + } + + MutexGuard(lock) } pub fn into_inner(self) -> T { - self.0.into_inner() + let inner = self.0.into_inner(); + + #[cfg(feature = "std")] + let inner = inner.unwrap_or_else(std::sync::PoisonError::into_inner); + + inner } } @@ -56,9 +119,9 @@ impl fmt::Debug for Mutex { } } -/// A plain wrapper around [`parking_lot::RwLock`]. +/// A plain wrapper around [`implementation::RwLock`]. /// -/// This is just like [`parking_lot::RwLock`], except that our [`new`] +/// This is just like [`implementation::RwLock`], except that our [`new`] /// method takes a rank, indicating where the new mutex should sit in /// `wgpu-core`'s lock ordering. The rank is ignored. /// @@ -66,35 +129,92 @@ impl fmt::Debug for Mutex { /// /// [`new`]: RwLock::new /// [`lock`]: crate::lock -pub struct RwLock(parking_lot::RwLock); +pub struct RwLock(implementation::RwLock); /// A read guard produced by locking [`RwLock`] as a reader. /// -/// This is just a wrapper around a [`parking_lot::RwLockReadGuard`]. -pub struct RwLockReadGuard<'a, T>(parking_lot::RwLockReadGuard<'a, T>); +/// This is just a wrapper around a [`implementation::RwLockReadGuard`]. +pub struct RwLockReadGuard<'a, T> { + guard: implementation::RwLockReadGuard<'a, T>, +} /// A write guard produced by locking [`RwLock`] as a writer. /// -/// This is just a wrapper around a [`parking_lot::RwLockWriteGuard`]. -pub struct RwLockWriteGuard<'a, T>(parking_lot::RwLockWriteGuard<'a, T>); +/// This is just a wrapper around a [`implementation::RwLockWriteGuard`]. 
+pub struct RwLockWriteGuard<'a, T> { + guard: implementation::RwLockWriteGuard<'a, T>, + /// Allows for a safe `downgrade` method without `parking_lot` + #[cfg(not(feature = "parking_lot"))] + lock: &'a RwLock, +} impl RwLock { pub fn new(_rank: super::rank::LockRank, value: T) -> RwLock { - RwLock(parking_lot::RwLock::new(value)) + RwLock(implementation::RwLock::new(value)) } pub fn read(&self) -> RwLockReadGuard { - RwLockReadGuard(self.0.read()) + cfg_if::cfg_if! { + if #[cfg(feature = "parking_lot")] { + let guard = self.0.read(); + } else if #[cfg(feature = "std")] { + let guard = self.0.read().unwrap_or_else(std::sync::PoisonError::into_inner); + } else if #[cfg(feature = "spin")] { + let guard = self.0.read(); + } else { + let guard = loop { + if let Ok(guard) = self.0.try_borrow() { + break guard; + } + core::hint::spin_loop(); + }; + } + } + + RwLockReadGuard { guard } } pub fn write(&self) -> RwLockWriteGuard { - RwLockWriteGuard(self.0.write()) + cfg_if::cfg_if! { + if #[cfg(feature = "parking_lot")] { + let guard = self.0.write(); + } else if #[cfg(feature = "std")] { + let guard = self.0.write().unwrap_or_else(std::sync::PoisonError::into_inner); + } else if #[cfg(feature = "spin")] { + let guard = self.0.write(); + } else { + let guard = loop { + if let Ok(guard) = self.0.try_borrow_mut() { + break guard; + } + core::hint::spin_loop(); + }; + } + } + + RwLockWriteGuard { + guard, + #[cfg(not(feature = "parking_lot"))] + lock: self, + } } } impl<'a, T> RwLockWriteGuard<'a, T> { pub fn downgrade(this: Self) -> RwLockReadGuard<'a, T> { - RwLockReadGuard(parking_lot::RwLockWriteGuard::downgrade(this.0)) + cfg_if::cfg_if! { + if #[cfg(feature = "parking_lot")] { + RwLockReadGuard(implementation::RwLockWriteGuard::downgrade(this.guard)) + } else { + let RwLockWriteGuard { guard, lock } = this; + + // FIXME(https://github.com/rust-lang/rust/issues/128203): Replace with `RwLockWriteGuard::downgrade` once stable. 
+ // This implementation allows for a different thread to "steal" the lock in-between the drop and the read. + // Ideally, `downgrade` should hold the lock the entire time, maintaining uninterrupted custody. + drop(guard); + lock.read() + } + } } } @@ -108,7 +228,7 @@ impl<'a, T> ops::Deref for RwLockReadGuard<'a, T> { type Target = T; fn deref(&self) -> &Self::Target { - self.0.deref() + self.guard.deref() } } @@ -116,12 +236,12 @@ impl<'a, T> ops::Deref for RwLockWriteGuard<'a, T> { type Target = T; fn deref(&self) -> &Self::Target { - self.0.deref() + self.guard.deref() } } impl<'a, T> ops::DerefMut for RwLockWriteGuard<'a, T> { fn deref_mut(&mut self) -> &mut Self::Target { - self.0.deref_mut() + self.guard.deref_mut() } } diff --git a/wgpu-core/src/pool.rs b/wgpu-core/src/pool.rs index ae5272e09bc..bc34daeaf15 100644 --- a/wgpu-core/src/pool.rs +++ b/wgpu-core/src/pool.rs @@ -2,11 +2,19 @@ use alloc::sync::{Arc, Weak}; use core::hash::Hash; use hashbrown::{hash_map::Entry, HashMap}; -use once_cell::sync::OnceCell; use crate::lock::{rank, Mutex}; use crate::FastHashMap; +cfg_if::cfg_if! { + if #[cfg(feature = "once_cell")] { + use once_cell::sync::OnceCell; + } else { + // NOTE: Unlike `once_cell`, the `OnceCell` from `core` is _not_ `Sync`. + use core::cell::OnceCell; + } +} + type SlotInner = Weak; type ResourcePoolSlot = Arc>>; @@ -60,12 +68,46 @@ impl ResourcePool { // // We pass the strong reference outside of the closure to keep it alive while we're the only one keeping a reference to it. 
let mut strong = None; - let weak = entry.get_or_try_init(|| { - let strong_inner = constructor.take().unwrap()(key.take().unwrap())?; + #[cfg(not(feature = "once_cell"))] + let mut removal_key = None; + let mut try_constructor = || { + let key = key.take().unwrap(); + + #[cfg(not(feature = "once_cell"))] + { + removal_key = Some(key.clone()); + } + + let strong_inner = constructor.take().unwrap()(key)?; let weak = Arc::downgrade(&strong_inner); strong = Some(strong_inner); Ok(weak) - })?; + }; + cfg_if::cfg_if! { + if #[cfg(feature = "once_cell")] { + let weak = entry.get_or_try_init(f)?; + } else { + // FIXME(https://github.com/rust-lang/rust/issues/109737): use `get_or_try_init` once stable. + let mut error = None; + let weak = entry.get_or_init(|| { + try_constructor().unwrap_or_else(|err| { + error = Some(err); + Weak::new() + }) + }); + if let Some(error) = error { + // Since `error` is `Some(...)`, then `try_constructor` was called, meaning + // this entry now contains a dead-end `Weak`. + // To maintain consistency with `get_or_try_init`, we replace the dead-end + // `OnceCell` with a fresh one ready for another call to `get_or_init` to + // initialize. + let key = removal_key.unwrap(); + let mut map_guard = self.inner.lock(); + map_guard.insert(key, Arc::new(OnceCell::new())); + return Err(error); + } + } + } // If strong is Some, that means we just initialized the entry, so we can just return it.
if let Some(strong) = strong { diff --git a/wgpu-core/src/timestamp_normalization/mod.rs b/wgpu-core/src/timestamp_normalization/mod.rs index dd4d466235c..14645877307 100644 --- a/wgpu-core/src/timestamp_normalization/mod.rs +++ b/wgpu-core/src/timestamp_normalization/mod.rs @@ -380,6 +380,9 @@ impl TimestampNormalizationBindGroup { } fn compute_timestamp_period(input: f32) -> (u32, u32) { + #[cfg(not(feature = "std"))] + use num_traits::float::Float as _; + let pow2 = input.log2().ceil() as i32; let clamped_pow2 = pow2.clamp(-32, 32).unsigned_abs(); let shift = 32 - clamped_pow2; From 069196d6f81a31e0a8fd86b42bb43200a0d3bf42 Mon Sep 17 00:00:00 2001 From: Zac Harrold Date: Mon, 2 Jun 2025 12:34:13 +1000 Subject: [PATCH 2/5] CI Feedback --- Cargo.toml | 2 +- wgpu-core/src/lock/vanilla.rs | 4 ++-- wgpu-core/src/pool.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 79cc36ec5d0..26839d8fba8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -179,7 +179,7 @@ serde = { version = "1.0.219", default-features = false } shell-words = "1" smallvec = "1.9" # NOTE: `crossbeam-deque` currently relies on this version of spin -spin = { version = "0.9", default-features = false } +spin = { version = "0.9.8", default-features = false } spirv = "0.3" static_assertions = "1.1" strum = { version = "0.27", default-features = false, features = ["derive"] } diff --git a/wgpu-core/src/lock/vanilla.rs b/wgpu-core/src/lock/vanilla.rs index 2ae3d39757e..563863b5738 100644 --- a/wgpu-core/src/lock/vanilla.rs +++ b/wgpu-core/src/lock/vanilla.rs @@ -92,7 +92,7 @@ impl Mutex { pub fn into_inner(self) -> T { let inner = self.0.into_inner(); - #[cfg(feature = "std")] + #[cfg(all(feature = "std", not(feature = "parking_lot")))] let inner = inner.unwrap_or_else(std::sync::PoisonError::into_inner); inner @@ -204,7 +204,7 @@ impl<'a, T> RwLockWriteGuard<'a, T> { pub fn downgrade(this: Self) -> RwLockReadGuard<'a, T> { cfg_if::cfg_if! 
{ if #[cfg(feature = "parking_lot")] { - RwLockReadGuard(implementation::RwLockWriteGuard::downgrade(this.guard)) + RwLockReadGuard { guard: implementation::RwLockWriteGuard::downgrade(this.guard) } } else { let RwLockWriteGuard { guard, lock } = this; diff --git a/wgpu-core/src/pool.rs b/wgpu-core/src/pool.rs index bc34daeaf15..664305eba1c 100644 --- a/wgpu-core/src/pool.rs +++ b/wgpu-core/src/pool.rs @@ -85,7 +85,7 @@ impl ResourcePool { }; cfg_if::cfg_if! { if #[cfg(feature = "once_cell")] { - let weak = entry.get_or_try_init(f)?; + let weak = entry.get_or_try_init(try_constructor)?; } else { // FIXME(https://github.com/rust-lang/rust/issues/109737): use `get_or_try_init` once stable. let mut error = None; From 9368f26d874e649bb1b68d6261edcfaa85ea878d Mon Sep 17 00:00:00 2001 From: Zac Harrold Date: Mon, 2 Jun 2025 12:36:22 +1000 Subject: [PATCH 3/5] Address `unused_mut` --- wgpu-core/src/pool.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/wgpu-core/src/pool.rs b/wgpu-core/src/pool.rs index 664305eba1c..959344055c3 100644 --- a/wgpu-core/src/pool.rs +++ b/wgpu-core/src/pool.rs @@ -70,7 +70,7 @@ impl ResourcePool { let mut strong = None; #[cfg(not(feature = "once_cell"))] let mut removal_key = None; - let mut try_constructor = || { + let try_constructor = || { let key = key.take().unwrap(); #[cfg(not(feature = "once_cell"))] @@ -88,6 +88,7 @@ impl ResourcePool { let weak = entry.get_or_try_init(try_constructor)?; } else { // FIXME(https://github.com/rust-lang/rust/issues/109737): use `get_or_try_init` once stable. 
+ let mut try_constructor = try_constructor; let mut error = None; let weak = entry.get_or_init(|| { try_constructor().unwrap_or_else(|err| { From 89e256adebc23f8477ca81e1e9b0ce31f854548f Mon Sep 17 00:00:00 2001 From: Zac Harrold Date: Fri, 6 Jun 2025 10:28:59 +1000 Subject: [PATCH 4/5] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a951f182938..6e1bbfed835 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -87,6 +87,7 @@ Naga now infers the correct binding layout when a resource appears only in an as - Removed `MaintainBase` in favor of using `PollType`. By @waywardmonkeys in [#7508](https://github.com/gfx-rs/wgpu/pull/7508). - The `destroy` functions for buffers and textures in wgpu-core are now infallible. Previously, they returned an error if called multiple times for the same object. This only affects the wgpu-core API; the wgpu API already allowed multiple `destroy` calls. By @andyleiserson in [#7686](https://github.com/gfx-rs/wgpu/pull/7686) and [#7720](https://github.com/gfx-rs/wgpu/pull/7720). +- Gated usage of `once_cell` and `parking_lot` in `wgpu-core` under features of the same name, enabling `no_std` support. By @bushrat011899 in [#7746](https://github.com/gfx-rs/wgpu/pull/7746). #### Naga From c186bfbf1f82ce64b92ba915e375ed2caef44392 Mon Sep 17 00:00:00 2001 From: Zac Harrold Date: Tue, 10 Jun 2025 09:50:42 +1000 Subject: [PATCH 5/5] Document cross-feature interactions and fallback behaviour --- wgpu-core/Cargo.toml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/wgpu-core/Cargo.toml b/wgpu-core/Cargo.toml index 5748b87b2d2..c50472e110e 100644 --- a/wgpu-core/Cargo.toml +++ b/wgpu-core/Cargo.toml @@ -130,12 +130,22 @@ static-dxc = ["wgpu-hal/static-dxc"] portable-atomic = ["dep:portable-atomic", "wgpu-hal/portable-atomic"] ## Enables the `parking_lot` set of locking primitives. 
+## This is the recommended implementation and will be used in preference to +## any other implementation. +## Will fallback to a `RefCell` based implementation which is `!Sync` when no +## alternative feature is enabled. parking_lot = ["dep:parking_lot"] ## Enables the `spin` set of locking primitives. +## This is generally only useful for `no_std` targets, and will be unused if +## either `std` or `parking_lot` are available. +## Will fallback to a `RefCell` based implementation which is `!Sync` when no +## alternative feature is enabled. spin = ["dep:spin"] ## Enables `once_cell` initialization primitives. +## This allows the `ResourcePool` to be `Sync`, but requires either `std` or +## `once_cell/critical-section` to be enabled. once_cell = ["dep:once_cell"] #! ### Target Conditional Features