31 | 31 | #define MOZC_BASE_THREAD_H_ |
32 | 32 |
33 | 33 | #include <atomic> |
| 34 | +#include <cstdint> |
34 | 35 | #include <functional> |
35 | 36 | #include <memory> |
36 | 37 | #include <optional> |
37 | 38 | #include <utility> |
38 | 39 |
| 40 | +#include "absl/base/internal/sysinfo.h" |
39 | 41 | #include "absl/base/thread_annotations.h" |
| 42 | +#include "absl/log/check.h" |
40 | 43 | #include "absl/synchronization/mutex.h" |
41 | 44 | #include "absl/synchronization/notification.h" |
42 | 45 |
@@ -307,6 +310,83 @@ class CopyableAtomic : public std::atomic<T> { |
307 | 310 | } |
308 | 311 | }; |
309 | 312 |
| 313 | +// Simple recursive mutex based on absl::Mutex. |
| 314 | +// lock() calls from the thread that already holds the mutex do not block; |
| 315 | +// they only increase the recursion depth. Recursive locks are used only in |
| 316 | +// extremely limited situations, e.g., introducing thread safety to legacy |
| 317 | +// non-thread-safe classes without breaking the current design. |
| 318 | +class ABSL_LOCKABLE RecursiveMutex { |
| 319 | + public: |
| 320 | + ~RecursiveMutex() { |
| 321 | + // If lock() and unlock() calls are balanced, these conditions must hold here. |
| 322 | + DCHECK_EQ(owner_.load(std::memory_order_acquire), 0); |
| 323 | + DCHECK_EQ(recursion_depth_, 0); |
| 324 | + } |
| 325 | + |
| 326 | + void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { |
| 327 | + const uint32_t current_tid = GetTID(); |
| 328 | + |
| 329 | + // Check if the current thread is already the owner (reentrant acquisition). |
| 330 | + if (owner_.load(std::memory_order_acquire) == current_tid) { |
| 331 | + // Since the owner is guaranteed to be the current thread, |
| 332 | + // recursion_depth_ can be safely incremented without a lock. |
| 333 | + ++recursion_depth_; |
| 334 | + return; |
| 335 | + } |
| 336 | + |
| 337 | + // Wait until no one is the owner (owner_ == 0). |
| 338 | + mutex_.LockWhen(absl::Condition( |
| 339 | + +[](std::atomic<uint32_t>* owner) { |
| 340 | + return owner->load(std::memory_order_relaxed) == 0; |
| 341 | + }, |
| 342 | + &owner_)); |
| 343 | + |
| 344 | + // The lock has been acquired; set the owner and the initial recursion depth. |
| 345 | + DCHECK_EQ(recursion_depth_, 0); |
| 346 | + owner_.store(current_tid, std::memory_order_release); |
| 347 | + recursion_depth_ = 1; |
| 348 | + } |
| 349 | + |
| 350 | + void unlock() ABSL_UNLOCK_FUNCTION() { |
| 351 | + // Assuming the current thread is the owner, no internal Mutex lock is |
| 352 | + // needed. Decrement the depth. |
| 353 | + DCHECK(owns_lock()); |
| 354 | + if (--recursion_depth_ > 0) { |
| 355 | + return; |
| 356 | + } |
| 357 | + |
| 358 | + // The depth reached 0, so clear the owner and release the internal Mutex |
| 359 | + // so that other threads can acquire the lock. |
| 360 | + owner_.store(0, std::memory_order_release); |
| 361 | + mutex_.unlock(); |
| 362 | + } |
| 363 | + |
| 364 | + // Returns true if the mutex is owned by the current thread. |
| 365 | + bool owns_lock() const { |
| 366 | + return owner_.load(std::memory_order_acquire) == GetTID(); |
| 367 | + } |
| 368 | + |
| 369 | + // The upper-case versions are deprecated in absl::Mutex too; they are |
| 370 | + // not compatible with std::lock_guard<> / std::unique_lock<>. |
| 371 | + inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { lock(); } |
| 372 | + inline void Unlock() ABSL_UNLOCK_FUNCTION() { unlock(); } |
| 373 | + |
| 374 | + private: |
| 375 | + static inline uint32_t GetTID() { |
| 376 | + // NOLINTBEGIN(abseil-no-internal-dependencies) |
| 377 | + // Abseil's cached TID is about 100 times faster than querying the OS each time. |
| 378 | + // Since `pid_t` is not available on all platforms, use uint32_t. |
| 379 | + return static_cast<uint32_t>(absl::base_internal::GetCachedTID()); |
| 380 | + // NOLINTEND(abseil-no-internal-dependencies) |
| 381 | + } |
| 382 | + |
| 383 | + absl::Mutex mutex_; |
| 384 | + std::atomic<uint32_t> owner_{0}; |
| 385 | + // Accessed only by the thread indicated by owner_, so atomicity is |
| 386 | + // unnecessary. |
| 387 | + int recursion_depth_ = 0; |
| 388 | +}; |
| 389 | + |
310 | 390 | } // namespace mozc |
311 | 391 |
312 | 392 | #endif // MOZC_BASE_THREAD_H_ |
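
Below is a minimal usage sketch (not part of the diff) of how RecursiveMutex could retrofit thread safety onto a legacy class, as the header comment suggests. LegacyCache, its members, and the "base/thread.h" include path are illustrative assumptions; only mozc::RecursiveMutex comes from the change above. std::lock_guard works here because the class exposes the lowercase lock()/unlock() pair.

// Sketch only: LegacyCache is hypothetical; the include path is assumed
// from the header guard MOZC_BASE_THREAD_H_.
#include <map>
#include <mutex>
#include <string>

#include "base/thread.h"

class LegacyCache {
 public:
  bool Contains(const std::string& key) const {
    std::lock_guard<mozc::RecursiveMutex> guard(mutex_);
    return data_.count(key) > 0;
  }

  void Insert(const std::string& key, int value) {
    std::lock_guard<mozc::RecursiveMutex> guard(mutex_);
    // Calling Contains() here re-enters lock() on the owning thread; the
    // recursion depth goes 1 -> 2 -> 1 instead of deadlocking.
    if (!Contains(key)) {
      data_[key] = value;
    }
  }

 private:
  mutable mozc::RecursiveMutex mutex_;  // mutable so const methods can lock.
  std::map<std::string, int> data_;
};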