diff --git a/lock_api/src/mutex.rs b/lock_api/src/mutex.rs
index 7100f0e9..782c0673 100644
--- a/lock_api/src/mutex.rs
+++ b/lock_api/src/mutex.rs
@@ -79,14 +79,23 @@ pub unsafe trait RawMutex {
 /// unlocking, but may be necessary in certain circumstances.
 pub unsafe trait RawMutexFair: RawMutex {
     /// Unlocks this mutex using a fair unlock protocol.
-    fn unlock_fair(&self);
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if the mutex is held in the current context, see
+    /// the documentation of [`unlock`].
+    ///
+    /// [`unlock`]: trait.RawMutex.html#tymethod.unlock
+    unsafe fn unlock_fair(&self);
 
     /// Temporarily yields the mutex to a waiting thread if there is one.
     ///
     /// This method is functionally equivalent to calling `unlock_fair` followed
     /// by `lock`, however it can be much more efficient in the case where there
     /// are no waiting threads.
-    fn bump(&self) {
+    ///
+    /// [`unlock`]: trait.RawMutex.html#tymethod.unlock
+    unsafe fn bump(&self) {
         self.unlock_fair();
         self.lock();
     }
@@ -473,7 +482,10 @@ impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
     /// using this method instead of dropping the `MutexGuard` normally.
     #[inline]
     pub fn unlock_fair(s: Self) {
-        s.mutex.raw.unlock_fair();
+        // Safety: A MutexGuard always holds the lock.
+        unsafe {
+            s.mutex.raw.unlock_fair();
+        }
         mem::forget(s);
     }
 
@@ -488,7 +500,10 @@ impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
     where
         F: FnOnce() -> U,
     {
-        s.mutex.raw.unlock_fair();
+        // Safety: A MutexGuard always holds the lock.
+        unsafe {
+            s.mutex.raw.unlock_fair();
+        }
         defer!(s.mutex.raw.lock());
         f()
     }
@@ -500,7 +515,10 @@
     /// are no waiting threads.
     #[inline]
     pub fn bump(s: &mut Self) {
-        s.mutex.raw.bump();
+        // Safety: A MutexGuard always holds the lock.
+        unsafe {
+            s.mutex.raw.bump();
+        }
     }
 }
 
@@ -634,7 +652,10 @@ impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> {
     /// using this method instead of dropping the `MutexGuard` normally.
     #[inline]
     pub fn unlock_fair(s: Self) {
-        s.raw.unlock_fair();
+        // Safety: A MutexGuard always holds the lock.
+        unsafe {
+            s.raw.unlock_fair();
+        }
         mem::forget(s);
     }
 }
diff --git a/lock_api/src/remutex.rs b/lock_api/src/remutex.rs
index c132b814..79b8f424 100644
--- a/lock_api/src/remutex.rs
+++ b/lock_api/src/remutex.rs
@@ -141,8 +141,12 @@ impl RawReentrantMutex {
     /// Unlocks this mutex using a fair unlock protocol. The inner mutex
     /// may not be unlocked if this mutex was acquired previously in the
     /// current thread.
+    ///
+    /// # Safety
+    ///
+    /// This method may only be called if the mutex is held by the current thread.
     #[inline]
-    pub fn unlock_fair(&self) {
+    pub unsafe fn unlock_fair(&self) {
         let lock_count = self.lock_count.get() - 1;
         self.lock_count.set(lock_count);
         if lock_count == 0 {
@@ -156,8 +160,10 @@
     /// This method is functionally equivalent to calling `unlock_fair` followed
     /// by `lock`, however it can be much more efficient in the case where there
     /// are no waiting threads.
+    ///
+    /// This method may only be called if the mutex is held by the current thread.
     #[inline]
-    pub fn bump(&self) {
+    pub unsafe fn bump(&self) {
         if self.lock_count.get() == 1 {
             let id = self.owner.load(Ordering::Relaxed);
             self.owner.store(0, Ordering::Relaxed);
@@ -584,7 +590,10 @@ impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
     /// using this method instead of dropping the `ReentrantMutexGuard` normally.
     #[inline]
     pub fn unlock_fair(s: Self) {
-        s.remutex.raw.unlock_fair();
+        // Safety: A ReentrantMutexGuard always holds the lock
+        unsafe {
+            s.remutex.raw.unlock_fair();
+        }
         mem::forget(s);
     }
 
@@ -599,7 +608,10 @@ impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
     where
         F: FnOnce() -> U,
     {
-        s.remutex.raw.unlock_fair();
+        // Safety: A ReentrantMutexGuard always holds the lock
+        unsafe {
+            s.remutex.raw.unlock_fair();
+        }
         defer!(s.remutex.raw.lock());
         f()
     }
@@ -611,7 +623,10 @@ impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
     /// are no waiting threads.
     #[inline]
     pub fn bump(s: &mut Self) {
-        s.remutex.raw.bump();
+        // Safety: A ReentrantMutexGuard always holds the lock
+        unsafe {
+            s.remutex.raw.bump();
+        }
     }
 }
 
@@ -752,7 +767,10 @@ impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
     /// using this method instead of dropping the `ReentrantMutexGuard` normally.
     #[inline]
     pub fn unlock_fair(s: Self) {
-        s.raw.unlock_fair();
+        // Safety: A MappedReentrantMutexGuard always holds the lock
+        unsafe {
+            s.raw.unlock_fair();
+        }
         mem::forget(s);
     }
 }
diff --git a/src/raw_fair_mutex.rs b/src/raw_fair_mutex.rs
index 0ca2a439..0da6828e 100644
--- a/src/raw_fair_mutex.rs
+++ b/src/raw_fair_mutex.rs
@@ -39,12 +39,12 @@ unsafe impl lock_api::RawMutex for RawFairMutex {
 
 unsafe impl lock_api::RawMutexFair for RawFairMutex {
     #[inline]
-    fn unlock_fair(&self) {
+    unsafe fn unlock_fair(&self) {
         self.0.unlock_fair()
     }
 
     #[inline]
-    fn bump(&self) {
+    unsafe fn bump(&self) {
         self.0.bump()
     }
 }
diff --git a/src/raw_mutex.rs b/src/raw_mutex.rs
index a8b08f41..06667d32 100644
--- a/src/raw_mutex.rs
+++ b/src/raw_mutex.rs
@@ -118,8 +118,8 @@ unsafe impl lock_api::RawMutex for RawMutex {
 
 unsafe impl lock_api::RawMutexFair for RawMutex {
     #[inline]
-    fn unlock_fair(&self) {
-        unsafe { deadlock::release_resource(self as *const _ as usize) };
+    unsafe fn unlock_fair(&self) {
+        deadlock::release_resource(self as *const _ as usize);
         if self
             .state
             .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
@@ -131,7 +131,7 @@ unsafe impl lock_api::RawMutexFair for RawMutex {
     }
 
     #[inline]
-    fn bump(&self) {
+    unsafe fn bump(&self) {
         if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 {
             self.bump_slow();
         }
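
For reviewers, a minimal sketch (not part of the diff) of what the new contract looks like from both sides. It assumes the lock_api 0.4-style `RawMutex` shape (`INIT`, `GuardMarker`, `unsafe fn unlock`); `RawSpinlock` is a made-up example type, not anything in this PR. Implementers now write `unlock_fair` (and `bump`, if overridden) as `unsafe fn`, while callers that go through a guard keep a safe API, because holding the guard proves the lock is held:

```rust
use lock_api::{GuardSend, RawMutex, RawMutexFair};
use std::sync::atomic::{AtomicBool, Ordering};

// Hypothetical example type: a toy test-and-set spinlock with no wait queue,
// so its "fair" unlock degenerates to a plain unlock.
pub struct RawSpinlock(AtomicBool);

unsafe impl RawMutex for RawSpinlock {
    const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false));
    type GuardMarker = GuardSend;

    fn lock(&self) {
        while !self.try_lock() {
            std::hint::spin_loop();
        }
    }

    fn try_lock(&self) -> bool {
        self.0
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    unsafe fn unlock(&self) {
        self.0.store(false, Ordering::Release);
    }
}

unsafe impl RawMutexFair for RawSpinlock {
    // After this change the implementation is an `unsafe fn`: the caller
    // promises the lock is held in the current context.
    unsafe fn unlock_fair(&self) {
        // No parked threads to hand the lock to, so just release it.
        self.unlock();
    }
}

fn main() {
    let mutex: lock_api::Mutex<RawSpinlock, u32> = lock_api::Mutex::new(0);
    let mut guard = mutex.lock();
    *guard += 1;
    // Still a safe call: the guard itself is the proof that the lock is held,
    // and MutexGuard::unlock_fair wraps the unsafe raw call internally.
    lock_api::MutexGuard::unlock_fair(guard);
}
```

Since the guard methods stay safe, code that only uses `Mutex`/`MutexGuard` (or the reentrant equivalents) should keep compiling unchanged; only `RawMutexFair` implementers need to adopt the `unsafe fn` signatures.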