Implement wide sequence lock
jeehoonkang committed Oct 16, 2019
1 parent a5859f3 · commit eeb315a
Showing 4 changed files with 213 additions and 88 deletions.
94 changes: 6 additions & 88 deletions crossbeam-utils/src/atomic/atomic_cell.rs
@@ -2,12 +2,12 @@ use core::cell::UnsafeCell;
use core::fmt;
use core::mem;
use core::ptr;
use core::sync::atomic::{self, AtomicBool, AtomicUsize, Ordering};
use core::sync::atomic::{self, AtomicBool, Ordering};

#[cfg(feature = "std")]
use std::panic::{RefUnwindSafe, UnwindSafe};

use Backoff;
use super::seq_lock::SeqLock;

/// A thread-safe mutable memory location.
///
@@ -631,87 +631,6 @@ fn can_transmute<A, B>() -> bool {
mem::size_of::<A>() == mem::size_of::<B>() && mem::align_of::<A>() >= mem::align_of::<B>()
}

/// A simple stamped lock.
struct Lock {
/// The current state of the lock.
///
/// All bits except the least significant one hold the current stamp. When locked, the state
/// equals 1 and doesn't contain a valid stamp.
state: AtomicUsize,
}

impl Lock {
/// If not locked, returns the current stamp.
///
/// This method should be called before optimistic reads.
#[inline]
fn optimistic_read(&self) -> Option<usize> {
let state = self.state.load(Ordering::Acquire);
if state == 1 {
None
} else {
Some(state)
}
}

/// Returns `true` if the current stamp is equal to `stamp`.
///
/// This method should be called after optimistic reads to check whether they are valid. The
/// argument `stamp` should correspond to the one returned by method `optimistic_read`.
#[inline]
fn validate_read(&self, stamp: usize) -> bool {
atomic::fence(Ordering::Acquire);
self.state.load(Ordering::Relaxed) == stamp
}

/// Grabs the lock for writing.
#[inline]
fn write(&'static self) -> WriteGuard {
let backoff = Backoff::new();
loop {
let previous = self.state.swap(1, Ordering::Acquire);

if previous != 1 {
atomic::fence(Ordering::Release);

return WriteGuard {
lock: self,
state: previous,
};
}

backoff.snooze();
}
}
}

/// A RAII guard that releases the lock and increments the stamp when dropped.
struct WriteGuard {
/// The parent lock.
lock: &'static Lock,

/// The stamp before locking.
state: usize,
}

impl WriteGuard {
/// Releases the lock without incrementing the stamp.
#[inline]
fn abort(self) {
self.lock.state.store(self.state, Ordering::Release);
}
}

impl Drop for WriteGuard {
#[inline]
fn drop(&mut self) {
// Release the lock and increment the stamp.
self.lock
.state
.store(self.state.wrapping_add(2), Ordering::Release);
}
}

/// Returns a reference to the global lock associated with the `AtomicCell` at address `addr`.
///
/// This function is used to protect atomic data which doesn't fit into any of the primitive atomic
@@ -722,7 +641,7 @@ impl Drop for WriteGuard {
/// scalability.
#[inline]
#[must_use]
fn lock(addr: usize) -> &'static Lock {
fn lock(addr: usize) -> &'static SeqLock {
// The number of locks is a prime number because we want to make sure `addr % LEN` gets
// dispersed across all locks.
//
@@ -747,10 +666,9 @@ fn lock(addr: usize) -> &'static Lock {
// In order to protect from such cases, we simply choose a large prime number for `LEN`.
const LEN: usize = 97;

const L: Lock = Lock {
state: AtomicUsize::new(0),
};
static LOCKS: [Lock; LEN] = [
const L: SeqLock = SeqLock::new();

static LOCKS: [SeqLock; LEN] = [
L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L,
L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L,
L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L,
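The tail of `lock()` is elided above, but the surviving comment describes the striping scheme: the cell's address selects one of `LEN` global `SeqLock`s via `addr % LEN`, and `LEN` is prime so that addresses separated by a fixed power-of-two stride still spread across many stripes. A minimal standalone sketch of that mapping (the helper name and the sample addresses are illustrative, not taken from the commit):

    fn lock_index(addr: usize) -> usize {
        // Prime number of lock stripes, as in the diff above.
        const LEN: usize = 97;
        addr % LEN
    }

    fn main() {
        // Two cells 64 bytes apart land on different stripes.
        assert_ne!(lock_index(0x1000), lock_index(0x1040));
        // With a power-of-two LEN (say 64), every address sharing the same low
        // bits would collide on a single stripe; a prime LEN avoids that clustering.
        println!("0x1000 -> {}, 0x1040 -> {}", lock_index(0x1000), lock_index(0x1040));
    }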
11 changes: 11 additions & 0 deletions crossbeam-utils/src/atomic/mod.rs
@@ -1,5 +1,16 @@
//! Atomic types.
cfg_if! {
// TODO(jeehoonkang): want to express `target_pointer_width >= "64"`, but it's not expressible
// in Rust for the time being.
if #[cfg(target_pointer_width = "64")] {
mod seq_lock;
} else {
#[path = "seq_lock_wide.rs"]
mod seq_lock;
}
}

mod atomic_cell;
mod consume;

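This `cfg_if!` split is the point of the commit: 64-bit targets keep the one-word stamp in `seq_lock.rs`, while narrower targets compile `seq_lock_wide.rs` under the same `seq_lock` module name. A rough, illustrative calculation of why stamp width matters (the write rate is an assumption, not a number from the commit): each write section advances the stamp by 2, so a 32-bit `usize` has only 2^31 distinct stamps and can wrap while an optimistic read is stalled, whereas a 64-bit stamp is practically impossible to wrap.

    fn main() {
        // Assumed sustained rate of write sections on one lock stripe.
        let writes_per_sec = 1e8_f64;

        // The stamp advances by 2 per write, so half the bit patterns are reachable.
        let stamps_32 = 2f64.powi(31);
        let stamps_64 = 2f64.powi(63);

        // ~21 seconds: a stalled reader could see the same stamp again and
        // validate a torn read.
        println!("32-bit stamp wraps in ~{:.0} s", stamps_32 / writes_per_sec);

        // ~2900 years at the same rate.
        let secs_per_year = 3600.0 * 24.0 * 365.0;
        println!("64-bit stamp wraps in ~{:.0} years",
                 stamps_64 / writes_per_sec / secs_per_year);
    }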
90 changes: 90 additions & 0 deletions crossbeam-utils/src/atomic/seq_lock.rs
@@ -0,0 +1,90 @@
use core::sync::atomic::{self, AtomicUsize, Ordering};

use Backoff;

/// A simple stamped lock.
pub struct SeqLock {
/// The current state of the lock.
///
/// All bits except the least significant one hold the current stamp. When locked, the state
/// equals 1 and doesn't contain a valid stamp.
state: AtomicUsize,
}

impl SeqLock {
pub const fn new() -> Self {
Self {
state: AtomicUsize::new(0),
}
}

/// If not locked, returns the current stamp.
///
/// This method should be called before optimistic reads.
#[inline]
pub fn optimistic_read(&self) -> Option<usize> {
let state = self.state.load(Ordering::Acquire);
if state == 1 {
None
} else {
Some(state)
}
}

/// Returns `true` if the current stamp is equal to `stamp`.
///
/// This method should be called after optimistic reads to check whether they are valid. The
/// argument `stamp` should correspond to the one returned by method `optimistic_read`.
#[inline]
pub fn validate_read(&self, stamp: usize) -> bool {
atomic::fence(Ordering::Acquire);
self.state.load(Ordering::Relaxed) == stamp
}

/// Grabs the lock for writing.
#[inline]
pub fn write(&'static self) -> SeqLockWriteGuard {
let backoff = Backoff::new();
loop {
let previous = self.state.swap(1, Ordering::Acquire);

if previous != 1 {
atomic::fence(Ordering::Release);

return SeqLockWriteGuard {
lock: self,
state: previous,
};
}

backoff.snooze();
}
}
}

/// A RAII guard that releases the lock and increments the stamp when dropped.
pub struct SeqLockWriteGuard {
/// The parent lock.
lock: &'static SeqLock,

/// The stamp before locking.
state: usize,
}

impl SeqLockWriteGuard {
/// Releases the lock without incrementing the stamp.
#[inline]
pub fn abort(self) {
self.lock.state.store(self.state, Ordering::Release);
}
}

impl Drop for SeqLockWriteGuard {
#[inline]
fn drop(&mut self) {
// Release the lock and increment the stamp.
self.lock
.state
.store(self.state.wrapping_add(2), Ordering::Release);
}
}
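Together these methods support the classic seqlock pattern used to protect values that don't fit a primitive atomic type: read optimistically and validate the stamp, and if that fails fall back to taking the write lock, releasing it with `abort` since a load doesn't modify the data. A sketch of such a caller, written as if it sat next to this module so `SeqLock` is in scope (the function names are illustrative, not the ones in `atomic_cell.rs`):

    use core::ptr;

    /// Loads the value behind `src`, which is protected by `lock`.
    unsafe fn seq_load<T: Copy>(lock: &'static SeqLock, src: *mut T) -> T {
        // Fast path: read without taking the lock, then check that no writer
        // ran in between by validating the stamp.
        if let Some(stamp) = lock.optimistic_read() {
            let val = ptr::read_volatile(src);
            if lock.validate_read(stamp) {
                return val;
            }
        }

        // Slow path: take the write lock so writers can't starve this reader,
        // then release it with `abort` so the stamp isn't bumped needlessly.
        let guard = lock.write();
        let val = ptr::read(src);
        guard.abort();
        val
    }

    /// Stores `val` behind `dst`; dropping the guard bumps the stamp and
    /// invalidates any concurrent optimistic read.
    unsafe fn seq_store<T>(lock: &'static SeqLock, dst: *mut T, val: T) {
        let _guard = lock.write();
        ptr::write(dst, val);
    }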
106 changes: 106 additions & 0 deletions crossbeam-utils/src/atomic/seq_lock_wide.rs
@@ -0,0 +1,106 @@
use core::sync::atomic::{self, AtomicUsize, Ordering};

use Backoff;

/// A simple stamped lock.
///
/// The state is represented as two `AtomicUsize`: `state_hi` for high bits and `state_lo` for low
/// bits.
pub struct SeqLock {
/// The high bits of the current state of the lock.
state_hi: AtomicUsize,

/// The low bits of the current state of the lock.
///
/// All bits except the least significant one hold the current stamp. When locked, the state_lo
/// equals 1 and doesn't contain a valid stamp.
state_lo: AtomicUsize,
}

impl SeqLock {
pub const fn new() -> Self {
Self {
state_hi: AtomicUsize::new(0),
state_lo: AtomicUsize::new(0),
}
}

/// If not locked, returns the current stamp.
///
/// This method should be called before optimistic reads.
#[inline]
pub fn optimistic_read(&self) -> Option<(usize, usize)> {
let state_hi = self.state_hi.load(Ordering::Acquire);
let state_lo = self.state_lo.load(Ordering::Acquire);
if state_lo == 1 {
None
} else {
Some((state_hi, state_lo))
}
}

/// Returns `true` if the current stamp is equal to `stamp`.
///
/// This method should be called after optimistic reads to check whether they are valid. The
/// argument `stamp` should correspond to the one returned by method `optimistic_read`.
#[inline]
pub fn validate_read(&self, stamp: (usize, usize)) -> bool {
atomic::fence(Ordering::Acquire);
let state_lo = self.state_lo.load(Ordering::Acquire);
let state_hi = self.state_hi.load(Ordering::Relaxed);
(state_hi, state_lo) == stamp
}

/// Grabs the lock for writing.
#[inline]
pub fn write(&'static self) -> SeqLockWriteGuard {
let backoff = Backoff::new();
loop {
let previous = self.state_lo.swap(1, Ordering::Acquire);

if previous != 1 {
atomic::fence(Ordering::Release);

return SeqLockWriteGuard {
lock: self,
state_lo: previous,
};
}

backoff.snooze();
}
}
}

/// A RAII guard that releases the lock and increments the stamp when dropped.
pub struct SeqLockWriteGuard {
/// The parent lock.
lock: &'static SeqLock,

/// The stamp before locking.
state_lo: usize,
}

impl SeqLockWriteGuard {
/// Releases the lock without incrementing the stamp.
#[inline]
pub fn abort(self) {
self.lock.state_lo.store(self.state_lo, Ordering::Release);
}
}

impl Drop for SeqLockWriteGuard {
#[inline]
fn drop(&mut self) {
let state_lo = self.state_lo.wrapping_add(2);

// Increase the high bits if the low bits wrap around.
if state_lo == 0 {
let state_hi = self.lock.state_hi.load(Ordering::Relaxed);
self.lock.state_hi.store(state_hi.wrapping_add(1), Ordering::Release);
}

// Release the lock and increment the stamp.
self.lock.state_lo.store(state_lo, Ordering::Release);
}
}
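Compared with the one-word version, the only extra logic is the carry in `drop`: the stamp is the pair `(state_hi, state_lo)`, and when the low word wraps around to zero the high word is incremented, so the full stamp doesn't repeat until the combined counter wraps. A standalone sketch of that carry step using plain integers (not the atomics above):

    fn bump(stamp: (usize, usize)) -> (usize, usize) {
        let (hi, lo) = stamp;
        // Mirrors SeqLockWriteGuard::drop: advance the low word by 2 and
        // carry into the high word when the low word wraps to zero.
        let lo = lo.wrapping_add(2);
        let hi = if lo == 0 { hi.wrapping_add(1) } else { hi };
        (hi, lo)
    }

    fn main() {
        // An unlocked stamp just short of wrapping (least significant bit clear).
        let stamp = (0usize, usize::MAX - 1);
        assert_eq!(bump(stamp), (1, 0));   // low word wraps, high word carries
        assert_eq!(bump((1, 0)), (1, 2));  // subsequent writes advance normally
    }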
