Notes on Locks

Spinlock 

  a non-scalable implementation in C++

# if defined __GNUC__
# include <pthread.h>

class Spinlock {
# if (__GNUC__ >= 4)
private:
    pthread_spinlock_t _spinlock;
public:
    Spinlock()  { pthread_spin_init(&_spinlock, PTHREAD_PROCESS_PRIVATE); }
    ~Spinlock() { pthread_spin_destroy(&_spinlock); }
    void lock()   { pthread_spin_lock(&_spinlock); }
    void unlock() { pthread_spin_unlock(&_spinlock); }
# else
private:
    volatile int _inter_lock;
public:
    Spinlock() : _inter_lock(0) { }
    void lock() {
        register int temp;
        __asm__ __volatile__ (
            "1:              \n"
            "  cmp $0, %0    \n"   // cmp(0, _inter_lock)
            "  je 2f         \n"   // if _inter_lock == 0, goto 2
            "  pause         \n"
            "  jmp 1b        \n"   // goto 1
            "2:              \n"
            "  movl $1, %1   \n"   // temp = 1
            "  xchg %0, %1   \n"   // exchange(_inter_lock, temp)
            "  test %1, %1   \n"   // test(temp, temp), do AND
            "  jne 1b        \n"   // if temp != 0, goto 1
            : "=m"(_inter_lock), "=r"(temp));
        // A note on 'pause': it hints to the processor that this is a
        // spin-wait loop, which improves the performance of such loops
    }
    void unlock() {
        // _inter_lock = 0;
        __asm__ __volatile__ (" movl $0, %0 " : "=m"(_inter_lock));
    }
# endif // __GNUC__ >= 4

private:
    // non-copyable
    Spinlock(const Spinlock&);
    Spinlock& operator=(const Spinlock&);
};
# endif // __GNUC__

Pseudocode for ticket locks in Linux    (the ticket lock has been the default spinlock implementation since kernel version 2.6.25, released in April 2008)

struct spinlock_t {
    int current_ticket; 
    int next_ticket; 
};

void spin_lock (spinlock_t *lock) { 
    int t = atomic_fetch_and_inc(&lock->next_ticket);
    while (t != lock->current_ticket) 
        ; /* spin */ 
} 

void spin_unlock (spinlock_t *lock) { 
    lock->current_ticket++; 
}
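
A compilable sketch of the same ticket lock using C11 atomics (the type and function names below are my own illustration; the kernel's real implementation packs both counters into a single word and uses arch-specific code):

#include <stdatomic.h>

typedef struct {
    atomic_int current_ticket;  // ticket currently being served
    atomic_int next_ticket;     // next ticket to hand out
} ticketlock_t;

void ticket_lock(ticketlock_t *lock)
{
    // take a ticket: atomic fetch-and-increment of next_ticket
    int t = atomic_fetch_add_explicit(&lock->next_ticket, 1, memory_order_relaxed);
    // spin until our ticket is the one being served
    while (atomic_load_explicit(&lock->current_ticket, memory_order_acquire) != t)
        ; /* spin */
}

void ticket_unlock(ticketlock_t *lock)
{
    // serve the next ticket
    atomic_fetch_add_explicit(&lock->current_ticket, 1, memory_order_release);
}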

MCS spinlock (scalable; a variant of it is used inside the Linux mutex implementation) 

#include <stddef.h>   // NULL
#include <stdint.h>   // uint64_t

//  atomic exchange:
//    *ptr, val = val, *ptr
//    return val (i.e. the old value of *ptr)
static inline long xchg(long *ptr, long val)
{
    __asm__ volatile(
        "lock; xchgq %0, %1"
        : "+m" (*ptr), "+r" (val)
        :
        : "memory", "cc");
    return val;
}

// cmpxchg:
//     if (*ptr == old) {
//         *ptr = new;
//         return old;
//     } else 
//         return *ptr   
static inline long cmpxchg(long *ptr, long old, long val)
{
    uint64_t out;
    __asm__ volatile(
        "lock; cmpxchgq %2, %1"
        : "=a" (out), "+m" (*ptr)
        : "q" (val), "0"(old)
        : "memory");

    return out;
}
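
As an aside, GCC and Clang provide builtins with the same effect; a hedged sketch of equivalent definitions, assuming x86-64 (where __sync_lock_test_and_set compiles to a plain xchg; on some other targets it is only an acquire-barrier test-and-set):

// same behavior as the asm helpers above, via compiler builtins
static inline long xchg_builtin(long *ptr, long val)
{
    return __sync_lock_test_and_set(ptr, val);       // atomic exchange, returns old *ptr
}

static inline long cmpxchg_builtin(long *ptr, long old, long val)
{
    return __sync_val_compare_and_swap(ptr, old, val); // returns previous value of *ptr
}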

struct qnode {
    volatile void *next;  // next waiter in the queue
    volatile char locked; // 1 while this waiter still has to spin
    // pad each qnode to its own cache line to avoid false sharing;
    // CACHELINE is the cache-line size in bytes (typically 64)
    char __pad[0] __attribute__((aligned(CACHELINE)));
};


typedef struct {
    // tail of the queue of threads holding or waiting for the lock
    struct qnode *tail  __attribute__((aligned(64)));
    int lock_idx  __attribute__((aligned(64)));
} mcslock_t;


// initialize main lock
static inline void
mcs_init(mcslock_t *l)
{
    l->tail = NULL;
}

static inline void
mcs_lock(mcslock_t *l, volatile struct qnode *mynode)
{
    struct qnode *predecessor; 

    mynode->next = NULL;
    predecessor = (struct qnode *)xchg((long *)&l->tail, (long)mynode);

    if (predecessor) {
        mynode->locked = 1;            // we must wait until our predecessor releases the lock
        asm volatile("" ::: "memory"); // compiler barrier: set 'locked' before linking in
        predecessor->next = mynode;
        while (mynode->locked)         // busy wait until the predecessor hands the lock over
            nop_pause();               // nop_pause(): x86 'pause' hint inside the spin loop
    }
}

static inline void
mcs_unlock(mcslock_t *l, volatile struct qnode *mynode)
{
    if (!mynode->next) { // if no cores waiting
        // atomic lock-free pop: 
        //     if we are the only node in the queue, reset l->tail and return
        if (cmpxchg((long *)&l->tail, (long)mynode, 0) == (long)mynode)
            return;
        while (!mynode->next)
            nop_pause();
    }
    ((struct qnode *)mynode->next)->locked = 0; // hand the lock off to the successor
}
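
A minimal usage sketch (my own illustration): each thread passes its own qnode to mcs_lock and must pass the same node to the matching mcs_unlock; the node must stay valid until unlock returns:

mcslock_t lock;          // shared lock, initialized once with mcs_init(&lock)
long counter;            // protected by 'lock'

void *worker(void *arg)
{
    struct qnode me;     // per-acquisition queue node, lives on this thread's stack
    mcs_lock(&lock, &me);
    counter++;           // critical section
    mcs_unlock(&lock, &me);
    return NULL;
}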

For more on scalable locks, see:

          the paper "Non-scalable locks are dangerous"

          my notes on scalable spinlocks

Mutex

        Linux kernel bug: A surprise with mutexes and reference counts

        Russ Cox on debugging an interesting concurrency bug    

   adaptive mutex: spin for a short while when the current lock holder is running on another CPU, then fall back to blocking; a minimal sketch follows below
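
A minimal sketch of the adaptive idea on top of pthreads (the spin bound, the trylock-based spinning, and the x86 'pause' are illustrative assumptions, not how Solaris or the Linux kernel implement it):

#include <pthread.h>

#define ADAPTIVE_SPIN_LIMIT 100   // arbitrary bound, for illustration only

void adaptive_lock(pthread_mutex_t *m)
{
    // first, spin briefly in the hope that the holder releases the lock soon
    for (int i = 0; i < ADAPTIVE_SPIN_LIMIT; i++) {
        if (pthread_mutex_trylock(m) == 0)
            return;                     // got the lock without sleeping
        __asm__ __volatile__("pause");  // spin-wait hint (x86)
    }
    // otherwise, block in the kernel until the lock becomes free
    pthread_mutex_lock(m);
}

glibc exposes a similar policy as the non-portable PTHREAD_MUTEX_ADAPTIVE_NP mutex type.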

   

Original post: https://www.cnblogs.com/william-cheung/p/5878764.html