一、前言 #
bit_spinlock是内核里面比较原始的一种自旋锁的实现:用一个 unsigned long 中的某一位(bitnum)表示锁状态,只有0和1两种状态。加锁使用原子操作TAS(test and set)抢锁;等锁的线程之间无序竞争、没有排队机制,因此并不公平(对比 ticket/queued spinlock)。
二、使用实例 #
/* Example: use bit 0 of a plain unsigned long as a spinlock. */
void test_hash(void)
{
	unsigned long lock = 0;
	int t;	/* trylock result: 1 = acquired, 0 = failed */

	printk("lock: %lu, lock ptr: %pK\n", lock, &lock); // lock: 0, lock ptr: 0xffffaaabbb

	/* Use bit 0 of 'lock' as the lock bit; once locked, bit 0 reads as 1. */
	bit_spin_lock(0, &lock);
	/* Non-blocking attempt: returns 1 on success, 0 otherwise.
	 * The lock is already held above, so this fails. */
	t = bit_spin_trylock(0, &lock);
	printk("entry: %lu, %pK, try %d\n", lock, &lock, t); // lock: 1, lock ptr: 0xffffaaabbb, try 0

	/* After unlocking, the trylock succeeds and sets the bit again. */
	bit_spin_unlock(0, &lock);
	t = bit_spin_trylock(0, &lock);
	printk("entry: %lu, %pK, try %d\n", lock, &lock, t); // lock: 1, lock ptr: 0xffffaaabbb, try 1

	/* Release the lock taken by the successful trylock — otherwise we
	 * would return with preemption still disabled. */
	bit_spin_unlock(0, &lock);
}
三、具体实现 #
1. bit_spin_lock #
核心就是最简单的 test_and_set:把 addr 当作原子变量的指针来操作。注意两点:加锁前先 preempt_disable() 关闭抢占(持锁期间始终关抢占);抢不到锁时内层循环只用普通读(test_bit)忙等,减少总线争用,等到锁位清零后再重新尝试原子的 test_and_set。
// include/linux/bit_spinlock.h
/*
 * bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/*
	 * Assuming the lock is uncontended, this never enters
	 * the body of the outer loop. If it is contended, then
	 * within the inner loop a non-atomic test is used to
	 * busywait with less bus contention for a good time to
	 * attempt to acquire the lock bit.
	 */
	/* The lock is held with preemption disabled. */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		/* Lost the race: re-enable preemption while busy-waiting,
		 * so the holder (or other tasks) can run on this CPU. */
		preempt_enable();
		do {
			cpu_relax();
		} while (test_bit(bitnum, addr));	/* plain read, no bus lock */
		preempt_disable();
	}
#endif
	__acquire(bitlock);	/* sparse static-analysis annotation; emits no code */
}
// include/asm-generic/bitops/lock.h
/**
 * test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics if
 * the returned value is 0.
 * It can be used to implement bit locks.
 */
static inline int test_and_set_bit_lock(unsigned int nr,
					volatile unsigned long *p)
{
	long old;
	unsigned long mask = BIT_MASK(nr);	/* mask with only bit nr set */

	p += BIT_WORD(nr);	/* advance to the word containing bit nr */
	/* Cheap non-atomic read first: if the bit is already set, report
	 * "lock taken" without issuing an atomic RMW (less bus traffic). */
	if (READ_ONCE(*p) & mask)
		return 1;

	/* Atomically OR the mask in with acquire ordering; 'old' holds the
	 * previous word value, so the bit tells us who won the race. */
	old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
	return !!(old & mask);	/* 0 = we acquired it, 1 = already held */
}
2. bit_spin_trylock #
// include/linux/bit_spinlock.h
/*
 * Return true if it was acquired
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
	/* Disable preemption up front: on success the lock is held with
	 * preemption off, exactly like bit_spin_lock(). */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		/* Bit was already set — lock held elsewhere; undo and fail. */
		preempt_enable();
		return 0;
	}
#endif
	__acquire(bitlock);	/* sparse static-analysis annotation; emits no code */
	return 1;
}
3. bit_spin_unlock #
// include/linux/bit_spinlock.h
/*
 * bit-based spin_unlock()
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	/* Unlocking a lock that is not held is a bug. */
	BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	/* Clear the lock bit with release semantics, pairing with the
	 * acquire in test_and_set_bit_lock(). */
	clear_bit_unlock(bitnum, addr);
#endif
	/* Balances the preempt_disable() taken at lock/trylock time. */
	preempt_enable();
	__release(bitlock);	/* sparse static-analysis annotation; emits no code */
}
4. bit_spin_is_locked #
// include/linux/bit_spinlock.h
/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT_COUNT
	/* UP without debug: the lock bit is never actually set, but a held
	 * bit lock always has preemption disabled — use that as the proxy. */
	return preempt_count();
#else
	/* UP, no preempt counting: no way to tell; conservatively say held. */
	return 1;
#endif
}