Spin locks are implemented differently depending on the kernel configuration. The four cases are:
1. Uni-processor, non-preemptive system (neither CONFIG_SMP nor CONFIG_PREEMPT is defined)
The spin lock does nothing at all: on such a system a shared resource can never be accessed by two or more processes at the same time, so no actual locking has to be implemented.
typedef struct { } spinlock_t; /* an empty structure */
#define spin_lock(lock) _spin_lock(lock)
#define spin_unlock(lock) _spin_unlock(lock)
#define _spin_lock(lock) \
do { \
        preempt_disable(); \
        _raw_spin_lock(lock); \
        __acquire(lock); \
} while(0)
#define _spin_unlock(lock) \
do { \
        _raw_spin_unlock(lock); \
        preempt_enable(); \
        __release(lock); \
} while (0)
#define _raw_spin_lock(lock) do { (void)(lock); } while(0)
#define _raw_spin_unlock(lock) do { (void)(lock); } while(0)
Defined in include/linux/spinlock.h
#define preempt_disable() do { } while (0)
#define preempt_enable() do { } while (0)
Defined in include/linux/preempt.h
#define __acquire(x) (void)0
#define __release(x) (void)0
Defined in include/linux/compiler.h
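Whichever of the four configurations is selected, kernel code always calls the same spin_lock()/spin_unlock() API; only the expansion behind it changes. A minimal usage sketch follows (my_lock, shared_count and bump_shared_count are made-up names for illustration; SPIN_LOCK_UNLOCKED was the usual static initializer in kernels of this vintage):

#include <linux/spinlock.h>

static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;  /* hypothetical lock protecting shared_count */
static unsigned long shared_count;               /* hypothetical shared resource */

static void bump_shared_count(void)
{
        spin_lock(&my_lock);    /* on this configuration this compiles away to nothing */
        shared_count++;         /* critical section */
        spin_unlock(&my_lock);  /* likewise a no-op here */
}

On configurations 2 to 4 below, the same two calls expand to preemption control and, on SMP, to the busy-wait loop.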
2. Uni-processor, preemptive system (CONFIG_PREEMPT is defined, CONFIG_SMP is not)
The spin lock is implemented by disabling and re-enabling kernel preemption, which again ensures that a shared resource is never accessed by two or more processes at the same time.
typedef struct { } spinlock_t; /* an empty structure */
#define spin_lock(lock) _spin_lock(lock)
#define spin_unlock(lock) _spin_unlock(lock)
#define _spin_lock(lock) \
do { \
        preempt_disable(); \
        _raw_spin_lock(lock); \
        __acquire(lock); \
} while(0)
#define _spin_unlock(lock) \
do { \
        _raw_spin_unlock(lock); \
        preempt_enable(); \
        __release(lock); \
} while (0)
#define _raw_spin_lock(lock) do { (void)(lock); } while(0)
#define _raw_spin_unlock(lock) do { (void)(lock); } while(0)
Defined in include/linux/spinlock.h
#define preempt_disable() \
do { \
        inc_preempt_count(); \
        barrier(); \
} while (0)
#define preempt_enable() \
do { \
        preempt_enable_no_resched(); \
        preempt_check_resched(); \
} while (0)
Defined in include/linux/preempt.h
#define __acquire(x) (void)0
#define __release(x) (void)0
Defined in include/linux/compiler.h
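Here the "lock" really is just the per-task preemption counter: every _spin_lock() raises preempt_count through inc_preempt_count(), the scheduler refuses to preempt the task while the count is non-zero, and the outermost _spin_unlock() drops the count back to zero before preempt_check_resched() lets any pending reschedule happen. A simplified stand-alone model of that counting behaviour (the model_* names are illustrative, not kernel code):

/* Simplified model of the CONFIG_PREEMPT uni-processor spin lock:
 * the "lock" is nothing more than a preemption counter. */
static int preempt_count;               /* models current_thread_info()->preempt_count */

static void model_spin_lock(void)       /* models _spin_lock() */
{
        preempt_count++;                /* inc_preempt_count(): preemption now forbidden */
}

static void model_spin_unlock(void)     /* models _spin_unlock() */
{
        preempt_count--;                /* dec_preempt_count(), via preempt_enable_no_resched() */
        if (preempt_count == 0) {
                /* preempt_check_resched(): if a reschedule became pending while the
                 * count was raised, another task may be scheduled at this point. */
        }
}

Because it is only a counter, nested locks work naturally: preemption stays disabled until the outermost unlock.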
3. Multi-processor, non-preemptive system (CONFIG_SMP is defined, CONFIG_PREEMPT is not)
The spin lock implements a real busy-wait loop: a process that wants the shared resource but cannot obtain the lock keeps spinning until the lock is released.
typedef struct {
        volatile unsigned int slock;
} spinlock_t;
static inline void _raw_spin_lock(spinlock_t *lock)
{
        __asm__ __volatile__(
                spin_lock_string
                :"=m" (lock->slock) : : "memory"
        );
}
static inline void _raw_spin_unlock(spinlock_t *lock)
{
        char oldval = 1;
        __asm__ __volatile__(
                spin_unlock_string
        );
}
#define spin_lock_string \
        "\n1:\t" \
        "lock ; decb %0\n\t" \
        "jns 3f\n" \
        "2:\t" \
        "rep;nop\n\t" \
        "cmpb $0,%0\n\t" \
        "jle 2b\n\t" \
        "jmp 1b\n" \
        "3:\n\t"
#define spin_unlock_string \
        "xchgb %b0, %1" \
                :"=q" (oldval), "=m" (lock->slock) \
                :"0" (oldval) : "memory"
Defined in include/asm-i386/spinlock.h
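The lock byte (slock) starts at 1, meaning unlocked. spin_lock_string atomically decrements it with "lock ; decb": if the result is still non-negative the caller consumed the 1 and owns the lock; otherwise it spins read-only with "rep;nop" (the PAUSE hint) until the byte turns positive, then retries. spin_unlock_string simply exchanges 1 back into the byte. A non-atomic C rendering of the same logic (purely illustrative; the lock prefix and xchgb are what make the real code safe across CPUs):

/* Illustrative C view of spin_lock_string / spin_unlock_string.
 * NOTE: plain C cannot express the atomicity of "lock ; decb" and "xchgb". */
static void raw_spin_lock_in_c(spinlock_t *lock)
{
        volatile signed char *byte = (volatile signed char *)&lock->slock;

        for (;;) {
                if (--(*byte) >= 0)             /* "lock ; decb %0" + "jns 3f" */
                        return;                 /* byte was 1: lock acquired */
                while (*byte <= 0)              /* "2:" wait loop: "cmpb $0,%0" / "jle 2b" */
                        cpu_relax();            /* "rep;nop" (PAUSE) while waiting */
                /* "jmp 1b": the byte went positive, retry the decrement */
        }
}

static void raw_spin_unlock_in_c(spinlock_t *lock)
{
        *(volatile signed char *)&lock->slock = 1;   /* "xchgb": store 1 = unlocked */
}

Spinning read-only in the inner loop, instead of hammering the locked decrement, keeps the lock's cache line shared among the waiting CPUs until it is actually released.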
#define spin_lock(lock) _spin_lock(lock)
#define spin_unlock(lock) _spin_unlock(lock)
Defined in include/linux/spinlock.h
void __lockfunc _spin_lock(spinlock_t *lock)
{
        preempt_disable();
        _raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock);
void __lockfunc _spin_unlock(spinlock_t *lock)
{
        _raw_spin_unlock(lock);
        preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);
Defined in kernel/spinlock.c
#define preempt_disable() do { } while (0)
#define preempt_enable() do { } while (0)
Defined in include/linux/preempt.h
4. Multi-processor, preemptive system (both CONFIG_SMP and CONFIG_PREEMPT are defined)
The spin lock still implements a busy-wait loop, but a process spinning on the lock can be preempted, so other processes may take over the CPU while it waits.
typedef struct {
        volatile unsigned int slock;
        unsigned int break_lock; /* #ifdef CONFIG_PREEMPT */
} spinlock_t;
static inline int _raw_spin_trylock(spinlock_t *lock)
{
        char oldval;
        __asm__ __volatile__(
                "xchgb %b0,%1"
                :"=q" (oldval), "=m" (lock->slock)
                :"0" (0) : "memory"
        );
        return oldval > 0;
}
static inline void _raw_spin_unlock(spinlock_t *lock)
{
        char oldval = 1;
        __asm__ __volatile__(
                spin_unlock_string
        );
}
#define spin_unlock_string \
        "xchgb %b0, %1" \
                :"=q" (oldval), "=m" (lock->slock) \
                :"0" (oldval) : "memory"
#define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0)
Defined in include/asm-i386/spinlock.h
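_raw_spin_trylock swaps 0 into the lock byte in a single atomic step and inspects the value that came out: 1 means the lock was free and now belongs to the caller, 0 or a negative value means someone else already holds it. A non-atomic C sketch of that semantics (illustrative; xchgb is what makes the exchange atomic on real hardware):

/* Illustrative C view of _raw_spin_trylock: exchange 0 into the lock byte
 * and report whether it was previously unlocked. Not atomic in plain C. */
static int raw_spin_trylock_in_c(spinlock_t *lock)
{
        volatile signed char *byte = (volatile signed char *)&lock->slock;
        signed char oldval = *byte;     /* value before the exchange */

        *byte = 0;                      /* mark the lock as taken */
        return oldval > 0;              /* it was 1 (free): the caller now owns it */
}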
#define spin_lock(lock) _spin_lock(lock)
#define spin_unlock(lock) _spin_unlock(lock)
#define spin_can_lock(lock) (!spin_is_locked(lock))
Defined in include/linux/spinlock.h
#define BUILD_LOCK_OPS(op, locktype) \
void __lockfunc _##op##_lock(locktype##_t *lock) \
{ \
        preempt_disable(); \
        for (;;) { \
                if (likely(_raw_##op##_trylock(lock))) \
                        break; \
                preempt_enable(); \
                if (!(lock)->break_lock) \
                        (lock)->break_lock = 1; \
                while (!op##_can_lock(lock) && (lock)->break_lock) \
                        cpu_relax(); \
                preempt_disable(); \
        } \
} \
\
EXPORT_SYMBOL(_##op##_lock);
BUILD_LOCK_OPS(spin, spinlock); /* #ifdef CONFIG_PREEMPT */
void __lockfunc _spin_unlock(spinlock_t *lock)
{
_raw_spin_unlock(lock);
preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);
Defined in kernel/spinlock.c
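BUILD_LOCK_OPS(spin, spinlock) stamps out the preemptible _spin_lock(): it keeps trying the atomic trylock, and whenever the lock is busy it turns preemption back on, sets break_lock as a hint that someone is waiting, and spins with preemption enabled so the waiting task can still be scheduled away. Hand-expanding the macro for the spin case gives roughly the following (a sketch, not actual preprocessor output):

/* Sketch of BUILD_LOCK_OPS(spin, spinlock) after macro expansion. */
void __lockfunc _spin_lock(spinlock_t *lock)
{
        preempt_disable();
        for (;;) {
                if (likely(_raw_spin_trylock(lock)))
                        break;                  /* got the lock; return with preemption off */

                /* Lock is busy: wait, but allow this task to be preempted. */
                preempt_enable();
                if (!lock->break_lock)
                        lock->break_lock = 1;   /* hint that a waiter exists */
                while (!spin_can_lock(lock) && lock->break_lock)
                        cpu_relax();            /* spin politely until it looks free */
                preempt_disable();              /* re-disable before retrying the trylock */
        }
}

This is also why _spin_unlock() simply does _raw_spin_unlock() followed by preempt_enable(): every successful path through the loop leaves exactly one preempt_disable() outstanding.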
#define preempt_disable() \
do { \
        inc_preempt_count(); \
        barrier(); \
} while (0)
#define preempt_enable() \
do { \
        preempt_enable_no_resched(); \
        preempt_check_resched(); \
} while (0)
Defined in include/linux/preempt.h