/* $Id: semaphore-helper.h,v 1.3 1999/06/11 14:30:15 ralf Exp $
 *
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Andrea Arcangeli
 * (C) Copyright 1999 Ralf Baechle
 */
#ifndef __ASM_MIPS_SEMAPHORE_HELPER_H
#define __ASM_MIPS_SEMAPHORE_HELPER_H

#include <linux/config.h>	/* CONFIG_CPU_HAS_LLSC */

/*
 * These two _must_ execute atomically wrt each other.
 */
static inline void wake_one_more(struct semaphore * sem)
{
	atomic_inc(&sem->waking);
}

#if !defined(CONFIG_CPU_HAS_LLSC)

/*
 * It doesn't make sense, IMHO, to endlessly turn interrupts off and on
 * again.  Do it once and that's it.  ll/sc *has* its advantages.  HK
 *
 * These accessors are not atomic by themselves; they are only safe
 * because every use below runs with interrupts disabled.
 */
#define read(a)	((a)->counter)
#define inc(a)	(((a)->counter)++)
#define dec(a)	(((a)->counter)--)

static inline int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	save_and_cli(flags);
	if (read(&sem->waking) > 0) {
		dec(&sem->waking);
		ret = 1;
	}
	restore_flags(flags);

	return ret;
}

static inline int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	int ret = 0;
	unsigned long flags;

	save_and_cli(flags);
	if (read(&sem->waking) > 0) {
		dec(&sem->waking);
		ret = 1;
	} else if (signal_pending(tsk)) {
		inc(&sem->count);	/* undo down_interruptible's decrement */
		ret = -EINTR;
	}
	restore_flags(flags);

	return ret;
}

static inline int waking_non_zero_trylock(struct semaphore *sem)
{
	int ret = 1;
	unsigned long flags;

	save_and_cli(flags);
	if (read(&sem->waking) <= 0)
		inc(&sem->count);	/* give the count back, we didn't lock */
	else {
		dec(&sem->waking);
		ret = 0;
	}
	restore_flags(flags);

	return ret;
}

#else /* CONFIG_CPU_HAS_LLSC */

static inline int waking_non_zero(struct semaphore *sem)
{
	int ret, tmp;

	__asm__ __volatile__(
	"1:\tll\t%1,%2\n\t"		/* tmp = sem->waking		*/
	"blez\t%1,2f\n\t"		/* nothing pending -> ret stays 0 */
	"subu\t%0,%1,1\n\t"		/* ret = waking - 1		*/
	"sc\t%0,%2\n\t"			/* try to store; ret = 1 on success */
	"beqz\t%0,1b\n\t"		/* lost the race -> retry	*/
	"2:"
	".text"
	: "=r"(ret), "=r"(tmp), "=m"(__atomic_fool_gcc(&sem->waking))
	: "0"(0));

	return ret;
}

/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count down_interruptible decrement
 * simultaneously and atomically with the sem->waking adjustment,
 * otherwise we can race with wake_one_more.
 *
 * This is accomplished by doing a 64-bit ll/sc on the two 32-bit words.
 *
 * This is crazy.  Normally it is strictly forbidden to use 64-bit
 * operations in the 32-bit MIPS kernel.  In this case it's however ok
 * because if an interrupt has destroyed the upper half of the registers,
 * sc will fail.
 */
static inline int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	long ret, tmp;

#ifdef __MIPSEB__
	__asm__ __volatile__("
	.set	mips3
	.set	push
	.set	noat
0:	lld	%1, %2
	li	%0, 0
	bltz	%1, 1f
	dli	$1, 0xffffffff00000000
	daddu	%1, $1
	li	%0, 1
	b	2f
1:	beqz	%3, 1f
	addiu	$1, %1, 1
	dsll32	$1, $1, 0
	dsrl32	$1, $1, 0
	dsrl32	%1, %1, 0
	dsll32	%1, %1, 0
	or	%1, $1
	li	%0, %4
	b	2f
1:	scd	%1, %2
2:	beqz	%1, 0b
	.set	pop
	.set	mips0"
	: "=&r"(ret), "=&r"(tmp), "=m"(*sem)
	: "r"(signal_pending(tsk)), "i"(-EINTR));
#endif

#ifdef __MIPSEL__
	__asm__ __volatile__("
	.set	mips3
	.set	push
	.set	noat
0:	lld	%1, %2
	li	%0, 0
	bltz	%1, 1f
	dli	$1, 0xffffffff00000000
	daddu	%1, $1
	li	%0, 1
	b	2f
1:	beqz	%3, 1f
	addiu	$1, %1, 1
	dsrl32	%1, %1, 0
	dsll32	%1, %1, 0
	or	%1, $1
	li	%0, %4
	b	2f
1:	scd	%1, %2
2:	beqz	%1, 0b
	.set	pop
	.set	mips0"
	: "=&r"(ret), "=&r"(tmp), "=m"(*sem)
	: "r"(signal_pending(tsk)), "i"(-EINTR));
#endif

	return ret;
}
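
/*
 * Illustrative sketch only, kept out of the build with #if 0: a plain C
 * rendering of what the lld/scd sequences above implement in a single
 * atomic step.  The helper name below is hypothetical, and this version
 * is NOT atomic (it touches count and waking as two separate words); it
 * exists purely to document the intended logic, which the real code must
 * fold into one 64-bit ll/sc as explained in the comment above.
 */
#if 0
static inline int waking_non_zero_interruptible_sketch(struct semaphore *sem,
							struct task_struct *tsk)
{
	int ret = 0;

	if (atomic_read(&sem->waking) > 0) {
		atomic_dec(&sem->waking);	/* consume one pending wakeup */
		ret = 1;			/* got the lock */
	} else if (signal_pending(tsk)) {
		atomic_inc(&sem->count);	/* undo down_interruptible's decrement */
		ret = -EINTR;			/* interrupted */
	}
	/* else: 0, go (back) to sleep */

	return ret;
}
#endif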

/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * XXX SMP ALERT
 */
#ifdef __SMP__
#error FIXME, waking_non_zero_trylock is broken for SMP.
#endif

static inline int waking_non_zero_trylock(struct semaphore *sem)
{
	int ret = 1;

	if (atomic_read(&sem->waking) <= 0)
		atomic_inc(&sem->count);	/* give the count back, we didn't lock */
	else {
		atomic_dec(&sem->waking);
		ret = 0;
	}

	return ret;
}

#endif /* CONFIG_CPU_HAS_LLSC */

#endif /* __ASM_MIPS_SEMAPHORE_HELPER_H */
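
/*
 * Illustrative usage sketch only, kept out of the build with #if 0 and not
 * part of this header: roughly how a __down_interruptible()-style loop
 * might drive the helpers above.  The function name is hypothetical and
 * the wait-queue/task-state handling a real implementation needs is
 * elided; the point is the return contract documented above:
 * 1 = got the lock, 0 = go to sleep, -EINTR = interrupted.
 */
#if 0
static int __down_interruptible_sketch(struct semaphore *sem)
{
	int ret;

	for (;;) {
		ret = waking_non_zero_interruptible(sem, current);
		if (ret)		/* 1: acquired, -EINTR: give up */
			break;
		schedule();		/* 0: no wakeup pending, sleep again */
	}

	return ret == 1 ? 0 : ret;
}
#endif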