author      Ralf Baechle <ralf@linux-mips.org>    1998-05-07 02:55:41 +0000
committer   Ralf Baechle <ralf@linux-mips.org>    1998-05-07 02:55:41 +0000
commit      dcec8a13bf565e47942a1751a9cec21bec5648fe (patch)
tree        548b69625b18cc2e88c3e68d0923be546c9ebb03 /arch/ppc/lib/locks.c
parent      2e0f55e79c49509b7ff70ff1a10e1e9e90a3dfd4 (diff)
o Merge with Linux 2.1.99.
o Fix ancient bug in the ELF loader making ldd crash.
o Fix ancient bug in the keyboard code for SGI, SNI and Jazz.
Diffstat (limited to 'arch/ppc/lib/locks.c')
-rw-r--r--  arch/ppc/lib/locks.c  160
1 file changed, 109 insertions(+), 51 deletions(-)
diff --git a/arch/ppc/lib/locks.c b/arch/ppc/lib/locks.c
index 5e2ceb889..55cc665d7 100644
--- a/arch/ppc/lib/locks.c
+++ b/arch/ppc/lib/locks.c
@@ -1,5 +1,5 @@
/*
- * $Id: locks.c,v 1.7 1998/01/06 06:44:59 cort Exp $
+ * $Id: locks.c,v 1.17 1998/03/26 22:19:38 cort Exp $
*
* Locks for smp ppc
*
@@ -9,53 +9,78 @@
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/delay.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/spinlock.h>
+#include <asm/io.h>
#define DEBUG_LOCKS 1
#undef INIT_STUCK
-#define INIT_STUCK 10000
-
-#undef STUCK
-#define STUCK \
-if(!--stuck) { printk("spin_lock(%p) CPU#%d nip %08lx\n", lock, cpu, nip); stuck = INIT_STUCK; }
+#define INIT_STUCK 1000000
void _spin_lock(spinlock_t *lock)
{
- unsigned long val, nip = (unsigned long)__builtin_return_address(0);
int cpu = smp_processor_id();
+#ifdef DEBUG_LOCKS
int stuck = INIT_STUCK;
-
-again:
+#endif /* DEBUG_LOCKS */
/* try expensive atomic load/store to get lock */
- __asm__ __volatile__(
- "10: \n\t"
- "lwarx %0,0,%1 \n\t"
- "stwcx. %2,0,%1 \n\t"
- "bne- 10b \n\t"
- : "=r" (val)
- : "r" (&(lock->lock)), "r" ( (cpu&3)|(nip&~3L) ));
- if(val) {
+ while((unsigned long )xchg_u32((void *)&lock->lock,0xffffffff)) {
/* try cheap load until it's free */
while(lock->lock) {
- STUCK;
+#ifdef DEBUG_LOCKS
+ if(!--stuck)
+ {
+ printk("_spin_lock(%p) CPU#%d NIP %p"
+ " holder: cpu %ld pc %08lX\n",
+ lock, cpu, __builtin_return_address(0),
+ lock->owner_cpu,lock->owner_pc);
+ stuck = INIT_STUCK;
+ /* steal the lock */
+ /*xchg_u32((void *)&lock->lock,0);*/
+ }
+#endif /* DEBUG_LOCKS */
barrier();
}
- goto again;
}
+ lock->owner_pc = (unsigned long)__builtin_return_address(0);
+ lock->owner_cpu = cpu;
+}
+
+int spin_trylock(spinlock_t *lock)
+{
+ unsigned long result;
+
+ result = (unsigned long )xchg_u32((void *)&lock->lock,0xffffffff);
+ if ( !result )
+ {
+ lock->owner_cpu = smp_processor_id();
+ lock->owner_pc = (unsigned long)__builtin_return_address(0);
+ }
+ return (result == 0);
}
+
+
void _spin_unlock(spinlock_t *lp)
{
+#ifdef DEBUG_LOCKS
+ if ( !lp->lock )
+ panic("_spin_unlock(%p): no lock cpu %d %s/%d\n", lp,
+ smp_processor_id(),current->comm,current->pid);
+ if ( lp->owner_cpu != smp_processor_id() )
+ panic("_spin_unlock(%p): cpu %d trying clear of cpu %d pc %lx val %lx\n",
+ lp, smp_processor_id(), (int)lp->owner_cpu,
+ lp->owner_pc,lp->lock);
+#endif /* DEBUG_LOCKS */
+ lp->owner_pc = lp->owner_cpu = 0;
+ eieio();
lp->lock = 0;
+ eieio();
}
-#undef STUCK
-#define STUCK \
-if(!--stuck) { printk("_read_lock(%p) CPU#%d\n", rw, cpu); stuck = INIT_STUCK; }
-
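
Note: the hunk above replaces the hand-rolled lwarx/stwcx. loop with an exchange-based pattern: spin on a cheap load of lock->lock until the word looks free, then retry the expensive xchg_u32, and record owner_cpu/owner_pc so the DEBUG_LOCKS path can name the holder. Below is a minimal user-space sketch of the same acquire/release shape, with GCC's __sync builtins standing in for the kernel's xchg_u32 and eieio(); the demo_* names and the builtin substitutions are illustrative assumptions, not part of the patch.

struct demo_spinlock {
        volatile unsigned long lock;            /* 0 = free, non-zero = held */
        volatile unsigned long owner_cpu;       /* debug: CPU holding the lock */
        void *owner_pc;                         /* debug: where it was taken */
};

static void demo_spin_lock(struct demo_spinlock *lp, int cpu)
{
        /* atomic swap returns the old value; 0 means the lock was free
         * (the kernel stores 0xffffffff; any non-zero value works here) */
        while (__sync_lock_test_and_set(&lp->lock, 1UL)) {
                /* cheap read-only spin until it looks free, then retry the swap */
                while (lp->lock)
                        ;       /* the kernel loop calls barrier() here */
        }
        lp->owner_cpu = cpu;
        lp->owner_pc = __builtin_return_address(0);
}

static void demo_spin_unlock(struct demo_spinlock *lp)
{
        lp->owner_pc = 0;
        lp->owner_cpu = 0;
        __sync_synchronize();   /* stands in for eieio(): order the stores above */
        lp->lock = 0;
}

The new spin_trylock in the patch is the non-blocking variant of the same swap: it performs one xchg_u32 and reports success only if the old value was zero.
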
/*
* Just like x86, implement read-write locks as a 32-bit counter
* with the high bit (sign) being the "write" bit.
@@ -63,8 +88,10 @@ if(!--stuck) { printk("_read_lock(%p) CPU#%d\n", rw, cpu); stuck = INIT_STUCK; }
*/
void _read_lock(rwlock_t *rw)
{
+#ifdef DEBUG_LOCKS
unsigned long stuck = INIT_STUCK;
int cpu = smp_processor_id();
+#endif /* DEBUG_LOCKS */
again:
/* get our read lock in there */
@@ -76,7 +103,13 @@ again:
/* wait for the write lock to go away */
while ((signed long)((rw)->lock) < 0)
{
- STUCK;
+#ifdef DEBUG_LOCKS
+ if(!--stuck)
+ {
+ printk("_read_lock(%p) CPU#%d\n", rw, cpu);
+ stuck = INIT_STUCK;
+ }
+#endif /* DEBUG_LOCKS */
}
/* try to get the read lock again */
goto again;
@@ -87,33 +120,34 @@ void _read_unlock(rwlock_t *rw)
{
#ifdef DEBUG_LOCKS
if ( rw->lock == 0 )
- {
- if ( current)
printk("_read_unlock(): %s/%d (nip %08lX) lock %lx",
- current->comm,current->pid,current->tss.regs->nip,
+ current->comm,current->pid,current->tss.regs->nip,
rw->lock);
- else
- printk("no current\n");
- }
#endif /* DEBUG_LOCKS */
atomic_dec((atomic_t *) &(rw)->lock);
}
-#undef STUCK
-#define STUCK \
-if(!--stuck) { printk("write_lock(%p) CPU#%d lock %lx)\n", rw, cpu,rw->lock); stuck = INIT_STUCK; }
-
void _write_lock(rwlock_t *rw)
{
+#ifdef DEBUG_LOCKS
unsigned long stuck = INIT_STUCK;
int cpu = smp_processor_id();
+#endif /* DEBUG_LOCKS */
again:
if ( test_and_set_bit(31,&(rw)->lock) ) /* someone has a write lock */
{
while ( (rw)->lock & (1<<31) ) /* wait for write lock */
{
- STUCK;
+#ifdef DEBUG_LOCKS
+ if(!--stuck)
+ {
+ printk("write_lock(%p) CPU#%d lock %lx)\n",
+ rw, cpu,rw->lock);
+ stuck = INIT_STUCK;
+ }
+#endif /* DEBUG_LOCKS */
+ barrier();
}
goto again;
}
@@ -124,7 +158,15 @@ again:
clear_bit(31,&(rw)->lock);
while ( (rw)->lock & ~(1<<31) )
{
- STUCK;
+#ifdef DEBUG_LOCKS
+ if(!--stuck)
+ {
+ printk("write_lock(%p) 2 CPU#%d lock %lx)\n",
+ rw, cpu,rw->lock);
+ stuck = INIT_STUCK;
+ }
+#endif /* DEBUG_LOCKS */
+ barrier();
}
goto again;
}
@@ -134,14 +176,9 @@ void _write_unlock(rwlock_t *rw)
{
#ifdef DEBUG_LOCKS
if ( !(rw->lock & (1<<31)) )
- {
- if ( current)
printk("_write_lock(): %s/%d (nip %08lX) lock %lx",
current->comm,current->pid,current->tss.regs->nip,
rw->lock);
- else
- printk("no current\n");
- }
#endif /* DEBUG_LOCKS */
clear_bit(31,&(rw)->lock);
}
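
Note: as the comment in the file says, the read-write lock is a 32-bit counter whose high bit (bit 31) is the write lock: _read_lock atomically bumps the count and backs out if a writer owns the bit, while _write_lock claims bit 31 and then waits for the reader count to drain. A rough user-space rendering of that invariant follows, again using GCC __sync builtins in place of atomic_inc/atomic_dec/test_and_set_bit; the demo_* names are made up for the sketch and the DEBUG_LOCKS reporting is omitted.

#define DEMO_WRITE_BIT (1UL << 31)

struct demo_rwlock {
        volatile unsigned long lock;    /* bit 31 = writer, low bits = reader count */
};

static void demo_read_lock(struct demo_rwlock *rw)
{
        for (;;) {
                __sync_fetch_and_add(&rw->lock, 1);     /* optimistic reader bump */
                if (!(rw->lock & DEMO_WRITE_BIT))       /* no writer: read lock held */
                        return;
                __sync_fetch_and_sub(&rw->lock, 1);     /* writer present: back off */
                while (rw->lock & DEMO_WRITE_BIT)
                        ;                               /* wait for the writer */
        }
}

static void demo_read_unlock(struct demo_rwlock *rw)
{
        __sync_fetch_and_sub(&rw->lock, 1);
}

static void demo_write_lock(struct demo_rwlock *rw)
{
        for (;;) {
                /* claim bit 31; if another writer already holds it, wait and retry */
                if (__sync_fetch_and_or(&rw->lock, DEMO_WRITE_BIT) & DEMO_WRITE_BIT) {
                        while (rw->lock & DEMO_WRITE_BIT)
                                ;
                        continue;
                }
                /* write bit is ours; if readers are still active, back off and retry */
                if (!(rw->lock & ~DEMO_WRITE_BIT))
                        return;
                __sync_fetch_and_and(&rw->lock, ~DEMO_WRITE_BIT);
                while (rw->lock & ~DEMO_WRITE_BIT)
                        ;
        }
}

static void demo_write_unlock(struct demo_rwlock *rw)
{
        __sync_fetch_and_and(&rw->lock, ~DEMO_WRITE_BIT);
}

The writer's back-off-and-retry when readers slipped in mirrors the clear_bit/goto again sequence in the patch.
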
@@ -149,6 +186,8 @@ void _write_unlock(rwlock_t *rw)
void __lock_kernel(struct task_struct *task)
{
#ifdef DEBUG_LOCKS
+ unsigned long stuck = INIT_STUCK;
+
if ( (signed long)(task->lock_depth) < 0 )
{
printk("__lock_kernel(): %s/%d (nip %08lX) lock depth %x\n",
@@ -156,20 +195,40 @@ void __lock_kernel(struct task_struct *task)
task->lock_depth);
}
#endif /* DEBUG_LOCKS */
+
+ if ( atomic_inc_return((atomic_t *) &task->lock_depth) != 1 )
+ return;
/* mine! */
- if ( atomic_inc_return((atomic_t *) &task->lock_depth) == 1 )
- klock_info.akp = smp_processor_id();
+ while ( xchg_u32( (void *)&klock_info.kernel_flag, KLOCK_HELD) )
+ {
+ /* try cheap load until it's free */
+ while(klock_info.kernel_flag) {
+#ifdef DEBUG_LOCKS
+ if(!--stuck)
+ {
+ printk("_lock_kernel() CPU#%d NIP %p\n",
+ smp_processor_id(),
+ __builtin_return_address(0));
+ stuck = INIT_STUCK;
+ }
+#endif /* DEBUG_LOCKS */
+ barrier();
+ }
+ }
+
+ klock_info.akp = smp_processor_id();
/* my kernel mode! mine!!! */
}
-
+
void __unlock_kernel(struct task_struct *task)
{
#ifdef DEBUG_LOCKS
- if ( task->lock_depth == 0 )
+ if ( (task->lock_depth == 0) || (klock_info.kernel_flag != KLOCK_HELD) )
{
- printk("__unlock_kernel(): %s/%d (nip %08lX) lock depth %x\n",
- task->comm,task->pid,task->tss.regs->nip,
- task->lock_depth);
+ printk("__unlock_kernel(): %s/%d (nip %08lX) "
+ "lock depth %x flags %lx\n",
+ task->comm,task->pid,task->tss.regs->nip,
+ task->lock_depth, klock_info.kernel_flag);
klock_info.akp = NO_PROC_ID;
klock_info.kernel_flag = 0;
return;
@@ -177,8 +236,8 @@ void __unlock_kernel(struct task_struct *task)
#endif /* DEBUG_LOCKS */
if ( atomic_dec_and_test((atomic_t *) &task->lock_depth) )
{
- klock_info.akp = NO_PROC_ID;
- klock_info.kernel_flag = 0;
+ klock_info.akp = NO_PROC_ID;
+ klock_info.kernel_flag = KLOCK_CLEAR;
}
}
@@ -192,4 +251,3 @@ void reacquire_kernel_lock(struct task_struct *task, int cpu,int depth)
__sti();
}
}
-
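
Note: the last hunks rework the big kernel lock so that __lock_kernel bumps task->lock_depth first and only the outermost entry (the increment that returns 1) spins for klock_info.kernel_flag, while __unlock_kernel releases the flag only when the depth drops back to zero, writing KLOCK_CLEAR instead of a bare 0. A compact sketch of that per-task recursion counting, with __sync builtins standing in for atomic_inc_return/atomic_dec_and_test and xchg_u32; the demo_* names and constants are illustrative assumptions.

#define DEMO_KLOCK_CLEAR 0UL
#define DEMO_KLOCK_HELD  1UL
#define DEMO_NO_PROC_ID  (-1)

struct demo_klock_info {
        volatile unsigned long kernel_flag;     /* DEMO_KLOCK_HELD while owned */
        volatile int akp;                       /* CPU that owns the kernel lock */
};

static struct demo_klock_info demo_klock = { DEMO_KLOCK_CLEAR, DEMO_NO_PROC_ID };

static void demo_lock_kernel(volatile int *lock_depth, int cpu)
{
        /* nested entries by the same task only bump the depth counter */
        if (__sync_add_and_fetch(lock_depth, 1) != 1)
                return;

        /* outermost entry: cheap-read spin, then atomic swap, as in _spin_lock */
        while (__sync_lock_test_and_set(&demo_klock.kernel_flag, DEMO_KLOCK_HELD)) {
                while (demo_klock.kernel_flag)
                        ;
        }
        demo_klock.akp = cpu;
}

static void demo_unlock_kernel(volatile int *lock_depth)
{
        /* only the outermost exit hands the kernel lock back */
        if (__sync_sub_and_fetch(lock_depth, 1) == 0) {
                demo_klock.akp = DEMO_NO_PROC_ID;
                demo_klock.kernel_flag = DEMO_KLOCK_CLEAR;
        }
}

The DEBUG_LOCKS build in the patch additionally checks that kernel_flag is actually KLOCK_HELD before an unlock; the sketch leaves that diagnostic out.
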