author    Ralf Baechle <ralf@linux-mips.org>  2000-07-15 03:32:22 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-07-15 03:32:22 +0000
commit    f1da2c3860e301527d56a1ef0b56c649ee7c4b1b (patch)
tree      562b5d2e8b9cb62eb983d78ff6bcf9789e08fcf6 /arch/ppc/kernel/bitops.c
parent    00f11569ac8ca73cbcdef8822de1583e79aee571 (diff)
Merge with Linux 2.4.0-test5-pre1. This works again on Origin UP.
The IP22 cache bugs which are plaguing some machines are still unfixed.
Diffstat (limited to 'arch/ppc/kernel/bitops.c')
-rw-r--r--  arch/ppc/kernel/bitops.c | 197
1 file changed, 58 insertions(+), 139 deletions(-)
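
Note (illustrative, not part of the patch): the diff below replaces Cort's out-of-line bit operations with Paul Mackerras' versions, guarded by __INLINE_BITOPS, dropping the alignment printk checks and adding "=m"/"m" constraints plus SMP_WMB/SMP_MB barriers around the lwarx/stwcx. reservation loops. As a rough reference for what each loop computes, here is a minimal C sketch of set_bit using a GCC atomic builtin; sketch_set_bit is a hypothetical name, the code assumes a 32-bit unsigned long as on PPC32, and __ATOMIC_SEQ_CST only stands in loosely for the barrier pair the patch adds.

	/* Illustrative sketch only: atomically OR one bit into a 32-bit word. */
	static inline void sketch_set_bit(int nr, volatile void *addr)
	{
		unsigned long mask = 1UL << (nr & 0x1f);                 /* bit position within the word */
		unsigned long *p = ((unsigned long *)addr) + (nr >> 5);  /* word that holds bit nr */

		__atomic_fetch_or(p, mask, __ATOMIC_SEQ_CST);            /* atomic read-modify-write */
	}

clear_bit and change_bit in the hunks below follow the same pattern with andc and xor in place of or.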
diff --git a/arch/ppc/kernel/bitops.c b/arch/ppc/kernel/bitops.c
index fb5a19e3a..69e07057a 100644
--- a/arch/ppc/kernel/bitops.c
+++ b/arch/ppc/kernel/bitops.c
@@ -6,60 +6,58 @@
#include <asm/bitops.h>
/*
- * I left these here since the problems with "cc" make it difficult to keep
- * them in bitops.h -- Cort
+ * If the bitops are not inlined in bitops.h, they are defined here.
+ * -- paulus
*/
-void set_bit(int nr, volatile void *addr)
+#if !__INLINE_BITOPS
+void set_bit(int nr, volatile void * addr)
{
- unsigned int t;
- unsigned int mask = 1 << (nr & 0x1f);
- volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
-
- if ((unsigned long)addr & 3)
- printk(KERN_ERR "set_bit(%x, %p)\n", nr, addr);
- __asm__ __volatile__("\n\
-1: lwarx %0,0,%2
- or %0,%0,%1
- stwcx. %0,0,%2
+ unsigned long old;
+ unsigned long mask = 1 << (nr & 0x1f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
+
+ __asm__ __volatile__(SMP_WMB "\
+1: lwarx %0,0,%3
+ or %0,%0,%2
+ stwcx. %0,0,%3
bne 1b"
- : "=&r" (t) /*, "=m" (*p)*/
- : "r" (mask), "r" (p)
- : "cc");
+ SMP_MB
+ : "=&r" (old), "=m" (*p)
+ : "r" (mask), "r" (p), "m" (*p)
+ : "cc" );
}
void clear_bit(int nr, volatile void *addr)
{
- unsigned int t;
- unsigned int mask = 1 << (nr & 0x1f);
- volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
+ unsigned long old;
+ unsigned long mask = 1 << (nr & 0x1f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
- if ((unsigned long)addr & 3)
- printk(KERN_ERR "clear_bit(%x, %p)\n", nr, addr);
- __asm__ __volatile__("\n\
-1: lwarx %0,0,%2
- andc %0,%0,%1
- stwcx. %0,0,%2
+ __asm__ __volatile__(SMP_WMB "\
+1: lwarx %0,0,%3
+ andc %0,%0,%2
+ stwcx. %0,0,%3
bne 1b"
- : "=&r" (t) /*, "=m" (*p)*/
- : "r" (mask), "r" (p)
+ SMP_MB
+ : "=&r" (old), "=m" (*p)
+ : "r" (mask), "r" (p), "m" (*p)
: "cc");
}
void change_bit(int nr, volatile void *addr)
{
- unsigned int t;
- unsigned int mask = 1 << (nr & 0x1f);
- volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
+ unsigned long old;
+ unsigned long mask = 1 << (nr & 0x1f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
- if ((unsigned long)addr & 3)
- printk(KERN_ERR "change_bit(%x, %p)\n", nr, addr);
- __asm__ __volatile__("\n\
-1: lwarx %0,0,%2
- xor %0,%0,%1
- stwcx. %0,0,%2
+ __asm__ __volatile__(SMP_WMB "\
+1: lwarx %0,0,%3
+ xor %0,%0,%2
+ stwcx. %0,0,%3
bne 1b"
- : "=&r" (t) /*, "=m" (*p)*/
- : "r" (mask), "r" (p)
+ SMP_MB
+ : "=&r" (old), "=m" (*p)
+ : "r" (mask), "r" (p), "m" (*p)
: "cc");
}
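
Note (illustrative, not part of the patch): the remaining hunks convert test_and_set_bit, test_and_clear_bit and test_and_change_bit in the same way. Unlike the void variants above, these return whether the bit was set before the operation, so they can be used as a simple try-lock. A hedged usage sketch, assuming <asm/bitops.h> is included and using a hypothetical flag word busy_flags:

	static unsigned long busy_flags;	/* hypothetical: one bit per resource */

	void example_caller(void)
	{
		if (test_and_set_bit(0, &busy_flags)) {
			/* bit 0 was already set: another path owns the resource */
			return;
		}
		/* bit 0 was clear and is now set atomically: we own the resource */
		/* ... use the resource ... */
		clear_bit(0, &busy_flags);
	}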
@@ -69,15 +67,14 @@ int test_and_set_bit(int nr, volatile void *addr)
unsigned int mask = 1 << (nr & 0x1f);
volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
- if ((unsigned long)addr & 3)
- printk(KERN_ERR "test_and_set_bit(%x, %p)\n", nr, addr);
- __asm__ __volatile__("\n\
-1: lwarx %0,0,%3
- or %1,%0,%2
- stwcx. %1,0,%3
+ __asm__ __volatile__(SMP_WMB "\
+1: lwarx %0,0,%4
+ or %1,%0,%3
+ stwcx. %1,0,%4
bne 1b"
- : "=&r" (old), "=&r" (t) /*, "=m" (*p)*/
- : "r" (mask), "r" (p)
+ SMP_MB
+ : "=&r" (old), "=&r" (t), "=m" (*p)
+ : "r" (mask), "r" (p), "m" (*p)
: "cc");
return (old & mask) != 0;
@@ -89,15 +86,14 @@ int test_and_clear_bit(int nr, volatile void *addr)
unsigned int mask = 1 << (nr & 0x1f);
volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
- if ((unsigned long)addr & 3)
- printk(KERN_ERR "test_and_clear_bit(%x, %p)\n", nr, addr);
- __asm__ __volatile__("\n\
-1: lwarx %0,0,%3
- andc %1,%0,%2
- stwcx. %1,0,%3
+ __asm__ __volatile__(SMP_WMB "\
+1: lwarx %0,0,%4
+ andc %1,%0,%3
+ stwcx. %1,0,%4
bne 1b"
- : "=&r" (old), "=&r" (t) /*, "=m" (*p)*/
- : "r" (mask), "r" (p)
+ SMP_MB
+ : "=&r" (old), "=&r" (t), "=m" (*p)
+ : "r" (mask), "r" (p), "m" (*p)
: "cc");
return (old & mask) != 0;
@@ -109,93 +105,16 @@ int test_and_change_bit(int nr, volatile void *addr)
unsigned int mask = 1 << (nr & 0x1f);
volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
- if ((unsigned long)addr & 3)
- printk(KERN_ERR "test_and_change_bit(%x, %p)\n", nr, addr);
- __asm__ __volatile__("\n\
-1: lwarx %0,0,%3
- xor %1,%0,%2
- stwcx. %1,0,%3
+ __asm__ __volatile__(SMP_WMB "\
+1: lwarx %0,0,%4
+ xor %1,%0,%3
+ stwcx. %1,0,%4
bne 1b"
- : "=&r" (old), "=&r" (t) /*, "=m" (*p)*/
- : "r" (mask), "r" (p)
+ SMP_MB
+ : "=&r" (old), "=&r" (t), "=m" (*p)
+ : "r" (mask), "r" (p), "m" (*p)
: "cc");
return (old & mask) != 0;
}
-
-/* I put it in bitops.h -- Cort */
-#if 0
-int ffz(unsigned int x)
-{
- int n;
-
- x = ~x & (x+1); /* set LS zero to 1, other bits to 0 */
- __asm__ ("cntlzw %0,%1" : "=r" (n) : "r" (x));
- return 31 - n;
-}
-
-/*
- * This implementation of find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h.
- */
-
-int find_first_zero_bit(void * addr, int size)
-{
- unsigned int * p = ((unsigned int *) addr);
- unsigned int result = 0;
- unsigned int tmp;
-
- if (size == 0)
- return 0;
- while (size & ~31UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
- tmp |= ~0UL << size;
-found_middle:
- return result + ffz(tmp);
-}
-
-/*
- * Find next zero bit in a bitmap reasonably efficiently..
- */
-int find_next_zero_bit(void * addr, int size, int offset)
-{
- unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *(p++);
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
- goto found_first;
- if (~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size & ~31UL) {
- if (~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-found_first:
- tmp |= ~0UL << size;
-found_middle:
- return result + ffz(tmp);
-}
-#endif
+#endif /* !__INLINE_BITOPS */
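
Note (illustrative, not part of the patch): the removed ffz/find_first_zero_bit/find_next_zero_bit block was already dead code under #if 0, since Cort had moved it into bitops.h. For reference, the removed ffz() isolated the lowest zero bit with ~x & (x + 1) and then used cntlzw to turn it into an index. A portable C sketch of the same idea, using a GCC builtin (sketch_ffz is a hypothetical name; like the original, the result is undefined when every bit of x is set):

	static inline int sketch_ffz(unsigned int x)
	{
		unsigned int lowest_zero = ~x & (x + 1);	/* only the lowest 0 bit of x survives */
		return __builtin_ctz(lowest_zero);		/* its index, counting from bit 0 */
	}

	/* e.g. sketch_ffz(0x0000ffff) == 16, matching the removed 31 - cntlzw result. */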