author    Ralf Baechle <ralf@linux-mips.org>  2001-01-10 05:27:25 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2001-01-10 05:27:25 +0000
commit    c9c06167e7933d93a6e396174c68abf242294abb (patch)
tree      d9a8bb30663e9a3405a1ef37ffb62bc14b9f019f /include/asm-alpha
parent    f79e8cc3c34e4192a3e5ef4cc9c6542fdef703c0 (diff)
Merge with Linux 2.4.0-test12.
Diffstat (limited to 'include/asm-alpha')
-rw-r--r--  include/asm-alpha/bitops.h    | 153
-rw-r--r--  include/asm-alpha/byteorder.h |  38
-rw-r--r--  include/asm-alpha/fpu.h       |  33
-rw-r--r--  include/asm-alpha/pgtable.h   |  19
4 files changed, 113 insertions(+), 130 deletions(-)
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index 649abd02d..78e0f58c3 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -20,31 +20,12 @@
* bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
*/
-#define BITOPS_NO_BRANCH
-
-extern __inline__ void set_bit(unsigned long nr, volatile void * addr)
+extern __inline__ void
+set_bit(unsigned long nr, volatile void * addr)
{
-#ifndef BITOPS_NO_BRANCH
- unsigned long oldbit;
-#endif
unsigned long temp;
- unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
+ int *m = ((int *) addr) + (nr >> 5);
-#ifndef BITOPS_NO_BRANCH
- __asm__ __volatile__(
- "1: ldl_l %0,%4\n"
- " and %0,%3,%2\n"
- " bne %2,2f\n"
- " xor %0,%3,%0\n"
- " stl_c %0,%1\n"
- " beq %0,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
- :"Ir" (1UL << (nr & 31)), "m" (*m));
-#else
__asm__ __volatile__(
"1: ldl_l %0,%3\n"
" bis %0,%2,%0\n"
@@ -55,58 +36,28 @@ extern __inline__ void set_bit(unsigned long nr, volatile void * addr)
".previous"
:"=&r" (temp), "=m" (*m)
:"Ir" (1UL << (nr & 31)), "m" (*m));
-#endif
}
/*
* WARNING: non atomic version.
*/
-extern __inline__ void __set_bit(unsigned long nr, volatile void * addr)
+extern __inline__ void
+__set_bit(unsigned long nr, volatile void * addr)
{
- unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
- /*
- * Asm and C produces the same thing so let
- * the compiler to do its good work.
- */
-#if 0
- int tmp;
+ int *m = ((int *) addr) + (nr >> 5);
- __asm__ __volatile__(
- "ldl %0,%3\n\t"
- "bis %0,%2,%0\n\t"
- "stl %0,%1"
- : "=&r" (tmp), "=m" (*m)
- : "Ir" (1UL << (nr & 31)), "m" (*m));
-#else
*m |= 1UL << (nr & 31);
-#endif
}
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
-extern __inline__ void clear_bit(unsigned long nr, volatile void * addr)
+
+extern __inline__ void
+clear_bit(unsigned long nr, volatile void * addr)
{
-#ifndef BITOPS_NO_BRANCH
- unsigned long oldbit;
-#endif
unsigned long temp;
- unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
+ int *m = ((int *) addr) + (nr >> 5);
-#ifndef BITOPS_NO_BRANCH
- __asm__ __volatile__(
- "1: ldl_l %0,%4\n"
- " and %0,%3,%2\n"
- " beq %2,2f\n"
- " xor %0,%3,%0\n"
- " stl_c %0,%1\n"
- " beq %0,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
- :"Ir" (1UL << (nr & 31)), "m" (*m));
-#else
__asm__ __volatile__(
"1: ldl_l %0,%3\n"
" and %0,%2,%0\n"
@@ -117,13 +68,13 @@ extern __inline__ void clear_bit(unsigned long nr, volatile void * addr)
".previous"
:"=&r" (temp), "=m" (*m)
:"Ir" (~(1UL << (nr & 31))), "m" (*m));
-#endif
}
-extern __inline__ void change_bit(unsigned long nr, volatile void * addr)
+extern __inline__ void
+change_bit(unsigned long nr, volatile void * addr)
{
unsigned long temp;
- unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
+ int *m = ((int *) addr) + (nr >> 5);
__asm__ __volatile__(
"1: ldl_l %0,%3\n"
@@ -137,12 +88,12 @@ extern __inline__ void change_bit(unsigned long nr, volatile void * addr)
:"Ir" (1UL << (nr & 31)), "m" (*m));
}
-extern __inline__ int test_and_set_bit(unsigned long nr,
- volatile void * addr)
+extern __inline__ int
+test_and_set_bit(unsigned long nr, volatile void *addr)
{
unsigned long oldbit;
unsigned long temp;
- unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
+ int *m = ((int *) addr) + (nr >> 5);
__asm__ __volatile__(
"1: ldl_l %0,%4\n"
@@ -151,10 +102,10 @@ extern __inline__ int test_and_set_bit(unsigned long nr,
" xor %0,%3,%0\n"
" stl_c %0,%1\n"
" beq %0,3f\n"
+ "2:\n"
#ifdef CONFIG_SMP
" mb\n"
#endif
- "2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
@@ -167,32 +118,23 @@ extern __inline__ int test_and_set_bit(unsigned long nr,
/*
* WARNING: non atomic version.
*/
-extern __inline__ int __test_and_set_bit(unsigned long nr,
- volatile void * addr)
+extern __inline__ int
+__test_and_set_bit(unsigned long nr, volatile void * addr)
{
- unsigned long oldbit;
- unsigned long temp;
- unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
-
- __asm__ __volatile__(
- " ldl %0,%4\n"
- " and %0,%3,%2\n"
- " bne %2,1f\n"
- " xor %0,%3,%0\n"
- " stl %0,%1\n"
- "1:\n"
- :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
- :"Ir" (1UL << (nr & 31)), "m" (*m));
+ unsigned long mask = 1 << (nr & 0x1f);
+ int *m = ((int *) addr) + (nr >> 5);
+ int old = *m;
- return oldbit != 0;
+ *m = old | mask;
+ return (old & mask) != 0;
}
-extern __inline__ int test_and_clear_bit(unsigned long nr,
- volatile void * addr)
+extern __inline__ int
+test_and_clear_bit(unsigned long nr, volatile void * addr)
{
unsigned long oldbit;
unsigned long temp;
- unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
+ int *m = ((int *) addr) + (nr >> 5);
__asm__ __volatile__(
"1: ldl_l %0,%4\n"
@@ -201,10 +143,10 @@ extern __inline__ int test_and_clear_bit(unsigned long nr,
" xor %0,%3,%0\n"
" stl_c %0,%1\n"
" beq %0,3f\n"
+ "2:\n"
#ifdef CONFIG_SMP
" mb\n"
#endif
- "2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
@@ -217,32 +159,23 @@ extern __inline__ int test_and_clear_bit(unsigned long nr,
/*
* WARNING: non atomic version.
*/
-extern __inline__ int __test_and_clear_bit(unsigned long nr,
- volatile void * addr)
+extern __inline__ int
+__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
- unsigned long oldbit;
- unsigned long temp;
- unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
+ unsigned long mask = 1 << (nr & 0x1f);
+ int *m = ((int *) addr) + (nr >> 5);
+ int old = *m;
- __asm__ __volatile__(
- " ldl %0,%4\n"
- " and %0,%3,%2\n"
- " beq %2,1f\n"
- " xor %0,%3,%0\n"
- " stl %0,%1\n"
- "1:\n"
- :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
- :"Ir" (1UL << (nr & 31)), "m" (*m));
-
- return oldbit != 0;
+ *m = old & ~mask;
+ return (old & mask) != 0;
}
-extern __inline__ int test_and_change_bit(unsigned long nr,
- volatile void * addr)
+extern __inline__ int
+test_and_change_bit(unsigned long nr, volatile void * addr)
{
unsigned long oldbit;
unsigned long temp;
- unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
+ int *m = ((int *) addr) + (nr >> 5);
__asm__ __volatile__(
"1: ldl_l %0,%4\n"
@@ -262,7 +195,8 @@ extern __inline__ int test_and_change_bit(unsigned long nr,
return oldbit != 0;
}
-extern __inline__ int test_bit(int nr, volatile void * addr)
+extern __inline__ int
+test_bit(int nr, volatile void * addr)
{
return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}
@@ -289,7 +223,7 @@ extern inline unsigned long ffz_b(unsigned long x)
extern inline unsigned long ffz(unsigned long word)
{
#if defined(__alpha_cix__) && defined(__alpha_fix__)
- /* Whee. EV6 can calculate it directly. */
+ /* Whee. EV67 can calculate it directly. */
unsigned long result;
__asm__("cttz %1,%0" : "=r"(result) : "r"(~word));
return result;
@@ -325,7 +259,7 @@ extern inline int ffs(int word)
*/
#if defined(__alpha_cix__) && defined(__alpha_fix__)
-/* Whee. EV6 can calculate it directly. */
+/* Whee. EV67 can calculate it directly. */
extern __inline__ unsigned long hweight64(unsigned long w)
{
unsigned long result;
@@ -347,7 +281,8 @@ extern __inline__ unsigned long hweight64(unsigned long w)
/*
* Find next zero bit in a bitmap reasonably efficiently..
*/
-extern inline unsigned long find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
+extern inline unsigned long
+find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
{
unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
unsigned long result = offset & ~63UL;
diff --git a/include/asm-alpha/byteorder.h b/include/asm-alpha/byteorder.h
index edc376a04..91b55ea3e 100644
--- a/include/asm-alpha/byteorder.h
+++ b/include/asm-alpha/byteorder.h
@@ -3,6 +3,44 @@
#include <asm/types.h>
+#ifdef __GNUC__
+
+static __inline __u32 __attribute__((__const)) __arch__swab32(__u32 x)
+{
+ /*
+ * Unfortunately, we can't use the 6 instruction sequence
+ * on ev6 since the latency of the UNPKBW is 3, which is
+ * pretty hard to hide. Just in case a future implementation
+ * has a lower latency, here's the sequence (also by Mike Burrows)
+ *
+ * UNPKBW a0, v0 v0: 00AA00BB00CC00DD
+ * SLL v0, 24, a0 a0: BB00CC00DD000000
+ * BIS v0, a0, a0 a0: BBAACCBBDDCC00DD
+ * EXTWL a0, 6, v0 v0: 000000000000BBAA
+ * ZAP a0, 0xf3, a0 a0: 00000000DDCC0000
+ * ADDL a0, v0, v0 v0: ssssssssDDCCBBAA
+ */
+
+ __u64 t0, t1, t2, t3;
+
+ __asm__("inslh %1, 7, %0" /* t0 : 0000000000AABBCC */
+ : "=r"(t0) : "r"(x));
+ __asm__("inswl %1, 3, %0" /* t1 : 000000CCDD000000 */
+ : "=r"(t1) : "r"(x));
+
+ t1 |= t0; /* t1 : 000000CCDDAABBCC */
+ t2 = t1 >> 16; /* t2 : 0000000000CCDDAA */
+ t0 = t1 & 0xFF00FF00; /* t0 : 00000000DD00BB00 */
+ t3 = t2 & 0x00FF00FF; /* t3 : 0000000000CC00AA */
+ t1 = t0 + t3; /* t1 : ssssssssDDCCBBAA */
+
+ return t1;
+}
+
+#define __arch__swab32 __arch__swab32
+
+#endif /* __GNUC__ */
+
#define __BYTEORDER_HAS_U64__
#include <linux/byteorder/little_endian.h>
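Note: the new __arch__swab32() above reverses the four bytes of a word with inslh/inswl plus integer arithmetic rather than the UNPKBW sequence described in its comment. A hedged, portable C equivalent of the same byte swap is shown below for illustration only; it deliberately avoids the Alpha-specific instructions.

/* Hedged sketch: plain-C byte swap computing the same result as
 * __arch__swab32, without the Alpha inslh/inswl instructions. */
#include <stdint.h>
#include <stdio.h>

static uint32_t swab32_sketch(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}

int main(void)
{
	printf("%08x\n", swab32_sketch(0xAABBCCDDu));	/* prints ddccbbaa */
	return 0;
}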
diff --git a/include/asm-alpha/fpu.h b/include/asm-alpha/fpu.h
index b02a78594..acd1b9a03 100644
--- a/include/asm-alpha/fpu.h
+++ b/include/asm-alpha/fpu.h
@@ -131,17 +131,19 @@ rdfpcr(void)
unsigned long tmp, ret;
#if defined(__alpha_cix__) || defined(__alpha_fix__)
- __asm__ ("ftoit $f0,%0\n\t"
- "mf_fpcr $f0\n\t"
- "ftoit $f0,%1\n\t"
- "itoft %0,$f0"
- : "=r"(tmp), "=r"(ret));
+ __asm__ __volatile__ (
+ "ftoit $f0,%0\n\t"
+ "mf_fpcr $f0\n\t"
+ "ftoit $f0,%1\n\t"
+ "itoft %0,$f0"
+ : "=r"(tmp), "=r"(ret));
#else
- __asm__ ("stt $f0,%0\n\t"
- "mf_fpcr $f0\n\t"
- "stt $f0,%1\n\t"
- "ldt $f0,%0"
- : "=m"(tmp), "=m"(ret));
+ __asm__ __volatile__ (
+ "stt $f0,%0\n\t"
+ "mf_fpcr $f0\n\t"
+ "stt $f0,%1\n\t"
+ "ldt $f0,%0"
+ : "=m"(tmp), "=m"(ret));
#endif
return ret;
@@ -153,11 +155,12 @@ wrfpcr(unsigned long val)
unsigned long tmp;
#if defined(__alpha_cix__) || defined(__alpha_fix__)
- __asm__ __volatile__ ("ftoit $f0,%0\n\t"
- "itoft %1,$f0\n\t"
- "mt_fpcr $f0\n\t"
- "itoft %0,$f0"
- : "=&r"(tmp) : "r"(val));
+ __asm__ __volatile__ (
+ "ftoit $f0,%0\n\t"
+ "itoft %1,$f0\n\t"
+ "mt_fpcr $f0\n\t"
+ "itoft %0,$f0"
+ : "=&r"(tmp) : "r"(val));
#else
__asm__ __volatile__ (
"stt $f0,%0\n\t"
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index 54341fff1..b3f6e8141 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -59,6 +59,11 @@
#define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */
#define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */
#define _PAGE_ASM 0x0010
+#if defined(CONFIG_ALPHA_EV6) && !defined(CONFIG_SMP)
+#define _PAGE_MBE 0x0080 /* MB disable bit for EV6. */
+#else
+#define _PAGE_MBE 0x0000
+#endif
#define _PAGE_KRE 0x0100 /* xxx - see below on the "accessed" bit */
#define _PAGE_URE 0x0200 /* xxx */
#define _PAGE_KWE 0x1000 /* used to do the dirty bit in software */
@@ -85,19 +90,20 @@
#define _PFN_MASK 0xFFFFFFFF00000000
#define _PAGE_TABLE (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
-#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)
+#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS | _PAGE_MBE)
/*
- * All the normal masks have the "page accessed" bits on, as any time they are used,
- * the page is accessed. They are cleared only by the page-out routines
+ * All the normal masks have the "page accessed" bits on, as any time they
+ * are used, the page is accessed. They are cleared only by the page-out
+ * routines.
*/
#define PAGE_NONE __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
-#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE | _PAGE_MBE)
-#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_MBE | (x))
#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)
@@ -189,6 +195,7 @@ extern unsigned long __zero_page(void);
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
+
#define mk_pte(page, pgprot) \
({ \
pte_t pte; \
@@ -199,7 +206,7 @@ extern unsigned long __zero_page(void);
})
extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
-{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpage) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }
+{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpage) << (32-PAGE_SHIFT)) | (pgprot_val(pgprot) & ~_PAGE_MBE); return pte; }
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
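Note: _PAGE_MBE is defined as a real bit only for EV6 uniprocessor builds and collapses to zero everywhere else, so the same PAGE_KERNEL, _PAGE_NORMAL() and _PAGE_CHG_MASK expressions serve both configurations, while mk_pte_phys() masks it back out. Below is a hedged sketch of how the conditional definition folds into a protection value; bit values are copied from the header (_PAGE_VALID's 0x0001 comes from the full file rather than this excerpt), and the CONFIG_* macros here are plain compile-time switches, not kernel configuration.

/* Hedged sketch: how the conditional _PAGE_MBE definition composes into
 * a protection value.  Simplified: no __pgprot() wrapper. */
#include <stdio.h>

#define _PAGE_VALID	0x0001
#define _PAGE_ASM	0x0010
#define _PAGE_KRE	0x0100
#define _PAGE_KWE	0x1000

#if defined(CONFIG_ALPHA_EV6) && !defined(CONFIG_SMP)
#define _PAGE_MBE	0x0080		/* MB disable bit for EV6 UP */
#else
#define _PAGE_MBE	0x0000		/* folds away on every other config */
#endif

#define PAGE_KERNEL	(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE | _PAGE_MBE)

int main(void)
{
	/* With -DCONFIG_ALPHA_EV6 and no -DCONFIG_SMP this prints 0x1191,
	 * otherwise 0x1111: same expression, MBE simply drops out. */
	printf("PAGE_KERNEL = %#x\n", PAGE_KERNEL);
	return 0;
}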