author    Ralf Baechle <ralf@linux-mips.org>  2000-11-28 03:58:46 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-11-28 03:58:46 +0000
commit    b63ad0882a16a5d28003e57f2b0b81dee3fb322b (patch)
tree      0a343ce219e2b8b38a5d702d66032c57b83d9720 /include/asm-alpha
parent    a9d7bff9a84dba79609a0002e5321b74c4d64c64 (diff)
Merge with 2.4.0-test11.
Diffstat (limited to 'include/asm-alpha')
-rw-r--r--  include/asm-alpha/atomic.h              4
-rw-r--r--  include/asm-alpha/compiler.h            9
-rw-r--r--  include/asm-alpha/module.h             11
-rw-r--r--  include/asm-alpha/param.h               4
-rw-r--r--  include/asm-alpha/pgtable.h             3
-rw-r--r--  include/asm-alpha/semaphore-helper.h  128
-rw-r--r--  include/asm-alpha/semaphore.h         482
-rw-r--r--  include/asm-alpha/spinlock.h            2
-rw-r--r--  include/asm-alpha/xor.h               855
9 files changed, 1031 insertions, 467 deletions
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 4e8d0c410..a509f6c74 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -66,8 +66,8 @@ static __inline__ long atomic_add_return(int i, atomic_t * v)
long temp, result;
__asm__ __volatile__(
"1: ldl_l %0,%1\n"
+ " addl %0,%3,%2\n"
" addl %0,%3,%0\n"
- " mov %0,%2\n"
" stl_c %0,%1\n"
" beq %0,2f\n"
" mb\n"
@@ -84,8 +84,8 @@ static __inline__ long atomic_sub_return(int i, atomic_t * v)
long temp, result;
__asm__ __volatile__(
"1: ldl_l %0,%1\n"
+ " subl %0,%3,%2\n"
" subl %0,%3,%0\n"
- " mov %0,%2\n"
" stl_c %0,%1\n"
" beq %0,2f\n"
" mb\n"
diff --git a/include/asm-alpha/compiler.h b/include/asm-alpha/compiler.h
index 70d6ce775..7714bf2df 100644
--- a/include/asm-alpha/compiler.h
+++ b/include/asm-alpha/compiler.h
@@ -72,4 +72,13 @@
__asm__("stw %1,%0" : "=m"(mem) : "r"(val))
#endif
+/* Somewhere in the middle of the GCC 2.96 development cycle, we implemented
+ a mechanism by which the user can annotate likely branch directions and
+ expect the blocks to be reordered appropriately. Define __builtin_expect
+ to nothing for earlier compilers. */
+
+#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
+#define __builtin_expect(x, expected_value) (x)
+#endif
+
#endif /* __ALPHA_COMPILER_H */
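
A short usage sketch of the hint this fallback neutralizes (hypothetical helper; on GCC >= 2.96 the unlikely branch can be moved out of the hot path, on older compilers the macro above reduces it to a plain condition):

    #include <string.h>

    /* Hypothetical example: annotate the failure check as unlikely. */
    static int copy_checked(void *dst, const void *src, unsigned long n)
    {
            if (__builtin_expect(dst == 0 || src == 0, 0))
                    return -1;              /* cold path */
            memcpy(dst, src, n);            /* hot path */
            return 0;
    }
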
diff --git a/include/asm-alpha/module.h b/include/asm-alpha/module.h
new file mode 100644
index 000000000..6e0efe9ac
--- /dev/null
+++ b/include/asm-alpha/module.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_ALPHA_MODULE_H
+#define _ASM_ALPHA_MODULE_H
+/*
+ * This file contains the alpha architecture specific module code.
+ */
+
+#define module_map(x) vmalloc(x)
+#define module_unmap(x) vfree(x)
+#define module_arch_init(x) (0)
+
+#endif /* _ASM_ALPHA_MODULE_H */
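
A hedged sketch of the intended use of these macros (hypothetical caller; the real user is the generic module loader): space for a module image is obtained with module_map and released with module_unmap, which on Alpha are simply vmalloc and vfree.

    /* Hypothetical caller, for illustration only. */
    static void *alloc_module_image(unsigned long size)
    {
            void *image = module_map(size);         /* vmalloc(size) on Alpha */
            /* ... copy and relocate the module image into 'image' ... */
            return image;                           /* later: module_unmap(image) */
    }
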
diff --git a/include/asm-alpha/param.h b/include/asm-alpha/param.h
index 768c92e02..a398a3009 100644
--- a/include/asm-alpha/param.h
+++ b/include/asm-alpha/param.h
@@ -30,4 +30,8 @@
#define MAXHOSTNAMELEN 64 /* max length of hostname */
+#ifdef __KERNEL__
+# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
+#endif
+
#endif /* _ASM_ALPHA_PARAM_H */
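
Illustration of what the new define provides inside the kernel: clock-tick counts such as those reported by times() convert to seconds by dividing by CLOCKS_PER_SEC, which is simply HZ (illustrative helper, not part of the patch):

    static unsigned long ticks_to_seconds(unsigned long ticks)
    {
            return ticks / CLOCKS_PER_SEC;  /* CLOCKS_PER_SEC == HZ in-kernel */
    }
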
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index 5c4373d3b..54341fff1 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -301,9 +301,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x) ((pte_t) { (x).val })
-#define module_map vmalloc
-#define module_unmap vfree
-
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page) (0)
#define kern_addr_valid(addr) (1)
diff --git a/include/asm-alpha/semaphore-helper.h b/include/asm-alpha/semaphore-helper.h
deleted file mode 100644
index 52d8fb5f4..000000000
--- a/include/asm-alpha/semaphore-helper.h
+++ /dev/null
@@ -1,128 +0,0 @@
-#ifndef _ALPHA_SEMAPHORE_HELPER_H
-#define _ALPHA_SEMAPHORE_HELPER_H
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999 Richard Henderson
- */
-
-/*
- * These two _must_ execute atomically wrt each other.
- *
- * This is trivially done with load_locked/store_cond,
- * which we have. Let the rest of the losers suck eggs.
- */
-
-static inline void
-wake_one_more(struct semaphore * sem)
-{
- atomic_inc(&sem->waking);
-}
-
-static inline int
-waking_non_zero(struct semaphore *sem)
-{
- long ret, tmp;
-
- /* An atomic conditional decrement. */
- __asm__ __volatile__(
- "1: ldl_l %1,%2\n"
- " blt %1,2f\n"
- " subl %1,1,%0\n"
- " stl_c %0,%2\n"
- " beq %0,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=r"(ret), "=r"(tmp), "=m"(sem->waking.counter)
- : "0"(0));
-
- return ret > 0;
-}
-
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- *
- * We must undo the sem->count down_interruptible decrement
- * simultaneously and atomicly with the sem->waking adjustment,
- * otherwise we can race with wake_one_more.
- *
- * This is accomplished by doing a 64-bit ll/sc on the 2 32-bit words.
- */
-
-static inline int
-waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
-{
- long ret, tmp, tmp2, tmp3;
-
- /* "Equivalent" C. Note that we have to do this all without
- (taken) branches in order to be a valid ll/sc sequence.
-
- do {
- tmp = ldq_l;
- ret = 0;
- if (tmp >= 0) {
- tmp += 0xffffffff00000000;
- ret = 1;
- }
- else if (pending) {
- // Since -1 + 1 carries into the high word, we have
- // to be more careful adding 1 here.
- tmp = (tmp & 0xffffffff00000000)
- | ((tmp + 1) & 0x00000000ffffffff;
- ret = -EINTR;
- }
- else {
- break; // ideally. we don't actually break
- // since this is a predicate we don't
- // have, and is more trouble to build
- // than to elide the noop stq_c.
- }
- tmp = stq_c = tmp;
- } while (tmp == 0);
- */
-
- __asm__ __volatile__(
- "1: ldq_l %1,%4\n"
- " lda %0,0\n"
- " cmovne %5,%6,%0\n"
- " addq %1,1,%2\n"
- " and %1,%7,%3\n"
- " andnot %2,%7,%2\n"
- " cmovge %1,1,%0\n"
- " or %3,%2,%2\n"
- " addq %1,%7,%3\n"
- " cmovne %5,%2,%1\n"
- " cmovge %2,%3,%1\n"
- " stq_c %1,%4\n"
- " beq %1,3f\n"
- "2:\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(tmp3), "=m"(*sem)
- : "r"(signal_pending(tsk)), "r"(-EINTR),
- "r"(0xffffffff00000000));
-
- return ret;
-}
-
-/*
- * waking_non_zero_trylock is unused. we do everything in
- * down_trylock and let non-ll/sc hosts bounce around.
- */
-
-static inline int
-waking_non_zero_trylock(struct semaphore *sem)
-{
- return 0;
-}
-
-#endif
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
index 38bc05c6e..0e4a1e3a6 100644
--- a/include/asm-alpha/semaphore.h
+++ b/include/asm-alpha/semaphore.h
@@ -11,11 +11,16 @@
#include <asm/current.h>
#include <asm/system.h>
#include <asm/atomic.h>
+#include <asm/compiler.h> /* __builtin_expect */
+
+#define DEBUG_SEMAPHORE 0
+#define DEBUG_RW_SEMAPHORE 0
struct semaphore {
/* Careful, inline assembly knows about the position of these two. */
- atomic_t count;
+ atomic_t count __attribute__((aligned(8)));
atomic_t waking; /* biased by -1 */
+
wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
long __magic;
@@ -42,7 +47,7 @@ struct semaphore {
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
-extern inline void sema_init(struct semaphore *sem, int val)
+static inline void sema_init(struct semaphore *sem, int val)
{
/*
* Logically,
@@ -68,103 +73,33 @@ static inline void init_MUTEX_LOCKED (struct semaphore *sem)
sema_init(sem, 0);
}
-
-extern void __down(struct semaphore * sem);
-extern int __down_interruptible(struct semaphore * sem);
-extern int __down_trylock(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
-
-/* All have custom assembly linkages. */
-extern void __down_failed(struct semaphore * sem);
-extern void __down_failed_interruptible(struct semaphore * sem);
-extern void __down_failed_trylock(struct semaphore * sem);
-extern void __up_wakeup(struct semaphore * sem);
+extern void down(struct semaphore *);
+extern void __down_failed(struct semaphore *);
+extern int down_interruptible(struct semaphore *);
+extern int __down_failed_interruptible(struct semaphore *);
+extern int down_trylock(struct semaphore *);
+extern void up(struct semaphore *);
+extern void __up_wakeup(struct semaphore *);
/*
- * Whee. Hidden out of line code is fun. The contention cases are
- * handled out of line in kernel/sched.c; arch/alpha/lib/semaphore.S
- * takes care of making sure we can call it without clobbering regs.
+ * Hidden out of line code is fun, but extremely messy. Rely on newer
+ * compilers to do a respectable job with this. The contention cases
+ * are handled out of line in arch/alpha/kernel/semaphore.c.
*/
-extern inline void down(struct semaphore * sem)
+static inline void __down(struct semaphore *sem)
{
- /* Given that we have to use particular hard registers to
- communicate with __down_failed anyway, reuse them in
- the atomic operation as well.
-
- __down_failed takes the semaphore address in $24, and
- it's return address in $28. The pv is loaded as usual.
- The gp is clobbered (in the module case) as usual. */
-
- /* This little bit of silliness is to get the GP loaded for
- a function that ordinarily wouldn't. Otherwise we could
- have it done by the macro directly, which can be optimized
- the linker. */
- register void *pv __asm__("$27");
-
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- pv = __down_failed;
- __asm__ __volatile__ (
- "/* semaphore down operation */\n"
- "1: ldl_l $24,%1\n"
- " subl $24,1,$28\n"
- " subl $24,1,$24\n"
- " stl_c $28,%1\n"
- " beq $28,2f\n"
- " blt $24,3f\n"
- "4: mb\n"
- ".subsection 2\n"
- "2: br 1b\n"
- "3: lda $24,%1\n"
- " jsr $28,($27),__down_failed\n"
- " ldgp $29,0($28)\n"
- " br 4b\n"
- ".previous"
- : "=r"(pv)
- : "m"(sem->count), "r"(pv)
- : "$24", "$28", "memory");
+ long count = atomic_dec_return(&sem->count);
+ if (__builtin_expect(count < 0, 0))
+ __down_failed(sem);
}
-extern inline int down_interruptible(struct semaphore * sem)
+static inline int __down_interruptible(struct semaphore *sem)
{
- /* __down_failed_interruptible takes the semaphore address in $24,
- and it's return address in $28. The pv is loaded as usual.
- The gp is clobbered (in the module case) as usual. The return
- value is in $24. */
-
- register int ret __asm__("$24");
- register void *pv __asm__("$27");
-
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- pv = __down_failed_interruptible;
- __asm__ __volatile__ (
- "/* semaphore down interruptible operation */\n"
- "1: ldl_l $24,%2\n"
- " subl $24,1,$28\n"
- " subl $24,1,$24\n"
- " stl_c $28,%2\n"
- " beq $28,2f\n"
- " blt $24,3f\n"
- " mov $31,%0\n"
- "4: mb\n"
- ".subsection 2\n"
- "2: br 1b\n"
- "3: lda $24,%2\n"
- " jsr $28,($27),__down_failed_interruptible\n"
- " ldgp $29,0($28)\n"
- " br 4b\n"
- ".previous"
- : "=r"(ret), "=r"(pv)
- : "m"(sem->count), "r"(pv)
- : "$28", "memory");
-
- return ret;
+ long count = atomic_dec_return(&sem->count);
+ if (__builtin_expect(count < 0, 0))
+ return __down_failed_interruptible(sem);
+ return 0;
}
/*
@@ -174,7 +109,7 @@ extern inline int down_interruptible(struct semaphore * sem)
* Do this by using ll/sc on the pair of 32-bit words.
*/
-extern inline int down_trylock(struct semaphore * sem)
+static inline int __down_trylock(struct semaphore * sem)
{
long ret, tmp, tmp2, sub;
@@ -182,25 +117,21 @@ extern inline int down_trylock(struct semaphore * sem)
(taken) branches in order to be a valid ll/sc sequence.
do {
- tmp = ldq_l;
- sub = 0x0000000100000000;
- ret = ((int)tmp <= 0); // count =< 0 ?
- if ((int)tmp >= 0) sub = 0; // count >= 0 ?
- // note that if count=0 subq overflows to the high
- // longword (i.e waking)
- ret &= ((long)tmp < 0); // waking < 0 ?
- sub += 1;
- if (ret)
- break;
- tmp -= sub;
- tmp = stq_c = tmp;
+ tmp = ldq_l;
+ sub = 0x0000000100000000;
+ ret = ((int)tmp <= 0); // count <= 0 ?
+ // Note that if count=0, the decrement overflows into
+ // waking, so cancel the 1 loaded above. Also cancel
+ // it if the lock was already free.
+ if ((int)tmp >= 0) sub = 0; // count >= 0 ?
+ ret &= ((long)tmp < 0); // waking < 0 ?
+ sub += 1;
+ if (ret) break;
+ tmp -= sub;
+ tmp = stq_c = tmp;
} while (tmp == 0);
*/
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
__asm__ __volatile__(
"1: ldq_l %1,%4\n"
" lda %3,1\n"
@@ -215,7 +146,7 @@ extern inline int down_trylock(struct semaphore * sem)
" subq %1,%3,%1\n"
" stq_c %1,%4\n"
" beq %1,3f\n"
- "2:\n"
+ "2: mb\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
@@ -226,45 +157,70 @@ extern inline int down_trylock(struct semaphore * sem)
return ret;
}
-extern inline void up(struct semaphore * sem)
+static inline void __up(struct semaphore *sem)
{
- /* Given that we have to use particular hard registers to
- communicate with __up_wakeup anyway, reuse them in
- the atomic operation as well.
+ long ret, tmp, tmp2, tmp3;
- __up_wakeup takes the semaphore address in $24, and
- it's return address in $28. The pv is loaded as usual.
- The gp is clobbered (in the module case) as usual. */
+ /* We must manipulate count and waking simultaneously and atomically.
+ Otherwise we have races between up and __down_failed_interruptible
+ waking up on a signal.
- register void *pv __asm__("$27");
+ "Equivalent" C. Note that we have to do this all without
+ (taken) branches in order to be a valid ll/sc sequence.
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- pv = __up_wakeup;
- __asm__ __volatile__ (
- "/* semaphore up operation */\n"
+ do {
+ tmp = ldq_l;
+ ret = (int)tmp + 1; // count += 1;
+ tmp2 = tmp & 0xffffffff00000000; // extract waking
+ if (ret <= 0) // still sleepers?
+ tmp2 += 0x0000000100000000; // waking += 1;
+ tmp = ret & 0x00000000ffffffff; // insert count
+ tmp |= tmp2; // insert waking;
+ tmp = stq_c = tmp;
+ } while (tmp == 0);
+ */
+
+ __asm__ __volatile__(
" mb\n"
- "1: ldl_l $24,%1\n"
- " addl $24,1,$28\n"
- " addl $24,1,$24\n"
- " stl_c $28,%1\n"
- " beq $28,2f\n"
- " ble $24,3f\n"
- "4:\n"
+ "1: ldq_l %1,%4\n"
+ " addl %1,1,%0\n"
+ " zapnot %1,0xf0,%2\n"
+ " addq %2,%5,%3\n"
+ " cmovle %0,%3,%2\n"
+ " zapnot %0,0x0f,%1\n"
+ " bis %1,%2,%1\n"
+ " stq_c %1,%4\n"
+ " beq %1,3f\n"
+ "2:\n"
".subsection 2\n"
- "2: br 1b\n"
- "3: lda $24,%1\n"
- " jsr $28,($27),__up_wakeup\n"
- " ldgp $29,0($28)\n"
- " br 4b\n"
+ "3: br 1b\n"
".previous"
- : "=r"(pv)
- : "m"(sem->count), "r"(pv)
- : "$24", "$28", "memory");
+ : "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(tmp3)
+ : "m"(*sem), "r"(0x0000000100000000)
+ : "memory");
+
+ if (__builtin_expect(ret <= 0, 0))
+ __up_wakeup(sem);
}
+#if !WAITQUEUE_DEBUG && !DEBUG_SEMAPHORE
+extern inline void down(struct semaphore *sem)
+{
+ __down(sem);
+}
+extern inline int down_interruptible(struct semaphore *sem)
+{
+ return __down_interruptible(sem);
+}
+extern inline int down_trylock(struct semaphore *sem)
+{
+ return __down_trylock(sem);
+}
+extern inline void up(struct semaphore *sem)
+{
+ __up(sem);
+}
+#endif
/* rw mutexes (should that be mutices? =) -- throw rw
* spinlocks and semaphores together, and this is what we
@@ -297,7 +253,7 @@ extern inline void up(struct semaphore * sem)
#define RW_LOCK_BIAS 0x01000000
struct rw_semaphore {
- int count;
+ atomic_t count;
/* bit 0 means read bias granted;
bit 1 means write bias granted. */
unsigned granted;
@@ -317,7 +273,7 @@ struct rw_semaphore {
#endif
#define __RWSEM_INITIALIZER(name,count) \
- { (count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
+ { ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
__WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
__SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }
@@ -331,9 +287,9 @@ struct rw_semaphore {
#define DECLARE_RWSEM_WRITE_LOCKED(name) \
__DECLARE_RWSEM_GENERIC(name, 0)
-extern inline void init_rwsem(struct rw_semaphore *sem)
+static inline void init_rwsem(struct rw_semaphore *sem)
{
- sem->count = RW_LOCK_BIAS;
+ atomic_set (&sem->count, RW_LOCK_BIAS);
sem->granted = 0;
init_waitqueue_head(&sem->wait);
init_waitqueue_head(&sem->write_bias_wait);
@@ -344,213 +300,73 @@ extern inline void init_rwsem(struct rw_semaphore *sem)
#endif
}
-/* All have custom assembly linkages. */
-extern void __down_read_failed(struct rw_semaphore *sem);
-extern void __down_write_failed(struct rw_semaphore *sem);
-extern void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers);
+extern void down_read(struct rw_semaphore *);
+extern void down_write(struct rw_semaphore *);
+extern void up_read(struct rw_semaphore *);
+extern void up_write(struct rw_semaphore *);
+extern void __down_read_failed(struct rw_semaphore *, int);
+extern void __down_write_failed(struct rw_semaphore *, int);
+extern void __rwsem_wake(struct rw_semaphore *, int);
-extern inline void down_read(struct rw_semaphore *sem)
+static inline void __down_read(struct rw_semaphore *sem)
{
- /* Given that we have to use particular hard registers to
- communicate with __down_read_failed anyway, reuse them in
- the atomic operation as well.
+ long count = atomic_dec_return(&sem->count);
+ if (__builtin_expect(count < 0, 0))
+ __down_read_failed(sem, count);
+}
- __down_read_failed takes the semaphore address in $24, the count
- we read in $25, and it's return address in $28. The pv is loaded
- as usual. The gp is clobbered (in the module case) as usual. */
+static inline void __down_write(struct rw_semaphore *sem)
+{
+ long count = atomic_sub_return(RW_LOCK_BIAS, &sem->count);
+ if (__builtin_expect(count != 0, 0))
+ __down_write_failed(sem, count);
+}
- /* This little bit of silliness is to get the GP loaded for
- a function that ordinarily wouldn't. Otherwise we could
- have it done by the macro directly, which can be optimized
- the linker. */
- register void *pv __asm__("$27");
+/* When a reader does a release, the only significant case is when there
+ was a writer waiting, and we've bumped the count to 0, then we must
+ wake the writer up. */
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
+static inline void __up_read(struct rw_semaphore *sem)
+{
+ long count;
+ mb();
+ count = atomic_inc_return(&sem->count);
+ if (__builtin_expect(count == 0, 0))
+ __rwsem_wake(sem, 0);
+}
- pv = __down_read_failed;
- __asm__ __volatile__(
- "/* semaphore down_read operation */\n"
- "1: ldl_l $24,%1\n"
- " subl $24,1,$28\n"
- " subl $24,1,$25\n"
- " stl_c $28,%1\n"
- " beq $28,2f\n"
- " blt $25,3f\n"
- "4: mb\n"
- ".subsection 2\n"
- "2: br 1b\n"
- "3: lda $24,%1\n"
- " jsr $28,($27),__down_read_failed\n"
- " ldgp $29,0($28)\n"
- " br 4b\n"
- ".previous"
- : "=r"(pv)
- : "m"(sem->count), "r"(pv)
- : "$24", "$25", "$28", "memory");
+/* Releasing the writer is easy -- just release it and wake up
+ any sleepers. */
-#if WAITQUEUE_DEBUG
- if (sem->granted & 2)
- BUG();
- if (atomic_read(&sem->writers))
- BUG();
- atomic_inc(&sem->readers);
-#endif
+static inline void __up_write(struct rw_semaphore *sem)
+{
+ long count, wake;
+ mb();
+ count = atomic_add_return(RW_LOCK_BIAS, &sem->count);
+
+ /* Only do the wake if we were, but are no longer, negative. */
+ wake = ((int)(count - RW_LOCK_BIAS) < 0) && count >= 0;
+ if (__builtin_expect(wake, 0))
+ __rwsem_wake(sem, count);
}
+#if !WAITQUEUE_DEBUG && !DEBUG_RW_SEMAPHORE
+extern inline void down_read(struct rw_semaphore *sem)
+{
+ __down_read(sem);
+}
extern inline void down_write(struct rw_semaphore *sem)
{
- /* Given that we have to use particular hard registers to
- communicate with __down_write_failed anyway, reuse them in
- the atomic operation as well.
-
- __down_write_failed takes the semaphore address in $24, the count
- we read in $25, and it's return address in $28. The pv is loaded
- as usual. The gp is clobbered (in the module case) as usual. */
-
- /* This little bit of silliness is to get the GP loaded for
- a function that ordinarily wouldn't. Otherwise we could
- have it done by the macro directly, which can be optimized
- the linker. */
- register void *pv __asm__("$27");
-
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
-#endif
-
- pv = __down_write_failed;
- __asm__ __volatile__(
- "/* semaphore down_write operation */\n"
- "1: ldl_l $24,%1\n"
- " ldah $28,%3($24)\n"
- " ldah $25,%3($24)\n"
- " stl_c $28,%1\n"
- " beq $28,2f\n"
- " bne $25,3f\n"
- "4: mb\n"
- ".subsection 2\n"
- "2: br 1b\n"
- "3: lda $24,%1\n"
- " jsr $28,($27),__down_write_failed\n"
- " ldgp $29,0($28)\n"
- " br 4b\n"
- ".previous"
- : "=r"(pv)
- : "m"(sem->count), "r"(pv), "i"(-(RW_LOCK_BIAS >> 16))
- : "$24", "$25", "$28", "memory");
-
-#if WAITQUEUE_DEBUG
- if (atomic_read(&sem->writers))
- BUG();
- if (atomic_read(&sem->readers))
- BUG();
- if (sem->granted & 3)
- BUG();
- atomic_inc(&sem->writers);
-#endif
+ __down_write(sem);
}
-
-/* When a reader does a release, the only significant case is when
- there was a writer waiting, and we've * bumped the count to 0: we must
-wake the writer up. */
-
extern inline void up_read(struct rw_semaphore *sem)
{
- /* Given that we have to use particular hard registers to
- communicate with __rwsem_wake anyway, reuse them in
- the atomic operation as well.
-
- __rwsem_wake takes the semaphore address in $24, the
- number of waiting readers in $25, and it's return address
- in $28. The pv is loaded as usual. The gp is clobbered
- (in the module case) as usual. */
-
- register void *pv __asm__("$27");
-
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
- if (sem->granted & 2)
- BUG();
- if (atomic_read(&sem->writers))
- BUG();
- atomic_dec(&sem->readers);
-#endif
-
- pv = __rwsem_wake;
- __asm__ __volatile__(
- "/* semaphore up_read operation */\n"
- " mb\n"
- "1: ldl_l $24,%1\n"
- " addl $24,1,$28\n"
- " addl $24,1,$24\n"
- " stl_c $28,%1\n"
- " beq $28,2f\n"
- " beq $24,3f\n"
- "4:\n"
- ".subsection 2\n"
- "2: br 1b\n"
- "3: lda $24,%1\n"
- " mov 0,$25\n"
- " jsr $28,($27),__rwsem_wake\n"
- " ldgp $29,0($28)\n"
- " br 4b\n"
- ".previous"
- : "=r"(pv)
- : "m"(sem->count), "r"(pv)
- : "$24", "$25", "$28", "memory");
+ __up_read(sem);
}
-
-/* releasing the writer is easy -- just release it and
- * wake up any sleepers.
- */
extern inline void up_write(struct rw_semaphore *sem)
{
- /* Given that we have to use particular hard registers to
- communicate with __rwsem_wake anyway, reuse them in
- the atomic operation as well.
-
- __rwsem_wake takes the semaphore address in $24, the
- number of waiting readers in $25, and it's return address
- in $28. The pv is loaded as usual. The gp is clobbered
- (in the module case) as usual. */
-
- register void *pv __asm__("$27");
-
-#if WAITQUEUE_DEBUG
- CHECK_MAGIC(sem->__magic);
- if (sem->granted & 3)
- BUG();
- if (atomic_read(&sem->readers))
- BUG();
- if (atomic_read(&sem->writers) != 1)
- BUG();
- atomic_dec(&sem->writers);
-#endif
-
- pv = __rwsem_wake;
- __asm__ __volatile__(
- "/* semaphore up_write operation */\n"
- " mb\n"
- "1: ldl_l $24,%1\n"
- " ldah $28,%3($24)\n"
- " stl_c $28,%1\n"
- " beq $28,2f\n"
- " blt $24,3f\n"
- "4:\n"
- ".subsection 2\n"
- "2: br 1b\n"
- "3: ldah $25,%3($24)\n"
- /* Only do the wake if we're no longer negative. */
- " blt $25,4b\n"
- " lda $24,%1\n"
- " jsr $28,($27),__rwsem_wake\n"
- " ldgp $29,0($28)\n"
- " br 4b\n"
- ".previous"
- : "=r"(pv)
- : "m"(sem->count), "r"(pv), "i"(RW_LOCK_BIAS >> 16)
- : "$24", "$25", "$28", "memory");
+ __up_write(sem);
}
+#endif
#endif
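
The ldq_l/stq_c sequences above treat count and waking as a single 64-bit quantity, which is why count is now forced to 8-byte alignment. A rough model of the packing they rely on (a sketch based on the struct layout comment, little-endian Alpha assumed; not the kernel code):

    /* Sketch: the aligned pair (count, waking) viewed as one 64-bit word. */
    static unsigned long pack_sem_word(int count, int waking)
    {
            return (unsigned long)(unsigned int)count |
                   ((unsigned long)(unsigned int)waking << 32);
    }

    static int sem_count(unsigned long w)  { return (int)w; }         /* low half  */
    static int sem_waking(unsigned long w) { return (int)(w >> 32); } /* high half */
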
diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h
index 64e05d17b..e1d809880 100644
--- a/include/asm-alpha/spinlock.h
+++ b/include/asm-alpha/spinlock.h
@@ -80,7 +80,7 @@ static inline void spin_lock(spinlock_t * lock)
" blbs %0,2b\n"
" br 1b\n"
".previous"
- : "=r" (tmp), "=m" (lock->lock)
+ : "=&r" (tmp), "=m" (lock->lock)
: "m"(lock->lock) : "memory");
}
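
The added '&' marks tmp as an earlyclobber output: it is written before the inputs are last read, so GCC must not place it in the same register as any input operand. A minimal illustration of the constraint (hypothetical asm, unrelated to the spinlock itself):

    /* Hypothetical: %0 is written by the first instruction while %1 is still
       needed by the second, so "=&r" keeps them in distinct registers. */
    static long earlyclobber_example(long in)
    {
            long out;
            __asm__("addq %1,%1,%0\n\t"     /* out = in + in  */
                    "addq %0,%1,%0"         /* out = out + in */
                    : "=&r"(out) : "r"(in));
            return out;
    }
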
diff --git a/include/asm-alpha/xor.h b/include/asm-alpha/xor.h
new file mode 100644
index 000000000..e11477f33
--- /dev/null
+++ b/include/asm-alpha/xor.h
@@ -0,0 +1,855 @@
+/*
+ * include/asm-alpha/xor.h
+ *
+ * Optimized RAID-5 checksumming functions for alpha EV5 and EV6
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+extern void xor_alpha_2(unsigned long, unsigned long *, unsigned long *);
+extern void xor_alpha_3(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *);
+extern void xor_alpha_4(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *, unsigned long *);
+extern void xor_alpha_5(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *, unsigned long *, unsigned long *);
+
+extern void xor_alpha_prefetch_2(unsigned long, unsigned long *,
+ unsigned long *);
+extern void xor_alpha_prefetch_3(unsigned long, unsigned long *,
+ unsigned long *, unsigned long *);
+extern void xor_alpha_prefetch_4(unsigned long, unsigned long *,
+ unsigned long *, unsigned long *,
+ unsigned long *);
+extern void xor_alpha_prefetch_5(unsigned long, unsigned long *,
+ unsigned long *, unsigned long *,
+ unsigned long *, unsigned long *);
+
+asm("
+ .text
+ .align 3
+ .ent xor_alpha_2
+xor_alpha_2:
+ .prologue 0
+ srl $16, 6, $16
+ .align 4
+2:
+ ldq $0,0($17)
+ ldq $1,0($18)
+ ldq $2,8($17)
+ ldq $3,8($18)
+
+ ldq $4,16($17)
+ ldq $5,16($18)
+ ldq $6,24($17)
+ ldq $7,24($18)
+
+ ldq $19,32($17)
+ ldq $20,32($18)
+ ldq $21,40($17)
+ ldq $22,40($18)
+
+ ldq $23,48($17)
+ ldq $24,48($18)
+ ldq $25,56($17)
+ xor $0,$1,$0 # 7 cycles from $1 load
+
+ ldq $27,56($18)
+ xor $2,$3,$2
+ stq $0,0($17)
+ xor $4,$5,$4
+
+ stq $2,8($17)
+ xor $6,$7,$6
+ stq $4,16($17)
+ xor $19,$20,$19
+
+ stq $6,24($17)
+ xor $21,$22,$21
+ stq $19,32($17)
+ xor $23,$24,$23
+
+ stq $21,40($17)
+ xor $25,$27,$25
+ stq $23,48($17)
+ subq $16,1,$16
+
+ stq $25,56($17)
+ addq $17,64,$17
+ addq $18,64,$18
+ bgt $16,2b
+
+ ret
+ .end xor_alpha_2
+
+ .align 3
+ .ent xor_alpha_3
+xor_alpha_3:
+ .prologue 0
+ srl $16, 6, $16
+ .align 4
+3:
+ ldq $0,0($17)
+ ldq $1,0($18)
+ ldq $2,0($19)
+ ldq $3,8($17)
+
+ ldq $4,8($18)
+ ldq $6,16($17)
+ ldq $7,16($18)
+ ldq $21,24($17)
+
+ ldq $22,24($18)
+ ldq $24,32($17)
+ ldq $25,32($18)
+ ldq $5,8($19)
+
+ ldq $20,16($19)
+ ldq $23,24($19)
+ ldq $27,32($19)
+ nop
+
+ xor $0,$1,$1 # 8 cycles from $0 load
+ xor $3,$4,$4 # 6 cycles from $4 load
+ xor $6,$7,$7 # 6 cycles from $7 load
+ xor $21,$22,$22 # 5 cycles from $22 load
+
+ xor $1,$2,$2 # 9 cycles from $2 load
+ xor $24,$25,$25 # 5 cycles from $25 load
+ stq $2,0($17)
+ xor $4,$5,$5 # 6 cycles from $5 load
+
+ stq $5,8($17)
+ xor $7,$20,$20 # 7 cycles from $20 load
+ stq $20,16($17)
+ xor $22,$23,$23 # 7 cycles from $23 load
+
+ stq $23,24($17)
+ xor $25,$27,$27 # 7 cycles from $27 load
+ stq $27,32($17)
+ nop
+
+ ldq $0,40($17)
+ ldq $1,40($18)
+ ldq $3,48($17)
+ ldq $4,48($18)
+
+ ldq $6,56($17)
+ ldq $7,56($18)
+ ldq $2,40($19)
+ ldq $5,48($19)
+
+ ldq $20,56($19)
+ xor $0,$1,$1 # 4 cycles from $1 load
+ xor $3,$4,$4 # 5 cycles from $4 load
+ xor $6,$7,$7 # 5 cycles from $7 load
+
+ xor $1,$2,$2 # 4 cycles from $2 load
+ xor $4,$5,$5 # 5 cycles from $5 load
+ stq $2,40($17)
+ xor $7,$20,$20 # 4 cycles from $20 load
+
+ stq $5,48($17)
+ subq $16,1,$16
+ stq $20,56($17)
+ addq $19,64,$19
+
+ addq $18,64,$18
+ addq $17,64,$17
+ bgt $16,3b
+ ret
+ .end xor_alpha_3
+
+ .align 3
+ .ent xor_alpha_4
+xor_alpha_4:
+ .prologue 0
+ srl $16, 6, $16
+ .align 4
+4:
+ ldq $0,0($17)
+ ldq $1,0($18)
+ ldq $2,0($19)
+ ldq $3,0($20)
+
+ ldq $4,8($17)
+ ldq $5,8($18)
+ ldq $6,8($19)
+ ldq $7,8($20)
+
+ ldq $21,16($17)
+ ldq $22,16($18)
+ ldq $23,16($19)
+ ldq $24,16($20)
+
+ ldq $25,24($17)
+ xor $0,$1,$1 # 6 cycles from $1 load
+ ldq $27,24($18)
+ xor $2,$3,$3 # 6 cycles from $3 load
+
+ ldq $0,24($19)
+ xor $1,$3,$3
+ ldq $1,24($20)
+ xor $4,$5,$5 # 7 cycles from $5 load
+
+ stq $3,0($17)
+ xor $6,$7,$7
+ xor $21,$22,$22 # 7 cycles from $22 load
+ xor $5,$7,$7
+
+ stq $7,8($17)
+ xor $23,$24,$24 # 7 cycles from $24 load
+ ldq $2,32($17)
+ xor $22,$24,$24
+
+ ldq $3,32($18)
+ ldq $4,32($19)
+ ldq $5,32($20)
+ xor $25,$27,$27 # 8 cycles from $27 load
+
+ ldq $6,40($17)
+ ldq $7,40($18)
+ ldq $21,40($19)
+ ldq $22,40($20)
+
+ stq $24,16($17)
+ xor $0,$1,$1 # 9 cycles from $1 load
+ xor $2,$3,$3 # 5 cycles from $3 load
+ xor $27,$1,$1
+
+ stq $1,24($17)
+ xor $4,$5,$5 # 5 cycles from $5 load
+ ldq $23,48($17)
+ ldq $24,48($18)
+
+ ldq $25,48($19)
+ xor $3,$5,$5
+ ldq $27,48($20)
+ ldq $0,56($17)
+
+ ldq $1,56($18)
+ ldq $2,56($19)
+ xor $6,$7,$7 # 8 cycles from $6 load
+ ldq $3,56($20)
+
+ stq $5,32($17)
+ xor $21,$22,$22 # 8 cycles from $22 load
+ xor $7,$22,$22
+ xor $23,$24,$24 # 5 cycles from $24 load
+
+ stq $22,40($17)
+ xor $25,$27,$27 # 5 cycles from $27 load
+ xor $24,$27,$27
+ xor $0,$1,$1 # 5 cycles from $1 load
+
+ stq $27,48($17)
+ xor $2,$3,$3 # 4 cycles from $3 load
+ xor $1,$3,$3
+ subq $16,1,$16
+
+ stq $3,56($17)
+ addq $20,64,$20
+ addq $19,64,$19
+ addq $18,64,$18
+
+ addq $17,64,$17
+ bgt $16,4b
+ ret
+ .end xor_alpha_4
+
+ .align 3
+ .ent xor_alpha_5
+xor_alpha_5:
+ .prologue 0
+ srl $16, 6, $16
+ .align 4
+5:
+ ldq $0,0($17)
+ ldq $1,0($18)
+ ldq $2,0($19)
+ ldq $3,0($20)
+
+ ldq $4,0($21)
+ ldq $5,8($17)
+ ldq $6,8($18)
+ ldq $7,8($19)
+
+ ldq $22,8($20)
+ ldq $23,8($21)
+ ldq $24,16($17)
+ ldq $25,16($18)
+
+ ldq $27,16($19)
+ xor $0,$1,$1 # 6 cycles from $1 load
+ ldq $28,16($20)
+ xor $2,$3,$3 # 6 cycles from $3 load
+
+ ldq $0,16($21)
+ xor $1,$3,$3
+ ldq $1,24($17)
+ xor $3,$4,$4 # 7 cycles from $4 load
+
+ stq $4,0($17)
+ xor $5,$6,$6 # 7 cycles from $6 load
+ xor $7,$22,$22 # 7 cycles from $22 load
+ xor $6,$23,$23 # 7 cycles from $23 load
+
+ ldq $2,24($18)
+ xor $22,$23,$23
+ ldq $3,24($19)
+ xor $24,$25,$25 # 8 cycles from $25 load
+
+ stq $23,8($17)
+ xor $25,$27,$27 # 8 cycles from $27 load
+ ldq $4,24($20)
+ xor $28,$0,$0 # 7 cycles from $0 load
+
+ ldq $5,24($21)
+ xor $27,$0,$0
+ ldq $6,32($17)
+ ldq $7,32($18)
+
+ stq $0,16($17)
+ xor $1,$2,$2 # 6 cycles from $2 load
+ ldq $22,32($19)
+ xor $3,$4,$4 # 4 cycles from $4 load
+
+ ldq $23,32($20)
+ xor $2,$4,$4
+ ldq $24,32($21)
+ ldq $25,40($17)
+
+ ldq $27,40($18)
+ ldq $28,40($19)
+ ldq $0,40($20)
+ xor $4,$5,$5 # 7 cycles from $5 load
+
+ stq $5,24($17)
+ xor $6,$7,$7 # 7 cycles from $7 load
+ ldq $1,40($21)
+ ldq $2,48($17)
+
+ ldq $3,48($18)
+ xor $7,$22,$22 # 7 cycles from $22 load
+ ldq $4,48($19)
+ xor $23,$24,$24 # 6 cycles from $24 load
+
+ ldq $5,48($20)
+ xor $22,$24,$24
+ ldq $6,48($21)
+ xor $25,$27,$27 # 7 cycles from $27 load
+
+ stq $24,32($17)
+ xor $27,$28,$28 # 8 cycles from $28 load
+ ldq $7,56($17)
+ xor $0,$1,$1 # 6 cycles from $1 load
+
+ ldq $22,56($18)
+ ldq $23,56($19)
+ ldq $24,56($20)
+ ldq $25,56($21)
+
+ xor $28,$1,$1
+ xor $2,$3,$3 # 9 cycles from $3 load
+ xor $3,$4,$4 # 9 cycles from $4 load
+ xor $5,$6,$6 # 8 cycles from $6 load
+
+ stq $1,40($17)
+ xor $4,$6,$6
+ xor $7,$22,$22 # 7 cycles from $22 load
+ xor $23,$24,$24 # 6 cycles from $24 load
+
+ stq $6,48($17)
+ xor $22,$24,$24
+ subq $16,1,$16
+ xor $24,$25,$25 # 8 cycles from $25 load
+
+ stq $25,56($17)
+ addq $21,64,$21
+ addq $20,64,$20
+ addq $19,64,$19
+
+ addq $18,64,$18
+ addq $17,64,$17
+ bgt $16,5b
+ ret
+ .end xor_alpha_5
+
+ .align 3
+ .ent xor_alpha_prefetch_2
+xor_alpha_prefetch_2:
+ .prologue 0
+ srl $16, 6, $16
+
+ ldq $31, 0($17)
+ ldq $31, 0($18)
+
+ ldq $31, 64($17)
+ ldq $31, 64($18)
+
+ ldq $31, 128($17)
+ ldq $31, 128($18)
+
+ ldq $31, 192($17)
+ ldq $31, 192($18)
+ .align 4
+2:
+ ldq $0,0($17)
+ ldq $1,0($18)
+ ldq $2,8($17)
+ ldq $3,8($18)
+
+ ldq $4,16($17)
+ ldq $5,16($18)
+ ldq $6,24($17)
+ ldq $7,24($18)
+
+ ldq $19,32($17)
+ ldq $20,32($18)
+ ldq $21,40($17)
+ ldq $22,40($18)
+
+ ldq $23,48($17)
+ ldq $24,48($18)
+ ldq $25,56($17)
+ ldq $27,56($18)
+
+ ldq $31,256($17)
+ xor $0,$1,$0 # 8 cycles from $1 load
+ ldq $31,256($18)
+ xor $2,$3,$2
+
+ stq $0,0($17)
+ xor $4,$5,$4
+ stq $2,8($17)
+ xor $6,$7,$6
+
+ stq $4,16($17)
+ xor $19,$20,$19
+ stq $6,24($17)
+ xor $21,$22,$21
+
+ stq $19,32($17)
+ xor $23,$24,$23
+ stq $21,40($17)
+ xor $25,$27,$25
+
+ stq $23,48($17)
+ subq $16,1,$16
+ stq $25,56($17)
+ addq $17,64,$17
+
+ addq $18,64,$18
+ bgt $16,2b
+ ret
+ .end xor_alpha_prefetch_2
+
+ .align 3
+ .ent xor_alpha_prefetch_3
+xor_alpha_prefetch_3:
+ .prologue 0
+ srl $16, 6, $16
+
+ ldq $31, 0($17)
+ ldq $31, 0($18)
+ ldq $31, 0($19)
+
+ ldq $31, 64($17)
+ ldq $31, 64($18)
+ ldq $31, 64($19)
+
+ ldq $31, 128($17)
+ ldq $31, 128($18)
+ ldq $31, 128($19)
+
+ ldq $31, 192($17)
+ ldq $31, 192($18)
+ ldq $31, 192($19)
+ .align 4
+3:
+ ldq $0,0($17)
+ ldq $1,0($18)
+ ldq $2,0($19)
+ ldq $3,8($17)
+
+ ldq $4,8($18)
+ ldq $6,16($17)
+ ldq $7,16($18)
+ ldq $21,24($17)
+
+ ldq $22,24($18)
+ ldq $24,32($17)
+ ldq $25,32($18)
+ ldq $5,8($19)
+
+ ldq $20,16($19)
+ ldq $23,24($19)
+ ldq $27,32($19)
+ nop
+
+ xor $0,$1,$1 # 8 cycles from $0 load
+ xor $3,$4,$4 # 7 cycles from $4 load
+ xor $6,$7,$7 # 6 cycles from $7 load
+ xor $21,$22,$22 # 5 cycles from $22 load
+
+ xor $1,$2,$2 # 9 cycles from $2 load
+ xor $24,$25,$25 # 5 cycles from $25 load
+ stq $2,0($17)
+ xor $4,$5,$5 # 6 cycles from $5 load
+
+ stq $5,8($17)
+ xor $7,$20,$20 # 7 cycles from $20 load
+ stq $20,16($17)
+ xor $22,$23,$23 # 7 cycles from $23 load
+
+ stq $23,24($17)
+ xor $25,$27,$27 # 7 cycles from $27 load
+ stq $27,32($17)
+ nop
+
+ ldq $0,40($17)
+ ldq $1,40($18)
+ ldq $3,48($17)
+ ldq $4,48($18)
+
+ ldq $6,56($17)
+ ldq $7,56($18)
+ ldq $2,40($19)
+ ldq $5,48($19)
+
+ ldq $20,56($19)
+ ldq $31,256($17)
+ ldq $31,256($18)
+ ldq $31,256($19)
+
+ xor $0,$1,$1 # 6 cycles from $1 load
+ xor $3,$4,$4 # 5 cycles from $4 load
+ xor $6,$7,$7 # 5 cycles from $7 load
+ xor $1,$2,$2 # 4 cycles from $2 load
+
+ xor $4,$5,$5 # 5 cycles from $5 load
+ xor $7,$20,$20 # 4 cycles from $20 load
+ stq $2,40($17)
+ subq $16,1,$16
+
+ stq $5,48($17)
+ addq $19,64,$19
+ stq $20,56($17)
+ addq $18,64,$18
+
+ addq $17,64,$17
+ bgt $16,3b
+ ret
+ .end xor_alpha_prefetch_3
+
+ .align 3
+ .ent xor_alpha_prefetch_4
+xor_alpha_prefetch_4:
+ .prologue 0
+ srl $16, 6, $16
+
+ ldq $31, 0($17)
+ ldq $31, 0($18)
+ ldq $31, 0($19)
+ ldq $31, 0($20)
+
+ ldq $31, 64($17)
+ ldq $31, 64($18)
+ ldq $31, 64($19)
+ ldq $31, 64($20)
+
+ ldq $31, 128($17)
+ ldq $31, 128($18)
+ ldq $31, 128($19)
+ ldq $31, 128($20)
+
+ ldq $31, 192($17)
+ ldq $31, 192($18)
+ ldq $31, 192($19)
+ ldq $31, 192($20)
+ .align 4
+4:
+ ldq $0,0($17)
+ ldq $1,0($18)
+ ldq $2,0($19)
+ ldq $3,0($20)
+
+ ldq $4,8($17)
+ ldq $5,8($18)
+ ldq $6,8($19)
+ ldq $7,8($20)
+
+ ldq $21,16($17)
+ ldq $22,16($18)
+ ldq $23,16($19)
+ ldq $24,16($20)
+
+ ldq $25,24($17)
+ xor $0,$1,$1 # 6 cycles from $1 load
+ ldq $27,24($18)
+ xor $2,$3,$3 # 6 cycles from $3 load
+
+ ldq $0,24($19)
+ xor $1,$3,$3
+ ldq $1,24($20)
+ xor $4,$5,$5 # 7 cycles from $5 load
+
+ stq $3,0($17)
+ xor $6,$7,$7
+ xor $21,$22,$22 # 7 cycles from $22 load
+ xor $5,$7,$7
+
+ stq $7,8($17)
+ xor $23,$24,$24 # 7 cycles from $24 load
+ ldq $2,32($17)
+ xor $22,$24,$24
+
+ ldq $3,32($18)
+ ldq $4,32($19)
+ ldq $5,32($20)
+ xor $25,$27,$27 # 8 cycles from $27 load
+
+ ldq $6,40($17)
+ ldq $7,40($18)
+ ldq $21,40($19)
+ ldq $22,40($20)
+
+ stq $24,16($17)
+ xor $0,$1,$1 # 9 cycles from $1 load
+ xor $2,$3,$3 # 5 cycles from $3 load
+ xor $27,$1,$1
+
+ stq $1,24($17)
+ xor $4,$5,$5 # 5 cycles from $5 load
+ ldq $23,48($17)
+ xor $3,$5,$5
+
+ ldq $24,48($18)
+ ldq $25,48($19)
+ ldq $27,48($20)
+ ldq $0,56($17)
+
+ ldq $1,56($18)
+ ldq $2,56($19)
+ ldq $3,56($20)
+ xor $6,$7,$7 # 8 cycles from $6 load
+
+ ldq $31,256($17)
+ xor $21,$22,$22 # 8 cycles from $22 load
+ ldq $31,256($18)
+ xor $7,$22,$22
+
+ ldq $31,256($19)
+ xor $23,$24,$24 # 6 cycles from $24 load
+ ldq $31,256($20)
+ xor $25,$27,$27 # 6 cycles from $27 load
+
+ stq $5,32($17)
+ xor $24,$27,$27
+ xor $0,$1,$1 # 7 cycles from $1 load
+ xor $2,$3,$3 # 6 cycles from $3 load
+
+ stq $22,40($17)
+ xor $1,$3,$3
+ stq $27,48($17)
+ subq $16,1,$16
+
+ stq $3,56($17)
+ addq $20,64,$20
+ addq $19,64,$19
+ addq $18,64,$18
+
+ addq $17,64,$17
+ bgt $16,4b
+ ret
+ .end xor_alpha_prefetch_4
+
+ .align 3
+ .ent xor_alpha_prefetch_5
+xor_alpha_prefetch_5:
+ .prologue 0
+ srl $16, 6, $16
+
+ ldq $31, 0($17)
+ ldq $31, 0($18)
+ ldq $31, 0($19)
+ ldq $31, 0($20)
+ ldq $31, 0($21)
+
+ ldq $31, 64($17)
+ ldq $31, 64($18)
+ ldq $31, 64($19)
+ ldq $31, 64($20)
+ ldq $31, 64($21)
+
+ ldq $31, 128($17)
+ ldq $31, 128($18)
+ ldq $31, 128($19)
+ ldq $31, 128($20)
+ ldq $31, 128($21)
+
+ ldq $31, 192($17)
+ ldq $31, 192($18)
+ ldq $31, 192($19)
+ ldq $31, 192($20)
+ ldq $31, 192($21)
+ .align 4
+5:
+ ldq $0,0($17)
+ ldq $1,0($18)
+ ldq $2,0($19)
+ ldq $3,0($20)
+
+ ldq $4,0($21)
+ ldq $5,8($17)
+ ldq $6,8($18)
+ ldq $7,8($19)
+
+ ldq $22,8($20)
+ ldq $23,8($21)
+ ldq $24,16($17)
+ ldq $25,16($18)
+
+ ldq $27,16($19)
+ xor $0,$1,$1 # 6 cycles from $1 load
+ ldq $28,16($20)
+ xor $2,$3,$3 # 6 cycles from $3 load
+
+ ldq $0,16($21)
+ xor $1,$3,$3
+ ldq $1,24($17)
+ xor $3,$4,$4 # 7 cycles from $4 load
+
+ stq $4,0($17)
+ xor $5,$6,$6 # 7 cycles from $6 load
+ xor $7,$22,$22 # 7 cycles from $22 load
+ xor $6,$23,$23 # 7 cycles from $23 load
+
+ ldq $2,24($18)
+ xor $22,$23,$23
+ ldq $3,24($19)
+ xor $24,$25,$25 # 8 cycles from $25 load
+
+ stq $23,8($17)
+ xor $25,$27,$27 # 8 cycles from $27 load
+ ldq $4,24($20)
+ xor $28,$0,$0 # 7 cycles from $0 load
+
+ ldq $5,24($21)
+ xor $27,$0,$0
+ ldq $6,32($17)
+ ldq $7,32($18)
+
+ stq $0,16($17)
+ xor $1,$2,$2 # 6 cycles from $2 load
+ ldq $22,32($19)
+ xor $3,$4,$4 # 4 cycles from $4 load
+
+ ldq $23,32($20)
+ xor $2,$4,$4
+ ldq $24,32($21)
+ ldq $25,40($17)
+
+ ldq $27,40($18)
+ ldq $28,40($19)
+ ldq $0,40($20)
+ xor $4,$5,$5 # 7 cycles from $5 load
+
+ stq $5,24($17)
+ xor $6,$7,$7 # 7 cycles from $7 load
+ ldq $1,40($21)
+ ldq $2,48($17)
+
+ ldq $3,48($18)
+ xor $7,$22,$22 # 7 cycles from $22 load
+ ldq $4,48($19)
+ xor $23,$24,$24 # 6 cycles from $24 load
+
+ ldq $5,48($20)
+ xor $22,$24,$24
+ ldq $6,48($21)
+ xor $25,$27,$27 # 7 cycles from $27 load
+
+ stq $24,32($17)
+ xor $27,$28,$28 # 8 cycles from $28 load
+ ldq $7,56($17)
+ xor $0,$1,$1 # 6 cycles from $1 load
+
+ ldq $22,56($18)
+ ldq $23,56($19)
+ ldq $24,56($20)
+ ldq $25,56($21)
+
+ ldq $31,256($17)
+ xor $28,$1,$1
+ ldq $31,256($18)
+ xor $2,$3,$3 # 9 cycles from $3 load
+
+ ldq $31,256($19)
+ xor $3,$4,$4 # 9 cycles from $4 load
+ ldq $31,256($20)
+ xor $5,$6,$6 # 8 cycles from $6 load
+
+ stq $1,40($17)
+ xor $4,$6,$6
+ xor $7,$22,$22 # 7 cycles from $22 load
+ xor $23,$24,$24 # 6 cycles from $24 load
+
+ stq $6,48($17)
+ xor $22,$24,$24
+ ldq $31,256($21)
+ xor $24,$25,$25 # 8 cycles from $25 load
+
+ stq $25,56($17)
+ subq $16,1,$16
+ addq $21,64,$21
+ addq $20,64,$20
+
+ addq $19,64,$19
+ addq $18,64,$18
+ addq $17,64,$17
+ bgt $16,5b
+
+ ret
+ .end xor_alpha_prefetch_5
+");
+
+static struct xor_block_template xor_block_alpha = {
+ name: "alpha",
+ do_2: xor_alpha_2,
+ do_3: xor_alpha_3,
+ do_4: xor_alpha_4,
+ do_5: xor_alpha_5,
+};
+
+static struct xor_block_template xor_block_alpha_prefetch = {
+ name: "alpha prefetch",
+ do_2: xor_alpha_prefetch_2,
+ do_3: xor_alpha_prefetch_3,
+ do_4: xor_alpha_prefetch_4,
+ do_5: xor_alpha_prefetch_5,
+};
+
+/* For grins, also test the generic routines. */
+#include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_alpha); \
+ xor_speed(&xor_block_alpha_prefetch); \
+ } while (0)
+
+/* Force the use of alpha_prefetch if EV6, as it is significantly
+ faster in the cold cache case. */
+#define XOR_SELECT_TEMPLATE(FASTEST) \
+ (implver() == IMPLVER_EV6 ? &xor_block_alpha_prefetch : FASTEST)
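
A hedged sketch of how a caller consumes one of these routines (illustrative values; the real selection and benchmarking loop lives in the generic RAID xor code): each do_N entry XORs N-1 source blocks into the first block, taking the byte count as its first argument and working in 64-byte chunks.

    /* Illustrative only: XOR one 4 KB source block into a destination block. */
    static void xor_one_block_example(unsigned long *dst, unsigned long *src)
    {
            xor_alpha_2(4096, dst, src);    /* dst[i] ^= src[i] over the block */
    }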