Diffstat (limited to 'include/asm-alpha')
-rw-r--r--  include/asm-alpha/bitops.h     |  10
-rw-r--r--  include/asm-alpha/ide.h        |  88
-rw-r--r--  include/asm-alpha/init.h       |   2
-rw-r--r--  include/asm-alpha/io.h         |   6
-rw-r--r--  include/asm-alpha/irq.h        |   4
-rw-r--r--  include/asm-alpha/processor.h  |   8
-rw-r--r--  include/asm-alpha/semaphore.h  |  80
-rw-r--r--  include/asm-alpha/smp.h        |  15
-rw-r--r--  include/asm-alpha/softirq.h    |  10
-rw-r--r--  include/asm-alpha/spinlock.h   |  66
-rw-r--r--  include/asm-alpha/string.h     |   1
-rw-r--r--  include/asm-alpha/system.h     |  12
12 files changed, 199 insertions(+), 103 deletions(-)
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index b7c805511..adaf2fac2 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -172,7 +172,10 @@ extern inline unsigned long ffz_b(unsigned long x)
extern inline unsigned long ffz(unsigned long word)
{
-#ifdef __alpha_cix__
+#if 0 && defined(__alpha_cix__)
+ /* Swine architects -- a year after they publish v3 of the
+ handbook, in the 21264 data sheet they quietly change CIX
+ to FIX and remove the spiffy counting instructions. */
/* Whee. EV6 can calculate it directly. */
unsigned long result;
__asm__("ctlz %1,%0" : "=r"(result) : "r"(~word));
@@ -208,7 +211,10 @@ extern inline int ffs(int word)
* of bits set) of a N-bit word
*/
-#ifdef __alpha_cix__
+#if 0 && defined(__alpha_cix__)
+/* Swine architects -- a year after they publish v3 of the handbook, in
+ the 21264 data sheet they quietly change CIX to FIX and remove the
+ spiffy counting instructions. */
/* Whee. EV6 can calculate it directly. */
extern __inline__ unsigned long hweight64(unsigned long w)
{
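hweight64() is the population count of a 64-bit word. A sketch of the
usual fallback when the CIX ctpop instruction is unavailable, assuming a
64-bit unsigned long (hypothetical helper, not the patch's code):

    static inline unsigned long hweight64_sketch(unsigned long w)
    {
            unsigned long res;

            res = w - ((w >> 1) & 0x5555555555555555UL);     /* 2-bit sums */
            res = (res & 0x3333333333333333UL)
                + ((res >> 2) & 0x3333333333333333UL);       /* 4-bit sums */
            res = (res + (res >> 4)) & 0x0f0f0f0f0f0f0f0fUL; /* 8-bit sums */
            return (res * 0x0101010101010101UL) >> 56;       /* total      */
    }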
diff --git a/include/asm-alpha/ide.h b/include/asm-alpha/ide.h
index e8de11f12..2ef5de935 100644
--- a/include/asm-alpha/ide.h
+++ b/include/asm-alpha/ide.h
@@ -13,8 +13,6 @@
#ifdef __KERNEL__
-typedef unsigned short ide_ioreg_t;
-
#ifndef MAX_HWIFS
#define MAX_HWIFS 4
#endif
@@ -45,18 +43,42 @@ static __inline__ ide_ioreg_t ide_default_io_base(int index)
}
}
-static __inline__ void ide_init_hwif_ports (ide_ioreg_t *p, ide_ioreg_t base, int *irq)
+static __inline__ void ide_init_hwif_ports(hw_regs_t *hw, ide_ioreg_t data_port, ide_ioreg_t ctrl_port, int *irq)
{
- ide_ioreg_t port = base;
- int i = 8;
+ ide_ioreg_t reg = data_port;
+ int i;
- while (i--)
- *p++ = port++;
- *p++ = base + 0x206;
+ for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
+ hw->io_ports[i] = reg;
+ reg += 1;
+ }
+ if (ctrl_port) {
+ hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
+ } else {
+ hw->io_ports[IDE_CONTROL_OFFSET] = hw->io_ports[IDE_DATA_OFFSET] + 0x206;
+ }
if (irq != NULL)
*irq = 0;
}
+/*
+ * This registers the standard ports for this architecture with the IDE
+ * driver.
+ */
+static __inline__ void ide_init_default_hwifs(void)
+{
+#ifdef __DO_I_NEED_THIS
+ hw_regs_t hw;
+ int index;
+
+ for (index = 0; index < MAX_HWIFS; index++) {
+ ide_init_hwif_ports(&hw, ide_default_io_base(index), 0, 0);
+ hw.irq = ide_default_irq(ide_default_io_base(index));
+ ide_register_hw(&hw, NULL);
+ }
+#endif /* __DO_I_NEED_THIS */
+}
+
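A hypothetical caller of the new four-argument form, using the legacy
primary-channel addresses; a ctrl_port of 0 falls back to the conventional
data_port + 0x206:

    hw_regs_t hw;
    int irq;

    /* Control resolves to 0x1f0 + 0x206 = 0x3f6 because ctrl_port is 0. */
    ide_init_hwif_ports(&hw, 0x1f0, 0, &irq);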
typedef union {
unsigned all : 8; /* all of the bits together */
struct {
@@ -68,51 +90,19 @@ typedef union {
} b;
} select_t;
-static __inline__ int ide_request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
- unsigned long flags, const char *device, void *dev_id)
-{
- return request_irq(irq, handler, flags, device, dev_id);
-}
-
-static __inline__ void ide_free_irq(unsigned int irq, void *dev_id)
-{
- free_irq(irq, dev_id);
-}
-
-static __inline__ int ide_check_region (ide_ioreg_t from, unsigned int extent)
-{
- return check_region(from, extent);
-}
-
-static __inline__ void ide_request_region (ide_ioreg_t from, unsigned int extent, const char *name)
-{
- request_region(from, extent, name);
-}
-
-static __inline__ void ide_release_region (ide_ioreg_t from, unsigned int extent)
-{
- release_region(from, extent);
-}
+#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id))
+#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id))
+#define ide_check_region(from,extent) check_region((from), (extent))
+#define ide_request_region(from,extent,name) request_region((from), (extent), (name))
+#define ide_release_region(from,extent) release_region((from), (extent))
/*
* The following are not needed for the non-m68k ports
*/
-static __inline__ int ide_ack_intr (ide_ioreg_t status_port, ide_ioreg_t irq_port)
-{
- return(1);
-}
-
-static __inline__ void ide_fix_driveid(struct hd_driveid *id)
-{
-}
-
-static __inline__ void ide_release_lock (int *ide_lock)
-{
-}
-
-static __inline__ void ide_get_lock (int *ide_lock, void (*handler)(int, void *, struct pt_regs *), void *data)
-{
-}
+#define ide_ack_intr(hwif) (1)
+#define ide_fix_driveid(id) do {} while (0)
+#define ide_release_lock(lock) do {} while (0)
+#define ide_get_lock(lock, hdlr, data) do {} while (0)
#endif /* __KERNEL__ */
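The empty stubs expand to do { } while (0) so each one is a single
statement and behaves like a real function call in any context, for
example (hypothetical condition and fallback):

    if (fixup_needed)
            ide_fix_driveid(id);    /* expands to do { } while (0) */
    else
            printk("driveid ok\n");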
diff --git a/include/asm-alpha/init.h b/include/asm-alpha/init.h
index f4a08c9f2..a85501cbb 100644
--- a/include/asm-alpha/init.h
+++ b/include/asm-alpha/init.h
@@ -12,6 +12,6 @@
#define __FINIT .previous
#define __INITDATA .section .data.init,"a"
-#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
+#define __cacheline_aligned __attribute__((__aligned__(32)))
#endif
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
index dc976cb5f..f908f7464 100644
--- a/include/asm-alpha/io.h
+++ b/include/asm-alpha/io.h
@@ -356,6 +356,12 @@ out:
#endif
#define RTC_ALWAYS_BCD 0
+/* Nothing to do */
+
+#define dma_cache_inv(_start,_size) do { } while (0)
+#define dma_cache_wback(_start,_size) do { } while (0)
+#define dma_cache_wback_inv(_start,_size) do { } while (0)
+
#endif /* __KERNEL__ */
#endif /* __ALPHA_IO_H */
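The no-ops are correct here because Alpha DMA is cache-coherent with
memory. A driver-side sketch (start_device_dma is a hypothetical device
routine) showing that the calls compile away:

    /* On a non-coherent port this would write the buffer back to
       memory before the device reads it; on Alpha it costs nothing. */
    dma_cache_wback(buf, len);
    start_device_dma(dev, virt_to_bus(buf), len);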
diff --git a/include/asm-alpha/irq.h b/include/asm-alpha/irq.h
index fc9e8019a..ad1c917a3 100644
--- a/include/asm-alpha/irq.h
+++ b/include/asm-alpha/irq.h
@@ -92,8 +92,12 @@ static __inline__ int irq_cannonicalize(int irq)
}
extern void disable_irq(unsigned int);
+extern void disable_irq_nosync(unsigned int);
extern void enable_irq(unsigned int);
+extern void irq_enter(int cpu, int irq);
+extern void irq_exit(int cpu, int irq);
+
struct pt_regs;
extern void (*perf_irq)(unsigned long, struct pt_regs *);
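A sketch of how the new irq_enter/irq_exit hooks are intended to bracket
dispatch; the surrounding function is hypothetical:

    void sketch_do_irq(int irq, struct pt_regs *regs)
    {
            int cpu = smp_processor_id();

            irq_enter(cpu, irq);    /* mark this CPU as in-interrupt */
            /* ... run the handlers registered for irq ... */
            irq_exit(cpu, irq);     /* leaving interrupt context */
    }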
diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h
index 2af7a8806..f6097cf7c 100644
--- a/include/asm-alpha/processor.h
+++ b/include/asm-alpha/processor.h
@@ -8,6 +8,12 @@
#define __ASM_ALPHA_PROCESSOR_H
/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+/*
* We have a 42-bit user address space: 4TB user VM...
*/
#define TASK_SIZE (0x40000000000UL)
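current_text_addr() relies on GCC's local-label and &&label extensions:
the statement expression declares a label, places it in the instruction
stream, and yields its address. Hypothetical usage:

    void *pc = current_text_addr();
    printk("executing near %p\n", pc);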
@@ -85,7 +91,7 @@ struct thread_struct {
* is the frame pointer in schedule() and $15 is saved at offset 48 by
* entry.S:do_switch_stack).
*
- * Under heavy swap load I've seen this loose in an ugly way. So do
+ * Under heavy swap load I've seen this lose in an ugly way. So do
* some extra sanity checking on the ranges we expect these pointers
* to be in so that we can fail gracefully. This is just for ps after
* all. -- r~
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
index 698ce1821..255888e8a 100644
--- a/include/asm-alpha/semaphore.h
+++ b/include/asm-alpha/semaphore.h
@@ -16,15 +16,58 @@ struct semaphore {
/* Careful, inline assembly knows about the position of these two. */
atomic_t count;
atomic_t waking; /* biased by -1 */
- struct wait_queue *wait;
+ wait_queue_head_t wait;
+#if WAITQUEUE_DEBUG
+ long __magic;
+#endif
};
-#define MUTEX ((struct semaphore) \
- { ATOMIC_INIT(1), ATOMIC_INIT(-1), NULL })
-#define MUTEX_LOCKED ((struct semaphore) \
- { ATOMIC_INIT(0), ATOMIC_INIT(-1), NULL })
+#if WAITQUEUE_DEBUG
+# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
+#else
+# define __SEM_DEBUG_INIT(name)
+#endif
+
+#define __SEMAPHORE_INITIALIZER(name,count) \
+ { ATOMIC_INIT(count), ATOMIC_INIT(-1), \
+ __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+ __SEM_DEBUG_INIT(name) }
+
+#define __MUTEX_INITIALIZER(name) \
+ __SEMAPHORE_INITIALIZER(name,1)
+
+#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
+#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
+
+extern inline void sema_init (struct semaphore *sem, int val)
+{
+ /*
+ * Logically,
+ * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
+ * except that gcc generates better code initializing the members individually.
+ */
+
+ atomic_set(&sem->count, val);
+ atomic_set(&sem->waking, -1);
+ init_waitqueue_head(&sem->wait);
+#if WAITQUEUE_DEBUG
+ sem->__magic = (long)&sem->__magic;
+#endif
+}
+
+static inline void init_MUTEX (struct semaphore *sem)
+{
+ sema_init(sem, 1);
+}
+
+static inline void init_MUTEX_LOCKED (struct semaphore *sem)
+{
+ sema_init(sem, 0);
+}
-#define sema_init(sem, val) atomic_set(&((sem)->count), val)
extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
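Hypothetical usage of the new initializers, which replace the old MUTEX
and MUTEX_LOCKED compound literals:

    static DECLARE_MUTEX(driver_sem);           /* count 1: starts free */
    static DECLARE_MUTEX_LOCKED(done_sem);      /* count 0: starts held */

    static struct semaphore dynamic_sem;
    /* at run time, equivalent to DECLARE_MUTEX: */
    sema_init(&dynamic_sem, 1);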
@@ -57,8 +100,13 @@ extern inline void down(struct semaphore * sem)
a function that ordinarily wouldn't. Otherwise we could
have it done by the macro directly, which can be optimized
by the linker. */
- register void *pv __asm__("$27") = __down_failed;
+ register void *pv __asm__("$27");
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+ pv = __down_failed;
__asm__ __volatile__ (
"/* semaphore down operation */\n"
"1: ldl_l $24,%1\n"
@@ -88,8 +136,13 @@ extern inline int down_interruptible(struct semaphore * sem)
value is in $24. */
register int ret __asm__("$24");
- register void *pv __asm__("$27") = __down_failed_interruptible;
+ register void *pv __asm__("$27");
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ pv = __down_failed_interruptible;
__asm__ __volatile__ (
"/* semaphore down interruptible operation */\n"
"1: ldl_l $24,%2\n"
@@ -144,6 +197,10 @@ extern inline int down_trylock(struct semaphore * sem)
} while (tmp == 0);
*/
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
__asm__ __volatile__(
"1: ldq_l %1,%4\n"
" lda %3,1\n"
@@ -179,8 +236,13 @@ extern inline void up(struct semaphore * sem)
its return address in $28. The pv is loaded as usual.
The gp is clobbered (in the module case) as usual. */
- register void *pv __asm__("$27") = __up_wakeup;
+ register void *pv __asm__("$27");
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ pv = __up_wakeup;
__asm__ __volatile__ (
"/* semaphore up operation */\n"
" mb\n"
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index 04be8487d..d53142bb1 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -4,22 +4,30 @@
#ifdef __SMP__
#include <linux/tasks.h>
+#include <asm/init.h>
#include <asm/pal.h>
struct cpuinfo_alpha {
unsigned long loops_per_sec;
- unsigned int next;
unsigned long *pgd_cache;
unsigned long *pte_cache;
unsigned long pgtable_cache_sz;
unsigned long ipi_count;
-} __attribute__((aligned(32)));
+ unsigned long prof_multiplier;
+ unsigned long prof_counter;
+} __cacheline_aligned;
extern struct cpuinfo_alpha cpu_data[NR_CPUS];
#define PROC_CHANGE_PENALTY 20
-extern __volatile__ int cpu_number_map[NR_CPUS];
+/* Map from cpu id to sequential logical cpu number.  This is the
+   identity mapping unless some cpus failed to come on-line.  */
+extern int cpu_number_map[NR_CPUS];
+
+/* The reverse map from sequential logical cpu number to cpu id. */
+extern int __cpu_logical_map[NR_CPUS];
+#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
/* HACK: Cabrio WHAMI return value is bogus if more than 8 bits used.. :-( */
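A hypothetical loop showing how the two maps compose; smp_num_cpus is
assumed from the generic SMP code:

    int i;

    for (i = 0; i < smp_num_cpus; i++) {
            int id = cpu_logical_map(i);    /* logical index -> cpu id */
            /* cpu_number_map[id] == i for every on-line cpu */
            printk("logical cpu %d has hardware id %d\n", i, id);
    }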
@@ -35,7 +43,6 @@ static __inline__ unsigned char hard_smp_processor_id(void)
}
#define smp_processor_id() (current->processor)
-#define cpu_logical_map(cpu) (cpu)
#endif /* __SMP__ */
diff --git a/include/asm-alpha/softirq.h b/include/asm-alpha/softirq.h
index 41ccc29c9..cb89c5328 100644
--- a/include/asm-alpha/softirq.h
+++ b/include/asm-alpha/softirq.h
@@ -5,6 +5,16 @@
#include <asm/atomic.h>
#include <asm/hardirq.h>
+/*
+ * This works but is wrong - on SMP it should disable only on the
+ * current CPU and shouldn't synchronize like the heavy global
+ * disable does. Oh, well.
+ *
+ * See the x86 version for an example.
+ */
+#define local_bh_disable()	start_bh_atomic()
+#define local_bh_enable()	end_bh_atomic()
+
extern unsigned int local_bh_count[NR_CPUS];
#define get_active_bhs() (bh_mask & bh_active)
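Hypothetical usage of the pair; on this port the calls fall back to the
heavy global bh-atomic section rather than a per-CPU count:

    local_bh_disable();     /* start_bh_atomic(): hold off bottom halves */
    /* ... touch data shared with a bottom half ... */
    local_bh_enable();      /* end_bh_atomic() */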
diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h
index b5fe62ddf..bbc8de52b 100644
--- a/include/asm-alpha/spinlock.h
+++ b/include/asm-alpha/spinlock.h
@@ -3,6 +3,35 @@
#include <asm/system.h>
+/*
+ * These are the generic versions of the spinlocks
+ * and read-write locks.  We should actually do a
+ * <linux/spinlock.h> with all of this. Oh, well.
+ */
+#define spin_lock_irqsave(lock, flags) do { local_irq_save(flags); spin_lock(lock); } while (0)
+#define spin_lock_irq(lock) do { local_irq_disable(); spin_lock(lock); } while (0)
+#define spin_lock_bh(lock) do { local_bh_disable(); spin_lock(lock); } while (0)
+
+#define read_lock_irqsave(lock, flags) do { local_irq_save(flags); read_lock(lock); } while (0)
+#define read_lock_irq(lock) do { local_irq_disable(); read_lock(lock); } while (0)
+#define read_lock_bh(lock) do { local_bh_disable(); read_lock(lock); } while (0)
+
+#define write_lock_irqsave(lock, flags) do { local_irq_save(flags); write_lock(lock); } while (0)
+#define write_lock_irq(lock) do { local_irq_disable(); write_lock(lock); } while (0)
+#define write_lock_bh(lock) do { local_bh_disable(); write_lock(lock); } while (0)
+
+#define spin_unlock_irqrestore(lock, flags) do { spin_unlock(lock); local_irq_restore(flags); } while (0)
+#define spin_unlock_irq(lock) do { spin_unlock(lock); local_irq_enable(); } while (0)
+#define spin_unlock_bh(lock) do { spin_unlock(lock); local_bh_enable(); } while (0)
+
+#define read_unlock_irqrestore(lock, flags) do { read_unlock(lock); local_irq_restore(flags); } while (0)
+#define read_unlock_irq(lock) do { read_unlock(lock); local_irq_enable(); } while (0)
+#define read_unlock_bh(lock) do { read_unlock(lock); local_bh_enable(); } while (0)
+
+#define write_unlock_irqrestore(lock, flags) do { write_unlock(lock); local_irq_restore(flags); } while (0)
+#define write_unlock_irq(lock) do { write_unlock(lock); local_irq_enable(); } while (0)
+#define write_unlock_bh(lock) do { write_unlock(lock); local_bh_enable(); } while (0)
+
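Hypothetical usage of the composed forms; SPIN_LOCK_UNLOCKED is the
standard static initializer:

    static spinlock_t list_lock = SPIN_LOCK_UNLOCKED;

    void sketch(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&list_lock, flags);
            /* ... critical section, safe against local interrupts ... */
            spin_unlock_irqrestore(&list_lock, flags);
    }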
#ifndef __SMP__
/*
@@ -23,11 +52,6 @@
#define spin_trylock(lock) ((void) 0)
#define spin_unlock_wait(lock) ((void) 0)
#define spin_unlock(lock) ((void) 0)
-#define spin_lock_irq(lock) cli()
-#define spin_unlock_irq(lock) sti()
-
-#define spin_lock_irqsave(lock, flags) save_and_cli(flags)
-#define spin_unlock_irqrestore(lock, flags) restore_flags(flags)
/*
* Read-write spinlocks, allowing multiple readers
@@ -53,15 +77,6 @@
#define read_unlock(lock) ((void) 0)
#define write_lock(lock) ((void) 0)
#define write_unlock(lock) ((void) 0)
-#define read_lock_irq(lock) cli()
-#define read_unlock_irq(lock) sti()
-#define write_lock_irq(lock) cli()
-#define write_unlock_irq(lock) sti()
-
-#define read_lock_irqsave(lock, flags) save_and_cli(flags)
-#define read_unlock_irqrestore(lock, flags) restore_flags(flags)
-#define write_lock_irqsave(lock, flags) save_and_cli(flags)
-#define write_unlock_irqrestore(lock, flags) restore_flags(flags)
#else /* __SMP__ */
@@ -150,15 +165,6 @@ static inline void spin_lock(spinlock_t * lock)
#define spin_lock_own(LOCK, LOCATION) ((void)0)
#endif /* DEBUG_SPINLOCK */
-#define spin_lock_irq(lock) \
- (__cli(), spin_lock(lock))
-#define spin_unlock_irq(lock) \
- (spin_unlock(lock), __sti())
-#define spin_lock_irqsave(lock, flags) \
- (__save_and_cli(flags), spin_lock(lock))
-#define spin_unlock_irqrestore(lock, flags) \
- (spin_unlock(lock), __restore_flags(flags))
-
/***********************************************************/
typedef struct { volatile int write_lock:1, read_counter:31; } rwlock_t;
@@ -233,19 +239,5 @@ static inline void read_unlock(rwlock_t * lock)
: "m" (__dummy_lock(lock)));
}
-#define read_lock_irq(lock) (__cli(), read_lock(lock))
-#define read_unlock_irq(lock) (read_unlock(lock), __sti())
-#define write_lock_irq(lock) (__cli(), write_lock(lock))
-#define write_unlock_irq(lock) (write_unlock(lock), __sti())
-
-#define read_lock_irqsave(lock, flags) \
- (__save_and_cli(flags), read_lock(lock))
-#define read_unlock_irqrestore(lock, flags) \
- (read_unlock(lock), __restore_flags(flags))
-#define write_lock_irqsave(lock, flags) \
- (__save_and_cli(flags), write_lock(lock))
-#define write_unlock_irqrestore(lock, flags) \
- (write_unlock(lock), __restore_flags(flags))
-
#endif /* SMP */
#endif /* _ALPHA_SPINLOCK_H */
diff --git a/include/asm-alpha/string.h b/include/asm-alpha/string.h
index 11495a0b6..f53d2ff74 100644
--- a/include/asm-alpha/string.h
+++ b/include/asm-alpha/string.h
@@ -11,6 +11,7 @@
*/
#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMMOVE
/* For backward compatibility with modules. Unused otherwise. */
extern void * __memcpy(void *, const void *, size_t);
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index b37be73b2..2be0ced69 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -147,6 +147,13 @@ enum implver_enum {
#endif
#endif
+enum amask_enum {
+ AMASK_BWX = (1UL << 0),
+ AMASK_FIX = (1UL << 1),
+ AMASK_MAX = (1UL << 8),
+ AMASK_PRECISE_TRAP = (1UL << 9),
+};
+
#define amask(mask) \
({ unsigned long __amask, __input = (mask); \
__asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input)); \
@@ -220,6 +227,11 @@ wrperfmon(unsigned long perf_fun, unsigned long arg)
#define __save_and_cli(flags) ((flags) = swpipl(7))
#define __restore_flags(flags) setipl(flags)
+#define local_irq_save(flags) __save_and_cli(flags)
+#define local_irq_restore(flags) __restore_flags(flags)
+#define local_irq_disable() __cli()
+#define local_irq_enable() __sti()
+
#ifdef __SMP__
extern int global_irq_holder;
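On Alpha, disabling interrupts means raising the interrupt priority
level, so the new generic names map directly onto the IPL primitives.
Hypothetical usage:

    unsigned long flags;

    local_irq_save(flags);          /* swpipl(7): block all, keep old IPL */
    /* ... short critical section ... */
    local_irq_restore(flags);       /* setipl(flags): back to saved level */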