Diffstat (limited to 'include/asm-alpha')
 include/asm-alpha/atomic.h        |   2
 include/asm-alpha/core_apecs.h    |   4
 include/asm-alpha/core_cia.h      |  10
 include/asm-alpha/core_lca.h      |   8
 include/asm-alpha/core_mcpcia.h   |  41
 include/asm-alpha/core_polaris.h  | 241
 include/asm-alpha/core_pyxis.h    |   4
 include/asm-alpha/core_t2.h       |   2
 include/asm-alpha/core_tsunami.h  |   4
 include/asm-alpha/delay.h         |  46
 include/asm-alpha/dma.h           |  10
 include/asm-alpha/init.h          |   2
 include/asm-alpha/io.h            |  30
 include/asm-alpha/irq.h           |   5
 include/asm-alpha/jensen.h        |   3
 include/asm-alpha/keyboard.h      |   9
 include/asm-alpha/machvec.h       |   7
 include/asm-alpha/pci.h           |   2
 include/asm-alpha/pgtable.h       |   3
 include/asm-alpha/processor.h     |   3
 include/asm-alpha/semaphore.h     | 187
 include/asm-alpha/smp.h           |   6
 include/asm-alpha/softirq.h       |   7
 include/asm-alpha/spinlock.h      |  10
 include/asm-alpha/system.h        |  29
 include/asm-alpha/termbits.h      |  11
 include/asm-alpha/termios.h       |   2
 include/asm-alpha/timex.h         |  19
 include/asm-alpha/unistd.h        |   3
 29 files changed, 615 insertions(+), 95 deletions(-)
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 16366d055..2dccf3521 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -15,7 +15,7 @@ typedef struct { volatile int counter; } atomic_t;
typedef struct { int counter; } atomic_t;
#endif
-#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))
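The new ATOMIC_INIT form wraps the braces in a compound literal, so the macro can be used as an rvalue as well as in a declaration's initializer. A minimal user-space sketch of the idea; the atomic_t and helper macros below are stand-ins mirroring the header, not the kernel's own:

```c
#include <stdio.h>

/* Stand-ins mirroring <asm/atomic.h>; not the kernel header itself. */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)   ( (atomic_t) { (i) } )
#define atomic_read(v)   ((v)->counter)
#define atomic_set(v,i)  ((v)->counter = (i))

int main(void)
{
	atomic_t a = ATOMIC_INIT(1);   /* usable in a declaration... */
	atomic_t b;

	b = ATOMIC_INIT(5);            /* ...and, with the cast, as an rvalue */
	atomic_set(&a, atomic_read(&b) + 1);
	printf("a=%d b=%d\n", atomic_read(&a), atomic_read(&b));
	return 0;
}
```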
diff --git a/include/asm-alpha/core_apecs.h b/include/asm-alpha/core_apecs.h
index a8f0bd6ef..3346346f9 100644
--- a/include/asm-alpha/core_apecs.h
+++ b/include/asm-alpha/core_apecs.h
@@ -458,7 +458,7 @@ __EXTERN_INLINE unsigned int apecs_inb(unsigned long addr)
__EXTERN_INLINE void apecs_outb(unsigned char b, unsigned long addr)
{
- unsigned int w;
+ unsigned long w;
w = __kernel_insbl(b, addr & 3);
*(vuip) ((addr << 5) + APECS_IO + 0x00) = w;
@@ -473,7 +473,7 @@ __EXTERN_INLINE unsigned int apecs_inw(unsigned long addr)
__EXTERN_INLINE void apecs_outw(unsigned short b, unsigned long addr)
{
- unsigned int w;
+ unsigned long w;
w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + APECS_IO + 0x08) = w;
diff --git a/include/asm-alpha/core_cia.h b/include/asm-alpha/core_cia.h
index 32fd81f2e..3407d0159 100644
--- a/include/asm-alpha/core_cia.h
+++ b/include/asm-alpha/core_cia.h
@@ -326,9 +326,9 @@ __EXTERN_INLINE unsigned int cia_inb(unsigned long addr)
__EXTERN_INLINE void cia_outb(unsigned char b, unsigned long addr)
{
- unsigned int w = __kernel_insbl(b, addr & 3);
+ unsigned long w = __kernel_insbl(b, addr & 3);
*(vuip) ((addr << 5) + CIA_IO + 0x00) = w;
- wmb();
+ mb();
}
__EXTERN_INLINE unsigned int cia_inw(unsigned long addr)
@@ -340,9 +340,9 @@ __EXTERN_INLINE unsigned int cia_inw(unsigned long addr)
__EXTERN_INLINE void cia_outw(unsigned short b, unsigned long addr)
{
- unsigned int w = __kernel_inswl(b, addr & 3);
+ unsigned long w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + CIA_IO + 0x08) = w;
- wmb();
+ mb();
}
__EXTERN_INLINE unsigned int cia_inl(unsigned long addr)
@@ -353,7 +353,7 @@ __EXTERN_INLINE unsigned int cia_inl(unsigned long addr)
__EXTERN_INLINE void cia_outl(unsigned int b, unsigned long addr)
{
*(vuip) ((addr << 5) + CIA_IO + 0x18) = b;
- wmb();
+ mb();
}
diff --git a/include/asm-alpha/core_lca.h b/include/asm-alpha/core_lca.h
index bce449fa4..63f258924 100644
--- a/include/asm-alpha/core_lca.h
+++ b/include/asm-alpha/core_lca.h
@@ -262,7 +262,7 @@ __EXTERN_INLINE unsigned int lca_inb(unsigned long addr)
__EXTERN_INLINE void lca_outb(unsigned char b, unsigned long addr)
{
- unsigned int w;
+ unsigned long w;
w = __kernel_insbl(b, addr & 3);
*(vuip) ((addr << 5) + LCA_IO + 0x00) = w;
@@ -277,7 +277,7 @@ __EXTERN_INLINE unsigned int lca_inw(unsigned long addr)
__EXTERN_INLINE void lca_outw(unsigned short b, unsigned long addr)
{
- unsigned int w;
+ unsigned long w;
w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + LCA_IO + 0x08) = w;
@@ -340,7 +340,7 @@ __EXTERN_INLINE unsigned long lca_readq(unsigned long addr)
__EXTERN_INLINE void lca_writeb(unsigned char b, unsigned long addr)
{
unsigned long msb;
- unsigned int w;
+ unsigned long w;
if (addr >= (1UL << 24)) {
msb = addr & 0xf8000000;
@@ -354,7 +354,7 @@ __EXTERN_INLINE void lca_writeb(unsigned char b, unsigned long addr)
__EXTERN_INLINE void lca_writew(unsigned short b, unsigned long addr)
{
unsigned long msb;
- unsigned int w;
+ unsigned long w;
if (addr >= (1UL << 24)) {
msb = addr & 0xf8000000;
diff --git a/include/asm-alpha/core_mcpcia.h b/include/asm-alpha/core_mcpcia.h
index 33e67b462..84eab12d8 100644
--- a/include/asm-alpha/core_mcpcia.h
+++ b/include/asm-alpha/core_mcpcia.h
@@ -71,9 +71,7 @@
*
*/
-#define MCPCIA_MEM_R1_MASK 0x1fffffff /* SPARSE Mem region 1 mask is 29 bits */
-#define MCPCIA_MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
-#define MCPCIA_MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
+#define MCPCIA_MEM_MASK 0x07ffffff /* SPARSE Mem region mask is 27 bits */
#define MCPCIA_DMA_WIN_BASE_DEFAULT (2*1024*1024*1024U)
#define MCPCIA_DMA_WIN_SIZE_DEFAULT (2*1024*1024*1024U)
@@ -264,7 +262,7 @@ __EXTERN_INLINE void mcpcia_outb(unsigned char b, unsigned long in_addr)
{
unsigned long addr = in_addr & 0xffffffffUL;
unsigned long hose = (in_addr >> 32) & 3;
- unsigned int w;
+ unsigned long w;
w = __kernel_insbl(b, addr & 3);
*(vuip) ((addr << 5) + MCPCIA_IO(hose) + 0x00) = w;
@@ -283,7 +281,7 @@ __EXTERN_INLINE void mcpcia_outw(unsigned short b, unsigned long in_addr)
{
unsigned long addr = in_addr & 0xffffffffUL;
unsigned long hose = (in_addr >> 32) & 3;
- unsigned int w;
+ unsigned long w;
w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + MCPCIA_IO(hose) + 0x08) = w;
@@ -386,23 +384,10 @@ __EXTERN_INLINE unsigned long mcpcia_srm_base(unsigned long addr)
unsigned long hose = (addr >> 32) & 3;
if (addr >= alpha_mv.sm_base_r1
- && addr <= alpha_mv.sm_base_r1 + MCPCIA_MEM_R1_MASK) {
- mask = MCPCIA_MEM_R1_MASK;
+ && addr <= alpha_mv.sm_base_r1 + MCPCIA_MEM_MASK) {
+ mask = MCPCIA_MEM_MASK;
base = MCPCIA_SPARSE(hose);
}
-#if 0
- /* FIXME FIXME FIXME: SPARSE_MEM_R2 and R3 are not defined? */
- else if (addr >= alpha_mv.sm_base_r2
- && addr <= alpha_mv.sm_base_r2 + MCPCIA_MEM_R2_MASK) {
- mask = MCPCIA_MEM_R2_MASK;
- base = MCPCIA_SPARSE_MEM_R2;
- }
- else if (addr >= alpha_mv.sm_base_r3
- && addr <= alpha_mv.sm_base_r3 + MCPCIA_MEM_R3_MASK) {
- mask = MCPCIA_MEM_R3_MASK;
- base = MCPCIA_SPARSE_MEM_R3;
- }
-#endif
else
{
#if 0
@@ -462,8 +447,8 @@ __EXTERN_INLINE unsigned long mcpcia_readb(unsigned long in_addr)
unsigned long hose = (in_addr >> 32) & 3;
unsigned long result, msb, work, temp;
- msb = addr & 0xE0000000UL;
- temp = addr & MCPCIA_MEM_R1_MASK;
+ msb = addr & ~MCPCIA_MEM_MASK;
+ temp = addr & MCPCIA_MEM_MASK;
set_hae(msb);
work = ((temp << 5) + MCPCIA_SPARSE(hose) + 0x00);
@@ -477,8 +462,8 @@ __EXTERN_INLINE unsigned long mcpcia_readw(unsigned long in_addr)
unsigned long hose = (in_addr >> 32) & 3;
unsigned long result, msb, work, temp;
- msb = addr & 0xE0000000UL;
- temp = addr & MCPCIA_MEM_R1_MASK ;
+ msb = addr & ~MCPCIA_MEM_MASK;
+ temp = addr & MCPCIA_MEM_MASK ;
set_hae(msb);
work = ((temp << 5) + MCPCIA_SPARSE(hose) + 0x08);
@@ -492,8 +477,8 @@ __EXTERN_INLINE void mcpcia_writeb(unsigned char b, unsigned long in_addr)
unsigned long hose = (in_addr >> 32) & 3;
unsigned long msb;
- msb = addr & 0xE0000000;
- addr &= MCPCIA_MEM_R1_MASK;
+ msb = addr & ~MCPCIA_MEM_MASK;
+ addr &= MCPCIA_MEM_MASK;
set_hae(msb);
*(vuip) ((addr << 5) + MCPCIA_SPARSE(hose) + 0x00) = b * 0x01010101;
@@ -505,8 +490,8 @@ __EXTERN_INLINE void mcpcia_writew(unsigned short b, unsigned long in_addr)
unsigned long hose = (in_addr >> 32) & 3;
unsigned long msb ;
- msb = addr & 0xE0000000 ;
- addr &= MCPCIA_MEM_R1_MASK ;
+ msb = addr & ~MCPCIA_MEM_MASK ;
+ addr &= MCPCIA_MEM_MASK ;
set_hae(msb);
*(vuip) ((addr << 5) + MCPCIA_SPARSE(hose) + 0x08) = b * 0x00010001;
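With the three region masks collapsed into a single MCPCIA_MEM_MASK, the HAE value and the in-window offset are both derived from one mask. A rough user-space sketch of that address split, with set_hae() stubbed out and the per-hose sparse window base left as a hypothetical parameter; only the arithmetic is shown:

```c
#include <stdio.h>

#define MCPCIA_MEM_MASK 0x07ffffffUL   /* SPARSE Mem region mask is 27 bits */

static unsigned long hae_shadow;       /* stand-in for the HAE register */
static void set_hae(unsigned long msb) { hae_shadow = msb; }

/* Address used for a byte-wide sparse read at bus offset 'addr'. */
static unsigned long sparse_byte_address(unsigned long addr,
                                         unsigned long sparse_base)
{
	unsigned long msb  = addr & ~MCPCIA_MEM_MASK;  /* programmed into the HAE */
	unsigned long temp = addr &  MCPCIA_MEM_MASK;  /* offset within the window */

	set_hae(msb);
	return (temp << 5) + sparse_base + 0x00;       /* 0x00 selects byte width */
}

int main(void)
{
	unsigned long work = sparse_byte_address(0x0a123456UL, 0x0UL);
	printf("hae=%#lx work=%#lx\n", hae_shadow, work);
	return 0;
}
```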
diff --git a/include/asm-alpha/core_polaris.h b/include/asm-alpha/core_polaris.h
new file mode 100644
index 000000000..8caf50622
--- /dev/null
+++ b/include/asm-alpha/core_polaris.h
@@ -0,0 +1,241 @@
+#ifndef __ALPHA_POLARIS__H__
+#define __ALPHA_POLARIS__H__
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <asm/compiler.h>
+
+/*
+ * POLARIS is the internal name for a core logic chipset which provides
+ * memory controller and PCI access for the 21164PC chip based systems.
+ *
+ * This file is based on:
+ *
+ * Polaris System Controller
+ * Device Functional Specification
+ * 22-Jan-98
+ * Rev. 4.2
+ *
+ */
+
+/* Polaris memory regions */
+#define POLARIS_SPARSE_MEM_BASE (IDENT_ADDR + 0xf800000000)
+#define POLARIS_DENSE_MEM_BASE (IDENT_ADDR + 0xf900000000)
+#define POLARIS_SPARSE_IO_BASE (IDENT_ADDR + 0xf980000000)
+#define POLARIS_SPARSE_CONFIG_BASE (IDENT_ADDR + 0xf9c0000000)
+#define POLARIS_IACK_BASE (IDENT_ADDR + 0xf9f8000000)
+#define POLARIS_DENSE_IO_BASE (IDENT_ADDR + 0xf9fc000000)
+#define POLARIS_DENSE_CONFIG_BASE (IDENT_ADDR + 0xf9fe000000)
+
+#define POLARIS_IACK_SC POLARIS_IACK_BASE
+
+/* The Polaris command/status registers live in PCI Config space for
+ * bus 0/device 0. As such, they may be bytes, words, or doublewords.
+ */
+#define POLARIS_W_VENID (POLARIS_DENSE_CONFIG_BASE)
+#define POLARIS_W_DEVID (POLARIS_DENSE_CONFIG_BASE+2)
+#define POLARIS_W_CMD (POLARIS_DENSE_CONFIG_BASE+4)
+#define POLARIS_W_STATUS (POLARIS_DENSE_CONFIG_BASE+6)
+
+/* No HAE address. Polaris has no concept of an HAE, since it
+ * supports transfers of all sizes in dense space.
+ */
+
+#define POLARIS_DMA_WIN_BASE_DEFAULT 0x80000000 /* fixed, 2G @ 2G */
+#define POLARIS_DMA_WIN_SIZE_DEFAULT 0x80000000 /* fixed, 2G @ 2G */
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM_SETUP)
+#define POLARIS_DMA_WIN_BASE alpha_mv.dma_win_base
+#define POLARIS_DMA_WIN_SIZE alpha_mv.dma_win_size
+#else
+#define POLARIS_DMA_WIN_BASE POLARIS_DMA_WIN_BASE_DEFAULT
+#define POLARIS_DMA_WIN_SIZE POLARIS_DMA_WIN_SIZE_DEFAULT
+#endif
+
+/*
+ * Data structure for handling POLARIS machine checks:
+ */
+struct el_POLARIS_sysdata_mcheck {
+ u_long psc_status;
+ u_long psc_pcictl0;
+ u_long psc_pcictl1;
+ u_long psc_pcictl2;
+};
+
+ #ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
+__EXTERN_INLINE unsigned long polaris_virt_to_bus(void * address)
+{
+ return virt_to_phys(address) + POLARIS_DMA_WIN_BASE;
+}
+
+__EXTERN_INLINE void * polaris_bus_to_virt(unsigned long address)
+{
+ return phys_to_virt(address - POLARIS_DMA_WIN_BASE);
+}
+
+/*
+ * I/O functions:
+ *
+ * POLARIS, the PCI/memory support chipset for the PCA56 (21164PC)
+ * processors, can use either a sparse address mapping scheme, or the
+ * so-called byte-word PCI address space, to get at PCI memory and I/O.
+ *
+ * However, we will support only the BWX form.
+ */
+
+#define vucp volatile unsigned char *
+#define vusp volatile unsigned short *
+#define vuip volatile unsigned int *
+#define vulp volatile unsigned long *
+
+__EXTERN_INLINE unsigned int polaris_inb(unsigned long addr)
+{
+ return __kernel_ldbu(*(vucp)(addr + POLARIS_DENSE_IO_BASE));
+}
+
+__EXTERN_INLINE void polaris_outb(unsigned char b, unsigned long addr)
+{
+ __kernel_stb(b, *(vucp)(addr + POLARIS_DENSE_IO_BASE));
+ mb();
+}
+
+__EXTERN_INLINE unsigned int polaris_inw(unsigned long addr)
+{
+ return __kernel_ldwu(*(vusp)(addr + POLARIS_DENSE_IO_BASE));
+}
+
+__EXTERN_INLINE void polaris_outw(unsigned short b, unsigned long addr)
+{
+ __kernel_stw(b, *(vusp)(addr + POLARIS_DENSE_IO_BASE));
+ mb();
+}
+
+__EXTERN_INLINE unsigned int polaris_inl(unsigned long addr)
+{
+ return *(vuip)(addr + POLARIS_DENSE_IO_BASE);
+}
+
+__EXTERN_INLINE void polaris_outl(unsigned int b, unsigned long addr)
+{
+ *(vuip)(addr + POLARIS_DENSE_IO_BASE) = b;
+ mb();
+}
+
+/*
+ * Memory functions. Polaris allows all accesses (byte/word
+ * as well as long/quad) to be done through dense space.
+ *
+ * We will only support DENSE access via BWX insns.
+ */
+
+__EXTERN_INLINE unsigned long polaris_readb(unsigned long addr)
+{
+ return __kernel_ldbu(*(vucp)(addr + POLARIS_DENSE_MEM_BASE));
+}
+
+__EXTERN_INLINE unsigned long polaris_readw(unsigned long addr)
+{
+ return __kernel_ldwu(*(vusp)(addr + POLARIS_DENSE_MEM_BASE));
+}
+
+__EXTERN_INLINE unsigned long polaris_readl(unsigned long addr)
+{
+ return *(vuip)(addr + POLARIS_DENSE_MEM_BASE);
+}
+
+__EXTERN_INLINE unsigned long polaris_readq(unsigned long addr)
+{
+ return *(vulp)(addr + POLARIS_DENSE_MEM_BASE);
+}
+
+__EXTERN_INLINE void polaris_writeb(unsigned char b, unsigned long addr)
+{
+ __kernel_stb(b, *(vucp)(addr + POLARIS_DENSE_MEM_BASE));
+ mb();
+}
+
+__EXTERN_INLINE void polaris_writew(unsigned short b, unsigned long addr)
+{
+ __kernel_stw(b, *(vusp)(addr + POLARIS_DENSE_MEM_BASE));
+ mb();
+}
+
+__EXTERN_INLINE void polaris_writel(unsigned int b, unsigned long addr)
+{
+ *(vuip)(addr + POLARIS_DENSE_MEM_BASE) = b;
+ mb();
+}
+
+__EXTERN_INLINE void polaris_writeq(unsigned long b, unsigned long addr)
+{
+ *(vulp)(addr + POLARIS_DENSE_MEM_BASE) = b;
+ mb();
+}
+
+/* Find the DENSE memory area for a given bus address. */
+
+__EXTERN_INLINE unsigned long polaris_dense_mem(unsigned long addr)
+{
+ return POLARIS_DENSE_MEM_BASE;
+}
+
+#undef vucp
+#undef vusp
+#undef vuip
+#undef vulp
+
+#ifdef __WANT_IO_DEF
+
+#define virt_to_bus polaris_virt_to_bus
+#define bus_to_virt polaris_bus_to_virt
+
+#define __inb polaris_inb
+#define __inw polaris_inw
+#define __inl polaris_inl
+#define __outb polaris_outb
+#define __outw polaris_outw
+#define __outl polaris_outl
+#define __readb polaris_readb
+#define __readw polaris_readw
+#define __writeb polaris_writeb
+#define __writew polaris_writew
+#define __readl polaris_readl
+#define __readq polaris_readq
+#define __writel polaris_writel
+#define __writeq polaris_writeq
+#define dense_mem polaris_dense_mem
+
+#define inb(port) __inb((port))
+#define inw(port) __inw((port))
+#define inl(port) __inl((port))
+
+#define outb(v, port) __outb((v),(port))
+#define outw(v, port) __outw((v),(port))
+#define outl(v, port) __outl((v),(port))
+
+#define readb(a) __readb((unsigned long)(a))
+#define readw(a) __readw((unsigned long)(a))
+#define readl(a) __readl((unsigned long)(a))
+#define readq(a) __readq((unsigned long)(a))
+
+#define writeb(v,a) __writeb((v),(unsigned long)(a))
+#define writew(v,a) __writew((v),(unsigned long)(a))
+#define writel(v,a) __writel((v),(unsigned long)(a))
+#define writeq(v,a) __writeq((v),(unsigned long)(a))
+
+#endif /* __WANT_IO_DEF */
+
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_POLARIS__H__ */
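Since Polaris is driven purely through dense space with BWX-sized loads and stores, every accessor in this header reduces to adding the offset to a window base and dereferencing at the right width. A user-space sketch of that pattern, with an ordinary buffer standing in for the dense I/O window; the my_ helpers are illustrative, not the header's own:

```c
#include <stdio.h>
#include <stdint.h>

/* Simulated dense I/O window (the real one is an MMIO region). */
static unsigned short dense_io_window[0x80];
#define POLARIS_DENSE_IO_BASE ((uintptr_t)dense_io_window)

static unsigned int my_polaris_inw(unsigned long addr)
{
	return *(volatile unsigned short *)(addr + POLARIS_DENSE_IO_BASE);
}

static void my_polaris_outw(unsigned short w, unsigned long addr)
{
	*(volatile unsigned short *)(addr + POLARIS_DENSE_IO_BASE) = w;
	/* the real routine follows the store with mb() */
}

int main(void)
{
	my_polaris_outw(0x1234, 0x40);               /* even offset, so aligned */
	printf("inw(0x40) = 0x%04x\n", my_polaris_inw(0x40));
	return 0;
}
```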
diff --git a/include/asm-alpha/core_pyxis.h b/include/asm-alpha/core_pyxis.h
index da80e501e..213adf4ba 100644
--- a/include/asm-alpha/core_pyxis.h
+++ b/include/asm-alpha/core_pyxis.h
@@ -326,7 +326,7 @@ __EXTERN_INLINE unsigned int pyxis_inb(unsigned long addr)
__EXTERN_INLINE void pyxis_outb(unsigned char b, unsigned long addr)
{
- unsigned int w;
+ unsigned long w;
w = __kernel_insbl(b, addr & 3);
*(vuip) ((addr << 5) + PYXIS_IO + 0x00) = w;
@@ -341,7 +341,7 @@ __EXTERN_INLINE unsigned int pyxis_inw(unsigned long addr)
__EXTERN_INLINE void pyxis_outw(unsigned short b, unsigned long addr)
{
- unsigned int w;
+ unsigned long w;
w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + PYXIS_IO + 0x08) = w;
diff --git a/include/asm-alpha/core_t2.h b/include/asm-alpha/core_t2.h
index 1f0984b38..fdb0f82fe 100644
--- a/include/asm-alpha/core_t2.h
+++ b/include/asm-alpha/core_t2.h
@@ -378,7 +378,7 @@ __EXTERN_INLINE unsigned int t2_inw(unsigned long addr)
__EXTERN_INLINE void t2_outw(unsigned short b, unsigned long addr)
{
- unsigned int w;
+ unsigned long w;
w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + T2_IO + 0x08) = w;
diff --git a/include/asm-alpha/core_tsunami.h b/include/asm-alpha/core_tsunami.h
index ee62c175f..68d30cf86 100644
--- a/include/asm-alpha/core_tsunami.h
+++ b/include/asm-alpha/core_tsunami.h
@@ -16,8 +16,8 @@
*
*/
-#define TSUNAMI_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
-#define TSUNAMI_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
+#define TSUNAMI_DMA_WIN_BASE_DEFAULT (1024*1024*1024U)
+#define TSUNAMI_DMA_WIN_SIZE_DEFAULT (1024*1024*1024U)
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM_SETUP)
#define TSUNAMI_DMA_WIN_BASE alpha_mv.dma_win_base
diff --git a/include/asm-alpha/delay.h b/include/asm-alpha/delay.h
index 87e69f657..a55752abb 100644
--- a/include/asm-alpha/delay.h
+++ b/include/asm-alpha/delay.h
@@ -1,7 +1,7 @@
#ifndef __ALPHA_DELAY_H
#define __ALPHA_DELAY_H
-extern unsigned long loops_per_sec;
+#include <asm/smp.h>
/*
* Copyright (C) 1993 Linus Torvalds
@@ -9,7 +9,8 @@ extern unsigned long loops_per_sec;
* Delay routines, using a pre-computed "loops_per_second" value.
*/
-extern __inline__ void __delay(unsigned long loops)
+extern __inline__ void
+__delay(unsigned long loops)
{
__asm__ __volatile__(".align 3\n"
"1:\tsubq %0,1,%0\n\t"
@@ -24,15 +25,46 @@ extern __inline__ void __delay(unsigned long loops)
* lookup table, really, as the multiplications take much too long with
* short delays. This is a "reasonable" implementation, though (and the
* first constant multiplications gets optimized away if the delay is
- * a constant)
+ * a constant).
+ *
+ * Optimize small constants further by exposing the second multiplication
+ * to the compiler. In addition, mulq is 2 cycles faster than umulh.
*/
-extern __inline__ void udelay(unsigned long usecs)
+
+extern __inline__ void
+__udelay(unsigned long usecs, unsigned long lps)
{
+ /* compute (usecs * 2**64 / 10**6) * loops_per_sec / 2**64 */
+
usecs *= 0x000010c6f7a0b5edUL; /* 2**64 / 1000000 */
- __asm__("umulh %1,%2,%0"
- :"=r" (usecs)
- :"r" (usecs),"r" (loops_per_sec));
+ __asm__("umulh %1,%2,%0" :"=r" (usecs) :"r" (usecs),"r" (lps));
+ __delay(usecs);
+}
+
+extern __inline__ void
+__small_const_udelay(unsigned long usecs, unsigned long lps)
+{
+ /* compute (usecs * 2**32 / 10**6) * loops_per_sec / 2**32 */
+
+ usecs *= 0x10c6; /* 2^32 / 10^6 */
+ usecs *= lps;
+ usecs >>= 32;
__delay(usecs);
}
+#ifdef __SMP__
+#define udelay(usecs) \
+ (__builtin_constant_p(usecs) && usecs < 0x100000000UL \
+ ? __small_const_udelay(usecs, \
+ cpu_data[smp_processor_id()].loops_per_sec) \
+ : __udelay(usecs, \
+ cpu_data[smp_processor_id()].loops_per_sec))
+#else
+#define udelay(usecs) \
+ (__builtin_constant_p(usecs) && usecs < 0x100000000UL \
+ ? __small_const_udelay(usecs, loops_per_sec) \
+ : __udelay(usecs, loops_per_sec))
+#endif
+
+
#endif /* defined(__ALPHA_DELAY_H) */
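The small-constant path avoids umulh by doing the microsecond-to-loops scaling as two ordinary 64-bit multiplies and a shift. A quick user-space check of that arithmetic; uint64_t stands in for Alpha's 64-bit unsigned long, and loops_per_sec is an arbitrary illustrative value, not a measured one:

```c
#include <stdio.h>
#include <stdint.h>

/* usecs * loops_per_sec / 10^6, computed as
 * ((usecs * (2^32 / 10^6)) * loops_per_sec) >> 32. */
int main(void)
{
	uint64_t usecs = 100;              /* delay to convert */
	uint64_t lps   = 250000000;        /* hypothetical loops_per_sec */

	uint64_t loops = usecs;
	loops *= 0x10c6;                   /* ~= 2^32 / 10^6 */
	loops *= lps;
	loops >>= 32;

	printf("%llu us -> %llu delay loops (exact: %llu)\n",
	       (unsigned long long)usecs,
	       (unsigned long long)loops,
	       (unsigned long long)(usecs * lps / 1000000));
	return 0;
}
```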
diff --git a/include/asm-alpha/dma.h b/include/asm-alpha/dma.h
index f16bdc6de..d415b2a7e 100644
--- a/include/asm-alpha/dma.h
+++ b/include/asm-alpha/dma.h
@@ -173,6 +173,8 @@
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+#define DMA_AUTOINIT 0x10
+
extern spinlock_t dma_spin_lock;
static __inline__ unsigned long claim_dma_lock(void)
@@ -342,4 +344,12 @@ extern void free_dma(unsigned int dmanr); /* release it again */
#define KERNEL_HAVE_CHECK_DMA
extern int check_dma(unsigned int dmanr);
+/* From PCI */
+
+#ifdef CONFIG_PCI_QUIRKS
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
#endif /* _ASM_DMA_H */
diff --git a/include/asm-alpha/init.h b/include/asm-alpha/init.h
index 7d769dfcd..f4a08c9f2 100644
--- a/include/asm-alpha/init.h
+++ b/include/asm-alpha/init.h
@@ -12,4 +12,6 @@
#define __FINIT .previous
#define __INITDATA .section .data.init,"a"
+#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
+
#endif
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
index 51d2af596..3074ba8fb 100644
--- a/include/asm-alpha/io.h
+++ b/include/asm-alpha/io.h
@@ -3,7 +3,6 @@
#include <linux/config.h>
#include <asm/system.h>
-#include <asm/machvec.h>
/* We don't use IO slowdowns on the Alpha, but.. */
#define __SLOW_DOWN_IO do { } while (0)
@@ -19,6 +18,7 @@
#endif
#ifdef __KERNEL__
+#include <asm/machvec.h>
/*
* We try to avoid hae updates (thus the cache), but when we
@@ -78,6 +78,7 @@ extern void _sethae (unsigned long addr); /* cached version */
* There are different chipsets to interface the Alpha CPUs to the world.
*/
+#ifdef __KERNEL__
#ifdef CONFIG_ALPHA_GENERIC
/* In a generic kernel, we always go through the machine vector. */
@@ -140,6 +141,8 @@ extern void _sethae (unsigned long addr); /* cached version */
# include <asm/core_tsunami.h>
#elif defined(CONFIG_ALPHA_JENSEN)
# include <asm/jensen.h>
+#elif defined(CONFIG_ALPHA_RX164)
+# include <asm/core_polaris.h>
#else
#error "What system is this?"
#endif
@@ -147,6 +150,7 @@ extern void _sethae (unsigned long addr); /* cached version */
#undef __WANT_IO_DEF
#endif /* GENERIC */
+#endif /* __KERNEL__ */
/*
* The convention used for inb/outb etc. is that names starting with
@@ -172,6 +176,7 @@ extern void _writew(unsigned short b, unsigned long addr);
extern void _writel(unsigned int b, unsigned long addr);
extern void _writeq(unsigned long b, unsigned long addr);
+#ifdef __KERNEL__
/*
* The platform header files may define some of these macros to use
* the inlined versions where appropriate. These macros may also be
@@ -216,6 +221,27 @@ extern void _writeq(unsigned long b, unsigned long addr);
# define outl_p outl
#endif
+#else
+
+/* Userspace declarations. */
+
+extern unsigned int inb (unsigned long port);
+extern unsigned int inw (unsigned long port);
+extern unsigned int inl (unsigned long port);
+extern void outb (unsigned char b,unsigned long port);
+extern void outw (unsigned short w,unsigned long port);
+extern void outl (unsigned int l,unsigned long port);
+extern unsigned long readb(unsigned long addr);
+extern unsigned long readw(unsigned long addr);
+extern unsigned long readl(unsigned long addr);
+extern void writeb(unsigned char b, unsigned long addr);
+extern void writew(unsigned short b, unsigned long addr);
+extern void writel(unsigned int b, unsigned long addr);
+
+#endif /* __KERNEL__ */
+
+#ifdef __KERNEL__
+
/*
* The "address" in IO memory space is not clearly either an integer or a
* pointer. We will accept both, thus the casts.
@@ -257,8 +283,6 @@ static inline void iounmap(void *addr)
# define writeq(v,a) _writeq((v),(unsigned long)(a))
#endif
-#ifdef __KERNEL__
-
/*
* String version of IO memory access ops:
*/
diff --git a/include/asm-alpha/irq.h b/include/asm-alpha/irq.h
index 7f8853c55..fc9e8019a 100644
--- a/include/asm-alpha/irq.h
+++ b/include/asm-alpha/irq.h
@@ -34,6 +34,7 @@
defined(CONFIG_ALPHA_XLT) || \
defined(CONFIG_ALPHA_MIATA) || \
defined(CONFIG_ALPHA_RUFFIAN) || \
+ defined(CONFIG_ALPHA_RX164) || \
defined(CONFIG_ALPHA_NORITAKE)
# define NR_IRQS 48
@@ -93,4 +94,8 @@ static __inline__ int irq_cannonicalize(int irq)
extern void disable_irq(unsigned int);
extern void enable_irq(unsigned int);
+struct pt_regs;
+extern void (*perf_irq)(unsigned long, struct pt_regs *);
+
+
#endif /* _ALPHA_IRQ_H */
diff --git a/include/asm-alpha/jensen.h b/include/asm-alpha/jensen.h
index 75f99ea33..874511e94 100644
--- a/include/asm-alpha/jensen.h
+++ b/include/asm-alpha/jensen.h
@@ -7,6 +7,9 @@
* Defines for the AlphaPC EISA IO and memory address space.
*/
+/* The Jensen is strange */
+#define AUX_IRQ (9)
+
/*
* NOTE! The memory operations do not set any memory barriers, as it's
* not needed for cases like a frame buffer that is essentially memory-like.
diff --git a/include/asm-alpha/keyboard.h b/include/asm-alpha/keyboard.h
index 0c6f04d6b..85a1ea9ae 100644
--- a/include/asm-alpha/keyboard.h
+++ b/include/asm-alpha/keyboard.h
@@ -3,7 +3,7 @@
*
* Created 3 Nov 1996 by Geert Uytterhoeven
*
- * $Id: keyboard.h,v 1.6 1998/10/28 12:39:58 ralf Exp $
+ * $Id: keyboard.h,v 1.7 1999/02/10 16:02:26 ralf Exp $
*/
/*
@@ -68,11 +68,10 @@ extern unsigned char pckbd_sysrq_xlate[128];
/*
* Machine specific bits for the PS/2 driver
*/
-
-#if defined(CONFIG_PCI)
-#define AUX_IRQ 12
+#if defined(__alpha__) && !defined(CONFIG_PCI)
+# define AUX_IRQ 9 /* Jensen is odd indeed */
#else
-#define AUX_IRQ 9 /* Jensen is odd indeed */
+# define AUX_IRQ 12
#endif
#define aux_request_irq(handler, dev_id) request_irq(AUX_IRQ, handler, 0, \
diff --git a/include/asm-alpha/machvec.h b/include/asm-alpha/machvec.h
index 17d50c777..035ffa4e2 100644
--- a/include/asm-alpha/machvec.h
+++ b/include/asm-alpha/machvec.h
@@ -4,6 +4,12 @@
#include <linux/config.h>
#include <linux/types.h>
+/*
+ * This file gets pulled in by asm/io.h from user space. We don't
+ * want most of this escaping.
+ */
+
+#ifdef __KERNEL__
/* The following structure vectors all of the I/O and IRQ manipulation
from the generic kernel to the hardware specific backend. */
@@ -122,4 +128,5 @@ extern int alpha_use_srm_setup;
#endif
#endif /* GENERIC */
+#endif
#endif /* __ALPHA_MACHVEC_H */
diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h
index 2ed4cacc2..fa6bd17e1 100644
--- a/include/asm-alpha/pci.h
+++ b/include/asm-alpha/pci.h
@@ -8,7 +8,7 @@
/*
* The following structure is used to manage multiple PCI busses.
*
- * XXX: We should solve thos problem in an architecture independant
+ * XXX: We should solve this problem in an architecture independent
* way, rather than hacking something up here.
*/
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index 47432c051..b74744207 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -14,6 +14,7 @@
#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu_context.h>
#include <asm/machvec.h>
+#include <asm/spinlock.h> /* For the task lock */
/* Caches aren't brain-dead on the Alpha. */
@@ -174,7 +175,7 @@ struct ipi_msg_flush_tb_struct {
struct vm_area_struct * flush_vma;
} p;
unsigned long flush_addr;
- /* unsigned long flush_end; */ /* not used by local_flush_tlb_range */
+ unsigned long flush_end;
};
extern struct ipi_msg_flush_tb_struct ipi_msg_flush_tb;
diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h
index 141075ef6..2af7a8806 100644
--- a/include/asm-alpha/processor.h
+++ b/include/asm-alpha/processor.h
@@ -65,7 +65,7 @@ struct thread_struct {
};
#define INIT_MMAP { &init_mm, PAGE_OFFSET, PAGE_OFFSET+0x10000000, \
- PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, NULL, &init_mm.mmap }
+ NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
#define INIT_TSS { \
0, 0, 0, \
@@ -113,6 +113,7 @@ extern void release_thread(struct task_struct *);
#define copy_segments(nr, tsk, mm) do { } while (0)
#define release_segments(mm) do { } while (0)
+#define forget_segments() do { } while (0)
/* NOTE: The task struct and the stack go together! */
#define alloc_task_struct() \
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
index 29cbb4a33..a172211c1 100644
--- a/include/asm-alpha/semaphore.h
+++ b/include/asm-alpha/semaphore.h
@@ -7,28 +7,80 @@
* (C) Copyright 1996 Linus Torvalds
*/
+#include <asm/current.h>
+#include <asm/system.h>
#include <asm/atomic.h>
+/*
+ * Semaphores are recursive: we allow the holder process to recursively do
+ * down() operations on a semaphore that the process already owns. In order
+ * to do that, we need to keep a semaphore-local copy of the owner and the
+ * "depth of ownership".
+ *
+ * NOTE! Nasty memory ordering rules:
+ * - "owner" and "owner_count" may only be modified once you hold the lock.
+ * - "owner_count" must be written _after_ modifying owner, and must be
+ * read _before_ reading owner. There must be appropriate write and read
+ * barriers to enforce this.
+ */
+
struct semaphore {
atomic_t count;
atomic_t waking;
+ struct task_struct *owner;
+ long owner_depth;
struct wait_queue * wait;
};
-#define MUTEX ((struct semaphore) { ATOMIC_INIT(1), ATOMIC_INIT(0), NULL })
-#define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), ATOMIC_INIT(0), NULL })
+#define MUTEX ((struct semaphore) \
+ { ATOMIC_INIT(1), ATOMIC_INIT(0), NULL, 0, NULL })
+#define MUTEX_LOCKED ((struct semaphore) \
+ { ATOMIC_INIT(0), ATOMIC_INIT(0), NULL, 1, NULL })
+
+#define semaphore_owner(sem) ((sem)->owner)
+#define sema_init(sem, val) atomic_set(&((sem)->count), val)
extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);
-#define sema_init(sem, val) atomic_set(&((sem)->count), val)
+/* All three have custom assembly linkages. */
+extern void __down_failed(struct semaphore * sem);
+extern void __down_failed_interruptible(struct semaphore * sem);
+extern void __up_wakeup(struct semaphore * sem);
+
/*
* These two _must_ execute atomically wrt each other.
*
* This is trivially done with load_locked/store_cond,
* which we have. Let the rest of the losers suck eggs.
+ *
+ * Tricky bits --
+ *
+ * (1) One task does two downs, no other contention
+ * initial state:
+ * count = 1, waking = 0, depth = undef;
+ * down(&sem)
+ * count = 0, waking = 0, depth = 1;
+ * down(&sem)
+ * atomic dec and test sends us to waking_non_zero via __down
+ * count = -1, waking = 0;
+ * conditional atomic dec on waking discovers no free slots
+ * count = -1, waking = 0;
+ * test for owner succeeds and we return ok.
+ * count = -1, waking = 0, depth = 2;
+ * up(&sem)
+ * dec depth
+ * count = -1, waking = 0, depth = 1;
+ * atomic inc and test sends us to slow path
+ * count = 0, waking = 0, depth = 1;
+ * notice !(depth < 0) and don't call __up.
+ * up(&sem)
+ * dec depth
+ * count = 0, waking = 0, depth = 0;
+ * atomic inc and test succeeds.
+ * count = 1, waking = 0, depth = 0;
*/
static inline void wake_one_more(struct semaphore * sem)
@@ -36,48 +88,153 @@ static inline void wake_one_more(struct semaphore * sem)
atomic_inc(&sem->waking);
}
-static inline int waking_non_zero(struct semaphore *sem)
+static inline int waking_non_zero(struct semaphore *sem,
+ struct task_struct *tsk)
{
+ long owner_depth;
int ret, tmp;
+ owner_depth = sem->owner_depth;
+
+ /* Atomic decrement, iff the value is > 0. */
__asm__ __volatile__(
"1: ldl_l %1,%2\n"
" ble %1,2f\n"
" subl %1,1,%0\n"
" stl_c %0,%2\n"
" beq %0,3f\n"
- "2:\n"
+ "2: mb\n"
".section .text2,\"ax\"\n"
"3: br 1b\n"
".previous"
: "=r"(ret), "=r"(tmp), "=m"(__atomic_fool_gcc(&sem->waking))
: "0"(0));
+ ret |= ((owner_depth != 0) & (sem->owner == tsk));
+ if (ret) {
+ sem->owner = tsk;
+ wmb();
+ /* Don't use the old value, which is stale in the
+ !owner case. */
+ sem->owner_depth++;
+ }
+
return ret;
}
/*
- * This isn't quite as clever as the x86 side, but the gp register
- * makes things a bit more complicated on the alpha..
+ * Whee. Hidden out of line code is fun. The contention cases are
+ * handled out of line in kernel/sched.c; arch/alpha/lib/semaphore.S
+ * takes care of making sure we can call it without clobbering regs.
*/
+
extern inline void down(struct semaphore * sem)
{
- if (atomic_dec_return(&sem->count) < 0)
- __down(sem);
+ /* Given that we have to use particular hard registers to
+ communicate with __down_failed anyway, reuse them in
+ the atomic operation as well.
+
+ __down_failed takes the semaphore address in $24, and
+ its return address in $28. The pv is loaded as usual.
+ The gp is clobbered (in the module case) as usual. */
+
+ __asm__ __volatile__ (
+ "/* semaphore down operation */\n"
+ "1: ldl_l $27,%3\n"
+ " subl $27,1,$27\n"
+ " mov $27,$28\n"
+ " stl_c $28,%0\n"
+ " beq $28,2f\n"
+ " blt $27,3f\n"
+ /* Got the semaphore no contention. Set owner and depth. */
+ " stq $8,%1\n"
+ " lda $28,1\n"
+ " wmb\n"
+ " stq $28,%2\n"
+ "4: mb\n"
+ ".section .text2,\"ax\"\n"
+ "2: br 1b\n"
+ "3: lda $24,%3\n"
+ " jsr $28,__down_failed\n"
+ " ldgp $29,0($28)\n"
+ " br 4b\n"
+ ".previous"
+ : "=m"(sem->count), "=m"(sem->owner), "=m"(sem->owner_depth)
+ : "m"(sem->count)
+ : "$24", "$27", "$28", "memory");
}
extern inline int down_interruptible(struct semaphore * sem)
{
- int ret = 0;
- if (atomic_dec_return(&sem->count) < 0)
- ret = __down_interruptible(sem);
+ /* __down_failed_interruptible takes the semaphore address in $24,
+ and its return address in $28. The pv is loaded as usual. The return
+ The gp is clobbered (in the module case) as usual. The return
+ value is in $24. */
+
+ register int ret __asm__("$24");
+
+ __asm__ __volatile__ (
+ "/* semaphore down interruptible operation */\n"
+ "1: ldl_l $27,%4\n"
+ " subl $27,1,$27\n"
+ " mov $27,$28\n"
+ " stl_c $28,%1\n"
+ " beq $28,2f\n"
+ " blt $27,3f\n"
+ /* Got the semaphore no contention. Set owner and depth. */
+ " stq $8,%2\n"
+ " lda $28,1\n"
+ " wmb\n"
+ " stq $28,%3\n"
+ " mov $31,$24\n"
+ "4: mb\n"
+ ".section .text2,\"ax\"\n"
+ "2: br 1b\n"
+ "3: lda $24,%4\n"
+ " jsr $28,__down_failed_interruptible\n"
+ " ldgp $29,0($28)\n"
+ " br 4b\n"
+ ".previous"
+ : "=r"(ret), "=m"(sem->count), "=m"(sem->owner),
+ "=m"(sem->owner_depth)
+ : "m"(sem->count)
+ : "$27", "$28", "memory");
+
return ret;
}
extern inline void up(struct semaphore * sem)
{
- if (atomic_inc_return(&sem->count) <= 0)
- __up(sem);
-}
+ /* Given that we have to use particular hard registers to
+ communicate with __up_wakeup anyway, reuse them in
+ the atomic operation as well.
+
+ __up_wakeup takes the semaphore address in $24, and
+ its return address in $28. The pv is loaded as usual.
+ The gp is clobbered (in the module case) as usual. */
+
+ __asm__ __volatile__ (
+ "/* semaphore up operation */\n"
+ " mb\n"
+ "1: ldl_l $27,%1\n"
+ " addl $27,1,$27\n"
+ " mov $27,$28\n"
+ " stl_c $28,%0\n"
+ " beq $28,2f\n"
+ " mb\n"
+ " ble $27,3f\n"
+ "4:\n"
+ ".section .text2,\"ax\"\n"
+ "2: br 1b\n"
+ "3: lda $24,%1\n"
+ " bgt %2,4b\n"
+ " jsr $28,__up_wakeup\n"
+ " ldgp $29,0($28)\n"
+ " br 4b\n"
+ ".previous"
+ : "=m"(sem->count)
+ : "m"(sem->count), "r"(--sem->owner_depth)
+ : "$24", "$27", "$28", "memory");
+}
#endif
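The "tricky bits" walkthrough in the new comment can be followed with a plain C model of the bookkeeping. The sketch below mirrors only the count/waking/owner/owner_depth accounting for the single-task, two-downs trace; it has none of the atomicity, barriers, or sleeping of the real code:

```c
#include <stdio.h>

struct task { int id; };

struct sem {
	int count;                 /* free slots; goes negative under contention */
	int waking;                /* wakeups granted but not yet consumed */
	struct task *owner;
	long owner_depth;
};

static void down(struct sem *s, struct task *t)
{
	if (--s->count >= 0) {     /* uncontended fast path */
		s->owner = t;
		s->owner_depth = 1;
		return;
	}
	/* slow path: consume a wakeup slot, or recurse if we already own it */
	if (s->waking > 0 || (s->owner_depth != 0 && s->owner == t)) {
		if (s->waking > 0)
			s->waking--;
		s->owner = t;
		s->owner_depth++;
		return;
	}
	/* the real kernel would put the task to sleep here */
}

static void up(struct sem *s)
{
	s->owner_depth--;
	if (++s->count <= 0 && s->owner_depth <= 0)
		s->waking++;           /* hand a slot to a sleeper */
}

static void show(const struct sem *s)
{
	printf("count=%d waking=%d depth=%ld\n",
	       s->count, s->waking, s->owner_depth);
}

int main(void)
{
	struct task a = { 1 };
	struct sem s = { 1, 0, NULL, 0 };

	down(&s, &a); show(&s);    /* count=0  waking=0 depth=1 */
	down(&s, &a); show(&s);    /* count=-1 waking=0 depth=2 */
	up(&s);       show(&s);    /* count=0  waking=0 depth=1 */
	up(&s);       show(&s);    /* count=1  waking=0 depth=0 */
	return 0;
}
```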
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index 9d5e8e084..04be8487d 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -12,7 +12,8 @@ struct cpuinfo_alpha {
unsigned long *pgd_cache;
unsigned long *pte_cache;
unsigned long pgtable_cache_sz;
-};
+ unsigned long ipi_count;
+} __attribute__((aligned(32)));
extern struct cpuinfo_alpha cpu_data[NR_CPUS];
@@ -36,9 +37,6 @@ static __inline__ unsigned char hard_smp_processor_id(void)
#define smp_processor_id() (current->processor)
#define cpu_logical_map(cpu) (cpu)
-/* For the benefit of panic. */
-void smp_message_pass(int target, int msg, unsigned long data, int wait);
-
#endif /* __SMP__ */
#define NO_PROC_ID (-1)
diff --git a/include/asm-alpha/softirq.h b/include/asm-alpha/softirq.h
index b47281a3b..41ccc29c9 100644
--- a/include/asm-alpha/softirq.h
+++ b/include/asm-alpha/softirq.h
@@ -27,7 +27,7 @@ static inline void clear_active_bhs(unsigned long x)
extern inline void init_bh(int nr, void (*routine)(void))
{
bh_base[nr] = routine;
- bh_mask_count[nr] = 0;
+ atomic_set(&bh_mask_count[nr], 0);
bh_mask |= 1 << nr;
}
@@ -116,12 +116,13 @@ extern inline void end_bh_atomic(void)
extern inline void disable_bh(int nr)
{
bh_mask &= ~(1 << nr);
- bh_mask_count[nr]++;
+ atomic_inc(&bh_mask_count[nr]);
+ synchronize_bh();
}
extern inline void enable_bh(int nr)
{
- if (!--bh_mask_count[nr])
+ if (atomic_dec_and_test(&bh_mask_count[nr]))
bh_mask |= 1 << nr;
}
diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h
index 63837b195..b5fe62ddf 100644
--- a/include/asm-alpha/spinlock.h
+++ b/include/asm-alpha/spinlock.h
@@ -88,12 +88,12 @@ typedef struct {
} spinlock_t;
#if DEBUG_SPINLOCK
-#define SPIN_LOCK_UNLOCKED {0, 1, 0, 0, 0, 0}
+#define SPIN_LOCK_UNLOCKED (spinlock_t) {0, 1, 0, 0, 0, 0}
#define spin_lock_init(x) \
((x)->lock = 0, (x)->target_ipl = 0, (x)->debug_state = 1, \
(x)->previous = 0, (x)->task = 0)
#else
-#define SPIN_LOCK_UNLOCKED { 0 }
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#define spin_lock_init(x) ((x)->lock = 0)
#endif
@@ -163,7 +163,7 @@ static inline void spin_lock(spinlock_t * lock)
typedef struct { volatile int write_lock:1, read_counter:31; } rwlock_t;
-#define RW_LOCK_UNLOCKED { 0, 0 }
+#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
#if DEBUG_RWLOCK
extern void write_lock(rwlock_t * lock);
@@ -207,7 +207,7 @@ static inline void read_lock(rwlock_t * lock)
" br 1b\n"
".previous"
: "=m" (__dummy_lock(lock)), "=&r" (regx)
- : "0" (__dummy_lock(lock))
+ : "m" (__dummy_lock(lock))
);
}
#endif /* DEBUG_RWLOCK */
@@ -230,7 +230,7 @@ static inline void read_unlock(rwlock_t * lock)
"6: br 1b\n"
".previous"
: "=m" (__dummy_lock(lock)), "=&r" (regx)
- : "0" (__dummy_lock(lock)));
+ : "m" (__dummy_lock(lock)));
}
#define read_lock_irq(lock) (__cli(), read_lock(lock))
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 798207747..626495adf 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -107,6 +107,9 @@ extern void alpha_switch_to(unsigned long pctxp);
#define mb() \
__asm__ __volatile__("mb": : :"memory")
+#define rmb() \
+__asm__ __volatile__("mb": : :"memory")
+
#define wmb() \
__asm__ __volatile__("wmb": : :"memory")
@@ -116,6 +119,24 @@ __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
#define draina() \
__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")
+
+static inline unsigned long
+wrperfmon(unsigned long perf_fun, unsigned long arg)
+{
+ register unsigned long __r0 __asm__("$0");
+ register unsigned long __r16 __asm__("$16");
+ register unsigned long __r17 __asm__("$17");
+ __r16 = perf_fun;
+ __r17 = arg;
+ __asm__ __volatile__(
+ "call_pal %1"
+ : "=r"(__r0)
+ : "i"(PAL_wrperfmon), "r"(__r16), "r"(__r17)
+ : "$1", "$22", "$23", "$24", "$25", "$26");
+ return __r0;
+}
+
+
#define call_pal1(palno,arg) \
({ \
register unsigned long __r0 __asm__("$0"); \
@@ -226,14 +247,14 @@ extern __inline__ unsigned long xchg_u32(volatile int *m, unsigned long val)
__asm__ __volatile__(
"1: ldl_l %0,%2\n"
- " bis %3,%3,%1\n"
+ " bis $31,%3,%1\n"
" stl_c %1,%2\n"
" beq %1,2f\n"
".section .text2,\"ax\"\n"
"2: br 1b\n"
".previous"
: "=&r" (val), "=&r" (dummy), "=m" (*m)
- : "r" (val), "m" (*m));
+ : "rI" (val), "m" (*m));
return val;
}
@@ -244,14 +265,14 @@ extern __inline__ unsigned long xchg_u64(volatile long * m, unsigned long val)
__asm__ __volatile__(
"1: ldq_l %0,%2\n"
- " bis %3,%3,%1\n"
+ " bis $31,%3,%1\n"
" stq_c %1,%2\n"
" beq %1,2f\n"
".section .text2,\"ax\"\n"
"2: br 1b\n"
".previous"
: "=&r" (val), "=&r" (dummy), "=m" (*m)
- : "r" (val), "m" (*m));
+ : "rI" (val), "m" (*m));
return val;
}
diff --git a/include/asm-alpha/termbits.h b/include/asm-alpha/termbits.h
index 0d0a33251..675231bca 100644
--- a/include/asm-alpha/termbits.h
+++ b/include/asm-alpha/termbits.h
@@ -125,6 +125,17 @@ struct termios {
#define B115200 00021
#define B230400 00022
#define B460800 00023
+#define B500000 00024
+#define B576000 00025
+#define B921600 00026
+#define B1000000 00027
+#define B1152000 00030
+#define B1500000 00031
+#define B2000000 00032
+#define B2500000 00033
+#define B3000000 00034
+#define B3500000 00035
+#define B4000000 00036
#define CSIZE 00001400
#define CS5 00000000
diff --git a/include/asm-alpha/termios.h b/include/asm-alpha/termios.h
index a16407c8e..015e2debb 100644
--- a/include/asm-alpha/termios.h
+++ b/include/asm-alpha/termios.h
@@ -77,6 +77,8 @@ struct termio {
#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
#define N_R3964 9 /* Reserved for Simatic R3964 module */
#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
+#define N_IRDA 11 /* Linux IrDa - http://www.cs.uit.no/~dagb/irda/irda.html */
+#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */
#ifdef __KERNEL__
/* eof=^D eol=\0 eol2=\0 erase=del
diff --git a/include/asm-alpha/timex.h b/include/asm-alpha/timex.h
index c0bfb799c..2576553b3 100644
--- a/include/asm-alpha/timex.h
+++ b/include/asm-alpha/timex.h
@@ -8,4 +8,23 @@
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+/*
+ * Standard way to access the cycle counter.
+ * Currently only used on SMP for scheduling.
+ *
+ * Only the low 32 bits are available as a continuously counting entity.
+ * But this only means we'll force a reschedule every 8 seconds or so,
+ * which isn't an evil thing.
+ */
+
+typedef unsigned int cycles_t;
+extern cycles_t cacheflush_time;
+
+static inline cycles_t get_cycles (void)
+{
+ cycles_t ret;
+ __asm__ __volatile__ ("rpcc %0" : "=r"(ret));
+ return ret;
+}
+
#endif
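A hypothetical use of get_cycles() to time a short stretch of code. The rpcc path only applies when building for Alpha (a zero placeholder keeps the sketch compiling elsewhere), and unsigned 32-bit subtraction keeps the result meaningful across a single wrap of the counter:

```c
#include <stdio.h>

typedef unsigned int cycles_t;

static inline cycles_t get_cycles(void)
{
#ifdef __alpha__
	cycles_t ret;
	__asm__ __volatile__ ("rpcc %0" : "=r"(ret));
	return ret;
#else
	return 0;   /* placeholder when not building for Alpha */
#endif
}

int main(void)
{
	volatile unsigned long sink = 0;
	unsigned long i;

	cycles_t start = get_cycles();
	for (i = 0; i < 1000000; i++)
		sink += i;
	cycles_t end = get_cycles();

	/* unsigned subtraction stays correct across one counter wrap */
	printf("elapsed: %u cycles (sink=%lu)\n", end - start, (unsigned long)sink);
	return 0;
}
```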
diff --git a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h
index 9de6e47ad..df20edc7f 100644
--- a/include/asm-alpha/unistd.h
+++ b/include/asm-alpha/unistd.h
@@ -67,7 +67,7 @@
#define __NR_getpgrp 63
#define __NR_getpagesize 64
#define __NR_osf_mremap 65 /* not implemented */
-#define __NR_osf_vfork 66
+#define __NR_vfork 66
#define __NR_stat 67
#define __NR_lstat 68
#define __NR_osf_sbrk 69 /* not implemented */
@@ -307,6 +307,7 @@
#define __NR_getcwd 367
#define __NR_capget 368
#define __NR_capset 369
+#define __NR_sendfile 370
#if defined(__LIBRARY__) && defined(__GNUC__)