summaryrefslogtreecommitdiffstats
path: root/include/asm-alpha
diff options
context:
space:
mode:
Diffstat (limited to 'include/asm-alpha')
-rw-r--r--include/asm-alpha/apecs.h10
-rw-r--r--include/asm-alpha/bitops.h17
-rw-r--r--include/asm-alpha/checksum.h7
-rw-r--r--include/asm-alpha/cia.h164
-rw-r--r--include/asm-alpha/hardirq.h60
-rw-r--r--include/asm-alpha/hwrpb.h19
-rw-r--r--include/asm-alpha/io.h12
-rw-r--r--include/asm-alpha/irq.h51
-rw-r--r--include/asm-alpha/keyboard.h4
-rw-r--r--include/asm-alpha/lca.h11
-rw-r--r--include/asm-alpha/mcpcia.h668
-rw-r--r--include/asm-alpha/mmu_context.h67
-rw-r--r--include/asm-alpha/pgtable.h27
-rw-r--r--include/asm-alpha/processor.h3
-rw-r--r--include/asm-alpha/pyxis.h356
-rw-r--r--include/asm-alpha/serial.h75
-rw-r--r--include/asm-alpha/smp.h47
-rw-r--r--include/asm-alpha/smp_lock.h105
-rw-r--r--include/asm-alpha/softirq.h60
-rw-r--r--include/asm-alpha/spinlock.h158
-rw-r--r--include/asm-alpha/string.h43
-rw-r--r--include/asm-alpha/system.h92
-rw-r--r--include/asm-alpha/t2.h228
-rw-r--r--include/asm-alpha/termios.h2
-rw-r--r--include/asm-alpha/tsunami.h483
-rw-r--r--include/asm-alpha/unistd.h6
26 files changed, 2571 insertions, 204 deletions
diff --git a/include/asm-alpha/apecs.h b/include/asm-alpha/apecs.h
index 161813515..36ac35922 100644
--- a/include/asm-alpha/apecs.h
+++ b/include/asm-alpha/apecs.h
@@ -77,8 +77,18 @@
#else /* CONFIG_ALPHA_XL */
/* these are for normal APECS family machines, AVANTI/MUSTANG/EB64/PC64 */
+#ifdef CONFIG_ALPHA_SRM_SETUP
+/* if we are using the SRM PCI setup, we'll need to use variables instead */
+#define APECS_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
+#define APECS_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
+
+extern unsigned int APECS_DMA_WIN_BASE;
+extern unsigned int APECS_DMA_WIN_SIZE;
+
+#else /* SRM_SETUP */
#define APECS_DMA_WIN_BASE (1024*1024*1024)
#define APECS_DMA_WIN_SIZE (1024*1024*1024)
+#endif /* SRM_SETUP */
#endif /* CONFIG_ALPHA_XL */
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index bec44fe54..70c857af7 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -17,7 +17,7 @@
* bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
*/
-extern __inline__ void set_bit(unsigned long nr, void * addr)
+extern __inline__ void set_bit(unsigned long nr, volatile void * addr)
{
unsigned long oldbit;
unsigned long temp;
@@ -38,7 +38,7 @@ extern __inline__ void set_bit(unsigned long nr, void * addr)
:"Ir" (1UL << (nr & 31)), "m" (*m));
}
-extern __inline__ void clear_bit(unsigned long nr, void * addr)
+extern __inline__ void clear_bit(unsigned long nr, volatile void * addr)
{
unsigned long oldbit;
unsigned long temp;
@@ -59,7 +59,7 @@ extern __inline__ void clear_bit(unsigned long nr, void * addr)
:"Ir" (1UL << (nr & 31)), "m" (*m));
}
-extern __inline__ void change_bit(unsigned long nr, void * addr)
+extern __inline__ void change_bit(unsigned long nr, volatile void * addr)
{
unsigned long temp;
unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
@@ -76,7 +76,8 @@ extern __inline__ void change_bit(unsigned long nr, void * addr)
:"Ir" (1UL << (nr & 31)), "m" (*m));
}
-extern __inline__ unsigned long test_and_set_bit(unsigned long nr, void * addr)
+extern __inline__ unsigned long test_and_set_bit(unsigned long nr,
+ volatile void * addr)
{
unsigned long oldbit;
unsigned long temp;
@@ -99,7 +100,8 @@ extern __inline__ unsigned long test_and_set_bit(unsigned long nr, void * addr)
return oldbit != 0;
}
-extern __inline__ unsigned long test_and_clear_bit(unsigned long nr, void * addr)
+extern __inline__ unsigned long test_and_clear_bit(unsigned long nr,
+ volatile void * addr)
{
unsigned long oldbit;
unsigned long temp;
@@ -122,7 +124,8 @@ extern __inline__ unsigned long test_and_clear_bit(unsigned long nr, void * addr
return oldbit != 0;
}
-extern __inline__ unsigned long test_and_change_bit(unsigned long nr, void * addr)
+extern __inline__ unsigned long test_and_change_bit(unsigned long nr,
+ volatile void * addr)
{
unsigned long oldbit;
unsigned long temp;
@@ -143,7 +146,7 @@ extern __inline__ unsigned long test_and_change_bit(unsigned long nr, void * add
return oldbit != 0;
}
-extern __inline__ unsigned long test_bit(int nr, const void * addr)
+extern __inline__ unsigned long test_bit(int nr, volatile void * addr)
{
return 1UL & (((const int *) addr)[nr >> 5] >> (nr & 31));
}
diff --git a/include/asm-alpha/checksum.h b/include/asm-alpha/checksum.h
index 6cf76c5a7..2f6b82e6c 100644
--- a/include/asm-alpha/checksum.h
+++ b/include/asm-alpha/checksum.h
@@ -18,6 +18,10 @@ extern unsigned short int csum_tcpudp_magic(unsigned long saddr,
unsigned short proto,
unsigned int sum);
+unsigned int csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
+ unsigned short len, unsigned short proto,
+ unsigned int sum);
+
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
@@ -55,6 +59,9 @@ unsigned int csum_partial_copy(const char *src, char *dst, int len, unsigned int
*/
unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len, unsigned int sum, int *errp);
+unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum);
+
+
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
diff --git a/include/asm-alpha/cia.h b/include/asm-alpha/cia.h
index 963cfa8c1..38269eaae 100644
--- a/include/asm-alpha/cia.h
+++ b/include/asm-alpha/cia.h
@@ -74,11 +74,23 @@
#define BYTE_ENABLE_SHIFT 5
#define TRANSFER_LENGTH_SHIFT 3
-#define MEM_SP1_MASK 0x1fffffff /* Mem sparse space 1 mask is 29 bits */
+#define MEM_R1_MASK 0x1fffffff /* SPARSE Mem region 1 mask is 29 bits */
+#define MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
+#define MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
-#define CIA_DMA_WIN_BASE (1024UL*1024UL*1024UL)
+#ifdef CONFIG_ALPHA_SRM_SETUP
+/* if we are using the SRM PCI setup, we'll need to use variables instead */
+#define CIA_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
+#define CIA_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
+
+extern unsigned int CIA_DMA_WIN_BASE;
+extern unsigned int CIA_DMA_WIN_SIZE;
+
+#else /* SRM_SETUP */
+#define CIA_DMA_WIN_BASE (1024*1024*1024)
#define CIA_DMA_WIN_SIZE (1024*1024*1024)
+#endif /* SRM_SETUP */
/*
* 21171-CA Control and Status Registers (p4-1)
@@ -86,6 +98,7 @@
#define CIA_IOC_CIA_REV (IDENT_ADDR + 0x8740000080UL)
#define CIA_IOC_PCI_LAT (IDENT_ADDR + 0x87400000C0UL)
#define CIA_IOC_CIA_CTRL (IDENT_ADDR + 0x8740000100UL)
+#define CIA_IOC_CIA_CNFG (IDENT_ADDR + 0x8740000140UL)
#define CIA_IOC_HAE_MEM (IDENT_ADDR + 0x8740000400UL)
#define CIA_IOC_HAE_IO (IDENT_ADDR + 0x8740000440UL)
#define CIA_IOC_CFG (IDENT_ADDR + 0x8740000480UL)
@@ -119,18 +132,25 @@
#define CIA_IOC_PCI_ERR3 (IDENT_ADDR + 0x8740008880UL)
/*
- * 2117A-CA PCI Address Translation Registers. I've only defined
- * the first window fully as that's the only one that we're currently using.
- * The other window bases are needed to disable the windows.
+ * 2117A-CA PCI Address Translation Registers.
*/
#define CIA_IOC_PCI_TBIA (IDENT_ADDR + 0x8760000100UL)
+
#define CIA_IOC_PCI_W0_BASE (IDENT_ADDR + 0x8760000400UL)
#define CIA_IOC_PCI_W0_MASK (IDENT_ADDR + 0x8760000440UL)
#define CIA_IOC_PCI_T0_BASE (IDENT_ADDR + 0x8760000480UL)
#define CIA_IOC_PCI_W1_BASE (IDENT_ADDR + 0x8760000500UL)
+#define CIA_IOC_PCI_W1_MASK (IDENT_ADDR + 0x8760000540UL)
+#define CIA_IOC_PCI_T1_BASE (IDENT_ADDR + 0x8760000580UL)
+
#define CIA_IOC_PCI_W2_BASE (IDENT_ADDR + 0x8760000600UL)
+#define CIA_IOC_PCI_W2_MASK (IDENT_ADDR + 0x8760000640UL)
+#define CIA_IOC_PCI_T2_BASE (IDENT_ADDR + 0x8760000680UL)
+
#define CIA_IOC_PCI_W3_BASE (IDENT_ADDR + 0x8760000700UL)
+#define CIA_IOC_PCI_W3_MASK (IDENT_ADDR + 0x8760000740UL)
+#define CIA_IOC_PCI_T3_BASE (IDENT_ADDR + 0x8760000780UL)
/*
* 21171-CA System configuration registers (p4-3)
@@ -155,6 +175,8 @@
#define CIA_CONF (IDENT_ADDR + 0x8700000000UL)
#define CIA_IO (IDENT_ADDR + 0x8580000000UL)
#define CIA_SPARSE_MEM (IDENT_ADDR + 0x8000000000UL)
+#define CIA_SPARSE_MEM_R2 (IDENT_ADDR + 0x8400000000UL)
+#define CIA_SPARSE_MEM_R3 (IDENT_ADDR + 0x8500000000UL)
#define CIA_DENSE_MEM (IDENT_ADDR + 0x8600000000UL)
/*
@@ -296,13 +318,125 @@ extern inline void __outl(unsigned int b, unsigned long addr)
*
*/
+#ifdef CONFIG_ALPHA_SRM_SETUP
+
+extern unsigned long cia_sm_base_r1, cia_sm_base_r2, cia_sm_base_r3;
+
+extern inline unsigned long __readb(unsigned long addr)
+{
+ unsigned long result, shift, work;
+
+ if ((addr >= cia_sm_base_r1) &&
+ (addr <= (cia_sm_base_r1 + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + CIA_SPARSE_MEM + 0x00);
+ else
+ if ((addr >= cia_sm_base_r2) &&
+ (addr <= (cia_sm_base_r2 + MEM_R2_MASK)))
+ work = (((addr & MEM_R2_MASK) << 5) + CIA_SPARSE_MEM_R2 + 0x00);
+ else
+ if ((addr >= cia_sm_base_r3) &&
+ (addr <= (cia_sm_base_r3 + MEM_R3_MASK)))
+ work = (((addr & MEM_R3_MASK) << 5) + CIA_SPARSE_MEM_R3 + 0x00);
+ else
+ {
+#if 0
+ printk("__readb: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return 0x0ffUL;
+ }
+ shift = (addr & 0x3) << 3;
+ result = *(vuip) work;
+ result >>= shift;
+ return 0x0ffUL & result;
+}
+
+extern inline unsigned long __readw(unsigned long addr)
+{
+ unsigned long result, shift, work;
+
+ if ((addr >= cia_sm_base_r1) &&
+ (addr <= (cia_sm_base_r1 + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + CIA_SPARSE_MEM + 0x08);
+ else
+ if ((addr >= cia_sm_base_r2) &&
+ (addr <= (cia_sm_base_r2 + MEM_R2_MASK)))
+ work = (((addr & MEM_R2_MASK) << 5) + CIA_SPARSE_MEM_R2 + 0x08);
+ else
+ if ((addr >= cia_sm_base_r3) &&
+ (addr <= (cia_sm_base_r3 + MEM_R3_MASK)))
+ work = (((addr & MEM_R3_MASK) << 5) + CIA_SPARSE_MEM_R3 + 0x08);
+ else
+ {
+#if 0
+ printk("__readw: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return 0x0ffffUL;
+ }
+ shift = (addr & 0x3) << 3;
+ result = *(vuip) work;
+ result >>= shift;
+ return 0x0ffffUL & result;
+}
+
+extern inline void __writeb(unsigned char b, unsigned long addr)
+{
+ unsigned long work;
+
+ if ((addr >= cia_sm_base_r1) &&
+ (addr <= (cia_sm_base_r1 + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + CIA_SPARSE_MEM + 0x00);
+ else
+ if ((addr >= cia_sm_base_r2) &&
+ (addr <= (cia_sm_base_r2 + MEM_R2_MASK)))
+ work = (((addr & MEM_R2_MASK) << 5) + CIA_SPARSE_MEM_R2 + 0x00);
+ else
+ if ((addr >= cia_sm_base_r3) &&
+ (addr <= (cia_sm_base_r3 + MEM_R3_MASK)))
+ work = (((addr & MEM_R3_MASK) << 5) + CIA_SPARSE_MEM_R3 + 0x00);
+ else
+ {
+#if 0
+ printk("__writeb: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return;
+ }
+ *(vuip) work = b * 0x01010101;
+}
+
+extern inline void __writew(unsigned short b, unsigned long addr)
+{
+ unsigned long work;
+
+ if ((addr >= cia_sm_base_r1) &&
+ (addr <= (cia_sm_base_r1 + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + CIA_SPARSE_MEM + 0x00);
+ else
+ if ((addr >= cia_sm_base_r2) &&
+ (addr <= (cia_sm_base_r2 + MEM_R2_MASK)))
+ work = (((addr & MEM_R2_MASK) << 5) + CIA_SPARSE_MEM_R2 + 0x00);
+ else
+ if ((addr >= cia_sm_base_r3) &&
+ (addr <= (cia_sm_base_r3 + MEM_R3_MASK)))
+ work = (((addr & MEM_R3_MASK) << 5) + CIA_SPARSE_MEM_R3 + 0x00);
+ else
+ {
+#if 0
+ printk("__writew: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return;
+ }
+ *(vuip) work = b * 0x00010001;
+}
+
+#else /* SRM_SETUP */
+
extern inline unsigned long __readb(unsigned long addr)
{
unsigned long result, shift, msb;
shift = (addr & 0x3) * 8 ;
msb = addr & 0xE0000000 ;
- addr &= MEM_SP1_MASK ;
+ addr &= MEM_R1_MASK ;
if (msb != hae.cache) {
set_hae(msb);
}
@@ -317,7 +451,7 @@ extern inline unsigned long __readw(unsigned long addr)
shift = (addr & 0x3) * 8;
msb = addr & 0xE0000000 ;
- addr &= MEM_SP1_MASK ;
+ addr &= MEM_R1_MASK ;
if (msb != hae.cache) {
set_hae(msb);
}
@@ -326,17 +460,12 @@ extern inline unsigned long __readw(unsigned long addr)
return 0xffffUL & result;
}
-extern inline unsigned long __readl(unsigned long addr)
-{
- return *(vuip) (addr + CIA_DENSE_MEM);
-}
-
extern inline void __writeb(unsigned char b, unsigned long addr)
{
unsigned long msb ;
msb = addr & 0xE0000000 ;
- addr &= MEM_SP1_MASK ;
+ addr &= MEM_R1_MASK ;
if (msb != hae.cache) {
set_hae(msb);
}
@@ -348,13 +477,20 @@ extern inline void __writew(unsigned short b, unsigned long addr)
unsigned long msb ;
msb = addr & 0xE0000000 ;
- addr &= MEM_SP1_MASK ;
+ addr &= MEM_R1_MASK ;
if (msb != hae.cache) {
set_hae(msb);
}
*(vuip) ((addr << 5) + CIA_SPARSE_MEM + 0x08) = b * 0x00010001;
}
+#endif /* SRM_SETUP */
+
+extern inline unsigned long __readl(unsigned long addr)
+{
+ return *(vuip) (addr + CIA_DENSE_MEM);
+}
+
extern inline void __writel(unsigned int b, unsigned long addr)
{
*(vuip) (addr + CIA_DENSE_MEM) = b;
diff --git a/include/asm-alpha/hardirq.h b/include/asm-alpha/hardirq.h
index 733216be9..556e744d2 100644
--- a/include/asm-alpha/hardirq.h
+++ b/include/asm-alpha/hardirq.h
@@ -17,7 +17,65 @@ extern unsigned int local_irq_count[NR_CPUS];
#else
-#error No habla alpha SMP
+/* initially just a straight copy of the i386 code */
+
+#include <asm/atomic.h>
+#include <asm/spinlock.h>
+#include <asm/system.h>
+#include <asm/smp.h>
+
+extern unsigned char global_irq_holder;
+extern spinlock_t global_irq_lock;
+extern atomic_t global_irq_count;
+
+static inline void release_irqlock(int cpu)
+{
+ /* if we didn't own the irq lock, just ignore.. */
+ if (global_irq_holder == (unsigned char) cpu) {
+ global_irq_holder = NO_PROC_ID;
+ spin_unlock(&global_irq_lock);
+ }
+}
+
+/* Ordering of the counter bumps is _deadly_ important. */
+static inline void hardirq_enter(int cpu)
+{
+ ++local_irq_count[cpu];
+ atomic_inc(&global_irq_count);
+}
+
+static inline void hardirq_exit(int cpu)
+{
+ atomic_dec(&global_irq_count);
+ --local_irq_count[cpu];
+}
+
+static inline int hardirq_trylock(int cpu)
+{
+ unsigned long flags;
+ int ret = 1;
+
+ __save_and_cli(flags);
+ if ((atomic_add_return(1, &global_irq_count) != 1) ||
+ (global_irq_lock.lock != 0)) {
+ atomic_dec(&global_irq_count);
+ __restore_flags(flags);
+ ret = 0;
+ } else {
+ ++local_irq_count[cpu];
+ __sti();
+ }
+ return ret;
+}
+
+#define hardirq_endlock(cpu) \
+ do { \
+ __cli(); \
+ hardirq_exit(cpu); \
+ __sti(); \
+ } while (0)
+
+extern void synchronize_irq(void);
#endif /* __SMP__ */
#endif /* _ALPHA_HARDIRQ_H */
diff --git a/include/asm-alpha/hwrpb.h b/include/asm-alpha/hwrpb.h
index 4f6c5d01e..6909f0b6b 100644
--- a/include/asm-alpha/hwrpb.h
+++ b/include/asm-alpha/hwrpb.h
@@ -16,7 +16,7 @@
#define EV56_CPU 7 /* EV5.6 (21164) */
#define EV6_CPU 8 /* EV6 (21164) */
#define PCA56_CPU 9 /* PCA56 (21164PC) */
-#define PCA57_CPU 10 /* PCA57 (??) */
+#define PCA57_CPU 10 /* PCA57 (21164??) */
/*
* DEC system types for Alpha systems. Found in HWRPB.
@@ -34,14 +34,12 @@
#define ST_DEC_AXPPCI_33 11 /* NoName system type */
#define ST_DEC_TLASER 12 /* Turbolaser systype */
#define ST_DEC_2100_A50 13 /* Avanti systype */
-#define ST_DEC_MUSTANG 14 /* Mustang systype */
#define ST_DEC_ALCOR 15 /* Alcor (EV5) systype */
#define ST_DEC_1000 17 /* Mikasa systype */
+#define ST_DEC_EB64 18 /* EB64 systype */
#define ST_DEC_EB66 19 /* EB66 systype */
#define ST_DEC_EB64P 20 /* EB64+ systype */
-#define ST_DEC_EB66P -19 /* EB66 systype */
-#define ST_DEC_EBPC64 -20 /* Cabriolet (AlphaPC64) systype */
-#define ST_DEC_BURNS 21 /* Laptop systype */
+#define ST_DEC_BURNS 21 /* laptop systype */
#define ST_DEC_RAWHIDE 22 /* Rawhide systype */
#define ST_DEC_K2 23 /* K2 systype */
#define ST_DEC_LYNX 24 /* Lynx systype */
@@ -49,7 +47,7 @@
#define ST_DEC_EB164 26 /* EB164 systype */
#define ST_DEC_NORITAKE 27 /* Noritake systype */
#define ST_DEC_CORTEX 28 /* Cortex systype */
-#define ST_DEC_MIATA 30 /* MIATA systype */
+#define ST_DEC_MIATA 30 /* Miata systype */
#define ST_DEC_XXM 31 /* XXM systype */
#define ST_DEC_TAKARA 32 /* Takara systype */
#define ST_DEC_YUKON 33 /* Yukon systype */
@@ -61,7 +59,6 @@
#define ST_UNOFFICIAL_BIAS 100
#define ST_DTI_RUFFIAN 101 /* RUFFIAN systype */
-
struct pcb_struct {
unsigned long ksp;
unsigned long usp;
@@ -139,6 +136,12 @@ struct memdesc_struct {
struct memclust_struct cluster[0];
};
+struct dsr_struct {
+ long smm; /* SMM number used by LMF */
+ unsigned long lurt_off; /* offset to LURT table */
+ unsigned long sysname_off; /* offset to sysname char count */
+};
+
struct hwrpb_struct {
unsigned long phys_addr; /* check: physical address of the hwrpb */
unsigned long id; /* check: "HWRPB\0\0\0" */
@@ -178,7 +181,7 @@ struct hwrpb_struct {
unsigned long chksum;
unsigned long rxrdy;
unsigned long txrdy;
- unsigned long dsrdbt_offset; /* "Dynamic System Recognition Data Block Table" Whee */
+ unsigned long dsr_offset; /* "Dynamic System Recognition Data Block Table" */
};
#endif
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
index da03e68db..d2668b973 100644
--- a/include/asm-alpha/io.h
+++ b/include/asm-alpha/io.h
@@ -25,7 +25,11 @@ extern struct hae {
/*
* Virtual -> physical identity mapping starts at this offset
*/
+#ifdef USE_48_BIT_KSEG
+#define IDENT_ADDR (0xffff800000000000UL)
+#else
#define IDENT_ADDR (0xfffffc0000000000UL)
+#endif
#ifdef __KERNEL__
@@ -38,11 +42,11 @@ extern struct hae {
*/
extern inline void set_hae(unsigned long new_hae)
{
- unsigned long ipl;
- ipl = swpipl(7);
+ unsigned long ipl = swpipl(7);
hae.cache = new_hae;
*hae.reg = new_hae;
mb();
+ new_hae = *hae.reg; /* read to make sure it was written */
setipl(ipl);
}
@@ -84,6 +88,10 @@ extern void _sethae (unsigned long addr); /* cached version */
# include <asm/t2.h> /* get chip-specific definitions */
#elif defined(CONFIG_ALPHA_PYXIS)
# include <asm/pyxis.h> /* get chip-specific definitions */
+#elif defined(CONFIG_ALPHA_TSUNAMI)
+# include <asm/tsunami.h> /* get chip-specific definitions */
+#elif defined(CONFIG_ALPHA_MCPCIA)
+# include <asm/mcpcia.h> /* get chip-specific definitions */
#else
# include <asm/jensen.h>
#endif
diff --git a/include/asm-alpha/irq.h b/include/asm-alpha/irq.h
index 5c81ff6a3..7140a1437 100644
--- a/include/asm-alpha/irq.h
+++ b/include/asm-alpha/irq.h
@@ -10,23 +10,56 @@
#include <linux/linkage.h>
#include <linux/config.h>
-#if defined(CONFIG_ALPHA_CABRIOLET) || defined(CONFIG_ALPHA_EB66P) || \
- defined(CONFIG_ALPHA_EB164) || defined(CONFIG_ALPHA_PC164) || \
+#if defined(CONFIG_ALPHA_CABRIOLET) || \
+ defined(CONFIG_ALPHA_EB66P) || \
+ defined(CONFIG_ALPHA_EB164) || \
+ defined(CONFIG_ALPHA_PC164) || \
defined(CONFIG_ALPHA_LX164)
-# define NR_IRQS 33
-#elif defined(CONFIG_ALPHA_EB66) || defined(CONFIG_ALPHA_EB64P) || \
+
+# define NR_IRQS 35
+
+#elif defined(CONFIG_ALPHA_EB66) || \
+ defined(CONFIG_ALPHA_EB64P) || \
defined(CONFIG_ALPHA_MIKASA)
+
# define NR_IRQS 32
-#elif defined(CONFIG_ALPHA_ALCOR) || defined(CONFIG_ALPHA_XLT) || \
- defined(CONFIG_ALPHA_MIATA) || defined(CONFIG_ALPHA_NORITAKE) || \
- defined(CONFIG_ALPHA_RUFFIAN)
+
+#elif defined(CONFIG_ALPHA_ALCOR) || \
+ defined(CONFIG_ALPHA_XLT) || \
+ defined(CONFIG_ALPHA_MIATA) || \
+ defined(CONFIG_ALPHA_RUFFIAN) || \
+ defined(CONFIG_ALPHA_NORITAKE)
+
# define NR_IRQS 48
-#elif defined(CONFIG_ALPHA_SABLE) || defined(CONFIG_ALPHA_SX164)
+
+#elif defined(CONFIG_ALPHA_SABLE) || \
+ defined(CONFIG_ALPHA_SX164)
+
# define NR_IRQS 40
-#else
+
+#elif defined(CONFIG_ALPHA_DP264) || \
+ defined(CONFIG_ALPHA_RAWHIDE)
+
+# define NR_IRQS 64
+
+#elif defined(CONFIG_ALPHA_TAKARA)
+
+# define NR_IRQS 20
+
+#else /* everyone else */
+
# define NR_IRQS 16
+
#endif
+static __inline__ int irq_cannonicalize(int irq)
+{
+ /*
+ * XXX is this true for all Alpha's? The old serial driver
+ * did it this way for years without any complaints, so....
+ */
+ return ((irq == 2) ? 9 : irq);
+}
extern void disable_irq(unsigned int);
extern void enable_irq(unsigned int);
diff --git a/include/asm-alpha/keyboard.h b/include/asm-alpha/keyboard.h
index d4db034b8..acdd45d83 100644
--- a/include/asm-alpha/keyboard.h
+++ b/include/asm-alpha/keyboard.h
@@ -3,7 +3,7 @@
*
* Created 3 Nov 1996 by Geert Uytterhoeven
*
- * $Id: keyboard.h,v 1.4 1997/08/05 09:44:28 ralf Exp $
+ * $Id: keyboard.h,v 1.5 1998/05/04 01:16:31 ralf Exp $
*/
/*
@@ -41,8 +41,6 @@ extern unsigned char pckbd_sysrq_xlate[128];
#define kbd_init_hw pckbd_init_hw
#define kbd_sysrq_xlate pckbd_sysrq_xlate
-#define INIT_KBD
-
#define SYSRQ_KEY 0x54
/*
diff --git a/include/asm-alpha/lca.h b/include/asm-alpha/lca.h
index 1eb35a2e4..f1f8a1ad7 100644
--- a/include/asm-alpha/lca.h
+++ b/include/asm-alpha/lca.h
@@ -52,10 +52,21 @@
* ugh).
*/
+#include <linux/config.h>
#include <asm/system.h>
+#ifdef CONFIG_ALPHA_SRM_SETUP
+/* if we are using the SRM PCI setup, we'll need to use variables instead */
+#define LCA_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
+#define LCA_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
+
+extern unsigned int LCA_DMA_WIN_BASE;
+extern unsigned int LCA_DMA_WIN_SIZE;
+
+#else /* SRM_SETUP */
#define LCA_DMA_WIN_BASE (1024*1024*1024)
#define LCA_DMA_WIN_SIZE (1024*1024*1024)
+#endif /* SRM_SETUP */
/*
* Memory Controller registers:
diff --git a/include/asm-alpha/mcpcia.h b/include/asm-alpha/mcpcia.h
new file mode 100644
index 000000000..fd4d48839
--- /dev/null
+++ b/include/asm-alpha/mcpcia.h
@@ -0,0 +1,668 @@
+#ifndef __ALPHA_MCPCIA__H__
+#define __ALPHA_MCPCIA__H__
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+/*
+ * MCPCIA is the internal name for a core logic chipset which provides
+ * PCI access for the RAWHIDE family of systems.
+ *
+ * This file is based on:
+ *
+ * RAWHIDE System Programmer's Manual
+ * 16-May-96
+ * Rev. 1.4
+ *
+ */
+
+/*------------------------------------------------------------------------**
+** **
+** I/O procedures **
+** **
+** inport[b|w|t|l], outport[b|w|t|l] 8:16:24:32 IO xfers **
+** inportbxt: 8 bits only **
+** inport: alias of inportw **
+** outport: alias of outportw **
+** **
+** inmem[b|w|t|l], outmem[b|w|t|l] 8:16:24:32 ISA memory xfers **
+** inmembxt: 8 bits only **
+** inmem: alias of inmemw **
+** outmem: alias of outmemw **
+** **
+**------------------------------------------------------------------------*/
+
+
+/* MCPCIA ADDRESS BIT DEFINITIONS
+ *
+ * 3 3 3 3|3 3 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |1| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |0|0|0|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | \_/ \_/
+ * | | |
+ * +-- IO space, not cached. Byte Enable --+ |
+ * Transfer Length --+
+ *
+ *
+ *
+ * Byte Transfer
+ * Enable Length Transfer Byte Address
+ * adr<6:5> adr<4:3> Length Enable Adder
+ * ---------------------------------------------
+ * 00 00 Byte 1110 0x000
+ * 01 00 Byte 1101 0x020
+ * 10 00 Byte 1011 0x040
+ * 11 00 Byte 0111 0x060
+ *
+ * 00 01 Word 1100 0x008
+ * 01 01 Word 1001 0x028 <= Not supported in this code.
+ * 10 01 Word 0011 0x048
+ *
+ * 00 10 Tribyte 1000 0x010
+ * 01 10 Tribyte 0001 0x030
+ *
+ * 10 11 Longword 0000 0x058
+ *
+ * Note that byte enables are asserted low.
+ *
+ */
+
+#define BYTE_ENABLE_SHIFT 5
+#define TRANSFER_LENGTH_SHIFT 3
+
+#define MEM_R1_MASK 0x1fffffff /* SPARSE Mem region 1 mask is 29 bits */
+#define MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
+#define MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
+
+#ifdef CONFIG_ALPHA_SRM_SETUP
+/* if we are using the SRM PCI setup, we'll need to use variables instead */
+#define MCPCIA_DMA_WIN_BASE_DEFAULT (2*1024*1024*1024U)
+#define MCPCIA_DMA_WIN_SIZE_DEFAULT (2*1024*1024*1024U)
+
+extern unsigned int MCPCIA_DMA_WIN_BASE;
+extern unsigned int MCPCIA_DMA_WIN_SIZE;
+
+#else /* SRM_SETUP */
+#define MCPCIA_DMA_WIN_BASE (2*1024*1024*1024UL)
+#define MCPCIA_DMA_WIN_SIZE (2*1024*1024*1024UL)
+#endif /* SRM_SETUP */
+
+#define HOSE(h) (((unsigned long)(h)) << 33)
+/*
+ * General Registers
+ */
+#define MCPCIA_REV(h) (IDENT_ADDR + 0xf9e0000000UL + HOSE(h))
+#define MCPCIA_WHOAMI(h) (IDENT_ADDR + 0xf9e0000040UL + HOSE(h))
+#define MCPCIA_PCI_LAT(h) (IDENT_ADDR + 0xf9e0000080UL + HOSE(h))
+#define MCPCIA_CAP_CTRL(h) (IDENT_ADDR + 0xf9e0000100UL + HOSE(h))
+#define MCPCIA_HAE_MEM(h) (IDENT_ADDR + 0xf9e0000400UL + HOSE(h))
+#define MCPCIA_HAE_IO(h) (IDENT_ADDR + 0xf9e0000440UL + HOSE(h))
+#if 0
+#define MCPCIA_IACK_SC(h) (IDENT_ADDR + 0xf9e0000480UL + HOSE(h))
+#endif
+#define MCPCIA_HAE_DENSE(h) (IDENT_ADDR + 0xf9e00004c0UL + HOSE(h))
+
+/*
+ * Interrupt Control registers
+ */
+#define MCPCIA_INT_CTL(h) (IDENT_ADDR + 0xf9e0000500UL + HOSE(h))
+#define MCPCIA_INT_REQ(h) (IDENT_ADDR + 0xf9e0000540UL + HOSE(h))
+#define MCPCIA_INT_TARG(h) (IDENT_ADDR + 0xf9e0000580UL + HOSE(h))
+#define MCPCIA_INT_ADR(h) (IDENT_ADDR + 0xf9e00005c0UL + HOSE(h))
+#define MCPCIA_INT_ADR_EXT(h) (IDENT_ADDR + 0xf9e0000600UL + HOSE(h))
+#define MCPCIA_INT_MASK0(h) (IDENT_ADDR + 0xf9e0000640UL + HOSE(h))
+#define MCPCIA_INT_MASK1(h) (IDENT_ADDR + 0xf9e0000680UL + HOSE(h))
+#define MCPCIA_INT_ACK0(h) (IDENT_ADDR + 0xf9f0003f00UL + HOSE(h))
+#define MCPCIA_INT_ACK1(h) (IDENT_ADDR + 0xf9e0003f40UL + HOSE(h))
+
+/*
+ * Performance Monitor registers
+ */
+#define MCPCIA_PERF_MONITOR(h) (IDENT_ADDR + 0xf9e0000300UL + HOSE(h))
+#define MCPCIA_PERF_CONTROL(h) (IDENT_ADDR + 0xf9e0000340UL + HOSE(h))
+
+/*
+ * Diagnostic Registers
+ */
+#define MCPCIA_CAP_DIAG(h) (IDENT_ADDR + 0xf9e0000700UL + HOSE(h))
+#define MCPCIA_TOP_OF_MEM(h) (IDENT_ADDR + 0xf9e00007c0UL + HOSE(h))
+
+/*
+ * Error registers
+ */
+#define MCPCIA_CAP_ERR(h) (IDENT_ADDR + 0xf9e0000880UL + HOSE(h))
+#define MCPCIA_PCI_ERR1(h) (IDENT_ADDR + 0xf9e0001040UL + HOSE(h))
+
+/*
+ * PCI Address Translation Registers.
+ */
+#define MCPCIA_SG_TBIA(h) (IDENT_ADDR + 0xf9e0001300UL + HOSE(h))
+#define MCPCIA_HBASE(h) (IDENT_ADDR + 0xf9e0001340UL + HOSE(h))
+
+#define MCPCIA_W0_BASE(h) (IDENT_ADDR + 0xf9e0001400UL + HOSE(h))
+#define MCPCIA_W0_MASK(h) (IDENT_ADDR + 0xf9e0001440UL + HOSE(h))
+#define MCPCIA_T0_BASE(h) (IDENT_ADDR + 0xf9e0001480UL + HOSE(h))
+
+#define MCPCIA_W1_BASE(h) (IDENT_ADDR + 0xf9e0001500UL + HOSE(h))
+#define MCPCIA_W1_MASK(h) (IDENT_ADDR + 0xf9e0001540UL + HOSE(h))
+#define MCPCIA_T1_BASE(h) (IDENT_ADDR + 0xf9e0001580UL + HOSE(h))
+
+#define MCPCIA_W2_BASE(h) (IDENT_ADDR + 0xf9e0001600UL + HOSE(h))
+#define MCPCIA_W2_MASK(h) (IDENT_ADDR + 0xf9e0001640UL + HOSE(h))
+#define MCPCIA_T2_BASE(h) (IDENT_ADDR + 0xf9e0001680UL + HOSE(h))
+
+#define MCPCIA_W3_BASE(h) (IDENT_ADDR + 0xf9e0001700UL + HOSE(h))
+#define MCPCIA_W3_MASK(h) (IDENT_ADDR + 0xf9e0001740UL + HOSE(h))
+#define MCPCIA_T3_BASE(h) (IDENT_ADDR + 0xf9e0001780UL + HOSE(h))
+
+/*
+ * Memory spaces:
+ */
+#define MCPCIA_CONF(h) (IDENT_ADDR + 0xf9c0000000UL + HOSE(h))
+#define MCPCIA_IO(h) (IDENT_ADDR + 0xf980000000UL + HOSE(h))
+#define MCPCIA_SPARSE(h) (IDENT_ADDR + 0xf800000000UL + HOSE(h))
+#define MCPCIA_DENSE(h) (IDENT_ADDR + 0xf900000000UL + HOSE(h))
+#define MCPCIA_IACK_SC(h) (IDENT_ADDR + 0xf9f0003f00UL + HOSE(h))
+
+#define HAE_ADDRESS MCPCIA_HAE_MEM(0)
+
+#ifdef __KERNEL__
+
+/*
+ * Translate physical memory address as seen on (PCI) bus into
+ * a kernel virtual address and vv.
+ */
+extern inline unsigned long virt_to_bus(void * address)
+{
+ return virt_to_phys(address) + MCPCIA_DMA_WIN_BASE;
+}
+
+extern inline void * bus_to_virt(unsigned long address)
+{
+ return phys_to_virt(address - MCPCIA_DMA_WIN_BASE);
+}
+
+/*
+ * I/O functions:
+ *
+ * MCPCIA, the RAWHIDE family PCI/memory support chipset for the EV5 (21164)
+ * and EV56 (21164a) processors, can use either a sparse address mapping
+ * scheme, or the so-called byte-word PCI address space, to get at PCI memory
+ * and I/O.
+ *
+ * Unfortunately, we can't use BWIO with EV5, so for now, we always use SPARSE.
+ */
+
+#define vuip volatile unsigned int *
+
+#ifdef DISABLE_BWIO_ENABLED
+
+extern inline unsigned int __inb(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldbu %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned char *)(addr+MCPCIA_BW_IO)));
+
+ return result;
+}
+
+extern inline void __outb(unsigned char b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stb %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned char *)(addr+MCPCIA_BW_IO)), "r" (b));
+}
+
+extern inline unsigned int __inw(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldwu %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned short *)(addr+MCPCIA_BW_IO)));
+
+ return result;
+}
+
+extern inline void __outw(unsigned short b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stw %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned short *)(addr+MCPCIA_BW_IO)), "r" (b));
+}
+
+extern inline unsigned int __inl(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldl %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned int *)(addr+MCPCIA_BW_IO)));
+
+ return result;
+}
+
+extern inline void __outl(unsigned int b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stl %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned int *)(addr+MCPCIA_BW_IO)), "r" (b));
+}
+
+#define inb(port) __inb((port))
+#define inw(port) __inw((port))
+#define inl(port) __inl((port))
+
+#define outb(x, port) __outb((x),(port))
+#define outw(x, port) __outw((x),(port))
+#define outl(x, port) __outl((x),(port))
+
+#else /* BWIO_ENABLED */
+
+extern inline unsigned int __inb(unsigned long in_addr)
+{
+ unsigned long addr = in_addr & 0xffffffffUL;
+ unsigned long hose = (in_addr >> 32) & 3;
+ long result = *(vuip) ((addr << 5) + MCPCIA_IO(hose) + 0x00);
+ result >>= (addr & 3) * 8;
+ return 0xffUL & result;
+}
+
+extern inline void __outb(unsigned char b, unsigned long in_addr)
+{
+ unsigned long addr = in_addr & 0xffffffffUL;
+ unsigned long hose = (in_addr >> 32) & 3;
+ unsigned int w;
+
+ asm ("insbl %2,%1,%0" : "r="(w) : "ri"(addr & 0x3), "r"(b));
+ *(vuip) ((addr << 5) + MCPCIA_IO(hose) + 0x00) = w;
+ mb();
+}
+
+extern inline unsigned int __inw(unsigned long in_addr)
+{
+ unsigned long addr = in_addr & 0xffffffffUL;
+ unsigned long hose = (in_addr >> 32) & 3;
+ long result = *(vuip) ((addr << 5) + MCPCIA_IO(hose) + 0x08);
+ result >>= (addr & 3) * 8;
+ return 0xffffUL & result;
+}
+
+extern inline void __outw(unsigned short b, unsigned long in_addr)
+{
+ unsigned long addr = in_addr & 0xffffffffUL;
+ unsigned long hose = (in_addr >> 32) & 3;
+ unsigned int w;
+
+ asm ("inswl %2,%1,%0" : "r="(w) : "ri"(addr & 0x3), "r"(b));
+ *(vuip) ((addr << 5) + MCPCIA_IO(hose) + 0x08) = w;
+ mb();
+}
+
+extern inline unsigned int __inl(unsigned long in_addr)
+{
+ unsigned long addr = in_addr & 0xffffffffUL;
+ unsigned long hose = (in_addr >> 32) & 3;
+ return *(vuip) ((addr << 5) + MCPCIA_IO(hose) + 0x18);
+}
+
+extern inline void __outl(unsigned int b, unsigned long in_addr)
+{
+ unsigned long addr = in_addr & 0xffffffffUL;
+ unsigned long hose = (in_addr >> 32) & 3;
+ *(vuip) ((addr << 5) + MCPCIA_IO(hose) + 0x18) = b;
+ mb();
+}
+
+#define inb(port) \
+(__builtin_constant_p((port))?__inb(port):_inb(port))
+
+#define outb(x, port) \
+(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
+
+#endif /* BWIO_ENABLED */
+
+
+/*
+ * Memory functions. 64-bit and 32-bit accesses are done through
+ * dense memory space, everything else through sparse space.
+ *
+ * For reading and writing 8 and 16 bit quantities we need to
+ * go through one of the three sparse address mapping regions
+ * and use the HAE_MEM CSR to provide some bits of the address.
+ * The following few routines use only sparse address region 1
+ * which gives 1Gbyte of accessible space which relates exactly
+ * to the amount of PCI memory mapping *into* system address space.
+ * See p 6-17 of the specification but it looks something like this:
+ *
+ * 21164 Address:
+ *
+ * 3 2 1
+ * 9876543210987654321098765432109876543210
+ * 1ZZZZ0.PCI.QW.Address............BBLL
+ *
+ * ZZ = SBZ
+ * BB = Byte offset
+ * LL = Transfer length
+ *
+ * PCI Address:
+ *
+ * 3 2 1
+ * 10987654321098765432109876543210
+ * HHH....PCI.QW.Address........ 00
+ *
+ * HHH = 31:29 HAE_MEM CSR
+ *
+ */
+
+#ifdef DISABLE_BWIO_ENABLED
+
+extern inline unsigned long __readb(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldbu %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned char *)(addr+MCPCIA_BW_MEM)));
+
+ return result;
+}
+
+extern inline unsigned long __readw(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldwu %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned short *)(addr+MCPCIA_BW_MEM)));
+
+ return result;
+}
+
+extern inline unsigned long __readl(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldl %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned int *)(addr+MCPCIA_BW_MEM)));
+
+ return result;
+}
+
+extern inline void __writeb(unsigned char b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stb %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned char *)(addr+MCPCIA_BW_MEM)), "r" (b));
+}
+
+extern inline void __writew(unsigned short b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stw %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned short *)(addr+MCPCIA_BW_MEM)), "r" (b));
+}
+
+extern inline void __writel(unsigned int b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stl %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned int *)(addr+MCPCIA_BW_MEM)), "r" (b));
+}
+
+#define readb(addr) __readb((addr))
+#define readw(addr) __readw((addr))
+
+#define writeb(b, addr) __writeb((b),(addr))
+#define writew(b, addr) __writew((b),(addr))
+
+#else /* BWIO_ENABLED */
+
+#ifdef CONFIG_ALPHA_SRM_SETUP
+
+extern unsigned long mcpcia_sm_base_r1, mcpcia_sm_base_r2, mcpcia_sm_base_r3;
+
+extern inline unsigned long __readb(unsigned long addr)
+{
+ unsigned long result, shift, work;
+
+ if ((addr >= mcpcia_sm_base_r1) &&
+ (addr <= (mcpcia_sm_base_r1 + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + MCPCIA_SPARSE_MEM + 0x00);
+ else
+ if ((addr >= mcpcia_sm_base_r2) &&
+ (addr <= (mcpcia_sm_base_r2 + MEM_R2_MASK)))
+ work = (((addr & MEM_R2_MASK) << 5) + MCPCIA_SPARSE_MEM_R2 + 0x00);
+ else
+ if ((addr >= mcpcia_sm_base_r3) &&
+ (addr <= (mcpcia_sm_base_r3 + MEM_R3_MASK)))
+ work = (((addr & MEM_R3_MASK) << 5) + MCPCIA_SPARSE_MEM_R3 + 0x00);
+ else
+ {
+#if 0
+ printk("__readb: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return 0x0ffUL;
+ }
+ shift = (addr & 0x3) << 3;
+ result = *(vuip) work;
+ result >>= shift;
+ return 0x0ffUL & result;
+}
+
+extern inline unsigned long __readw(unsigned long addr)
+{
+ unsigned long result, shift, work;
+
+ if ((addr >= mcpcia_sm_base_r1) &&
+ (addr <= (mcpcia_sm_base_r1 + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + MCPCIA_SPARSE_MEM + 0x08);
+ else
+ if ((addr >= mcpcia_sm_base_r2) &&
+ (addr <= (mcpcia_sm_base_r2 + MEM_R2_MASK)))
+ work = (((addr & MEM_R2_MASK) << 5) + MCPCIA_SPARSE_MEM_R2 + 0x08);
+ else
+ if ((addr >= mcpcia_sm_base_r3) &&
+ (addr <= (mcpcia_sm_base_r3 + MEM_R3_MASK)))
+ work = (((addr & MEM_R3_MASK) << 5) + MCPCIA_SPARSE_MEM_R3 + 0x08);
+ else
+ {
+#if 0
+ printk("__readw: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return 0x0ffffUL;
+ }
+ shift = (addr & 0x3) << 3;
+ result = *(vuip) work;
+ result >>= shift;
+ return 0x0ffffUL & result;
+}
+
+extern inline void __writeb(unsigned char b, unsigned long addr)
+{
+ unsigned long work;
+
+ if ((addr >= mcpcia_sm_base_r1) &&
+ (addr <= (mcpcia_sm_base_r1 + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + MCPCIA_SPARSE_MEM + 0x00);
+ else
+ if ((addr >= mcpcia_sm_base_r2) &&
+ (addr <= (mcpcia_sm_base_r2 + MEM_R2_MASK)))
+ work = (((addr & MEM_R2_MASK) << 5) + MCPCIA_SPARSE_MEM_R2 + 0x00);
+ else
+ if ((addr >= mcpcia_sm_base_r3) &&
+ (addr <= (mcpcia_sm_base_r3 + MEM_R3_MASK)))
+ work = (((addr & MEM_R3_MASK) << 5) + MCPCIA_SPARSE_MEM_R3 + 0x00);
+ else
+ {
+#if 0
+ printk("__writeb: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return;
+ }
+ *(vuip) work = b * 0x01010101;
+}
+
+extern inline void __writew(unsigned short b, unsigned long addr)
+{
+ unsigned long work;
+
+	if ((addr >= mcpcia_sm_base_r1) &&
+	    (addr <= (mcpcia_sm_base_r1 + MEM_R1_MASK)))
+		work = (((addr & MEM_R1_MASK) << 5) + MCPCIA_SPARSE_MEM + 0x08);
+	else
+	if ((addr >= mcpcia_sm_base_r2) &&
+	    (addr <= (mcpcia_sm_base_r2 + MEM_R2_MASK)))
+		work = (((addr & MEM_R2_MASK) << 5) + MCPCIA_SPARSE_MEM_R2 + 0x08);
+	else
+	if ((addr >= mcpcia_sm_base_r3) &&
+	    (addr <= (mcpcia_sm_base_r3 + MEM_R3_MASK)))
+		work = (((addr & MEM_R3_MASK) << 5) + MCPCIA_SPARSE_MEM_R3 + 0x08);
+ else
+ {
+#if 0
+ printk("__writew: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return;
+ }
+ *(vuip) work = b * 0x00010001;
+}
+
+#else /* SRM_SETUP */
+
+extern inline unsigned long __readb(unsigned long in_addr)
+{
+ unsigned long addr = in_addr & 0xffffffffUL;
+ unsigned long hose = (in_addr >> 32) & 3;
+ unsigned long result, shift, msb, work, temp;
+
+ shift = (addr & 0x3) << 3;
+ msb = addr & 0xE0000000UL;
+ temp = addr & MEM_R1_MASK;
+ if (msb != hae.cache) {
+ set_hae(msb);
+ }
+ work = ((temp << 5) + MCPCIA_SPARSE(hose) + 0x00);
+ result = *(vuip) work;
+ result >>= shift;
+ return 0x0ffUL & result;
+}
+
+extern inline unsigned long __readw(unsigned long in_addr)
+{
+ unsigned long addr = in_addr & 0xffffffffUL;
+ unsigned long hose = (in_addr >> 32) & 3;
+ unsigned long result, shift, msb, work, temp;
+
+ shift = (addr & 0x3) << 3;
+ msb = addr & 0xE0000000UL;
+ temp = addr & MEM_R1_MASK ;
+ if (msb != hae.cache) {
+ set_hae(msb);
+ }
+ work = ((temp << 5) + MCPCIA_SPARSE(hose) + 0x08);
+ result = *(vuip) work;
+ result >>= shift;
+ return 0x0ffffUL & result;
+}
+
+extern inline void __writeb(unsigned char b, unsigned long in_addr)
+{
+ unsigned long addr = in_addr & 0xffffffffUL;
+ unsigned long hose = (in_addr >> 32) & 3;
+ unsigned long msb;
+
+ msb = addr & 0xE0000000;
+ addr &= MEM_R1_MASK;
+ if (msb != hae.cache) {
+ set_hae(msb);
+ }
+ *(vuip) ((addr << 5) + MCPCIA_SPARSE(hose) + 0x00) = b * 0x01010101;
+}
+
+extern inline void __writew(unsigned short b, unsigned long in_addr)
+{
+ unsigned long addr = in_addr & 0xffffffffUL;
+ unsigned long hose = (in_addr >> 32) & 3;
+ unsigned long msb ;
+
+ msb = addr & 0xE0000000 ;
+ addr &= MEM_R1_MASK ;
+ if (msb != hae.cache) {
+ set_hae(msb);
+ }
+ *(vuip) ((addr << 5) + MCPCIA_SPARSE(hose) + 0x08) = b * 0x00010001;
+}
+#endif /* SRM_SETUP */
+
+extern inline unsigned long __readl(unsigned long in_addr)
+{
+ unsigned long addr = in_addr & 0xffffffffUL;
+ unsigned long hose = (in_addr >> 32) & 3;
+ return *(vuip) (addr + MCPCIA_DENSE(hose));
+}
+
+extern inline void __writel(unsigned int b, unsigned long in_addr)
+{
+ unsigned long addr = in_addr & 0xffffffffUL;
+ unsigned long hose = (in_addr >> 32) & 3;
+ *(vuip) (addr + MCPCIA_DENSE(hose)) = b;
+}
+
+#endif /* BWIO_ENABLED */
+
+#define readl(a) __readl((unsigned long)(a))
+#define writel(v,a) __writel((v),(unsigned long)(a))
+
+#undef vuip
+
+struct linux_hose_info {
+ struct pci_bus pci_bus;
+ struct linux_hose_info *next;
+ unsigned long pci_io_space;
+ unsigned long pci_mem_space;
+ unsigned long pci_config_space;
+ unsigned long pci_sparse_space;
+ unsigned int pci_first_busno;
+ unsigned int pci_last_busno;
+ unsigned int pci_hose_index;
+};
+
+extern unsigned long mcpcia_init (unsigned long mem_start,
+ unsigned long mem_end);
+extern unsigned long mcpcia_fixup (unsigned long mem_start,
+ unsigned long mem_end);
+
+#endif /* __KERNEL__ */
+
+/*
+ * Data structure for handling MCPCIA machine checks:
+ */
+struct el_MCPCIA_uncorrected_frame_mcheck {
+ struct el_common header;
+ struct el_common_EV5_uncorrectable_mcheck procdata;
+};
+
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_ADDR(x) (0x80 | (x))
+#define RTC_ALWAYS_BCD 0
+
+#endif /* __ALPHA_MCPCIA__H__ */
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 6f3f3672a..285fafa05 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -39,28 +39,33 @@
#define BROKEN_ASN 1
#endif
-extern unsigned long asn_cache;
+#ifdef __SMP__
+#define WIDTH_THIS_PROCESSOR 5
+/*
+ * last_asn[processor]:
+ * 63 0
+ * +-------------+----------------+--------------+
+ * | asn version | this processor | hardware asn |
+ * +-------------+----------------+--------------+
+ */
+extern unsigned long last_asn[];
+#define asn_cache last_asn[p->processor]
-#define ASN_VERSION_SHIFT 16
-#define ASN_VERSION_MASK ((~0UL) << ASN_VERSION_SHIFT)
-#define ASN_FIRST_VERSION (1UL << ASN_VERSION_SHIFT)
+#else
+#define WIDTH_THIS_PROCESSOR 0
+/*
+ * asn_cache:
+ * 63 0
+ * +------------------------------+--------------+
+ * | asn version | hardware asn |
+ * +------------------------------+--------------+
+ */
+extern unsigned long asn_cache;
+#endif /* __SMP__ */
-extern inline void get_new_mmu_context(struct task_struct *p,
- struct mm_struct *mm,
- unsigned long asn)
-{
- /* check if it's legal.. */
- if ((asn & ~ASN_VERSION_MASK) > MAX_ASN) {
- /* start a new version, invalidate all old asn's */
- tbiap(); imb();
- asn = (asn & ASN_VERSION_MASK) + ASN_FIRST_VERSION;
- if (!asn)
- asn = ASN_FIRST_VERSION;
- }
- asn_cache = asn + 1;
- mm->context = asn; /* full version + asn */
- p->tss.asn = asn & ~ASN_VERSION_MASK; /* just asn */
-}
+#define WIDTH_HARDWARE_ASN 7
+#define ASN_FIRST_VERSION (1UL << (WIDTH_THIS_PROCESSOR + WIDTH_HARDWARE_ASN))
+#define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1)
/*
* NOTE! The way this is set up, the high bits of the "asn_cache" (and
@@ -73,6 +78,23 @@ extern inline void get_new_mmu_context(struct task_struct *p,
* force a new asn for any other processes the next time they want to
* run.
*/
+extern inline void
+get_new_mmu_context(struct task_struct *p, struct mm_struct *mm)
+{
+ unsigned long asn = asn_cache;
+
+ if ((asn & HARDWARE_ASN_MASK) < MAX_ASN)
+ ++asn;
+ else {
+ tbiap();
+ imb();
+ asn = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
+ }
+ asn_cache = asn;
+ mm->context = asn; /* full version + asn */
+ p->tss.asn = asn & HARDWARE_ASN_MASK; /* just asn */
+}
+
extern inline void get_mmu_context(struct task_struct *p)
{
#ifndef BROKEN_ASN
@@ -81,8 +103,8 @@ extern inline void get_mmu_context(struct task_struct *p)
if (mm) {
unsigned long asn = asn_cache;
/* Check if our ASN is of an older version and thus invalid */
- if ((mm->context ^ asn) & ASN_VERSION_MASK)
- get_new_mmu_context(p, mm, asn);
+ if ((mm->context ^ asn) & ~HARDWARE_ASN_MASK)
+ get_new_mmu_context(p, mm);
}
#endif
}
@@ -91,3 +113,4 @@ extern inline void get_mmu_context(struct task_struct *p)
#define destroy_context(mm) do { } while(0)
#endif
+
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index fdb1e4d0b..3467e6103 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -8,6 +8,7 @@
* This hopefully works with any standard alpha page-size, as defined
* in <asm/page.h> (currently 8192).
*/
+#include <linux/config.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
@@ -29,6 +30,9 @@ static inline void reload_context(struct task_struct *task)
{
__asm__ __volatile__(
"bis %0,%0,$16\n\t"
+#ifdef CONFIG_ALPHA_DP264
+ "zap $16,0xe0,$16\n\t"
+#endif /* DP264 */
"call_pal %1"
: /* no outputs */
: "r" (&task->tss), "i" (PAL_swpctx)
@@ -80,6 +84,7 @@ static inline void flush_tlb(void)
flush_tlb_current(current->mm);
}
+#ifndef __SMP__
/*
* Flush everything (kernel mapping may also have
* changed due to vmalloc/vfree)
@@ -129,6 +134,28 @@ static inline void flush_tlb_range(struct mm_struct *mm,
flush_tlb_mm(mm);
}
+#else /* __SMP__ */
+
+/* ipi_msg_flush_tb is owned by the holder of the global kernel lock. */
+struct ipi_msg_flush_tb_struct {
+ volatile unsigned int flush_tb_mask;
+ union {
+ struct mm_struct * flush_mm;
+ struct vm_area_struct * flush_vma;
+ } p;
+ unsigned long flush_addr;
+ /* unsigned long flush_end; */ /* not used by local_flush_tlb_range */
+};
+
+extern struct ipi_msg_flush_tb_struct ipi_msg_flush_tb;
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);
+
+#endif /* __SMP__ */
+
/* Certain architectures need to do special things when pte's
* within a page table are directly modified. Thus, the following
* hook is made available.
diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h
index 2b805db93..c36876ca8 100644
--- a/include/asm-alpha/processor.h
+++ b/include/asm-alpha/processor.h
@@ -92,6 +92,9 @@ extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
+#define copy_segments(nr, tsk, mm) do { } while (0)
+#define release_segments(mm) do { } while (0)
+
/* NOTE: The task struct and the stack go together! */
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
diff --git a/include/asm-alpha/pyxis.h b/include/asm-alpha/pyxis.h
index 9d3d49595..d10408bde 100644
--- a/include/asm-alpha/pyxis.h
+++ b/include/asm-alpha/pyxis.h
@@ -1,11 +1,11 @@
#ifndef __ALPHA_PYXIS__H__
#define __ALPHA_PYXIS__H__
-#include <linux/config.h> /* CONFIG_ALPHA_RUFFIAN. */
+#include <linux/config.h>
#include <linux/types.h>
/*
- * PYXIS is the internal name for a cor logic chipset which provides
+ * PYXIS is the internal name for a core logic chipset which provides
* memory controller and PCI access for the 21164A chip based systems.
*
* This file is based on:
@@ -71,11 +71,23 @@
#define BYTE_ENABLE_SHIFT 5
#define TRANSFER_LENGTH_SHIFT 3
-#define MEM_SP1_MASK 0x1fffffff /* Mem sparse space 1 mask is 29 bits */
+#define MEM_R1_MASK 0x1fffffff /* SPARSE Mem region 1 mask is 29 bits */
+#define MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
+#define MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
-#define PYXIS_DMA_WIN_BASE (1024UL*1024UL*1024UL)
+#ifdef CONFIG_ALPHA_SRM_SETUP
+/* if we are using the SRM PCI setup, we'll need to use variables instead */
+#define PYXIS_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
+#define PYXIS_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
+
+extern unsigned int PYXIS_DMA_WIN_BASE;
+extern unsigned int PYXIS_DMA_WIN_SIZE;
+
+#else /* SRM_SETUP */
+#define PYXIS_DMA_WIN_BASE (1024*1024*1024)
#define PYXIS_DMA_WIN_SIZE (1024*1024*1024)
+#endif /* SRM_SETUP */
/*
* General Registers
@@ -97,13 +109,13 @@
#define PYXIS_DIAG_CHECK (IDENT_ADDR + 0x8740003000UL)
/*
- * Performance Monitor registers (p4-3)
+ * Performance Monitor registers
*/
#define PYXIS_PERF_MONITOR (IDENT_ADDR + 0x8740004000UL)
#define PYXIS_PERF_CONTROL (IDENT_ADDR + 0x8740004040UL)
/*
- * 21171-CA Error registers (p4-3)
+ * Error registers
*/
#define PYXIS_ERR (IDENT_ADDR + 0x8740008200UL)
#define PYXIS_STAT (IDENT_ADDR + 0x8740008240UL)
@@ -118,18 +130,25 @@
#define PYXIS_PCI_ERR2 (IDENT_ADDR + 0x8740008880UL)
/*
- * PCI Address Translation Registers. I've only defined
- * the first window fully as that's the only one that we're currently using.
- * The other window bases are needed to disable the windows.
+ * PCI Address Translation Registers.
*/
#define PYXIS_TBIA (IDENT_ADDR + 0x8760000100UL)
+
#define PYXIS_W0_BASE (IDENT_ADDR + 0x8760000400UL)
#define PYXIS_W0_MASK (IDENT_ADDR + 0x8760000440UL)
#define PYXIS_T0_BASE (IDENT_ADDR + 0x8760000480UL)
#define PYXIS_W1_BASE (IDENT_ADDR + 0x8760000500UL)
+#define PYXIS_W1_MASK (IDENT_ADDR + 0x8760000540UL)
+#define PYXIS_T1_BASE (IDENT_ADDR + 0x8760000580UL)
+
#define PYXIS_W2_BASE (IDENT_ADDR + 0x8760000600UL)
+#define PYXIS_W2_MASK (IDENT_ADDR + 0x8760000640UL)
+#define PYXIS_T2_BASE (IDENT_ADDR + 0x8760000680UL)
+
#define PYXIS_W3_BASE (IDENT_ADDR + 0x8760000700UL)
+#define PYXIS_W3_MASK (IDENT_ADDR + 0x8760000740UL)
+#define PYXIS_T3_BASE (IDENT_ADDR + 0x8760000780UL)
/*
* Memory Control registers
@@ -143,9 +162,19 @@
#define PYXIS_CONF (IDENT_ADDR + 0x8700000000UL)
#define PYXIS_IO (IDENT_ADDR + 0x8580000000UL)
#define PYXIS_SPARSE_MEM (IDENT_ADDR + 0x8000000000UL)
+#define PYXIS_SPARSE_MEM_R2 (IDENT_ADDR + 0x8400000000UL)
+#define PYXIS_SPARSE_MEM_R3 (IDENT_ADDR + 0x8500000000UL)
#define PYXIS_DENSE_MEM (IDENT_ADDR + 0x8600000000UL)
/*
+ * Byte/Word PCI Memory Spaces:
+ */
+#define PYXIS_BW_MEM (IDENT_ADDR + 0x8800000000UL)
+#define PYXIS_BW_IO (IDENT_ADDR + 0x8900000000UL)
+#define PYXIS_BW_CFG_0 (IDENT_ADDR + 0x8a00000000UL)
+#define PYXIS_BW_CFG_1 (IDENT_ADDR + 0x8b00000000UL)
+
+/*
* Interrupt Control registers
*/
#define PYXIS_INT_REQ (IDENT_ADDR + 0x87A0000000UL)
@@ -179,9 +208,8 @@
* Translate physical memory address as seen on (PCI) bus into
* a kernel virtual address and vv.
*/
-
#if defined(CONFIG_ALPHA_RUFFIAN)
-/* Ruffian doesn't do 1G PCI window. */
+/* Ruffian doesn't do 1G PCI window */
extern inline unsigned long virt_to_bus(void * address)
{
@@ -192,7 +220,7 @@ extern inline void * bus_to_virt(unsigned long address)
{
return phys_to_virt(address);
}
-#else
+#else /* RUFFIAN */
extern inline unsigned long virt_to_bus(void * address)
{
return virt_to_phys(address) + PYXIS_DMA_WIN_BASE;
@@ -207,13 +235,86 @@ extern inline void * bus_to_virt(unsigned long address)
/*
* I/O functions:
*
- * PYXIS (the 2117x PCI/memory support chipset for the EV5 (21164)
- * series of processors uses a sparse address mapping scheme to
+ * PYXIS, the 21174 PCI/memory support chipset for the EV56 (21164A)
+ * and PCA56 (21164PC) processors, can use either a sparse address
+ * mapping scheme, or the so-called byte-word PCI address space, to
* get at PCI memory and I/O.
*/
#define vuip volatile unsigned int *
+#ifdef BWIO_ENABLED
+
+extern inline unsigned int __inb(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldbu %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned char *)(addr+PYXIS_BW_IO)));
+
+ return result;
+}
+
+extern inline void __outb(unsigned char b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stb %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned char *)(addr+PYXIS_BW_IO)), "r" (b));
+}
+
+extern inline unsigned int __inw(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldwu %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned short *)(addr+PYXIS_BW_IO)));
+
+ return result;
+}
+
+extern inline void __outw(unsigned short b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stw %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned short *)(addr+PYXIS_BW_IO)), "r" (b));
+}
+
+extern inline unsigned int __inl(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldl %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned int *)(addr+PYXIS_BW_IO)));
+
+ return result;
+}
+
+extern inline void __outl(unsigned int b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stl %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned int *)(addr+PYXIS_BW_IO)), "r" (b));
+}
+
+#define inb(port) __inb((port))
+#define inw(port) __inw((port))
+#define inl(port) __inl((port))
+
+#define outb(x, port) __outb((x),(port))
+#define outw(x, port) __outw((x),(port))
+#define outl(x, port) __outl((x),(port))
+
+#else /* BWIO_ENABLED */
+
extern inline unsigned int __inb(unsigned long addr)
{
long result = *(vuip) ((addr << 5) + PYXIS_IO + 0x00);
@@ -257,6 +358,14 @@ extern inline void __outl(unsigned int b, unsigned long addr)
mb();
}
+#define inb(port) \
+(__builtin_constant_p((port))?__inb(port):_inb(port))
+
+#define outb(x, port) \
+(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
+
+#endif /* BWIO_ENABLED */
+
/*
* Memory functions. 64-bit and 32-bit accesses are done through
@@ -290,25 +399,197 @@ extern inline void __outl(unsigned int b, unsigned long addr)
*
*/
-extern inline void pyxis_set_hae(unsigned long new_hae)
+#ifdef BWIO_ENABLED
+
+extern inline unsigned long __readb(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldbu %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned char *)(addr+PYXIS_BW_MEM)));
+
+ return result;
+}
+
+extern inline unsigned long __readw(unsigned long addr)
{
- unsigned long ipl = swpipl(7);
- hae.cache = new_hae;
- *hae.reg = new_hae;
- mb();
- new_hae = *hae.reg; /* read it to be sure it got out */
- setipl(ipl);
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldwu %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned short *)(addr+PYXIS_BW_MEM)));
+
+ return result;
}
+extern inline unsigned long __readl(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldl %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned int *)(addr+PYXIS_BW_MEM)));
+
+ return result;
+}
+
+extern inline void __writeb(unsigned char b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stb %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned char *)(addr+PYXIS_BW_MEM)), "r" (b));
+}
+
+extern inline void __writew(unsigned short b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stw %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned short *)(addr+PYXIS_BW_MEM)), "r" (b));
+}
+
+extern inline void __writel(unsigned int b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stl %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned int *)(addr+PYXIS_BW_MEM)), "r" (b));
+}
+
+#define readb(addr) __readb((addr))
+#define readw(addr) __readw((addr))
+
+#define writeb(b, addr) __writeb((b),(addr))
+#define writew(b, addr) __writew((b),(addr))
+
+#else /* BWIO_ENABLED */
+
+#ifdef CONFIG_ALPHA_SRM_SETUP
+
+extern unsigned long pyxis_sm_base_r1, pyxis_sm_base_r2, pyxis_sm_base_r3;
+
+extern inline unsigned long __readb(unsigned long addr)
+{
+ unsigned long result, shift, work;
+
+ if ((addr >= pyxis_sm_base_r1) &&
+ (addr <= (pyxis_sm_base_r1 + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + PYXIS_SPARSE_MEM + 0x00);
+ else
+ if ((addr >= pyxis_sm_base_r2) &&
+ (addr <= (pyxis_sm_base_r2 + MEM_R2_MASK)))
+ work = (((addr & MEM_R2_MASK) << 5) + PYXIS_SPARSE_MEM_R2 + 0x00);
+ else
+ if ((addr >= pyxis_sm_base_r3) &&
+ (addr <= (pyxis_sm_base_r3 + MEM_R3_MASK)))
+ work = (((addr & MEM_R3_MASK) << 5) + PYXIS_SPARSE_MEM_R3 + 0x00);
+ else
+ {
+#if 0
+ printk("__readb: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return 0x0ffUL;
+ }
+ shift = (addr & 0x3) << 3;
+ result = *(vuip) work;
+ result >>= shift;
+ return 0x0ffUL & result;
+}
+
+extern inline unsigned long __readw(unsigned long addr)
+{
+ unsigned long result, shift, work;
+
+ if ((addr >= pyxis_sm_base_r1) &&
+ (addr <= (pyxis_sm_base_r1 + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + PYXIS_SPARSE_MEM + 0x08);
+ else
+ if ((addr >= pyxis_sm_base_r2) &&
+ (addr <= (pyxis_sm_base_r2 + MEM_R2_MASK)))
+ work = (((addr & MEM_R2_MASK) << 5) + PYXIS_SPARSE_MEM_R2 + 0x08);
+ else
+ if ((addr >= pyxis_sm_base_r3) &&
+ (addr <= (pyxis_sm_base_r3 + MEM_R3_MASK)))
+ work = (((addr & MEM_R3_MASK) << 5) + PYXIS_SPARSE_MEM_R3 + 0x08);
+ else
+ {
+#if 0
+ printk("__readw: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return 0x0ffffUL;
+ }
+ shift = (addr & 0x3) << 3;
+ result = *(vuip) work;
+ result >>= shift;
+ return 0x0ffffUL & result;
+}
+
+extern inline void __writeb(unsigned char b, unsigned long addr)
+{
+ unsigned long work;
+
+ if ((addr >= pyxis_sm_base_r1) &&
+ (addr <= (pyxis_sm_base_r1 + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + PYXIS_SPARSE_MEM + 0x00);
+ else
+ if ((addr >= pyxis_sm_base_r2) &&
+ (addr <= (pyxis_sm_base_r2 + MEM_R2_MASK)))
+ work = (((addr & MEM_R2_MASK) << 5) + PYXIS_SPARSE_MEM_R2 + 0x00);
+ else
+ if ((addr >= pyxis_sm_base_r3) &&
+ (addr <= (pyxis_sm_base_r3 + MEM_R3_MASK)))
+ work = (((addr & MEM_R3_MASK) << 5) + PYXIS_SPARSE_MEM_R3 + 0x00);
+ else
+ {
+#if 0
+ printk("__writeb: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return;
+ }
+ *(vuip) work = b * 0x01010101;
+}
+
+extern inline void __writew(unsigned short b, unsigned long addr)
+{
+ unsigned long work;
+
+	if ((addr >= pyxis_sm_base_r1) &&
+	    (addr <= (pyxis_sm_base_r1 + MEM_R1_MASK)))
+		work = (((addr & MEM_R1_MASK) << 5) + PYXIS_SPARSE_MEM + 0x08);
+	else
+	if ((addr >= pyxis_sm_base_r2) &&
+	    (addr <= (pyxis_sm_base_r2 + MEM_R2_MASK)))
+		work = (((addr & MEM_R2_MASK) << 5) + PYXIS_SPARSE_MEM_R2 + 0x08);
+	else
+	if ((addr >= pyxis_sm_base_r3) &&
+	    (addr <= (pyxis_sm_base_r3 + MEM_R3_MASK)))
+		work = (((addr & MEM_R3_MASK) << 5) + PYXIS_SPARSE_MEM_R3 + 0x08);
+ else
+ {
+#if 0
+ printk("__writew: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return;
+ }
+ *(vuip) work = b * 0x00010001;
+}
+
+#else /* SRM_SETUP */
+
extern inline unsigned long __readb(unsigned long addr)
{
unsigned long result, shift, msb, work, temp;
shift = (addr & 0x3) << 3;
msb = addr & 0xE0000000UL;
- temp = addr & MEM_SP1_MASK ;
+ temp = addr & MEM_R1_MASK ;
if (msb != hae.cache) {
- pyxis_set_hae(msb);
+ set_hae(msb);
}
work = ((temp << 5) + PYXIS_SPARSE_MEM + 0x00);
result = *(vuip) work;
@@ -322,9 +603,9 @@ extern inline unsigned long __readw(unsigned long addr)
shift = (addr & 0x3) << 3;
msb = addr & 0xE0000000UL;
- temp = addr & MEM_SP1_MASK ;
+ temp = addr & MEM_R1_MASK ;
if (msb != hae.cache) {
- pyxis_set_hae(msb);
+ set_hae(msb);
}
work = ((temp << 5) + PYXIS_SPARSE_MEM + 0x08);
result = *(vuip) work;
@@ -332,19 +613,14 @@ extern inline unsigned long __readw(unsigned long addr)
return 0x0ffffUL & result;
}
-extern inline unsigned long __readl(unsigned long addr)
-{
- return *(vuip) (addr + PYXIS_DENSE_MEM);
-}
-
extern inline void __writeb(unsigned char b, unsigned long addr)
{
unsigned long msb ;
msb = addr & 0xE0000000 ;
- addr &= MEM_SP1_MASK ;
+ addr &= MEM_R1_MASK ;
if (msb != hae.cache) {
- pyxis_set_hae(msb);
+ set_hae(msb);
}
*(vuip) ((addr << 5) + PYXIS_SPARSE_MEM + 0x00) = b * 0x01010101;
}
@@ -354,23 +630,25 @@ extern inline void __writew(unsigned short b, unsigned long addr)
unsigned long msb ;
msb = addr & 0xE0000000 ;
- addr &= MEM_SP1_MASK ;
+ addr &= MEM_R1_MASK ;
if (msb != hae.cache) {
- pyxis_set_hae(msb);
+ set_hae(msb);
}
*(vuip) ((addr << 5) + PYXIS_SPARSE_MEM + 0x08) = b * 0x00010001;
}
+#endif /* SRM_SETUP */
+
+extern inline unsigned long __readl(unsigned long addr)
+{
+ return *(vuip) (addr + PYXIS_DENSE_MEM);
+}
extern inline void __writel(unsigned int b, unsigned long addr)
{
*(vuip) (addr + PYXIS_DENSE_MEM) = b;
}
-#define inb(port) \
-(__builtin_constant_p((port))?__inb(port):_inb(port))
-
-#define outb(x, port) \
-(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
+#endif /* BWIO_ENABLED */
#define readl(a) __readl((unsigned long)(a))
#define writel(v,a) __writel((v),(unsigned long)(a))
diff --git a/include/asm-alpha/serial.h b/include/asm-alpha/serial.h
new file mode 100644
index 000000000..565b59dd0
--- /dev/null
+++ b/include/asm-alpha/serial.h
@@ -0,0 +1,75 @@
+/*
+ * include/asm-alpha/serial.h
+ */
+
+#include <linux/config.h>
+
+/*
+ * This assumes you have a 1.8432 MHz clock for your UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A is capable of handling a top speed of 1.5
+ * megabits/second; but this requires the faster clock.
+ */
+#define BASE_BAUD ( 1843200 / 16 )
+
+/* Standard COM flags (except for COM4, because of the 8514 problem) */
+#ifdef CONFIG_SERIAL_DETECT_IRQ
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
+#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
+#else
+#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
+#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
+#endif
+
+#ifdef CONFIG_SERIAL_MANY_PORTS
+#define FOURPORT_FLAGS ASYNC_FOURPORT
+#define ACCENT_FLAGS 0
+#define BOCA_FLAGS 0
+#endif
+
+#define STD_SERIAL_PORT_DEFNS \
+ /* UART CLK PORT IRQ FLAGS */ \
+ { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
+ { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
+ { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
+ { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
+
+
+#ifdef CONFIG_SERIAL_MANY_PORTS
+#define EXTRA_SERIAL_PORT_DEFNS \
+ { 0, BASE_BAUD, 0x1A0, 9, FOURPORT_FLAGS }, /* ttyS4 */ \
+ { 0, BASE_BAUD, 0x1A8, 9, FOURPORT_FLAGS }, /* ttyS5 */ \
+ { 0, BASE_BAUD, 0x1B0, 9, FOURPORT_FLAGS }, /* ttyS6 */ \
+ { 0, BASE_BAUD, 0x1B8, 9, FOURPORT_FLAGS }, /* ttyS7 */ \
+ { 0, BASE_BAUD, 0x2A0, 5, FOURPORT_FLAGS }, /* ttyS8 */ \
+ { 0, BASE_BAUD, 0x2A8, 5, FOURPORT_FLAGS }, /* ttyS9 */ \
+ { 0, BASE_BAUD, 0x2B0, 5, FOURPORT_FLAGS }, /* ttyS10 */ \
+ { 0, BASE_BAUD, 0x2B8, 5, FOURPORT_FLAGS }, /* ttyS11 */ \
+ { 0, BASE_BAUD, 0x330, 4, ACCENT_FLAGS }, /* ttyS12 */ \
+ { 0, BASE_BAUD, 0x338, 4, ACCENT_FLAGS }, /* ttyS13 */ \
+ { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS14 (spare) */ \
+ { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS15 (spare) */ \
+ { 0, BASE_BAUD, 0x100, 12, BOCA_FLAGS }, /* ttyS16 */ \
+ { 0, BASE_BAUD, 0x108, 12, BOCA_FLAGS }, /* ttyS17 */ \
+ { 0, BASE_BAUD, 0x110, 12, BOCA_FLAGS }, /* ttyS18 */ \
+ { 0, BASE_BAUD, 0x118, 12, BOCA_FLAGS }, /* ttyS19 */ \
+ { 0, BASE_BAUD, 0x120, 12, BOCA_FLAGS }, /* ttyS20 */ \
+ { 0, BASE_BAUD, 0x128, 12, BOCA_FLAGS }, /* ttyS21 */ \
+ { 0, BASE_BAUD, 0x130, 12, BOCA_FLAGS }, /* ttyS22 */ \
+ { 0, BASE_BAUD, 0x138, 12, BOCA_FLAGS }, /* ttyS23 */ \
+ { 0, BASE_BAUD, 0x140, 12, BOCA_FLAGS }, /* ttyS24 */ \
+ { 0, BASE_BAUD, 0x148, 12, BOCA_FLAGS }, /* ttyS25 */ \
+ { 0, BASE_BAUD, 0x150, 12, BOCA_FLAGS }, /* ttyS26 */ \
+ { 0, BASE_BAUD, 0x158, 12, BOCA_FLAGS }, /* ttyS27 */ \
+ { 0, BASE_BAUD, 0x160, 12, BOCA_FLAGS }, /* ttyS28 */ \
+ { 0, BASE_BAUD, 0x168, 12, BOCA_FLAGS }, /* ttyS29 */ \
+ { 0, BASE_BAUD, 0x170, 12, BOCA_FLAGS }, /* ttyS30 */ \
+ { 0, BASE_BAUD, 0x178, 12, BOCA_FLAGS }, /* ttyS31 */
+#else
+#define EXTRA_SERIAL_PORT_DEFNS
+#endif
+
+#define SERIAL_PORT_DFNS \
+ STD_SERIAL_PORT_DEFNS \
+ EXTRA_SERIAL_PORT_DEFNS
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index f7596af91..811abc297 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -3,6 +3,51 @@
#define cpu_logical_map(cpu) (cpu)
-/* We'll get here eventually.. */
+#ifdef __SMP__
+
+#include <linux/tasks.h>
+
+struct cpuinfo_alpha {
+ unsigned long loops_per_sec;
+ unsigned int next;
+};
+
+extern struct cpuinfo_alpha cpu_data[NR_CPUS];
+
+typedef volatile struct {
+ unsigned int kernel_flag; /* 4 bytes, please */
+ unsigned int akp; /* 4 bytes, please */
+ unsigned long pc;
+ unsigned int cpu;
+} klock_info_t;
+
+extern klock_info_t klock_info;
+
+#define KLOCK_HELD 0xff
+#define KLOCK_CLEAR 0x00
+
+extern int task_lock_depth;
+
+#define PROC_CHANGE_PENALTY 20
+
+extern __volatile__ int cpu_number_map[NR_CPUS];
+
+/* HACK: Cabrio WHAMI return value is bogus if more than 8 bits used.. :-( */
+#define hard_smp_processor_id() \
+({ \
+ register unsigned char __r0 __asm__("$0"); \
+ __asm__ __volatile__( \
+ "call_pal %0" \
+ : /* no output (bound to the template) */ \
+ :"i" (PAL_whami) \
+ :"$0", "$1", "$22", "$23", "$24", "$25", "memory"); \
+ __r0; \
+})
+
+#define smp_processor_id() hard_smp_processor_id()
+
+#endif /* __SMP__ */
+
+#define NO_PROC_ID (-1)
#endif
diff --git a/include/asm-alpha/smp_lock.h b/include/asm-alpha/smp_lock.h
index 5f5d69297..bd04c6fb2 100644
--- a/include/asm-alpha/smp_lock.h
+++ b/include/asm-alpha/smp_lock.h
@@ -10,7 +10,110 @@
#else
-#error "We do not support SMP on alpha yet"
+#include <asm/system.h>
+#include <asm/current.h>
+#include <asm/bitops.h>
+#include <asm/hardirq.h>
+
+#define kernel_lock_held() \
+ (klock_info.kernel_flag && (klock_info.akp == smp_processor_id()))
+
+/* Release global kernel lock and global interrupt lock */
+#define release_kernel_lock(task, cpu, depth) \
+do { \
+ if ((depth = (task)->lock_depth) != 0) { \
+ __cli(); \
+ (task)->lock_depth = 0; \
+ klock_info.akp = NO_PROC_ID; \
+ klock_info.kernel_flag = 0; \
+ mb(); \
+ } \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+#if 1
+#define DEBUG_KERNEL_LOCK
+#else
+#undef DEBUG_KERNEL_LOCK
+#endif
+
+#ifdef DEBUG_KERNEL_LOCK
+extern void ___lock_kernel(klock_info_t *klip, int cpu, long ipl);
+#else /* DEBUG_KERNEL_LOCK */
+static inline void ___lock_kernel(klock_info_t *klip, int cpu, long ipl)
+{
+ long regx;
+
+ __asm__ __volatile__(
+ "1: ldl_l %1,%0;"
+ " blbs %1,6f;"
+ " or %1,1,%1;"
+ " stl_c %1,%0;"
+ " beq %1,6f;"
+ "4: mb\n"
+ ".section .text2,\"ax\"\n"
+ "6: mov %4,$16;"
+ " call_pal %3;"
+ "7: ldl %1,%0;"
+ " blbs %1,7b;"
+ " bis $31,7,$16;"
+ " call_pal %3;"
+ " br 1b\n"
+ ".previous"
+ : "=m,=m" (__dummy_lock(klip)), "=&r,=&r" (regx)
+ : "0,0" (__dummy_lock(klip)), "i,i" (PAL_swpipl), "i,r" (ipl)
+ : "$0", "$1", "$16", "$22", "$23", "$24", "$25", "memory"
+ );
+}
+#endif /* DEBUG_KERNEL_LOCK */
+
+#define reacquire_kernel_lock(task, cpu, depth) \
+do { \
+ if (depth) { \
+ long ipl; \
+ klock_info_t *klip = &klock_info; \
+ __save_and_cli(ipl); \
+ ___lock_kernel(klip, cpu, ipl); \
+ klip->akp = cpu; \
+ (task)->lock_depth = depth; \
+ __restore_flags(ipl); \
+ } \
+} while (0)
+
+/* The following acquire and release the master kernel global lock,
+ * the idea is that the usage of this mechanism becomes less and less
+ * as time goes on, to the point where they are no longer needed at all
+ * and can thus disappear.
+ */
+
+#define lock_kernel() \
+if (current->lock_depth > 0) { \
+ ++current->lock_depth; \
+} else { \
+ long ipl; \
+ int cpu = smp_processor_id(); \
+ klock_info_t *klip = &klock_info; \
+ __save_and_cli(ipl); \
+ ___lock_kernel(klip, cpu, ipl); \
+ klip->akp = cpu; \
+ current->lock_depth = 1; \
+ __restore_flags(ipl); \
+}
+
+/* Release kernel global lock. */
+#define unlock_kernel() \
+if (current->lock_depth > 1) { \
+ --current->lock_depth; \
+} else { \
+ long ipl; \
+ __save_and_cli(ipl); \
+ klock_info.akp = NO_PROC_ID; \
+ klock_info.kernel_flag = KLOCK_CLEAR; \
+ mb(); \
+ current->lock_depth = 0; \
+ __restore_flags(ipl); \
+}
#endif /* __SMP__ */
diff --git a/include/asm-alpha/softirq.h b/include/asm-alpha/softirq.h
index 96be9ed43..f49bad83f 100644
--- a/include/asm-alpha/softirq.h
+++ b/include/asm-alpha/softirq.h
@@ -1,10 +1,9 @@
#ifndef _ALPHA_SOFTIRQ_H
#define _ALPHA_SOFTIRQ_H
-/* The locking mechanism for base handlers, to prevent re-entrancy,
- * is entirely private to an implementation, it should not be
- * referenced at all outside of this file.
- */
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
+
extern unsigned int local_bh_count[NR_CPUS];
#define get_active_bhs() (bh_mask & bh_active)
@@ -42,10 +41,50 @@ extern inline void mark_bh(int nr)
set_bit(nr, &bh_active);
}
+#ifdef __SMP__
+
/*
- * start_bh_atomic/end_bh_atomic also nest
- * naturally by using a counter
+ * The locking mechanism for base handlers, to prevent re-entrancy,
+ * is entirely private to an implementation, it should not be
+ * referenced at all outside of this file.
*/
+extern atomic_t global_bh_lock;
+extern atomic_t global_bh_count;
+
+extern void synchronize_bh(void);
+
+static inline void start_bh_atomic(void)
+{
+ atomic_inc(&global_bh_lock);
+ synchronize_bh();
+}
+
+static inline void end_bh_atomic(void)
+{
+ atomic_dec(&global_bh_lock);
+}
+
+/* These are for the irq's testing the lock */
+static inline int softirq_trylock(int cpu)
+{
+ if (!test_and_set_bit(0,&global_bh_count)) {
+ if (atomic_read(&global_bh_lock) == 0) {
+ ++local_bh_count[cpu];
+ return 1;
+ }
+ clear_bit(0,&global_bh_count);
+ }
+ return 0;
+}
+
+static inline void softirq_endlock(int cpu)
+{
+ local_bh_count[cpu]--;
+ clear_bit(0,&global_bh_count);
+}
+
+#else
+
extern inline void start_bh_atomic(void)
{
local_bh_count[smp_processor_id()]++;
@@ -58,19 +97,16 @@ extern inline void end_bh_atomic(void)
local_bh_count[smp_processor_id()]--;
}
-#ifndef __SMP__
-
/* These are for the irq's testing the lock */
#define softirq_trylock(cpu) \
(local_bh_count[cpu] ? 0 : (local_bh_count[cpu] = 1))
+
#define softirq_endlock(cpu) \
(local_bh_count[cpu] = 0)
-#else
-
-#error FIXME
+#define synchronize_bh() do { } while (0)
-#endif /* __SMP__ */
+#endif /* SMP */
/*
* These use a mask count to correctly handle
diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h
index a38477426..1c7afd449 100644
--- a/include/asm-alpha/spinlock.h
+++ b/include/asm-alpha/spinlock.h
@@ -45,7 +45,10 @@ typedef struct { int dummy; } rwlock_t;
#define write_lock_irqsave(lock, flags) do { (flags) = swpipl(7); } while (0)
#define write_unlock_irqrestore(lock, flags) setipl(flags)
-#else
+#else /* __SMP__ */
+
+#include <linux/kernel.h>
+#include <asm/current.h>
/* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
@@ -56,12 +59,15 @@ typedef struct { int dummy; } rwlock_t;
typedef struct {
volatile unsigned long lock;
unsigned long previous;
+ unsigned long task;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED { 0, 0 }
-#define spin_lock_init(lock) do { (lock)->lock = 0; (lock)->previous = 0; } while(0)
-#define spin_unlock_wait(lock) do { barrier(); } while(((volatile spinlock_t *)lock)->lock)
+#define spin_lock_init(x) \
+ do { (x)->lock = 0; (x)->previous = 0; } while(0)
+#define spin_unlock_wait(x) \
+ do { barrier(); } while(((volatile spinlock_t *)x)->lock)
typedef struct { unsigned long a[100]; } __dummy_lock_t;
#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
@@ -73,40 +79,38 @@ static inline void spin_unlock(spinlock_t * lock)
:"=m" (__dummy_lock(lock)));
}
+#if 1
+#define DEBUG_SPINLOCK
+#else
+#undef DEBUG_SPINLOCK
+#endif
+
+#ifdef DEBUG_SPINLOCK
+extern void spin_lock(spinlock_t * lock);
+#else
static inline void spin_lock(spinlock_t * lock)
{
- __label__ l1;
long tmp;
- long stuck = 0x100000000;
-l1:
+
/* Use sub-sections to put the actual loop at the end
of this object file's text section so as to perfect
branch prediction. */
__asm__ __volatile__(
"1: ldq_l %0,%1\n"
- " subq %2,1,%2\n"
" blbs %0,2f\n"
" or %0,1,%0\n"
" stq_c %0,%1\n"
- " beq %0,3f\n"
+ " beq %0,2f\n"
"4: mb\n"
".section .text2,\"ax\"\n"
"2: ldq %0,%1\n"
- " subq %2,1,%2\n"
- "3: blt %2,4b\n"
" blbs %0,2b\n"
" br 1b\n"
".previous"
: "=r" (tmp),
- "=m" (__dummy_lock(lock)),
- "=r" (stuck)
- : "2" (stuck));
-
- if (stuck < 0)
- printk("spinlock stuck at %p (%lx)\n",&&l1,lock->previous);
- else
- lock->previous = (unsigned long) &&l1;
+ "=m" (__dummy_lock(lock)));
}
+#endif /* DEBUG_SPINLOCK */
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
@@ -117,10 +121,124 @@ l1:
do { spin_unlock(lock); __sti(); } while (0)
#define spin_lock_irqsave(lock, flags) \
- do { flags = swpipl(7); spin_lock(lock); } while (0)
+ do { __save_and_cli(flags); spin_lock(lock); } while (0)
#define spin_unlock_irqrestore(lock, flags) \
- do { spin_unlock(lock); setipl(flags); } while (0)
+ do { spin_unlock(lock); __restore_flags(flags); } while (0)
+
+/***********************************************************/
+
+#if 1
+#define DEBUG_RWLOCK
+#else
+#undef DEBUG_RWLOCK
+#endif
+
+typedef struct { volatile int write_lock:1, read_counter:31; } rwlock_t;
+
+#define RW_LOCK_UNLOCKED { 0, 0 }
+
+#ifdef DEBUG_RWLOCK
+extern void write_lock(rwlock_t * lock);
+#else
+static inline void write_lock(rwlock_t * lock)
+{
+ long regx, regy;
+
+ __asm__ __volatile__(
+ "1: ldl_l %1,%0;"
+ " blbs %1,6f;"
+ " or %1,1,%2;"
+ " stl_c %2,%0;"
+ " beq %2,6f;"
+ " blt %1,8f;"
+ "4: mb\n"
+ ".section .text2,\"ax\"\n"
+ "6: ldl %1,%0;"
+ " blbs %1,6b;"
+ " br 1b;"
+ "8: ldl %1,%0;"
+ " blt %1,8b;"
+ "9: br 4b\n"
+ ".previous"
+ : "=m" (__dummy_lock(lock)), "=&r" (regx), "=&r" (regy)
+ : "0" (__dummy_lock(lock))
+ );
+}
+#endif /* DEBUG_RWLOCK */
+
+static inline void write_unlock(rwlock_t * lock)
+{
+ __asm__ __volatile__("mb; stl $31,%0" : "=m" (__dummy_lock(lock)));
+}
+
+#ifdef DEBUG_RWLOCK
+extern void _read_lock(rwlock_t * lock);
+#else
+static inline void _read_lock(rwlock_t * lock)
+{
+ long regx;
+
+ __asm__ __volatile__(
+ "1: ldl_l %1,%0;"
+ " blbs %1,6f;"
+ " subl %1,2,%1;"
+ " stl_c %1,%0;"
+ " beq %1,6f;"
+ "4: mb\n"
+ ".section .text2,\"ax\"\n"
+ "6: ldl %1,%0;"
+ " blbs %1,6b;"
+ " br 1b\n"
+ ".previous"
+ : "=m" (__dummy_lock(lock)), "=&r" (regx)
+ : "0" (__dummy_lock(lock))
+ );
+}
+#endif /* DEBUG_RWLOCK */
+
+#define read_lock(lock) \
+do { unsigned long flags; \
+ __save_and_cli(flags); \
+ _read_lock(lock); \
+ __restore_flags(flags); \
+} while(0)
+
+static inline void _read_unlock(rwlock_t * lock)
+{
+ long regx;
+ __asm__ __volatile__(
+ "1: ldl_l %1,%0;"
+ " addl %1,2,%1;"
+ " stl_c %1,%0;"
+ " beq %1,6f;"
+ ".section .text2,\"ax\"\n"
+ "6: br 1b\n"
+ ".previous"
+ : "=m" (__dummy_lock(lock)), "=&r" (regx)
+ : "0" (__dummy_lock(lock)));
+}
+
+#define read_unlock(lock) \
+do { unsigned long flags; \
+ __save_and_cli(flags); \
+ _read_unlock(lock); \
+ __restore_flags(flags); \
+} while(0)
+
+#define read_lock_irq(lock) do { __cli(); _read_lock(lock); } while (0)
+#define read_unlock_irq(lock) do { _read_unlock(lock); __sti(); } while (0)
+#define write_lock_irq(lock) do { __cli(); write_lock(lock); } while (0)
+#define write_unlock_irq(lock) do { write_unlock(lock); __sti(); } while (0)
+
+#define read_lock_irqsave(lock, flags) \
+ do { __save_and_cli(flags); _read_lock(lock); } while (0)
+#define read_unlock_irqrestore(lock, flags) \
+ do { _read_unlock(lock); __restore_flags(flags); } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { __save_and_cli(flags); write_lock(lock); } while (0)
+#define write_unlock_irqrestore(lock, flags) \
+ do { write_unlock(lock); __restore_flags(flags); } while (0)
#endif /* SMP */
#endif /* _ALPHA_SPINLOCK_H */
diff --git a/include/asm-alpha/string.h b/include/asm-alpha/string.h
index 6ef1f4120..4b83c8291 100644
--- a/include/asm-alpha/string.h
+++ b/include/asm-alpha/string.h
@@ -3,29 +3,40 @@
#ifdef __KERNEL__
-extern void * __constant_c_memset(void *, unsigned long, long);
-extern void * __memset(void *, char, size_t);
-
/*
- * Ugh. Gcc uses "bcopy()" internally for structure assignments.
+ * GCC of any recent vintage doesn't do stupid things with bcopy. Of
+ * EGCS-devel vintage, it knows all about expanding memcpy inline.
+ * For things other than EGCS-devel but still recent, GCC will expand
+ * __builtin_memcpy as a simple call to memcpy.
+ *
+ * Similarly for a memset with data = 0.
*/
-#define __HAVE_ARCH_BCOPY
-/*
- * Define "memcpy()" to something else, otherwise gcc will
- * corrupt that too into a "bcopy". Also, some day we might
- * want to do a separate inlined constant-size memcpy (for 8
- * and 16 byte user<->kernel structure copying).
- */
#define __HAVE_ARCH_MEMCPY
+/* For backward compatibility with modules. Unused otherwise. */
extern void * __memcpy(void *, const void *, size_t);
-#define memcpy __memcpy
+
+#if __GNUC__ > 2 || __GNUC_MINOR__ >= 8
+#define memcpy __builtin_memcpy
+#endif
#define __HAVE_ARCH_MEMSET
-#define memset(s, c, count) \
-(__builtin_constant_p(c) ? \
- __constant_c_memset((s),(0x0101010101010101UL*(unsigned char)c),(count)) : \
- __memset((s),(c),(count)))
+extern void * __constant_c_memset(void *, unsigned long, long);
+extern void * __memset(void *, char, size_t);
+
+#if __GNUC__ > 2 || __GNUC_MINOR__ >= 8
+#define memset(s, c, n) \
+(__builtin_constant_p(c) \
+ ? (__builtin_constant_p(n) && (c) == 0 \
+ ? __builtin_memset((s),0,(n)) \
+ : __constant_c_memset((s),0x0101010101010101UL*(unsigned char)(c),(n))) \
+ : __memset((s),(c),(n)))
+#else
+#define memset(s, c, n) \
+(__builtin_constant_p(c) \
+ ? __constant_c_memset((s),0x0101010101010101UL*(unsigned char)(c),(n)) \
+ : __memset((s),(c),(n)))
+#endif
#define __HAVE_ARCH_STRCPY
#define __HAVE_ARCH_STRNCPY
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 1a1fb246f..72b91317e 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -39,13 +39,54 @@
*/
struct el_common {
unsigned int size; /* size in bytes of logout area */
- int sbz1 : 31; /* should be zero */
- char retry : 1; /* retry flag */
+ int sbz1 : 30; /* should be zero */
+ int err2 : 1; /* second error */
+ int retry : 1; /* retry flag */
unsigned int proc_offset; /* processor-specific offset */
unsigned int sys_offset; /* system-specific offset */
unsigned long code; /* machine check code */
};
+/* Machine Check Frame for uncorrectable errors (Large format)
+ * --- This is used to log uncorrectable errors such as
+ * double bit ECC errors.
+ * --- These errors are detected by both processor and systems.
+ */
+struct el_common_EV5_uncorrectable_mcheck {
+ unsigned long shadow[8]; /* Shadow reg. 8-14, 25 */
+ unsigned long paltemp[24]; /* PAL TEMP REGS. */
+ unsigned long exc_addr; /* Address of excepting instruction*/
+ unsigned long exc_sum; /* Summary of arithmetic traps. */
+ unsigned long exc_mask; /* Exception mask (from exc_sum). */
+ unsigned long pal_base; /* Base address for PALcode. */
+ unsigned long isr; /* Interrupt Status Reg. */
+ unsigned long icsr; /* CURRENT SETUP OF EV5 IBOX */
+ unsigned long ic_perr_stat; /* I-CACHE Reg. <11> set Data parity
+ <12> set TAG parity*/
+ unsigned long dc_perr_stat; /* D-CACHE error Reg. Bits set to 1:
+ <2> Data error in bank 0
+ <3> Data error in bank 1
+ <4> Tag error in bank 0
+ <5> Tag error in bank 1 */
+ unsigned long va; /* Effective VA of fault or miss. */
+ unsigned long mm_stat; /* Holds the reason for D-stream
+ fault or D-cache parity errors */
+ unsigned long sc_addr; /* Address that was being accessed
+ when EV5 detected Secondary cache
+ failure. */
+ unsigned long sc_stat; /* Helps determine if the error was
+ TAG/Data parity(Secondary Cache)*/
+ unsigned long bc_tag_addr; /* Contents of EV5 BC_TAG_ADDR */
+ unsigned long ei_addr; /* Physical address of any transfer
+ that is logged in EV5 EI_STAT */
+ unsigned long fill_syndrome; /* For correcting ECC errors. */
+ unsigned long ei_stat; /* Helps identify reason of any
+ processor uncorrectable error
+ at its external interface. */
+ unsigned long ld_lock; /* Contents of EV5 LD_LOCK register*/
+};
+
+
extern void wrent(void *, unsigned long);
extern void wrkgp(unsigned long);
extern void wrusp(unsigned long);
@@ -96,6 +137,7 @@ __asm__ __volatile__ ("call_pal %0" : : "i" (PAL_draina) : "memory")
r0; \
})
+#ifdef THE_OLD_VERSION
#define setipl(ipl) \
do { \
register unsigned long __r16 __asm__("$16") = (ipl); \
@@ -117,6 +159,27 @@ do { \
:"$1", "$22", "$23", "$24", "$25", "memory"); \
__r0; \
})
+#else
+#define setipl(ipl) \
+do { \
+ __asm__ __volatile__( \
+ "mov %0,$16; call_pal %1" \
+ : /* no output */ \
+ :"i,r" (ipl), "i,i" (PAL_swpipl) \
+ :"$0", "$1", "$16", "$22", "$23", "$24", "$25", "memory"); \
+} while (0)
+
+#define swpipl(ipl) \
+({ \
+ register unsigned long __r0 __asm__("$0"); \
+ __asm__ __volatile__( \
+ "mov %0,$16; call_pal %1" \
+ : /* no output (bound to the template) */ \
+ : "i,r" (ipl), "i,i" (PAL_swpipl) \
+ : "$0", "$1", "$16", "$22", "$23", "$24", "$25", "memory"); \
+ __r0; \
+})
+#endif
#define __cli() setipl(7)
#define __sti() setipl(0)
@@ -124,12 +187,37 @@ do { \
#define __save_and_cli(flags) do { (flags) = swpipl(7); } while (0)
#define __restore_flags(flags) setipl(flags)
+#ifdef __SMP__
+
+extern unsigned char global_irq_holder;
+
+#define save_flags(x) \
+do { \
+ (x) = ((global_irq_holder == (unsigned char) smp_processor_id()) \
+ ? 1 \
+ : ((getipl() & 7) ? 2 : 0)); \
+} while (0)
+
+#define save_and_cli(flags) do { save_flags(flags); cli(); } while(0)
+
+extern void __global_cli(void);
+extern void __global_sti(void);
+extern void __global_restore_flags(unsigned long flags);
+
+#define cli() __global_cli()
+#define sti() __global_sti()
+#define restore_flags(flags) __global_restore_flags(flags)
+
+#else /* __SMP__ */
+
#define cli() setipl(7)
#define sti() setipl(0)
#define save_flags(flags) do { (flags) = getipl(); } while (0)
#define save_and_cli(flags) do { (flags) = swpipl(7); } while (0)
#define restore_flags(flags) setipl(flags)
+#endif /* __SMP__ */
+
/*
* TB routines..
*/
diff --git a/include/asm-alpha/t2.h b/include/asm-alpha/t2.h
index 1f4f8c741..524d6f765 100644
--- a/include/asm-alpha/t2.h
+++ b/include/asm-alpha/t2.h
@@ -1,6 +1,7 @@
#ifndef __ALPHA_T2__H__
#define __ALPHA_T2__H__
+#include <linux/config.h>
#include <linux/types.h>
/*
@@ -18,40 +19,56 @@
#define BYTE_ENABLE_SHIFT 5
#define TRANSFER_LENGTH_SHIFT 3
-#define MEM_SP1_MASK 0x1fffffff /* Mem sparse space 1 mask is 29 bits */
+#define MEM_R1_MASK 0x03ffffff /* Mem sparse space region 1 mask is 26 bits */
+#ifdef CONFIG_ALPHA_SRM_SETUP
+/* if we are using the SRM PCI setup, we'll need to use variables instead */
+#define T2_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
+#define T2_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
-#define T2_DMA_WIN_BASE (1024UL*1024UL*1024UL)
+extern unsigned int T2_DMA_WIN_BASE;
+extern unsigned int T2_DMA_WIN_SIZE;
+
+#else /* SRM_SETUP */
+#define T2_DMA_WIN_BASE (1024*1024*1024)
#define T2_DMA_WIN_SIZE (1024*1024*1024)
+#endif /* SRM_SETUP */
+
+/* GAMMA-SABLE is a SABLE with EV5-based CPUs */
+#ifdef CONFIG_ALPHA_GAMMA
+# define GAMMA_BIAS 0x8000000000UL
+#else /* GAMMA */
+# define GAMMA_BIAS 0x0000000000UL
+#endif /* GAMMA */
/*
* Memory spaces:
*/
-#define T2_CONF (IDENT_ADDR + 0x390000000UL)
-#define T2_IO (IDENT_ADDR + 0x3a0000000UL)
-#define T2_SPARSE_MEM (IDENT_ADDR + 0x200000000UL)
-#define T2_DENSE_MEM (IDENT_ADDR + 0x3c0000000UL)
-
-#define T2_IOCSR (IDENT_ADDR + 0x38e000000UL)
-#define T2_CERR1 (IDENT_ADDR + 0x38e000020UL)
-#define T2_CERR2 (IDENT_ADDR + 0x38e000040UL)
-#define T2_CERR3 (IDENT_ADDR + 0x38e000060UL)
-#define T2_PERR1 (IDENT_ADDR + 0x38e000080UL)
-#define T2_PERR2 (IDENT_ADDR + 0x38e0000a0UL)
-#define T2_PSCR (IDENT_ADDR + 0x38e0000c0UL)
-#define T2_HAE_1 (IDENT_ADDR + 0x38e0000e0UL)
-#define T2_HAE_2 (IDENT_ADDR + 0x38e000100UL)
-#define T2_HBASE (IDENT_ADDR + 0x38e000120UL)
-#define T2_WBASE1 (IDENT_ADDR + 0x38e000140UL)
-#define T2_WMASK1 (IDENT_ADDR + 0x38e000160UL)
-#define T2_TBASE1 (IDENT_ADDR + 0x38e000180UL)
-#define T2_WBASE2 (IDENT_ADDR + 0x38e0001a0UL)
-#define T2_WMASK2 (IDENT_ADDR + 0x38e0001c0UL)
-#define T2_TBASE2 (IDENT_ADDR + 0x38e0001e0UL)
-#define T2_TLBBR (IDENT_ADDR + 0x38e000200UL)
-
-#define T2_HAE_3 (IDENT_ADDR + 0x38e000240UL)
-#define T2_HAE_4 (IDENT_ADDR + 0x38e000260UL)
+#define T2_CONF (IDENT_ADDR + GAMMA_BIAS + 0x390000000UL)
+#define T2_IO (IDENT_ADDR + GAMMA_BIAS + 0x3a0000000UL)
+#define T2_SPARSE_MEM (IDENT_ADDR + GAMMA_BIAS + 0x200000000UL)
+#define T2_DENSE_MEM (IDENT_ADDR + GAMMA_BIAS + 0x3c0000000UL)
+
+#define T2_IOCSR (IDENT_ADDR + GAMMA_BIAS + 0x38e000000UL)
+#define T2_CERR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000020UL)
+#define T2_CERR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000040UL)
+#define T2_CERR3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000060UL)
+#define T2_PERR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000080UL)
+#define T2_PERR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0000a0UL)
+#define T2_PSCR (IDENT_ADDR + GAMMA_BIAS + 0x38e0000c0UL)
+#define T2_HAE_1 (IDENT_ADDR + GAMMA_BIAS + 0x38e0000e0UL)
+#define T2_HAE_2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000100UL)
+#define T2_HBASE (IDENT_ADDR + GAMMA_BIAS + 0x38e000120UL)
+#define T2_WBASE1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000140UL)
+#define T2_WMASK1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000160UL)
+#define T2_TBASE1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000180UL)
+#define T2_WBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001a0UL)
+#define T2_WMASK2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001c0UL)
+#define T2_TBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001e0UL)
+#define T2_TLBBR (IDENT_ADDR + GAMMA_BIAS + 0x38e000200UL)
+
+#define T2_HAE_3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000240UL)
+#define T2_HAE_4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000260UL)
#define HAE_ADDRESS T2_HAE_1
@@ -88,14 +105,14 @@
*
*
*/
-#define CPU0_BASE (IDENT_ADDR + 0x380000000L)
-#define CPU1_BASE (IDENT_ADDR + 0x381000000L)
-#define CPU2_BASE (IDENT_ADDR + 0x382000000L)
-#define CPU3_BASE (IDENT_ADDR + 0x383000000L)
-#define MEM0_BASE (IDENT_ADDR + 0x388000000L)
-#define MEM1_BASE (IDENT_ADDR + 0x389000000L)
-#define MEM2_BASE (IDENT_ADDR + 0x38a000000L)
-#define MEM3_BASE (IDENT_ADDR + 0x38b000000L)
+#define CPU0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x380000000L)
+#define CPU1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x381000000L)
+#define CPU2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x382000000L)
+#define CPU3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x383000000L)
+#define MEM0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x388000000L)
+#define MEM1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x389000000L)
+#define MEM2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38a000000L)
+#define MEM3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38b000000L)
#ifdef __KERNEL__
@@ -198,6 +215,133 @@ extern inline void __outl(unsigned int b, unsigned long addr)
* HHH = 31:29 HAE_MEM CSR
*
*/
+#ifdef CONFIG_ALPHA_SRM_SETUP
+
+extern unsigned long t2_sm_base;
+
+extern inline unsigned long __readb(unsigned long addr)
+{
+ unsigned long result, shift, work;
+
+ if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
+ else
+ if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
+ work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
+ else
+ {
+#if 0
+ printk("__readb: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return 0x0ffUL;
+ }
+ shift = (addr & 0x3) << 3;
+ result = *(vuip) work;
+ result >>= shift;
+ return 0x0ffUL & result;
+}
+
+extern inline unsigned long __readw(unsigned long addr)
+{
+ unsigned long result, shift, work;
+
+ if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
+ else
+ if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
+ work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
+ else
+ {
+#if 0
+ printk("__readw: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return 0x0ffffUL;
+ }
+ shift = (addr & 0x3) << 3;
+ result = *(vuip) work;
+ result >>= shift;
+ return 0x0ffffUL & result;
+}
+
+/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
+extern inline unsigned long __readl(unsigned long addr)
+{
+ unsigned long result, work;
+
+ if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
+ else
+ if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
+ work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
+ else
+ {
+#if 0
+ printk("__readl: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return 0x0ffffffffUL;
+ }
+ result = *(vuip) work;
+ return 0xffffffffUL & result;
+}
+
+extern inline void __writeb(unsigned char b, unsigned long addr)
+{
+ unsigned long work;
+
+ if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
+ else
+ if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
+ work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
+ else
+ {
+#if 0
+ printk("__writeb: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return;
+ }
+ *(vuip) work = b * 0x01010101;
+}
+
+extern inline void __writew(unsigned short b, unsigned long addr)
+{
+ unsigned long work;
+
+ if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
+ else
+ if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
+ work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
+ else
+ {
+#if 0
+ printk("__writew: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return;
+ }
+ *(vuip) work = b * 0x00010001;
+}
+
+/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
+extern inline void __writel(unsigned int b, unsigned long addr)
+{
+ unsigned long work;
+
+ if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
+ work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
+ else
+ if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
+ work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
+	else {
+#if 0
+ printk("__writel: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return;
+ }
+ *(vuip) work = b;
+}
+
+#else /* SRM_SETUP */
extern inline unsigned long __readb(unsigned long addr)
{
@@ -205,7 +349,7 @@ extern inline unsigned long __readb(unsigned long addr)
shift = (addr & 0x3) * 8 ;
msb = addr & 0xE0000000 ;
- addr &= MEM_SP1_MASK ;
+ addr &= MEM_R1_MASK ;
if (msb != hae.cache) {
set_hae(msb);
}
@@ -220,7 +364,7 @@ extern inline unsigned long __readw(unsigned long addr)
shift = (addr & 0x3) * 8;
msb = addr & 0xE0000000 ;
- addr &= MEM_SP1_MASK ;
+ addr &= MEM_R1_MASK ;
if (msb != hae.cache) {
set_hae(msb);
}
@@ -235,7 +379,7 @@ extern inline unsigned long __readl(unsigned long addr)
unsigned long result, msb;
msb = addr & 0xE0000000 ;
- addr &= MEM_SP1_MASK ;
+ addr &= MEM_R1_MASK ;
if (msb != hae.cache) {
set_hae(msb);
}
@@ -248,7 +392,7 @@ extern inline void __writeb(unsigned char b, unsigned long addr)
unsigned long msb ;
msb = addr & 0xE0000000 ;
- addr &= MEM_SP1_MASK ;
+ addr &= MEM_R1_MASK ;
if (msb != hae.cache) {
set_hae(msb);
}
@@ -260,7 +404,7 @@ extern inline void __writew(unsigned short b, unsigned long addr)
unsigned long msb ;
msb = addr & 0xE0000000 ;
- addr &= MEM_SP1_MASK ;
+ addr &= MEM_R1_MASK ;
if (msb != hae.cache) {
set_hae(msb);
}
@@ -273,13 +417,15 @@ extern inline void __writel(unsigned int b, unsigned long addr)
unsigned long msb ;
msb = addr & 0xE0000000 ;
- addr &= MEM_SP1_MASK ;
+ addr &= MEM_R1_MASK ;
if (msb != hae.cache) {
set_hae(msb);
}
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
}
+#endif /* SRM_SETUP */
+
#define inb(port) \
(__builtin_constant_p((port))?__inb(port):_inb(port))
diff --git a/include/asm-alpha/termios.h b/include/asm-alpha/termios.h
index 3cd68d99c..07b9489d9 100644
--- a/include/asm-alpha/termios.h
+++ b/include/asm-alpha/termios.h
@@ -72,6 +72,8 @@ struct termio {
#define N_MOUSE 2
#define N_PPP 3
#define N_AX25 5
+#define N_X25 6
+#define N_6PACK 7
#ifdef __KERNEL__
/* eof=^D eol=\0 eol2=\0 erase=del
diff --git a/include/asm-alpha/tsunami.h b/include/asm-alpha/tsunami.h
new file mode 100644
index 000000000..2c6be4e23
--- /dev/null
+++ b/include/asm-alpha/tsunami.h
@@ -0,0 +1,483 @@
+#ifndef __ALPHA_TSUNAMI__H__
+#define __ALPHA_TSUNAMI__H__
+
+#include <linux/config.h>
+#include <linux/types.h>
+
+/*
+ * TSUNAMI/TYPHOON are the internal names for the core logic chipset which
+ * provides memory controller and PCI access for the 21264 based systems.
+ *
+ * This file is based on:
+ *
+ * Tsunami System Programmers Manual
+ * Preliminary, Chapters 2-5
+ *
+ */
+
+#define BYTE_ENABLE_SHIFT 5
+#define TRANSFER_LENGTH_SHIFT 3
+
+#ifdef CONFIG_ALPHA_SRM_SETUP
+/* if we are using the SRM PCI setup, we'll need to use variables instead */
+#define TSUNAMI_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
+#define TSUNAMI_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
+
+extern unsigned int TSUNAMI_DMA_WIN_BASE;
+extern unsigned int TSUNAMI_DMA_WIN_SIZE;
+
+#else /* SRM_SETUP */
+#define TSUNAMI_DMA_WIN_BASE (1024*1024*1024)
+#define TSUNAMI_DMA_WIN_SIZE (1024*1024*1024)
+#endif /* SRM_SETUP */
+
+#ifdef USE_48_BIT_KSEG
+#define TS_BIAS 0x80000000000UL
+#else
+#define TS_BIAS 0x10000000000UL
+#endif
+
+/*
+ * CChip and DChip registers
+ */
+#define TSUNAMI_CSR_CSC (IDENT_ADDR + TS_BIAS + 0x1A0000000UL)
+#define TSUNAMI_CSR_MTR (IDENT_ADDR + TS_BIAS + 0x1A0000040UL)
+#define TSUNAMI_CSR_MISC (IDENT_ADDR + TS_BIAS + 0x1A0000080UL)
+#define TSUNAMI_CSR_MPD (IDENT_ADDR + TS_BIAS + 0x1A00000C0UL)
+#define TSUNAMI_CSR_AAR0 (IDENT_ADDR + TS_BIAS + 0x1A0000100UL)
+#define TSUNAMI_CSR_AAR1 (IDENT_ADDR + TS_BIAS + 0x1A0000140UL)
+#define TSUNAMI_CSR_AAR2 (IDENT_ADDR + TS_BIAS + 0x1A0000180UL)
+#define TSUNAMI_CSR_AAR3 (IDENT_ADDR + TS_BIAS + 0x1A00001C0UL)
+#define TSUNAMI_CSR_DIM0 (IDENT_ADDR + TS_BIAS + 0x1A0000200UL)
+#define TSUNAMI_CSR_DIM1 (IDENT_ADDR + TS_BIAS + 0x1A0000240UL)
+#define TSUNAMI_CSR_DIR0 (IDENT_ADDR + TS_BIAS + 0x1A0000280UL)
+#define TSUNAMI_CSR_DIR1 (IDENT_ADDR + TS_BIAS + 0x1A00002C0UL)
+
+#define TSUNAMI_CSR_DRIR (IDENT_ADDR + TS_BIAS + 0x1A0000300UL)
+#define TSUNAMI_CSR_PRBEN (IDENT_ADDR + TS_BIAS + 0x1A0000340UL)
+#define TSUNAMI_CSR_IIC (IDENT_ADDR + TS_BIAS + 0x1A0000380UL)
+#define TSUNAMI_CSR_WDR (IDENT_ADDR + TS_BIAS + 0x1A00003C0UL)
+#define TSUNAMI_CSR_MPR0 (IDENT_ADDR + TS_BIAS + 0x1A0000400UL)
+#define TSUNAMI_CSR_MPR1 (IDENT_ADDR + TS_BIAS + 0x1A0000440UL)
+#define TSUNAMI_CSR_MPR2 (IDENT_ADDR + TS_BIAS + 0x1A0000480UL)
+#define TSUNAMI_CSR_MPR3 (IDENT_ADDR + TS_BIAS + 0x1A00004C0UL)
+#define TSUNAMI_CSR_TTR (IDENT_ADDR + TS_BIAS + 0x1A0000580UL)
+#define TSUNAMI_CSR_TDR (IDENT_ADDR + TS_BIAS + 0x1A00005C0UL)
+#define TSUNAMI_CSR_DSC (IDENT_ADDR + TS_BIAS + 0x1B0000800UL)
+#define TSUNAMI_CSR_STR (IDENT_ADDR + TS_BIAS + 0x1B0000840UL)
+#define TSUNAMI_CSR_DREV (IDENT_ADDR + TS_BIAS + 0x1B0000880UL)
+
+/*
+ * PChip registers
+ */
+#define TSUNAMI_PCHIP0_WSBA0 (IDENT_ADDR + TS_BIAS + 0x180000000UL)
+#define TSUNAMI_PCHIP0_WSBA1 (IDENT_ADDR + TS_BIAS + 0x180000040UL)
+#define TSUNAMI_PCHIP0_WSBA2 (IDENT_ADDR + TS_BIAS + 0x180000080UL)
+#define TSUNAMI_PCHIP0_WSBA3 (IDENT_ADDR + TS_BIAS + 0x1800000C0UL)
+
+#define TSUNAMI_PCHIP0_WSM0 (IDENT_ADDR + TS_BIAS + 0x180000100UL)
+#define TSUNAMI_PCHIP0_WSM1 (IDENT_ADDR + TS_BIAS + 0x180000140UL)
+#define TSUNAMI_PCHIP0_WSM2 (IDENT_ADDR + TS_BIAS + 0x180000180UL)
+#define TSUNAMI_PCHIP0_WSM3 (IDENT_ADDR + TS_BIAS + 0x1800001C0UL)
+#define TSUNAMI_PCHIP0_TBA0 (IDENT_ADDR + TS_BIAS + 0x180000200UL)
+#define TSUNAMI_PCHIP0_TBA1 (IDENT_ADDR + TS_BIAS + 0x180000240UL)
+#define TSUNAMI_PCHIP0_TBA2 (IDENT_ADDR + TS_BIAS + 0x180000280UL)
+#define TSUNAMI_PCHIP0_TBA3 (IDENT_ADDR + TS_BIAS + 0x1800002C0UL)
+
+#define TSUNAMI_PCHIP0_PCTL (IDENT_ADDR + TS_BIAS + 0x180000300UL)
+#define TSUNAMI_PCHIP0_PLAT (IDENT_ADDR + TS_BIAS + 0x180000340UL)
+#define TSUNAMI_PCHIP0_RESERVED (IDENT_ADDR + TS_BIAS + 0x180000380UL)
+#define TSUNAMI_PCHIP0_PERROR (IDENT_ADDR + TS_BIAS + 0x1800003c0UL)
+#define TSUNAMI_PCHIP0_PERRMASK (IDENT_ADDR + TS_BIAS + 0x180000400UL)
+#define TSUNAMI_PCHIP0_PERRSET (IDENT_ADDR + TS_BIAS + 0x180000440UL)
+#define TSUNAMI_PCHIP0_TLBIV (IDENT_ADDR + TS_BIAS + 0x180000480UL)
+#define TSUNAMI_PCHIP0_TLBIA (IDENT_ADDR + TS_BIAS + 0x1800004C0UL)
+#define TSUNAMI_PCHIP0_PMONCTL (IDENT_ADDR + TS_BIAS + 0x180000500UL)
+#define TSUNAMI_PCHIP0_PMONCNT (IDENT_ADDR + TS_BIAS + 0x180000540UL)
+
+#define TSUNAMI_PCHIP1_WSBA0 (IDENT_ADDR + TS_BIAS + 0x380000000UL)
+#define TSUNAMI_PCHIP1_WSBA1 (IDENT_ADDR + TS_BIAS + 0x380000040UL)
+#define TSUNAMI_PCHIP1_WSBA2 (IDENT_ADDR + TS_BIAS + 0x380000080UL)
+#define TSUNAMI_PCHIP1_WSBA3 (IDENT_ADDR + TS_BIAS + 0x3800000C0UL)
+#define TSUNAMI_PCHIP1_WSM0 (IDENT_ADDR + TS_BIAS + 0x380000100UL)
+#define TSUNAMI_PCHIP1_WSM1 (IDENT_ADDR + TS_BIAS + 0x380000140UL)
+#define TSUNAMI_PCHIP1_WSM2 (IDENT_ADDR + TS_BIAS + 0x380000180UL)
+#define TSUNAMI_PCHIP1_WSM3 (IDENT_ADDR + TS_BIAS + 0x3800001C0UL)
+
+#define TSUNAMI_PCHIP1_TBA0 (IDENT_ADDR + TS_BIAS + 0x380000200UL)
+#define TSUNAMI_PCHIP1_TBA1 (IDENT_ADDR + TS_BIAS + 0x380000240UL)
+#define TSUNAMI_PCHIP1_TBA2 (IDENT_ADDR + TS_BIAS + 0x380000280UL)
+#define TSUNAMI_PCHIP1_TBA3 (IDENT_ADDR + TS_BIAS + 0x3800002C0UL)
+
+#define TSUNAMI_PCHIP1_PCTL (IDENT_ADDR + TS_BIAS + 0x380000300UL)
+#define TSUNAMI_PCHIP1_PLAT (IDENT_ADDR + TS_BIAS + 0x380000340UL)
+#define TSUNAMI_PCHIP1_RESERVED (IDENT_ADDR + TS_BIAS + 0x380000380UL)
+#define TSUNAMI_PCHIP1_PERROR (IDENT_ADDR + TS_BIAS + 0x3800003c0UL)
+#define TSUNAMI_PCHIP1_PERRMASK (IDENT_ADDR + TS_BIAS + 0x380000400UL)
+#define TSUNAMI_PCHIP1_PERRSET (IDENT_ADDR + TS_BIAS + 0x380000440UL)
+#define TSUNAMI_PCHIP1_TLBIV (IDENT_ADDR + TS_BIAS + 0x380000480UL)
+#define TSUNAMI_PCHIP1_TLBIA (IDENT_ADDR + TS_BIAS + 0x3800004C0UL)
+#define TSUNAMI_PCHIP1_PMONCTL (IDENT_ADDR + TS_BIAS + 0x380000500UL)
+#define TSUNAMI_PCHIP1_PMONCNT (IDENT_ADDR + TS_BIAS + 0x380000540UL)
+
+/* */
+/* TSUNAMI Pchip Error register. */
+/* */
+#define perror_m_lost 0x1
+#define perror_m_serr 0x2
+#define perror_m_perr 0x4
+#define perror_m_dcrto 0x8
+#define perror_m_sge 0x10
+#define perror_m_ape 0x20
+#define perror_m_ta 0x40
+#define perror_m_rdpe 0x80
+#define perror_m_nds 0x100
+#define perror_m_rto 0x200
+#define perror_m_uecc 0x400
+#define perror_m_cre 0x800
+#define perror_m_addrl 0xFFFFFFFF0000UL
+#define perror_m_addrh 0x7000000000000UL
+#define perror_m_cmd 0xF0000000000000UL
+#define perror_m_syn 0xFF00000000000000UL
+union TPchipPERROR {
+ struct {
+ unsigned int perror_v_lost : 1;
+ unsigned perror_v_serr : 1;
+ unsigned perror_v_perr : 1;
+ unsigned perror_v_dcrto : 1;
+ unsigned perror_v_sge : 1;
+ unsigned perror_v_ape : 1;
+ unsigned perror_v_ta : 1;
+ unsigned perror_v_rdpe : 1;
+ unsigned perror_v_nds : 1;
+ unsigned perror_v_rto : 1;
+ unsigned perror_v_uecc : 1;
+ unsigned perror_v_cre : 1;
+ unsigned perror_v_rsvd1 : 4;
+ unsigned perror_v_addrl : 32;
+ unsigned perror_v_addrh : 3;
+ unsigned perror_v_rsvd2 : 1;
+ unsigned perror_v_cmd : 4;
+ unsigned perror_v_syn : 8;
+ } perror_r_bits;
+ int perror_q_whole [2];
+ } ;
+/* */
+/* TSUNAMI Pchip Window Space Base Address register. */
+/* */
+#define wsba_m_ena 0x1
+#define wsba_m_sg 0x2
+#define wsba_m_ptp 0x4
+#define wsba_m_addr 0xFFF00000
+#define wmask_k_sz1gb 0x3FF00000
+union TPchipWSBA {
+ struct {
+ unsigned wsba_v_ena : 1;
+ unsigned wsba_v_sg : 1;
+ unsigned wsba_v_ptp : 1;
+ unsigned wsba_v_rsvd1 : 17;
+ unsigned wsba_v_addr : 12;
+ unsigned wsba_v_rsvd2 : 32;
+ } wsba_r_bits;
+ int wsba_q_whole [2];
+ } ;
+/* */
+/* TSUNAMI Pchip Control Register */
+/* */
+#define pctl_m_fdsc 0x1
+#define pctl_m_fbtb 0x2
+#define pctl_m_thdis 0x4
+#define pctl_m_chaindis 0x8
+#define pctl_m_tgtlat 0x10
+#define pctl_m_hole 0x20
+#define pctl_m_mwin 0x40
+#define pctl_m_arbena 0x80
+#define pctl_m_prigrp 0x7F00
+#define pctl_m_ppri 0x8000
+#define pctl_m_rsvd1 0x30000
+#define pctl_m_eccen 0x40000
+#define pctl_m_padm 0x80000
+#define pctl_m_cdqmax 0xF00000
+#define pctl_m_rev 0xFF000000
+#define pctl_m_crqmax 0xF00000000UL
+#define pctl_m_ptpmax 0xF000000000UL
+#define pctl_m_pclkx 0x30000000000UL
+#define pctl_m_fdsdis 0x40000000000UL
+#define pctl_m_fdwdis 0x80000000000UL
+#define pctl_m_ptevrfy 0x100000000000UL
+#define pctl_m_rpp 0x200000000000UL
+#define pctl_m_pid 0xC00000000000UL
+#define pctl_m_rsvd2 0xFFFF000000000000UL
+
+union TPchipPCTL {
+ struct {
+ unsigned pctl_v_fdsc : 1;
+ unsigned pctl_v_fbtb : 1;
+ unsigned pctl_v_thdis : 1;
+ unsigned pctl_v_chaindis : 1;
+ unsigned pctl_v_tgtlat : 1;
+ unsigned pctl_v_hole : 1;
+ unsigned pctl_v_mwin : 1;
+ unsigned pctl_v_arbena : 1;
+ unsigned pctl_v_prigrp : 7;
+ unsigned pctl_v_ppri : 1;
+ unsigned pctl_v_rsvd1 : 2;
+ unsigned pctl_v_eccen : 1;
+ unsigned pctl_v_padm : 1;
+ unsigned pctl_v_cdqmax : 4;
+ unsigned pctl_v_rev : 8;
+ unsigned pctl_v_crqmax : 4;
+ unsigned pctl_v_ptpmax : 4;
+ unsigned pctl_v_pclkx : 2;
+ unsigned pctl_v_fdsdis : 1;
+ unsigned pctl_v_fdwdis : 1;
+ unsigned pctl_v_ptevrfy : 1;
+ unsigned pctl_v_rpp : 1;
+ unsigned pctl_v_pid : 2;
+ unsigned pctl_v_rsvd2 : 16;
+ } pctl_r_bits;
+ int pctl_q_whole [2];
+} ;
+/* */
+/* TSUNAMI Pchip Error Mask Register. */
+/* */
+#define perrmask_m_lost 0x1
+#define perrmask_m_serr 0x2
+#define perrmask_m_perr 0x4
+#define perrmask_m_dcrto 0x8
+#define perrmask_m_sge 0x10
+#define perrmask_m_ape 0x20
+#define perrmask_m_ta 0x40
+#define perrmask_m_rdpe 0x80
+#define perrmask_m_nds 0x100
+#define perrmask_m_rto 0x200
+#define perrmask_m_uecc 0x400
+#define perrmask_m_cre 0x800
+#define perrmask_m_rsvd 0xFFFFFFFFFFFFF000UL
+union TPchipPERRMASK {
+ struct {
+ unsigned int perrmask_v_lost : 1;
+ unsigned perrmask_v_serr : 1;
+ unsigned perrmask_v_perr : 1;
+ unsigned perrmask_v_dcrto : 1;
+ unsigned perrmask_v_sge : 1;
+ unsigned perrmask_v_ape : 1;
+ unsigned perrmask_v_ta : 1;
+ unsigned perrmask_v_rdpe : 1;
+ unsigned perrmask_v_nds : 1;
+ unsigned perrmask_v_rto : 1;
+ unsigned perrmask_v_uecc : 1;
+ unsigned perrmask_v_cre : 1;
+ unsigned perrmask_v_rsvd1 : 20;
+ unsigned perrmask_v_rsvd2 : 32;
+ } perrmask_r_bits;
+ int perrmask_q_whole [2];
+ } ;
+
+/*
+ * Memory spaces:
+ */
+#define TSUNAMI_PCI0_MEM (IDENT_ADDR + TS_BIAS + 0x000000000UL)
+#define TSUNAMI_PCI0_IACK_SC (IDENT_ADDR + TS_BIAS + 0x1F8000000UL)
+#define TSUNAMI_PCI0_IO (IDENT_ADDR + TS_BIAS + 0x1FC000000UL)
+#define TSUNAMI_PCI0_CONF (IDENT_ADDR + TS_BIAS + 0x1FE000000UL)
+
+#define TSUNAMI_PCI1_MEM (IDENT_ADDR + TS_BIAS + 0x200000000UL)
+#define TSUNAMI_PCI1_IACK_SC (IDENT_ADDR + TS_BIAS + 0x3F8000000UL)
+#define TSUNAMI_PCI1_IO (IDENT_ADDR + TS_BIAS + 0x3FC000000UL)
+#define TSUNAMI_PCI1_CONF (IDENT_ADDR + TS_BIAS + 0x3FE000000UL)
+
+#define HAE_ADDRESS 0
+
+#ifdef __KERNEL__
+
+/*
+ * Translate physical memory address as seen on (PCI) bus into
+ * a kernel virtual address and vv.
+ */
+extern inline unsigned long virt_to_bus(void * address)
+{
+ return virt_to_phys(address) + TSUNAMI_DMA_WIN_BASE;
+}
+
+extern inline void * bus_to_virt(unsigned long address)
+{
+ return phys_to_virt(address - TSUNAMI_DMA_WIN_BASE);
+}
+
+/*
+ * I/O functions:
+ *
+ * TSUNAMI, the 21??? PCI/memory support chipset for the EV6 (21264)
+ * can only use linear accesses to get at PCI memory and I/O spaces.
+ */
+
+/* HACK ALERT! HACK ALERT! */
+/* HACK ALERT! HACK ALERT! */
+
+/* only using PCI bus 0 for now in all routines */
+
+/* HACK ALERT! HACK ALERT! */
+/* HACK ALERT! HACK ALERT! */
+
+
+#define vuip volatile unsigned int *
+
+extern inline unsigned int __inb(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldbu %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned char *)(addr+TSUNAMI_PCI0_IO)));
+
+ return result;
+}
+
+extern inline void __outb(unsigned char b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stb %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned char *)(addr+TSUNAMI_PCI0_IO)), "r" (b));
+}
+
+extern inline unsigned int __inw(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldwu %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned short *)(addr+TSUNAMI_PCI0_IO)));
+
+ return result;
+}
+
+extern inline void __outw(unsigned short b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stw %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned short *)(addr+TSUNAMI_PCI0_IO)), "r" (b));
+}
+
+extern inline unsigned int __inl(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldl %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned int *)(addr+TSUNAMI_PCI0_IO)));
+
+ return result;
+}
+
+extern inline void __outl(unsigned int b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stl %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned int *)(addr+TSUNAMI_PCI0_IO)), "r" (b));
+}
+
+/*
+ * Memory functions. all accesses are done through linear space.
+ */
+
+extern inline unsigned long __readb(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldbu %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned char *)(addr+TSUNAMI_PCI0_MEM)));
+
+ return result;
+}
+
+extern inline unsigned long __readw(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldwu %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned short *)(addr+TSUNAMI_PCI0_MEM)));
+
+ return result;
+}
+
+extern inline unsigned long __readl(unsigned long addr)
+{
+ register unsigned long result;
+
+ __asm__ __volatile__ (
+ "ldl %0,%1"
+ : "=r" (result)
+ : "m" (*(unsigned int *)(addr+TSUNAMI_PCI0_MEM)));
+
+ return result;
+}
+
+extern inline void __writeb(unsigned char b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stb %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned char *)(addr+TSUNAMI_PCI0_MEM)), "r" (b));
+}
+
+extern inline void __writew(unsigned short b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stw %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned short *)(addr+TSUNAMI_PCI0_MEM)), "r" (b));
+}
+
+extern inline void __writel(unsigned int b, unsigned long addr)
+{
+ __asm__ __volatile__ (
+ "stl %1,%0\n\t"
+ "mb"
+ : : "m" (*(unsigned int *)(addr+TSUNAMI_PCI0_MEM)), "r" (b));
+}
+
+#define inb(port) __inb((port))
+#define inw(port) __inw((port))
+#define inl(port) __inl((port))
+
+#define outb(v, port) __outb((v),(port))
+#define outw(v, port) __outw((v),(port))
+#define outl(v, port) __outl((v),(port))
+
+#define readb(a) __readb((unsigned long)(a))
+#define readw(a) __readw((unsigned long)(a))
+#define readl(a) __readl((unsigned long)(a))
+
+#define writeb(v,a) __writeb((v),(unsigned long)(a))
+#define writew(v,a) __writew((v),(unsigned long)(a))
+#define writel(v,a) __writel((v),(unsigned long)(a))
+
+#undef vuip
+
+extern unsigned long tsunami_init (unsigned long mem_start,
+ unsigned long mem_end);
+
+#endif /* __KERNEL__ */
+
+/*
+ * Data structure for handling TSUNAMI machine checks:
+ */
+struct el_TSUNAMI_sysdata_mcheck {
+};
+
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_ADDR(x) (0x80 | (x))
+#define RTC_ALWAYS_BCD 0
+
+#endif /* __ALPHA_TSUNAMI__H__ */
diff --git a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h
index 64361c234..e5c3636f7 100644
--- a/include/asm-alpha/unistd.h
+++ b/include/asm-alpha/unistd.h
@@ -421,12 +421,6 @@ static inline int read(int fd, char * buf, int nr)
return sys_read(fd, buf, nr);
}
-extern int sys_fork(void);
-static inline int fork(void)
-{
- return sys_fork();
-}
-
extern int __kernel_execve(char *, char **, char **, struct pt_regs *);
static inline int execve(char * file, char ** argvp, char ** envp)
{