Diffstat (limited to 'include/asm-alpha')
-rw-r--r--  include/asm-alpha/core_lca.h (renamed from include/asm-alpha/lca.h)          284
-rw-r--r--  include/asm-alpha/core_mcpcia.h (renamed from include/asm-alpha/mcpcia.h)    562
-rw-r--r--  include/asm-alpha/core_pyxis.h (renamed from include/asm-alpha/pyxis.h)      630
-rw-r--r--  include/asm-alpha/core_t2.h (renamed from include/asm-alpha/t2.h)            799
-rw-r--r--  include/asm-alpha/core_tsunami.h (renamed from include/asm-alpha/tsunami.h)  217
-rw-r--r--  include/asm-alpha/delay.h          9
-rw-r--r--  include/asm-alpha/dma.h           22
-rw-r--r--  include/asm-alpha/elf.h            2
-rw-r--r--  include/asm-alpha/floppy.h        25
-rw-r--r--  include/asm-alpha/hardirq.h        1
-rw-r--r--  include/asm-alpha/hwrpb.h         11
-rw-r--r--  include/asm-alpha/init.h           6
-rw-r--r--  include/asm-alpha/io.h           182
-rw-r--r--  include/asm-alpha/irq.h           51
-rw-r--r--  include/asm-alpha/jensen.h       220
-rw-r--r--  include/asm-alpha/linux_logo.h    47
-rw-r--r--  include/asm-alpha/machvec.h      118
-rw-r--r--  include/asm-alpha/md.h            13
-rw-r--r--  include/asm-alpha/mmu_context.h  118
-rw-r--r--  include/asm-alpha/page.h          87
-rw-r--r--  include/asm-alpha/pci.h           48
-rw-r--r--  include/asm-alpha/pgtable.h      343
-rw-r--r--  include/asm-alpha/posix_types.h    4
-rw-r--r--  include/asm-alpha/processor.h     37
-rw-r--r--  include/asm-alpha/serial.h         2
-rw-r--r--  include/asm-alpha/siginfo.h        1
-rw-r--r--  include/asm-alpha/signal.h        21
-rw-r--r--  include/asm-alpha/smp.h            8
-rw-r--r--  include/asm-alpha/smp_lock.h     120
-rw-r--r--  include/asm-alpha/smplock.h       49
-rw-r--r--  include/asm-alpha/string.h        28
-rw-r--r--  include/asm-alpha/system.h         5
-rw-r--r--  include/asm-alpha/timex.h         11
-rw-r--r--  include/asm-alpha/uaccess.h        4
-rw-r--r--  include/asm-alpha/unaligned.h     39
-rw-r--r--  include/asm-alpha/unistd.h       172
-rw-r--r--  include/asm-alpha/vga.h           56
37 files changed, 2564 insertions, 1788 deletions
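
The common thread in the diffs below is the removal of the old CONFIG_ALPHA_SRM_SETUP
global variables in favor of the alpha_mv machine vector: on generic kernels each
chipset header now resolves its DMA window (and, for MCPCIA and PYXIS, the SRM
sparse-memory region bases) through one runtime structure, while platform-specific
kernels still collapse the same macros to compile-time constants. A minimal sketch
of that indirection follows; the real declaration lives in include/asm-alpha/machvec.h
(changed above), and only the fields actually referenced in these hunks are shown --
everything else about the structure is an assumption here:

    /* Sketch only -- not the kernel's full declaration. */
    struct alpha_machine_vector {
            unsigned long dma_win_base;     /* PCI DMA window, chosen at boot */
            unsigned long dma_win_size;
            unsigned long sm_base_r1;       /* SRM sparse-memory region bases */
            unsigned long sm_base_r2;
            unsigned long sm_base_r3;
            /* ... plus per-platform I/O hooks, not shown ... */
    };

    extern struct alpha_machine_vector alpha_mv;

    /* Generic build:           LCA_DMA_WIN_BASE -> alpha_mv.dma_win_base   */
    /* Platform-specific build: LCA_DMA_WIN_BASE -> LCA_DMA_WIN_BASE_DEFAULT */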
diff --git a/include/asm-alpha/lca.h b/include/asm-alpha/core_lca.h
index f1f8a1ad7..bce449fa4 100644
--- a/include/asm-alpha/lca.h
+++ b/include/asm-alpha/core_lca.h
@@ -1,6 +1,10 @@
#ifndef __ALPHA_LCA__H__
#define __ALPHA_LCA__H__
+#include <linux/config.h>
+#include <asm/system.h>
+#include <asm/compiler.h>
+
/*
* Low Cost Alpha (LCA) definitions (these apply to 21066 and 21068,
* for example).
@@ -52,21 +56,16 @@
* ugh).
*/
-#include <linux/config.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_ALPHA_SRM_SETUP
-/* if we are using the SRM PCI setup, we'll need to use variables instead */
-#define LCA_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
-#define LCA_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
-
-extern unsigned int LCA_DMA_WIN_BASE;
-extern unsigned int LCA_DMA_WIN_SIZE;
+#define LCA_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
+#define LCA_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
-#else /* SRM_SETUP */
-#define LCA_DMA_WIN_BASE (1024*1024*1024)
-#define LCA_DMA_WIN_SIZE (1024*1024*1024)
-#endif /* SRM_SETUP */
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM_SETUP)
+#define LCA_DMA_WIN_BASE alpha_mv.dma_win_base
+#define LCA_DMA_WIN_SIZE alpha_mv.dma_win_size
+#else
+#define LCA_DMA_WIN_BASE LCA_DMA_WIN_BASE_DEFAULT
+#define LCA_DMA_WIN_SIZE LCA_DMA_WIN_SIZE_DEFAULT
+#endif
/*
* Memory Controller registers:
@@ -139,7 +138,7 @@ extern unsigned int LCA_DMA_WIN_SIZE;
#define LCA_IOC_STAT0_P_NBR_SHIFT 13
#define LCA_IOC_STAT0_P_NBR_MASK 0x7ffff
-#define HAE_ADDRESS LCA_IOC_HAE
+#define LCA_HAE_ADDRESS LCA_IOC_HAE
/* LCA PMR Power Management register defines */
#define LCA_PMR_ADDR (IDENT_ADDR + 0x120000098UL)
@@ -147,43 +146,87 @@ extern unsigned int LCA_DMA_WIN_SIZE;
#define LCA_PMR_ODIV 0x38 /* Override clock divisor */
#define LCA_PMR_INTO 0x40 /* Interrupt override */
#define LCA_PMR_DMAO 0x80 /* DMA override */
-#define LCA_PMR_OCCEB 0xffff0000L /* Override cycle counter - even
- bits */
-#define LCA_PMR_OCCOB 0xffff000000000000L /* Override cycle counter - even
- bits */
+#define LCA_PMR_OCCEB 0xffff0000L /* Override cycle counter - even bits */
+#define LCA_PMR_OCCOB 0xffff000000000000L /* Override cycle counter - odd bits */
#define LCA_PMR_PRIMARY_MASK 0xfffffffffffffff8
+
/* LCA PMR Macros */
-#define READ_PMR (*(volatile unsigned long *)LCA_PMR_ADDR)
-#define WRITE_PMR(d) (*((volatile unsigned long *)LCA_PMR_ADDR) = (d))
+#define LCA_READ_PMR (*(volatile unsigned long *)LCA_PMR_ADDR)
+#define LCA_WRITE_PMR(d) (*((volatile unsigned long *)LCA_PMR_ADDR) = (d))
-#define GET_PRIMARY(r) ((r) & LCA_PMR_PDIV)
-#define GET_OVERRIDE(r) (((r) >> 3) & LCA_PMR_PDIV)
-#define SET_PRIMARY_CLOCK(r, c) ((r) = (((r) & LCA_PMR_PRIMARY_MASK) | (c)))
+#define LCA_GET_PRIMARY(r) ((r) & LCA_PMR_PDIV)
+#define LCA_GET_OVERRIDE(r) (((r) >> 3) & LCA_PMR_PDIV)
+#define LCA_SET_PRIMARY_CLOCK(r, c) ((r) = (((r) & LCA_PMR_PRIMARY_MASK)|(c)))
/* LCA PMR Divisor values */
-#define DIV_1 0x0
-#define DIV_1_5 0x1
-#define DIV_2 0x2
-#define DIV_4 0x3
-#define DIV_8 0x4
-#define DIV_16 0x5
-#define DIV_MIN DIV_1
-#define DIV_MAX DIV_16
+#define LCA_PMR_DIV_1 0x0
+#define LCA_PMR_DIV_1_5 0x1
+#define LCA_PMR_DIV_2 0x2
+#define LCA_PMR_DIV_4 0x3
+#define LCA_PMR_DIV_8 0x4
+#define LCA_PMR_DIV_16 0x5
+#define LCA_PMR_DIV_MIN LCA_PMR_DIV_1
+#define LCA_PMR_DIV_MAX LCA_PMR_DIV_16
+
+
+/*
+ * Data structure for handling LCA machine checks. Correctable errors
+ * result in a short logout frame, uncorrectable ones in a long one.
+ */
+struct el_lca_mcheck_short {
+ struct el_common h; /* common logout header */
+ unsigned long esr; /* error-status register */
+ unsigned long ear; /* error-address register */
+ unsigned long dc_stat; /* dcache status register */
+ unsigned long ioc_stat0; /* I/O controller status register 0 */
+ unsigned long ioc_stat1; /* I/O controller status register 1 */
+};
+
+struct el_lca_mcheck_long {
+ struct el_common h; /* common logout header */
+ unsigned long pt[31]; /* PAL temps */
+ unsigned long exc_addr; /* exception address */
+ unsigned long pad1[3];
+ unsigned long pal_base; /* PALcode base address */
+ unsigned long hier; /* hw interrupt enable */
+ unsigned long hirr; /* hw interrupt request */
+ unsigned long mm_csr; /* MMU control & status */
+ unsigned long dc_stat; /* data cache status */
+ unsigned long dc_addr; /* data cache addr register */
+ unsigned long abox_ctl; /* address box control register */
+ unsigned long esr; /* error status register */
+ unsigned long ear; /* error address register */
+ unsigned long car; /* cache control register */
+ unsigned long ioc_stat0; /* I/O controller status register 0 */
+ unsigned long ioc_stat1; /* I/O controller status register 1 */
+ unsigned long va; /* virtual address register */
+};
+union el_lca {
+ struct el_common * c;
+ struct el_lca_mcheck_long * l;
+ struct el_lca_mcheck_short * s;
+};
#ifdef __KERNEL__
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
/*
* Translate physical memory address as seen on (PCI) bus into
* a kernel virtual address and vv.
*/
-extern inline unsigned long virt_to_bus(void * address)
+
+__EXTERN_INLINE unsigned long lca_virt_to_bus(void * address)
{
return virt_to_phys(address) + LCA_DMA_WIN_BASE;
}
-extern inline void * bus_to_virt(unsigned long address)
+__EXTERN_INLINE void * lca_bus_to_virt(unsigned long address)
{
/*
* This check is a sanity check but also ensures that bus
@@ -207,46 +250,46 @@ extern inline void * bus_to_virt(unsigned long address)
* data to/from the right byte-lanes.
*/
+#define vip volatile int *
#define vuip volatile unsigned int *
+#define vulp volatile unsigned long *
-extern inline unsigned int __inb(unsigned long addr)
+__EXTERN_INLINE unsigned int lca_inb(unsigned long addr)
{
- long result = *(vuip) ((addr << 5) + LCA_IO + 0x00);
- result >>= (addr & 3) * 8;
- return 0xffUL & result;
+ long result = *(vip) ((addr << 5) + LCA_IO + 0x00);
+ return __kernel_extbl(result, addr & 3);
}
-extern inline void __outb(unsigned char b, unsigned long addr)
+__EXTERN_INLINE void lca_outb(unsigned char b, unsigned long addr)
{
unsigned int w;
- asm ("insbl %2,%1,%0" : "r="(w) : "ri"(addr & 0x3), "r"(b));
+ w = __kernel_insbl(b, addr & 3);
*(vuip) ((addr << 5) + LCA_IO + 0x00) = w;
mb();
}
-extern inline unsigned int __inw(unsigned long addr)
+__EXTERN_INLINE unsigned int lca_inw(unsigned long addr)
{
- long result = *(vuip) ((addr << 5) + LCA_IO + 0x08);
- result >>= (addr & 3) * 8;
- return 0xffffUL & result;
+ long result = *(vip) ((addr << 5) + LCA_IO + 0x08);
+ return __kernel_extwl(result, addr & 3);
}
-extern inline void __outw(unsigned short b, unsigned long addr)
+__EXTERN_INLINE void lca_outw(unsigned short b, unsigned long addr)
{
unsigned int w;
- asm ("inswl %2,%1,%0" : "r="(w) : "ri"(addr & 0x3), "r"(b));
+ w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + LCA_IO + 0x08) = w;
mb();
}
-extern inline unsigned int __inl(unsigned long addr)
+__EXTERN_INLINE unsigned int lca_inl(unsigned long addr)
{
return *(vuip) ((addr << 5) + LCA_IO + 0x18);
}
-extern inline void __outl(unsigned int b, unsigned long addr)
+__EXTERN_INLINE void lca_outl(unsigned int b, unsigned long addr)
{
*(vuip) ((addr << 5) + LCA_IO + 0x18) = b;
mb();
@@ -257,46 +300,44 @@ extern inline void __outl(unsigned int b, unsigned long addr)
* Memory functions. 64-bit and 32-bit accesses are done through
* dense memory space, everything else through sparse space.
*/
-extern inline unsigned long __readb(unsigned long addr)
+
+__EXTERN_INLINE unsigned long lca_readb(unsigned long addr)
{
- unsigned long result, shift, msb;
+ unsigned long result, msb;
- shift = (addr & 0x3) * 8;
if (addr >= (1UL << 24)) {
msb = addr & 0xf8000000;
addr -= msb;
- if (msb != hae.cache) {
- set_hae(msb);
- }
+ set_hae(msb);
}
- result = *(vuip) ((addr << 5) + LCA_SPARSE_MEM + 0x00);
- result >>= shift;
- return 0xffUL & result;
+ result = *(vip) ((addr << 5) + LCA_SPARSE_MEM + 0x00);
+ return __kernel_extbl(result, addr & 3);
}
-extern inline unsigned long __readw(unsigned long addr)
+__EXTERN_INLINE unsigned long lca_readw(unsigned long addr)
{
- unsigned long result, shift, msb;
+ unsigned long result, msb;
- shift = (addr & 0x3) * 8;
if (addr >= (1UL << 24)) {
msb = addr & 0xf8000000;
addr -= msb;
- if (msb != hae.cache) {
- set_hae(msb);
- }
+ set_hae(msb);
}
- result = *(vuip) ((addr << 5) + LCA_SPARSE_MEM + 0x08);
- result >>= shift;
- return 0xffffUL & result;
+ result = *(vip) ((addr << 5) + LCA_SPARSE_MEM + 0x08);
+ return __kernel_extwl(result, addr & 3);
}
-extern inline unsigned long __readl(unsigned long addr)
+__EXTERN_INLINE unsigned long lca_readl(unsigned long addr)
{
return *(vuip) (addr + LCA_DENSE_MEM);
}
-extern inline void __writeb(unsigned char b, unsigned long addr)
+__EXTERN_INLINE unsigned long lca_readq(unsigned long addr)
+{
+ return *(vulp) (addr + LCA_DENSE_MEM);
+}
+
+__EXTERN_INLINE void lca_writeb(unsigned char b, unsigned long addr)
{
unsigned long msb;
unsigned int w;
@@ -304,15 +345,13 @@ extern inline void __writeb(unsigned char b, unsigned long addr)
if (addr >= (1UL << 24)) {
msb = addr & 0xf8000000;
addr -= msb;
- if (msb != hae.cache) {
- set_hae(msb);
- }
+ set_hae(msb);
}
- asm ("insbl %2,%1,%0" : "r="(w) : "ri"(addr & 0x3), "r"(b));
+ w = __kernel_insbl(b, addr & 3);
*(vuip) ((addr << 5) + LCA_SPARSE_MEM + 0x00) = w;
}
-extern inline void __writew(unsigned short b, unsigned long addr)
+__EXTERN_INLINE void lca_writew(unsigned short b, unsigned long addr)
{
unsigned long msb;
unsigned int w;
@@ -320,23 +359,52 @@ extern inline void __writew(unsigned short b, unsigned long addr)
if (addr >= (1UL << 24)) {
msb = addr & 0xf8000000;
addr -= msb;
- if (msb != hae.cache) {
- set_hae(msb);
- }
+ set_hae(msb);
}
- asm ("inswl %2,%1,%0" : "r="(w) : "ri"(addr & 0x3), "r"(b));
+ w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + LCA_SPARSE_MEM + 0x08) = w;
}
-extern inline void __writel(unsigned int b, unsigned long addr)
+__EXTERN_INLINE void lca_writel(unsigned int b, unsigned long addr)
{
*(vuip) (addr + LCA_DENSE_MEM) = b;
}
-/*
- * Most of the above have so much overhead that it probably doesn't
- * make sense to have them inlined (better icache behavior).
- */
+__EXTERN_INLINE void lca_writeq(unsigned long b, unsigned long addr)
+{
+ *(vulp) (addr + LCA_DENSE_MEM) = b;
+}
+
+/* Find the DENSE memory area for a given bus address. */
+
+__EXTERN_INLINE unsigned long lca_dense_mem(unsigned long addr)
+{
+ return LCA_DENSE_MEM;
+}
+
+#undef vip
+#undef vuip
+#undef vulp
+
+#ifdef __WANT_IO_DEF
+
+#define virt_to_bus lca_virt_to_bus
+#define bus_to_virt lca_bus_to_virt
+#define __inb lca_inb
+#define __inw lca_inw
+#define __inl lca_inl
+#define __outb lca_outb
+#define __outw lca_outw
+#define __outl lca_outl
+#define __readb lca_readb
+#define __readw lca_readw
+#define __writeb lca_writeb
+#define __writew lca_writew
+#define __readl lca_readl
+#define __readq lca_readq
+#define __writel lca_writel
+#define __writeq lca_writeq
+#define dense_mem lca_dense_mem
#define inb(port) \
(__builtin_constant_p((port))?__inb(port):_inb(port))
@@ -345,55 +413,17 @@ extern inline void __writel(unsigned int b, unsigned long addr)
(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
#define readl(a) __readl((unsigned long)(a))
+#define readq(a) __readq((unsigned long)(a))
#define writel(v,a) __writel((v),(unsigned long)(a))
+#define writeq(v,a) __writeq((v),(unsigned long)(a))
-#undef vuip
+#endif /* __WANT_IO_DEF */
-extern unsigned long lca_init (unsigned long mem_start, unsigned long mem_end);
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
#endif /* __KERNEL__ */
-/*
- * Data structure for handling LCA machine checks. Correctable errors
- * result in a short logout frame, uncorrectable ones in a long one.
- */
-struct el_lca_mcheck_short {
- struct el_common h; /* common logout header */
- unsigned long esr; /* error-status register */
- unsigned long ear; /* error-address register */
- unsigned long dc_stat; /* dcache status register */
- unsigned long ioc_stat0; /* I/O controller status register 0 */
- unsigned long ioc_stat1; /* I/O controller status register 1 */
-};
-
-struct el_lca_mcheck_long {
- struct el_common h; /* common logout header */
- unsigned long pt[31]; /* PAL temps */
- unsigned long exc_addr; /* exception address */
- unsigned long pad1[3];
- unsigned long pal_base; /* PALcode base address */
- unsigned long hier; /* hw interrupt enable */
- unsigned long hirr; /* hw interrupt request */
- unsigned long mm_csr; /* MMU control & status */
- unsigned long dc_stat; /* data cache status */
- unsigned long dc_addr; /* data cache addr register */
- unsigned long abox_ctl; /* address box control register */
- unsigned long esr; /* error status register */
- unsigned long ear; /* error address register */
- unsigned long car; /* cache control register */
- unsigned long ioc_stat0; /* I/O controller status register 0 */
- unsigned long ioc_stat1; /* I/O controller status register 1 */
- unsigned long va; /* virtual address register */
-};
-
-union el_lca {
- struct el_common * c;
- struct el_lca_mcheck_long * l;
- struct el_lca_mcheck_short * s;
-};
-
-#define RTC_PORT(x) (0x70 + (x))
-#define RTC_ADDR(x) (0x80 | (x))
-#define RTC_ALWAYS_BCD 0
-
#endif /* __ALPHA_LCA__H__ */
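
The sparse-space accessors above all share one piece of arithmetic: the target
address is shifted left by 5 into the byte-enable field, a transfer-length offset
is added (0x00 for byte, 0x08 for word, 0x18 for longword), and the addressed byte
lane is extracted from the longword that comes back. The new code expresses the
extraction with __kernel_extbl instead of the old shift-and-mask; a sketch of the
equivalent open-coded form (illustration only, assuming the LCA_IO base from this
header):

    /* Each byte-wide target occupies a 32-byte sparse slot; the low two
     * address bits pick the byte lane out of the returned longword.
     */
    unsigned int sparse_inb(unsigned long port)
    {
            volatile int *slot = (volatile int *)((port << 5) + LCA_IO + 0x00);
            int w = *slot;                          /* one sparse-space load */
            return (w >> ((port & 3) * 8)) & 0xff;  /* == __kernel_extbl(w, port & 3) */
    }

The accompanying switch of the load type from vuip to vip is plausibly so the
compiler can use a bare ldl without a zero-extending zapnot: extbl only looks at
the addressed byte, so the sign extension is harmless.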
diff --git a/include/asm-alpha/mcpcia.h b/include/asm-alpha/core_mcpcia.h
index fd4d48839..8b0e03a89 100644
--- a/include/asm-alpha/mcpcia.h
+++ b/include/asm-alpha/core_mcpcia.h
@@ -4,6 +4,7 @@
#include <linux/config.h>
#include <linux/types.h>
#include <linux/pci.h>
+#include <asm/compiler.h>
/*
* MCPCIA is the internal name for a core logic chipset which provides
@@ -19,7 +20,7 @@
/*------------------------------------------------------------------------**
** **
-** I/O procedures **
+** I/O procedures **
** **
** inport[b|w|t|l], outport[b|w|t|l] 8:16:24:32 IO xfers **
** inportbxt: 8 bits only **
@@ -70,27 +71,23 @@
*
*/
-#define BYTE_ENABLE_SHIFT 5
-#define TRANSFER_LENGTH_SHIFT 3
+#define MCPCIA_MEM_R1_MASK 0x1fffffff /* SPARSE Mem region 1 mask is 29 bits */
+#define MCPCIA_MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
+#define MCPCIA_MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
-#define MEM_R1_MASK 0x1fffffff /* SPARSE Mem region 1 mask is 29 bits */
-#define MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
-#define MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
-
-#ifdef CONFIG_ALPHA_SRM_SETUP
-/* if we are using the SRM PCI setup, we'll need to use variables instead */
#define MCPCIA_DMA_WIN_BASE_DEFAULT (2*1024*1024*1024U)
#define MCPCIA_DMA_WIN_SIZE_DEFAULT (2*1024*1024*1024U)
-extern unsigned int MCPCIA_DMA_WIN_BASE;
-extern unsigned int MCPCIA_DMA_WIN_SIZE;
-
-#else /* SRM_SETUP */
-#define MCPCIA_DMA_WIN_BASE (2*1024*1024*1024UL)
-#define MCPCIA_DMA_WIN_SIZE (2*1024*1024*1024UL)
-#endif /* SRM_SETUP */
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM_SETUP)
+#define MCPCIA_DMA_WIN_BASE alpha_mv.dma_win_base
+#define MCPCIA_DMA_WIN_SIZE alpha_mv.dma_win_size
+#else
+#define MCPCIA_DMA_WIN_BASE MCPCIA_DMA_WIN_BASE_DEFAULT
+#define MCPCIA_DMA_WIN_SIZE MCPCIA_DMA_WIN_SIZE_DEFAULT
+#endif
#define HOSE(h) (((unsigned long)(h)) << 33)
+
/*
* General Registers
*/
@@ -165,22 +162,38 @@ extern unsigned int MCPCIA_DMA_WIN_SIZE;
#define MCPCIA_IO(h) (IDENT_ADDR + 0xf980000000UL + HOSE(h))
#define MCPCIA_SPARSE(h) (IDENT_ADDR + 0xf800000000UL + HOSE(h))
#define MCPCIA_DENSE(h) (IDENT_ADDR + 0xf900000000UL + HOSE(h))
-#define MCPCIA_IACK_SC(h) (IDENT_ADDR + 0xf9f0003f00UL + HOSE(h))
+#define _MCPCIA_IACK_SC(h) (IDENT_ADDR + 0xf9f0003f00UL + HOSE(h))
+
+#define MCPCIA_HAE_ADDRESS MCPCIA_HAE_MEM(0)
+#define MCPCIA_IACK_SC _MCPCIA_IACK_SC(0)
+
+/*
+ * Data structure for handling MCPCIA machine checks:
+ */
+struct el_MCPCIA_uncorrected_frame_mcheck {
+ struct el_common header;
+ struct el_common_EV5_uncorrectable_mcheck procdata;
+};
-#define HAE_ADDRESS MCPCIA_HAE_MEM(0)
#ifdef __KERNEL__
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
/*
* Translate physical memory address as seen on (PCI) bus into
* a kernel virtual address and vv.
*/
-extern inline unsigned long virt_to_bus(void * address)
+
+__EXTERN_INLINE unsigned long mcpcia_virt_to_bus(void * address)
{
return virt_to_phys(address) + MCPCIA_DMA_WIN_BASE;
}
-extern inline void * bus_to_virt(unsigned long address)
+__EXTERN_INLINE void * mcpcia_bus_to_virt(unsigned long address)
{
return phys_to_virt(address - MCPCIA_DMA_WIN_BASE);
}
@@ -196,128 +209,93 @@ extern inline void * bus_to_virt(unsigned long address)
* Unfortunately, we can't use BWIO with EV5, so for now, we always use SPARSE.
*/
+#define vucp volatile unsigned char *
+#define vusp volatile unsigned short *
+#define vip volatile int *
#define vuip volatile unsigned int *
+#define vulp volatile unsigned long *
-#ifdef DISABLE_BWIO_ENABLED
-
-extern inline unsigned int __inb(unsigned long addr)
+#if 0 /* BWIO */
+__EXTERN_INLINE unsigned int mcpcia_bw_inb(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldbu %0,%1"
- : "=r" (result)
- : "m" (*(unsigned char *)(addr+MCPCIA_BW_IO)));
-
- return result;
+ return __kernel_ldbu(*(vucp)(addr+MCPCIA_BW_IO));
}
-extern inline void __outb(unsigned char b, unsigned long addr)
+__EXTERN_INLINE void mcpcia_bw_outb(unsigned char b, unsigned long addr)
{
- __asm__ __volatile__ (
- "stb %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned char *)(addr+MCPCIA_BW_IO)), "r" (b));
+ __kernel_stb(b, *(vucp)(addr+MCPCIA_BW_IO));
+ mb();
}
-extern inline unsigned int __inw(unsigned long addr)
+__EXTERN_INLINE unsigned int mcpcia_bw_inw(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldwu %0,%1"
- : "=r" (result)
- : "m" (*(unsigned short *)(addr+MCPCIA_BW_IO)));
-
- return result;
+ return __kernel_ldwu(*(vusp)(addr+MCPCIA_BW_IO));
}
-extern inline void __outw(unsigned short b, unsigned long addr)
+__EXTERN_INLINE void mcpcia_bw_outw(unsigned short b, unsigned long addr)
{
- __asm__ __volatile__ (
- "stw %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned short *)(addr+MCPCIA_BW_IO)), "r" (b));
+ __kernel_stw(b, *(vusp)(addr+MCPCIA_BW_IO));
+ mb();
}
-extern inline unsigned int __inl(unsigned long addr)
+__EXTERN_INLINE unsigned int mcpcia_bw_inl(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldl %0,%1"
- : "=r" (result)
- : "m" (*(unsigned int *)(addr+MCPCIA_BW_IO)));
-
- return result;
+ return *(vuip)(addr+MCPCIA_BW_IO);
}
-extern inline void __outl(unsigned int b, unsigned long addr)
+__EXTERN_INLINE void mcpcia_bw_outl(unsigned int b, unsigned long addr)
{
- __asm__ __volatile__ (
- "stl %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned int *)(addr+MCPCIA_BW_IO)), "r" (b));
+ *(vuip)(addr+MCPCIA_BW_IO) = b;
+ mb();
}
+#endif
-#define inb(port) __inb((port))
-#define inw(port) __inw((port))
-#define inl(port) __inl((port))
-
-#define outb(x, port) __outb((x),(port))
-#define outw(x, port) __outw((x),(port))
-#define outl(x, port) __outl((x),(port))
-
-#else /* BWIO_ENABLED */
-
-extern inline unsigned int __inb(unsigned long in_addr)
+__EXTERN_INLINE unsigned int mcpcia_inb(unsigned long in_addr)
{
unsigned long addr = in_addr & 0xffffffffUL;
unsigned long hose = (in_addr >> 32) & 3;
- long result = *(vuip) ((addr << 5) + MCPCIA_IO(hose) + 0x00);
- result >>= (addr & 3) * 8;
- return 0xffUL & result;
+ long result = *(vip) ((addr << 5) + MCPCIA_IO(hose) + 0x00);
+ return __kernel_extbl(result, addr & 3);
}
-extern inline void __outb(unsigned char b, unsigned long in_addr)
+__EXTERN_INLINE void mcpcia_outb(unsigned char b, unsigned long in_addr)
{
unsigned long addr = in_addr & 0xffffffffUL;
unsigned long hose = (in_addr >> 32) & 3;
unsigned int w;
- asm ("insbl %2,%1,%0" : "r="(w) : "ri"(addr & 0x3), "r"(b));
+ w = __kernel_insbl(b, addr & 3);
*(vuip) ((addr << 5) + MCPCIA_IO(hose) + 0x00) = w;
mb();
}
-extern inline unsigned int __inw(unsigned long in_addr)
+__EXTERN_INLINE unsigned int mcpcia_inw(unsigned long in_addr)
{
unsigned long addr = in_addr & 0xffffffffUL;
unsigned long hose = (in_addr >> 32) & 3;
- long result = *(vuip) ((addr << 5) + MCPCIA_IO(hose) + 0x08);
- result >>= (addr & 3) * 8;
- return 0xffffUL & result;
+ long result = *(vip) ((addr << 5) + MCPCIA_IO(hose) + 0x08);
+ return __kernel_extwl(result, addr & 3);
}
-extern inline void __outw(unsigned short b, unsigned long in_addr)
+__EXTERN_INLINE void mcpcia_outw(unsigned short b, unsigned long in_addr)
{
unsigned long addr = in_addr & 0xffffffffUL;
unsigned long hose = (in_addr >> 32) & 3;
unsigned int w;
- asm ("inswl %2,%1,%0" : "r="(w) : "ri"(addr & 0x3), "r"(b));
+ w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + MCPCIA_IO(hose) + 0x08) = w;
mb();
}
-extern inline unsigned int __inl(unsigned long in_addr)
+__EXTERN_INLINE unsigned int mcpcia_inl(unsigned long in_addr)
{
unsigned long addr = in_addr & 0xffffffffUL;
unsigned long hose = (in_addr >> 32) & 3;
return *(vuip) ((addr << 5) + MCPCIA_IO(hose) + 0x18);
}
-extern inline void __outl(unsigned int b, unsigned long in_addr)
+__EXTERN_INLINE void mcpcia_outl(unsigned int b, unsigned long in_addr)
{
unsigned long addr = in_addr & 0xffffffffUL;
unsigned long hose = (in_addr >> 32) & 3;
@@ -325,14 +303,6 @@ extern inline void __outl(unsigned int b, unsigned long in_addr)
mb();
}
-#define inb(port) \
-(__builtin_constant_p((port))?__inb(port):_inb(port))
-
-#define outb(x, port) \
-(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
-
-#endif /* BWIO_ENABLED */
-
/*
* Memory functions. 64-bit and 32-bit accesses are done through
@@ -366,303 +336,297 @@ extern inline void __outl(unsigned int b, unsigned long in_addr)
*
*/
-#ifdef DISABLE_BWIO_ENABLED
-
-extern inline unsigned long __readb(unsigned long addr)
+#if 0 /* BWIO */
+__EXTERN_INLINE unsigned long mcpcia_bw_readb(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldbu %0,%1"
- : "=r" (result)
- : "m" (*(unsigned char *)(addr+MCPCIA_BW_MEM)));
-
- return result;
+ return __kernel_ldbu(*(vucp)(addr+MCPCIA_BW_MEM));
}
-extern inline unsigned long __readw(unsigned long addr)
+__EXTERN_INLINE unsigned long mcpcia_bw_readw(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldwu %0,%1"
- : "=r" (result)
- : "m" (*(unsigned short *)(addr+MCPCIA_BW_MEM)));
-
- return result;
+ return __kernel_ldwu(*(vusp)(addr+MCPCIA_BW_MEM));
}
-extern inline unsigned long __readl(unsigned long addr)
+__EXTERN_INLINE unsigned long mcpcia_bw_readl(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldl %0,%1"
- : "=r" (result)
- : "m" (*(unsigned int *)(addr+MCPCIA_BW_MEM)));
-
- return result;
+ return *(vuip)(addr + MCPCIA_BW_MEM);
}
-extern inline void __writeb(unsigned char b, unsigned long addr)
+__EXTERN_INLINE unsigned long mcpcia_bw_readq(unsigned long addr)
{
- __asm__ __volatile__ (
- "stb %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned char *)(addr+MCPCIA_BW_MEM)), "r" (b));
+ return *(vulp)(addr + MCPCIA_BW_MEM);
}
-extern inline void __writew(unsigned short b, unsigned long addr)
+__EXTERN_INLINE void mcpcia_bw_writeb(unsigned char b, unsigned long addr)
{
- __asm__ __volatile__ (
- "stw %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned short *)(addr+MCPCIA_BW_MEM)), "r" (b));
+ __kernel_stb(b, *(vucp)(addr+MCPCIA_BW_MEM));
}
-extern inline void __writel(unsigned int b, unsigned long addr)
+__EXTERN_INLINE void mcpcia_bw_writew(unsigned short b, unsigned long addr)
{
- __asm__ __volatile__ (
- "stl %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned int *)(addr+MCPCIA_BW_MEM)), "r" (b));
+ __kernel_stw(b, *(vusp)(addr+MCPCIA_BW_MEM));
}
-#define readb(addr) __readb((addr))
-#define readw(addr) __readw((addr))
-
-#define writeb(b, addr) __writeb((b),(addr))
-#define writew(b, addr) __writew((b),(addr))
-
-#else /* BWIO_ENABLED */
-
-#ifdef CONFIG_ALPHA_SRM_SETUP
+__EXTERN_INLINE void mcpcia_bw_writel(unsigned int b, unsigned long addr)
+{
+ *(vuip)(addr+MCPCIA_BW_MEM) = b;
+}
-extern unsigned long mcpcia_sm_base_r1, mcpcia_sm_base_r2, mcpcia_sm_base_r3;
+__EXTERN_INLINE void mcpcia_bw_writeq(unsigned long b, unsigned long addr)
+{
+ *(vulp)(addr+MCPCIA_BW_MEM) = b;
+}
+#endif
-extern inline unsigned long __readb(unsigned long addr)
+__EXTERN_INLINE unsigned long mcpcia_srm_base(unsigned long addr)
{
- unsigned long result, shift, work;
+ unsigned long mask, base;
+ unsigned long hose = (addr >> 32) & 3;
- if ((addr >= mcpcia_sm_base_r1) &&
- (addr <= (mcpcia_sm_base_r1 + MEM_R1_MASK)))
- work = (((addr & MEM_R1_MASK) << 5) + MCPCIA_SPARSE_MEM + 0x00);
- else
- if ((addr >= mcpcia_sm_base_r2) &&
- (addr <= (mcpcia_sm_base_r2 + MEM_R2_MASK)))
- work = (((addr & MEM_R2_MASK) << 5) + MCPCIA_SPARSE_MEM_R2 + 0x00);
- else
- if ((addr >= mcpcia_sm_base_r3) &&
- (addr <= (mcpcia_sm_base_r3 + MEM_R3_MASK)))
- work = (((addr & MEM_R3_MASK) << 5) + MCPCIA_SPARSE_MEM_R3 + 0x00);
+ if (addr >= alpha_mv.sm_base_r1
+ && addr <= alpha_mv.sm_base_r1 + MCPCIA_MEM_R1_MASK) {
+ mask = MCPCIA_MEM_R1_MASK;
+ base = MCPCIA_SPARSE(hose);
+ }
+#if 0
+ /* FIXME FIXME FIXME: SPARSE_MEM_R2 and R3 are not defined? */
+ else if (addr >= alpha_mv.sm_base_r2
+ && addr <= alpha_mv.sm_base_r2 + MCPCIA_MEM_R2_MASK) {
+ mask = MCPCIA_MEM_R2_MASK;
+ base = MCPCIA_SPARSE_MEM_R2;
+ }
+ else if (addr >= alpha_mv.sm_base_r3
+ && addr <= alpha_mv.sm_base_r3 + MCPCIA_MEM_R3_MASK) {
+ mask = MCPCIA_MEM_R3_MASK;
+ base = MCPCIA_SPARSE_MEM_R3;
+ }
+#endif
else
{
#if 0
- printk("__readb: address 0x%lx not covered by HAE\n", addr);
+ printk("mcpcia: address 0x%lx not covered by HAE\n", addr);
#endif
- return 0x0ffUL;
+ return 0;
}
- shift = (addr & 0x3) << 3;
- result = *(vuip) work;
- result >>= shift;
- return 0x0ffUL & result;
+
+ return ((addr & mask) << 5) + base;
}
-extern inline unsigned long __readw(unsigned long addr)
+__EXTERN_INLINE unsigned long mcpcia_srm_readb(unsigned long addr)
{
- unsigned long result, shift, work;
+ unsigned long result, work;
- if ((addr >= mcpcia_sm_base_r1) &&
- (addr <= (mcpcia_sm_base_r1 + MEM_R1_MASK)))
- work = (((addr & MEM_R1_MASK) << 5) + MCPCIA_SPARSE_MEM + 0x08);
- else
- if ((addr >= mcpcia_sm_base_r2) &&
- (addr <= (mcpcia_sm_base_r2 + MEM_R2_MASK)))
- work = (((addr & MEM_R2_MASK) << 5) + MCPCIA_SPARSE_MEM_R2 + 0x08);
- else
- if ((addr >= mcpcia_sm_base_r3) &&
- (addr <= (mcpcia_sm_base_r3 + MEM_R3_MASK)))
- work = (((addr & MEM_R3_MASK) << 5) + MCPCIA_SPARSE_MEM_R3 + 0x08);
- else
- {
-#if 0
- printk("__readw: address 0x%lx not covered by HAE\n", addr);
-#endif
- return 0x0ffffUL;
- }
- shift = (addr & 0x3) << 3;
- result = *(vuip) work;
- result >>= shift;
- return 0x0ffffUL & result;
+ if ((work = mcpcia_srm_base(addr)) == 0)
+ return 0xff;
+ work += 0x00; /* add transfer length */
+
+ result = *(vip) work;
+ return __kernel_extbl(result, addr & 3);
}
-extern inline void __writeb(unsigned char b, unsigned long addr)
+__EXTERN_INLINE unsigned long mcpcia_srm_readw(unsigned long addr)
{
- unsigned long work;
+ unsigned long result, work;
- if ((addr >= mcpcia_sm_base_r1) &&
- (addr <= (mcpcia_sm_base_r1 + MEM_R1_MASK)))
- work = (((addr & MEM_R1_MASK) << 5) + MCPCIA_SPARSE_MEM + 0x00);
- else
- if ((addr >= mcpcia_sm_base_r2) &&
- (addr <= (mcpcia_sm_base_r2 + MEM_R2_MASK)))
- work = (((addr & MEM_R2_MASK) << 5) + MCPCIA_SPARSE_MEM_R2 + 0x00);
- else
- if ((addr >= mcpcia_sm_base_r3) &&
- (addr <= (mcpcia_sm_base_r3 + MEM_R3_MASK)))
- work = (((addr & MEM_R3_MASK) << 5) + MCPCIA_SPARSE_MEM_R3 + 0x00);
- else
- {
-#if 0
- printk("__writeb: address 0x%lx not covered by HAE\n", addr);
-#endif
- return;
- }
- *(vuip) work = b * 0x01010101;
+ if ((work = mcpcia_srm_base(addr)) == 0)
+ return 0xffff;
+ work += 0x08; /* add transfer length */
+
+ result = *(vip) work;
+ return __kernel_extwl(result, addr & 3);
}
-extern inline void __writew(unsigned short b, unsigned long addr)
+__EXTERN_INLINE void mcpcia_srm_writeb(unsigned char b, unsigned long addr)
{
- unsigned long work;
-
- if ((addr >= mcpcia_sm_base_r1) &&
- (addr <= (mcpcia_sm_base_r1 + MEM_R1_MASK)))
- work = (((addr & MEM_R1_MASK) << 5) + MCPCIA_SPARSE_MEM + 0x00);
- else
- if ((addr >= mcpcia_sm_base_r2) &&
- (addr <= (mcpcia_sm_base_r2 + MEM_R2_MASK)))
- work = (((addr & MEM_R2_MASK) << 5) + MCPCIA_SPARSE_MEM_R2 + 0x00);
- else
- if ((addr >= mcpcia_sm_base_r3) &&
- (addr <= (mcpcia_sm_base_r3 + MEM_R3_MASK)))
- work = (((addr & MEM_R3_MASK) << 5) + MCPCIA_SPARSE_MEM_R3 + 0x00);
- else
- {
-#if 0
- printk("__writew: address 0x%lx not covered by HAE\n", addr);
-#endif
- return;
+ unsigned long work = mcpcia_srm_base(addr);
+ if (work) {
+ work += 0x00; /* add transfer length */
+ *(vuip) work = b * 0x01010101;
}
- *(vuip) work = b * 0x00010001;
}
-#else /* SRM_SETUP */
+__EXTERN_INLINE void mcpcia_srm_writew(unsigned short b, unsigned long addr)
+{
+ unsigned long work = mcpcia_srm_base(addr);
+ if (work) {
+ work += 0x08; /* add transfer length */
+ *(vuip) work = b * 0x00010001;
+ }
+}
-extern inline unsigned long __readb(unsigned long in_addr)
+__EXTERN_INLINE unsigned long mcpcia_readb(unsigned long in_addr)
{
unsigned long addr = in_addr & 0xffffffffUL;
unsigned long hose = (in_addr >> 32) & 3;
- unsigned long result, shift, msb, work, temp;
+ unsigned long result, msb, work, temp;
- shift = (addr & 0x3) << 3;
msb = addr & 0xE0000000UL;
- temp = addr & MEM_R1_MASK;
- if (msb != hae.cache) {
- set_hae(msb);
- }
+ temp = addr & MCPCIA_MEM_R1_MASK;
+ set_hae(msb);
+
work = ((temp << 5) + MCPCIA_SPARSE(hose) + 0x00);
- result = *(vuip) work;
- result >>= shift;
- return 0x0ffUL & result;
+ result = *(vip) work;
+ return __kernel_extbl(result, addr & 3);
}
-extern inline unsigned long __readw(unsigned long in_addr)
+__EXTERN_INLINE unsigned long mcpcia_readw(unsigned long in_addr)
{
unsigned long addr = in_addr & 0xffffffffUL;
unsigned long hose = (in_addr >> 32) & 3;
- unsigned long result, shift, msb, work, temp;
+ unsigned long result, msb, work, temp;
- shift = (addr & 0x3) << 3;
msb = addr & 0xE0000000UL;
- temp = addr & MEM_R1_MASK ;
- if (msb != hae.cache) {
- set_hae(msb);
- }
+ temp = addr & MCPCIA_MEM_R1_MASK ;
+ set_hae(msb);
+
work = ((temp << 5) + MCPCIA_SPARSE(hose) + 0x08);
- result = *(vuip) work;
- result >>= shift;
- return 0x0ffffUL & result;
+ result = *(vip) work;
+ return __kernel_extwl(result, addr & 3);
}
-extern inline void __writeb(unsigned char b, unsigned long in_addr)
+__EXTERN_INLINE void mcpcia_writeb(unsigned char b, unsigned long in_addr)
{
unsigned long addr = in_addr & 0xffffffffUL;
unsigned long hose = (in_addr >> 32) & 3;
unsigned long msb;
msb = addr & 0xE0000000;
- addr &= MEM_R1_MASK;
- if (msb != hae.cache) {
- set_hae(msb);
- }
+ addr &= MCPCIA_MEM_R1_MASK;
+ set_hae(msb);
+
*(vuip) ((addr << 5) + MCPCIA_SPARSE(hose) + 0x00) = b * 0x01010101;
}
-extern inline void __writew(unsigned short b, unsigned long in_addr)
+__EXTERN_INLINE void mcpcia_writew(unsigned short b, unsigned long in_addr)
{
unsigned long addr = in_addr & 0xffffffffUL;
unsigned long hose = (in_addr >> 32) & 3;
unsigned long msb ;
msb = addr & 0xE0000000 ;
- addr &= MEM_R1_MASK ;
- if (msb != hae.cache) {
- set_hae(msb);
- }
+ addr &= MCPCIA_MEM_R1_MASK ;
+ set_hae(msb);
+
*(vuip) ((addr << 5) + MCPCIA_SPARSE(hose) + 0x08) = b * 0x00010001;
}
-#endif /* SRM_SETUP */
-extern inline unsigned long __readl(unsigned long in_addr)
+__EXTERN_INLINE unsigned long mcpcia_readl(unsigned long in_addr)
{
unsigned long addr = in_addr & 0xffffffffUL;
unsigned long hose = (in_addr >> 32) & 3;
return *(vuip) (addr + MCPCIA_DENSE(hose));
}
-extern inline void __writel(unsigned int b, unsigned long in_addr)
+__EXTERN_INLINE unsigned long mcpcia_readq(unsigned long in_addr)
+{
+ unsigned long addr = in_addr & 0xffffffffUL;
+ unsigned long hose = (in_addr >> 32) & 3;
+ return *(vulp) (addr + MCPCIA_DENSE(hose));
+}
+
+__EXTERN_INLINE void mcpcia_writel(unsigned int b, unsigned long in_addr)
{
unsigned long addr = in_addr & 0xffffffffUL;
unsigned long hose = (in_addr >> 32) & 3;
*(vuip) (addr + MCPCIA_DENSE(hose)) = b;
}
-#endif /* BWIO_ENABLED */
+__EXTERN_INLINE void mcpcia_writeq(unsigned long b, unsigned long in_addr)
+{
+ unsigned long addr = in_addr & 0xffffffffUL;
+ unsigned long hose = (in_addr >> 32) & 3;
+ *(vulp) (addr + MCPCIA_DENSE(hose)) = b;
+}
-#define readl(a) __readl((unsigned long)(a))
-#define writel(v,a) __writel((v),(unsigned long)(a))
+/* Find the DENSE memory area for a given bus address. */
-#undef vuip
+__EXTERN_INLINE unsigned long mcpcia_dense_mem(unsigned long addr)
+{
+ return MCPCIA_DENSE((addr >> 32) & 3);
+}
-struct linux_hose_info {
- struct pci_bus pci_bus;
- struct linux_hose_info *next;
- unsigned long pci_io_space;
- unsigned long pci_mem_space;
- unsigned long pci_config_space;
- unsigned long pci_sparse_space;
- unsigned int pci_first_busno;
- unsigned int pci_last_busno;
- unsigned int pci_hose_index;
-};
+#undef vucp
+#undef vusp
+#undef vip
+#undef vuip
+#undef vulp
+
+#ifdef __WANT_IO_DEF
+
+#define virt_to_bus mcpcia_virt_to_bus
+#define bus_to_virt mcpcia_bus_to_virt
+
+#if 0 /* BWIO */
+# define __inb mcpcia_bw_inb
+# define __inw mcpcia_bw_inw
+# define __inl mcpcia_bw_inl
+# define __outb mcpcia_bw_outb
+# define __outw mcpcia_bw_outw
+# define __outl mcpcia_bw_outl
+# define __readb mcpcia_bw_readb
+# define __readw mcpcia_bw_readw
+# define __writeb mcpcia_bw_writeb
+# define __writew mcpcia_bw_writew
+# define __readl mcpcia_bw_readl
+# define __readq mcpcia_bw_readq
+# define __writel mcpcia_bw_writel
+# define __writeq mcpcia_bw_writeq
+#else
+# define __inb mcpcia_inb
+# define __inw mcpcia_inw
+# define __inl mcpcia_inl
+# define __outb mcpcia_outb
+# define __outw mcpcia_outw
+# define __outl mcpcia_outl
+# ifdef CONFIG_ALPHA_SRM_SETUP
+# define __readb mcpcia_srm_readb
+# define __readw mcpcia_srm_readw
+# define __writeb mcpcia_srm_writeb
+# define __writew mcpcia_srm_writew
+# else
+# define __readb mcpcia_readb
+# define __readw mcpcia_readw
+# define __writeb mcpcia_writeb
+# define __writew mcpcia_writew
+# endif
+# define __readl mcpcia_readl
+# define __readq mcpcia_readq
+# define __writel mcpcia_writel
+# define __writeq mcpcia_writeq
+#endif /* BWIO */
+
+#define dense_mem mcpcia_dense_mem
+
+#if 0 /* BWIO */
+# define inb(port) __inb((port))
+# define inw(port) __inw((port))
+# define inl(port) __inl((port))
+# define outb(x, port) __outb((x),(port))
+# define outw(x, port) __outw((x),(port))
+# define outl(x, port) __outl((x),(port))
+# define readb(addr) __readb((addr))
+# define readw(addr) __readw((addr))
+# define writeb(b, addr) __writeb((b),(addr))
+# define writew(b, addr) __writew((b),(addr))
+#else
+# define inb(port) \
+ (__builtin_constant_p((port))?__inb(port):_inb(port))
+# define outb(x, port) \
+ (__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
+#endif /* BWIO */
-extern unsigned long mcpcia_init (unsigned long mem_start,
- unsigned long mem_end);
-extern unsigned long mcpcia_fixup (unsigned long mem_start,
- unsigned long mem_end);
+#define readl(a) __readl((unsigned long)(a))
+#define readq(a) __readq((unsigned long)(a))
+#define writel(v,a) __writel((v),(unsigned long)(a))
+#define writeq(v,a) __writeq((v),(unsigned long)(a))
-#endif /* __KERNEL__ */
+#endif /* __WANT_IO_DEF */
-/*
- * Data structure for handling MCPCIA machine checks:
- */
-struct el_MCPCIA_uncorrected_frame_mcheck {
- struct el_common header;
- struct el_common_EV5_uncorrectable_mcheck procdata;
-};
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
-#define RTC_PORT(x) (0x70 + (x))
-#define RTC_ADDR(x) (0x80 | (x))
-#define RTC_ALWAYS_BCD 0
+#endif /* __KERNEL__ */
#endif /* __ALPHA_MCPCIA__H__ */
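
MCPCIA is a multi-hose chipset, so its "bus addresses" carry more than a PCI
offset: every accessor above splits the incoming value into a 32-bit offset and a
hose index taken from the bits above bit 31, then selects that hose's I/O, SPARSE,
or DENSE region. A minimal decode sketch, mirroring the arithmetic inside
mcpcia_inl() above (illustration only):

    unsigned int hose_inl(unsigned long in_addr)
    {
            unsigned long addr = in_addr & 0xffffffffUL;  /* offset within the hose */
            unsigned long hose = (in_addr >> 32) & 3;     /* hose number 0..3 */
            return *(volatile unsigned int *)((addr << 5) + MCPCIA_IO(hose) + 0x18);
    }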
diff --git a/include/asm-alpha/pyxis.h b/include/asm-alpha/core_pyxis.h
index d10408bde..8b00d1356 100644
--- a/include/asm-alpha/pyxis.h
+++ b/include/asm-alpha/core_pyxis.h
@@ -3,6 +3,7 @@
#include <linux/config.h>
#include <linux/types.h>
+#include <asm/compiler.h>
/*
* PYXIS is the internal name for a core logic chipset which provides
@@ -18,7 +19,7 @@
/*------------------------------------------------------------------------**
** **
-** I/O procedures **
+** I/O procedures **
** **
** inport[b|w|t|l], outport[b|w|t|l] 8:16:24:32 IO xfers **
** inportbxt: 8 bits only **
@@ -69,25 +70,20 @@
*
*/
-#define BYTE_ENABLE_SHIFT 5
-#define TRANSFER_LENGTH_SHIFT 3
+#define PYXIS_MEM_R1_MASK 0x1fffffff /* SPARSE Mem region 1 mask is 29 bits */
+#define PYXIS_MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
+#define PYXIS_MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
-#define MEM_R1_MASK 0x1fffffff /* SPARSE Mem region 1 mask is 29 bits */
-#define MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
-#define MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
+#define PYXIS_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
+#define PYXIS_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
-#ifdef CONFIG_ALPHA_SRM_SETUP
-/* if we are using the SRM PCI setup, we'll need to use variables instead */
-#define PYXIS_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
-#define PYXIS_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
-
-extern unsigned int PYXIS_DMA_WIN_BASE;
-extern unsigned int PYXIS_DMA_WIN_SIZE;
-
-#else /* SRM_SETUP */
-#define PYXIS_DMA_WIN_BASE (1024*1024*1024)
-#define PYXIS_DMA_WIN_SIZE (1024*1024*1024)
-#endif /* SRM_SETUP */
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM_SETUP)
+#define PYXIS_DMA_WIN_BASE alpha_mv.dma_win_base
+#define PYXIS_DMA_WIN_SIZE alpha_mv.dma_win_size
+#else
+#define PYXIS_DMA_WIN_BASE PYXIS_DMA_WIN_BASE_DEFAULT
+#define PYXIS_DMA_WIN_SIZE PYXIS_DMA_WIN_SIZE_DEFAULT
+#endif
/*
* General Registers
@@ -200,37 +196,90 @@ extern unsigned int PYXIS_DMA_WIN_SIZE;
#define PYXIS_STAT0_P_NBR_SHIFT 13
#define PYXIS_STAT0_P_NBR_MASK 0x7ffff
-#define HAE_ADDRESS PYXIS_HAE_MEM
+#define PYXIS_HAE_ADDRESS PYXIS_HAE_MEM
+
+/*
+ * Data structure for handling PYXIS machine checks:
+ */
+struct el_PYXIS_sysdata_mcheck {
+ u_long coma_gcr;
+ u_long coma_edsr;
+ u_long coma_ter;
+ u_long coma_elar;
+ u_long coma_ehar;
+ u_long coma_ldlr;
+ u_long coma_ldhr;
+ u_long coma_base0;
+ u_long coma_base1;
+ u_long coma_base2;
+ u_long coma_cnfg0;
+ u_long coma_cnfg1;
+ u_long coma_cnfg2;
+ u_long epic_dcsr;
+ u_long epic_pear;
+ u_long epic_sear;
+ u_long epic_tbr1;
+ u_long epic_tbr2;
+ u_long epic_pbr1;
+ u_long epic_pbr2;
+ u_long epic_pmr1;
+ u_long epic_pmr2;
+ u_long epic_harx1;
+ u_long epic_harx2;
+ u_long epic_pmlt;
+ u_long epic_tag0;
+ u_long epic_tag1;
+ u_long epic_tag2;
+ u_long epic_tag3;
+ u_long epic_tag4;
+ u_long epic_tag5;
+ u_long epic_tag6;
+ u_long epic_tag7;
+ u_long epic_data0;
+ u_long epic_data1;
+ u_long epic_data2;
+ u_long epic_data3;
+ u_long epic_data4;
+ u_long epic_data5;
+ u_long epic_data6;
+ u_long epic_data7;
+};
+
#ifdef __KERNEL__
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
/*
* Translate physical memory address as seen on (PCI) bus into
* a kernel virtual address and vv.
*/
-#if defined(CONFIG_ALPHA_RUFFIAN)
+
/* Ruffian doesn't do 1G PCI window */
-extern inline unsigned long virt_to_bus(void * address)
+static inline unsigned long pyxis_ruffian_virt_to_bus(void * address)
{
return virt_to_phys(address);
}
-extern inline void * bus_to_virt(unsigned long address)
+static inline void * pyxis_ruffian_bus_to_virt(unsigned long address)
{
return phys_to_virt(address);
}
-#else /* RUFFIAN */
-extern inline unsigned long virt_to_bus(void * address)
+
+__EXTERN_INLINE unsigned long pyxis_virt_to_bus(void * address)
{
return virt_to_phys(address) + PYXIS_DMA_WIN_BASE;
}
-extern inline void * bus_to_virt(unsigned long address)
+__EXTERN_INLINE void * pyxis_bus_to_virt(unsigned long address)
{
return phys_to_virt(address - PYXIS_DMA_WIN_BASE);
}
-#endif /* RUFFIAN */
+
/*
* I/O functions:
@@ -241,131 +290,86 @@ extern inline void * bus_to_virt(unsigned long address)
* get at PCI memory and I/O.
*/
+#define vucp volatile unsigned char *
+#define vusp volatile unsigned short *
+#define vip volatile int *
#define vuip volatile unsigned int *
+#define vulp volatile unsigned long *
-#ifdef BWIO_ENABLED
-
-extern inline unsigned int __inb(unsigned long addr)
+__EXTERN_INLINE unsigned int pyxis_bw_inb(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldbu %0,%1"
- : "=r" (result)
- : "m" (*(unsigned char *)(addr+PYXIS_BW_IO)));
-
- return result;
+ return __kernel_ldbu(*(vucp)(addr+PYXIS_BW_IO));
}
-extern inline void __outb(unsigned char b, unsigned long addr)
+__EXTERN_INLINE void pyxis_bw_outb(unsigned char b, unsigned long addr)
{
- __asm__ __volatile__ (
- "stb %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned char *)(addr+PYXIS_BW_IO)), "r" (b));
+ __kernel_stb(b, *(vucp)(addr+PYXIS_BW_IO));
+ mb();
}
-extern inline unsigned int __inw(unsigned long addr)
+__EXTERN_INLINE unsigned int pyxis_bw_inw(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldwu %0,%1"
- : "=r" (result)
- : "m" (*(unsigned short *)(addr+PYXIS_BW_IO)));
-
- return result;
+ return __kernel_ldwu(*(vusp)(addr+PYXIS_BW_IO));
}
-extern inline void __outw(unsigned short b, unsigned long addr)
+__EXTERN_INLINE void pyxis_bw_outw(unsigned short b, unsigned long addr)
{
- __asm__ __volatile__ (
- "stw %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned short *)(addr+PYXIS_BW_IO)), "r" (b));
+ __kernel_stw(b, *(vusp)(addr+PYXIS_BW_IO));
+ mb();
}
-extern inline unsigned int __inl(unsigned long addr)
+__EXTERN_INLINE unsigned int pyxis_bw_inl(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldl %0,%1"
- : "=r" (result)
- : "m" (*(unsigned int *)(addr+PYXIS_BW_IO)));
-
- return result;
+ return *(vuip)(addr+PYXIS_BW_IO);
}
-extern inline void __outl(unsigned int b, unsigned long addr)
+__EXTERN_INLINE void pyxis_bw_outl(unsigned int b, unsigned long addr)
{
- __asm__ __volatile__ (
- "stl %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned int *)(addr+PYXIS_BW_IO)), "r" (b));
+ *(vuip)(addr+PYXIS_BW_IO) = b;
+ mb();
}
-#define inb(port) __inb((port))
-#define inw(port) __inw((port))
-#define inl(port) __inl((port))
-
-#define outb(x, port) __outb((x),(port))
-#define outw(x, port) __outw((x),(port))
-#define outl(x, port) __outl((x),(port))
-
-#else /* BWIO_ENABLED */
-
-extern inline unsigned int __inb(unsigned long addr)
+__EXTERN_INLINE unsigned int pyxis_inb(unsigned long addr)
{
- long result = *(vuip) ((addr << 5) + PYXIS_IO + 0x00);
- result >>= (addr & 3) * 8;
- return 0xffUL & result;
+ long result = *(vip) ((addr << 5) + PYXIS_IO + 0x00);
+ return __kernel_extbl(result, addr & 3);
}
-extern inline void __outb(unsigned char b, unsigned long addr)
+__EXTERN_INLINE void pyxis_outb(unsigned char b, unsigned long addr)
{
unsigned int w;
- asm ("insbl %2,%1,%0" : "r="(w) : "ri"(addr & 0x3), "r"(b));
+ w = __kernel_insbl(b, addr & 3);
*(vuip) ((addr << 5) + PYXIS_IO + 0x00) = w;
mb();
}
-extern inline unsigned int __inw(unsigned long addr)
+__EXTERN_INLINE unsigned int pyxis_inw(unsigned long addr)
{
- long result = *(vuip) ((addr << 5) + PYXIS_IO + 0x08);
- result >>= (addr & 3) * 8;
- return 0xffffUL & result;
+ long result = *(vip) ((addr << 5) + PYXIS_IO + 0x08);
+ return __kernel_extwl(result, addr & 3);
}
-extern inline void __outw(unsigned short b, unsigned long addr)
+__EXTERN_INLINE void pyxis_outw(unsigned short b, unsigned long addr)
{
unsigned int w;
- asm ("inswl %2,%1,%0" : "r="(w) : "ri"(addr & 0x3), "r"(b));
+ w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + PYXIS_IO + 0x08) = w;
mb();
}
-extern inline unsigned int __inl(unsigned long addr)
+__EXTERN_INLINE unsigned int pyxis_inl(unsigned long addr)
{
return *(vuip) ((addr << 5) + PYXIS_IO + 0x18);
}
-extern inline void __outl(unsigned int b, unsigned long addr)
+__EXTERN_INLINE void pyxis_outl(unsigned int b, unsigned long addr)
{
*(vuip) ((addr << 5) + PYXIS_IO + 0x18) = b;
mb();
}
-#define inb(port) \
-(__builtin_constant_p((port))?__inb(port):_inb(port))
-
-#define outb(x, port) \
-(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
-
-#endif /* BWIO_ENABLED */
-
/*
* Memory functions. 64-bit and 32-bit accesses are done through
@@ -399,316 +403,282 @@ extern inline void __outl(unsigned int b, unsigned long addr)
*
*/
-#ifdef BWIO_ENABLED
-
-extern inline unsigned long __readb(unsigned long addr)
+__EXTERN_INLINE unsigned long pyxis_bw_readb(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldbu %0,%1"
- : "=r" (result)
- : "m" (*(unsigned char *)(addr+PYXIS_BW_MEM)));
-
- return result;
+ return __kernel_ldbu(*(vucp)(addr+PYXIS_BW_MEM));
}
-extern inline unsigned long __readw(unsigned long addr)
+__EXTERN_INLINE unsigned long pyxis_bw_readw(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldwu %0,%1"
- : "=r" (result)
- : "m" (*(unsigned short *)(addr+PYXIS_BW_MEM)));
-
- return result;
+ return __kernel_ldwu(*(vusp)(addr+PYXIS_BW_MEM));
}
-extern inline unsigned long __readl(unsigned long addr)
+__EXTERN_INLINE unsigned long pyxis_bw_readl(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldl %0,%1"
- : "=r" (result)
- : "m" (*(unsigned int *)(addr+PYXIS_BW_MEM)));
-
- return result;
+ return *(vuip)(addr+PYXIS_BW_MEM);
}
-extern inline void __writeb(unsigned char b, unsigned long addr)
+__EXTERN_INLINE unsigned long pyxis_bw_readq(unsigned long addr)
{
- __asm__ __volatile__ (
- "stb %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned char *)(addr+PYXIS_BW_MEM)), "r" (b));
+ return *(vulp)(addr+PYXIS_BW_MEM);
}
-extern inline void __writew(unsigned short b, unsigned long addr)
+__EXTERN_INLINE void pyxis_bw_writeb(unsigned char b, unsigned long addr)
{
- __asm__ __volatile__ (
- "stw %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned short *)(addr+PYXIS_BW_MEM)), "r" (b));
+ __kernel_stb(b, *(vucp)(addr+PYXIS_BW_MEM));
+ mb();
}
-extern inline void __writel(unsigned int b, unsigned long addr)
+__EXTERN_INLINE void pyxis_bw_writew(unsigned short b, unsigned long addr)
{
- __asm__ __volatile__ (
- "stl %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned int *)(addr+PYXIS_BW_MEM)), "r" (b));
+ __kernel_stw(b, *(vusp)(addr+PYXIS_BW_MEM));
+ mb();
}
-#define readb(addr) __readb((addr))
-#define readw(addr) __readw((addr))
-
-#define writeb(b, addr) __writeb((b),(addr))
-#define writew(b, addr) __writew((b),(addr))
-
-#else /* BWIO_ENABLED */
-
-#ifdef CONFIG_ALPHA_SRM_SETUP
+__EXTERN_INLINE void pyxis_bw_writel(unsigned int b, unsigned long addr)
+{
+ *(vuip)(addr+PYXIS_BW_MEM) = b;
+}
-extern unsigned long pyxis_sm_base_r1, pyxis_sm_base_r2, pyxis_sm_base_r3;
+__EXTERN_INLINE void pyxis_bw_writeq(unsigned long b, unsigned long addr)
+{
+ *(vulp)(addr+PYXIS_BW_MEM) = b;
+}
-extern inline unsigned long __readb(unsigned long addr)
+__EXTERN_INLINE unsigned long pyxis_srm_base(unsigned long addr)
{
- unsigned long result, shift, work;
+ unsigned long mask, base;
- if ((addr >= pyxis_sm_base_r1) &&
- (addr <= (pyxis_sm_base_r1 + MEM_R1_MASK)))
- work = (((addr & MEM_R1_MASK) << 5) + PYXIS_SPARSE_MEM + 0x00);
- else
- if ((addr >= pyxis_sm_base_r2) &&
- (addr <= (pyxis_sm_base_r2 + MEM_R2_MASK)))
- work = (((addr & MEM_R2_MASK) << 5) + PYXIS_SPARSE_MEM_R2 + 0x00);
- else
- if ((addr >= pyxis_sm_base_r3) &&
- (addr <= (pyxis_sm_base_r3 + MEM_R3_MASK)))
- work = (((addr & MEM_R3_MASK) << 5) + PYXIS_SPARSE_MEM_R3 + 0x00);
+ if (addr >= alpha_mv.sm_base_r1
+ && addr <= alpha_mv.sm_base_r1 + PYXIS_MEM_R1_MASK) {
+ mask = PYXIS_MEM_R1_MASK;
+ base = PYXIS_SPARSE_MEM;
+ }
+ else if (addr >= alpha_mv.sm_base_r2
+ && addr <= alpha_mv.sm_base_r2 + PYXIS_MEM_R2_MASK) {
+ mask = PYXIS_MEM_R2_MASK;
+ base = PYXIS_SPARSE_MEM_R2;
+ }
+ else if (addr >= alpha_mv.sm_base_r3
+ && addr <= alpha_mv.sm_base_r3 + PYXIS_MEM_R3_MASK) {
+ mask = PYXIS_MEM_R3_MASK;
+ base = PYXIS_SPARSE_MEM_R3;
+ }
else
{
#if 0
- printk("__readb: address 0x%lx not covered by HAE\n", addr);
+ printk("pyxis: address 0x%lx not covered by HAE\n", addr);
#endif
- return 0x0ffUL;
+ return 0;
}
- shift = (addr & 0x3) << 3;
- result = *(vuip) work;
- result >>= shift;
- return 0x0ffUL & result;
+
+ return ((addr & mask) << 5) + base;
}
-extern inline unsigned long __readw(unsigned long addr)
+__EXTERN_INLINE unsigned long pyxis_srm_readb(unsigned long addr)
{
- unsigned long result, shift, work;
+ unsigned long result, work;
- if ((addr >= pyxis_sm_base_r1) &&
- (addr <= (pyxis_sm_base_r1 + MEM_R1_MASK)))
- work = (((addr & MEM_R1_MASK) << 5) + PYXIS_SPARSE_MEM + 0x08);
- else
- if ((addr >= pyxis_sm_base_r2) &&
- (addr <= (pyxis_sm_base_r2 + MEM_R2_MASK)))
- work = (((addr & MEM_R2_MASK) << 5) + PYXIS_SPARSE_MEM_R2 + 0x08);
- else
- if ((addr >= pyxis_sm_base_r3) &&
- (addr <= (pyxis_sm_base_r3 + MEM_R3_MASK)))
- work = (((addr & MEM_R3_MASK) << 5) + PYXIS_SPARSE_MEM_R3 + 0x08);
- else
- {
-#if 0
- printk("__readw: address 0x%lx not covered by HAE\n", addr);
-#endif
- return 0x0ffffUL;
- }
- shift = (addr & 0x3) << 3;
- result = *(vuip) work;
- result >>= shift;
- return 0x0ffffUL & result;
+ if ((work = pyxis_srm_base(addr)) == 0)
+ return 0xff;
+ work += 0x00; /* add transfer length */
+
+ result = *(vip) work;
+ return __kernel_extbl(result, addr & 3);
}
-extern inline void __writeb(unsigned char b, unsigned long addr)
+__EXTERN_INLINE unsigned long pyxis_srm_readw(unsigned long addr)
{
- unsigned long work;
+ unsigned long result, work;
- if ((addr >= pyxis_sm_base_r1) &&
- (addr <= (pyxis_sm_base_r1 + MEM_R1_MASK)))
- work = (((addr & MEM_R1_MASK) << 5) + PYXIS_SPARSE_MEM + 0x00);
- else
- if ((addr >= pyxis_sm_base_r2) &&
- (addr <= (pyxis_sm_base_r2 + MEM_R2_MASK)))
- work = (((addr & MEM_R2_MASK) << 5) + PYXIS_SPARSE_MEM_R2 + 0x00);
- else
- if ((addr >= pyxis_sm_base_r3) &&
- (addr <= (pyxis_sm_base_r3 + MEM_R3_MASK)))
- work = (((addr & MEM_R3_MASK) << 5) + PYXIS_SPARSE_MEM_R3 + 0x00);
- else
- {
-#if 0
- printk("__writeb: address 0x%lx not covered by HAE\n", addr);
-#endif
- return;
- }
- *(vuip) work = b * 0x01010101;
+ if ((work = pyxis_srm_base(addr)) == 0)
+ return 0xffff;
+ work += 0x08; /* add transfer length */
+
+ result = *(vip) work;
+ return __kernel_extwl(result, addr & 3);
}
-extern inline void __writew(unsigned short b, unsigned long addr)
+__EXTERN_INLINE void pyxis_srm_writeb(unsigned char b, unsigned long addr)
{
- unsigned long work;
-
- if ((addr >= pyxis_sm_base_r1) &&
- (addr <= (pyxis_sm_base_r1 + MEM_R1_MASK)))
- work = (((addr & MEM_R1_MASK) << 5) + PYXIS_SPARSE_MEM + 0x00);
- else
- if ((addr >= pyxis_sm_base_r2) &&
- (addr <= (pyxis_sm_base_r2 + MEM_R2_MASK)))
- work = (((addr & MEM_R2_MASK) << 5) + PYXIS_SPARSE_MEM_R2 + 0x00);
- else
- if ((addr >= pyxis_sm_base_r3) &&
- (addr <= (pyxis_sm_base_r3 + MEM_R3_MASK)))
- work = (((addr & MEM_R3_MASK) << 5) + PYXIS_SPARSE_MEM_R3 + 0x00);
- else
- {
-#if 0
- printk("__writew: address 0x%lx not covered by HAE\n", addr);
-#endif
- return;
+ unsigned long work = pyxis_srm_base(addr);
+ if (work) {
+ work += 0x00; /* add transfer length */
+ *(vuip) work = b * 0x01010101;
}
- *(vuip) work = b * 0x00010001;
}
-#else /* SRM_SETUP */
+__EXTERN_INLINE void pyxis_srm_writew(unsigned short b, unsigned long addr)
+{
+ unsigned long work = pyxis_srm_base(addr);
+ if (work) {
+ work += 0x08; /* add transfer length */
+ *(vuip) work = b * 0x00010001;
+ }
+}
-extern inline unsigned long __readb(unsigned long addr)
+__EXTERN_INLINE unsigned long pyxis_readb(unsigned long addr)
{
- unsigned long result, shift, msb, work, temp;
+ unsigned long result, msb, work, temp;
- shift = (addr & 0x3) << 3;
msb = addr & 0xE0000000UL;
- temp = addr & MEM_R1_MASK ;
- if (msb != hae.cache) {
- set_hae(msb);
- }
+ temp = addr & PYXIS_MEM_R1_MASK ;
+ set_hae(msb);
+
work = ((temp << 5) + PYXIS_SPARSE_MEM + 0x00);
- result = *(vuip) work;
- result >>= shift;
- return 0x0ffUL & result;
+ result = *(vip) work;
+ return __kernel_extbl(result, addr & 3);
}
-extern inline unsigned long __readw(unsigned long addr)
+__EXTERN_INLINE unsigned long pyxis_readw(unsigned long addr)
{
- unsigned long result, shift, msb, work, temp;
+ unsigned long result, msb, work, temp;
- shift = (addr & 0x3) << 3;
msb = addr & 0xE0000000UL;
- temp = addr & MEM_R1_MASK ;
- if (msb != hae.cache) {
- set_hae(msb);
- }
+ temp = addr & PYXIS_MEM_R1_MASK ;
+ set_hae(msb);
+
work = ((temp << 5) + PYXIS_SPARSE_MEM + 0x08);
- result = *(vuip) work;
- result >>= shift;
- return 0x0ffffUL & result;
+ result = *(vip) work;
+ return __kernel_extwl(result, addr & 3);
}
-extern inline void __writeb(unsigned char b, unsigned long addr)
+__EXTERN_INLINE void pyxis_writeb(unsigned char b, unsigned long addr)
{
unsigned long msb ;
msb = addr & 0xE0000000 ;
- addr &= MEM_R1_MASK ;
- if (msb != hae.cache) {
- set_hae(msb);
- }
+ addr &= PYXIS_MEM_R1_MASK ;
+ set_hae(msb);
+
*(vuip) ((addr << 5) + PYXIS_SPARSE_MEM + 0x00) = b * 0x01010101;
}
-extern inline void __writew(unsigned short b, unsigned long addr)
+__EXTERN_INLINE void pyxis_writew(unsigned short b, unsigned long addr)
{
unsigned long msb ;
msb = addr & 0xE0000000 ;
- addr &= MEM_R1_MASK ;
- if (msb != hae.cache) {
- set_hae(msb);
- }
+ addr &= PYXIS_MEM_R1_MASK ;
+ set_hae(msb);
+
*(vuip) ((addr << 5) + PYXIS_SPARSE_MEM + 0x08) = b * 0x00010001;
}
-#endif /* SRM_SETUP */
-extern inline unsigned long __readl(unsigned long addr)
+__EXTERN_INLINE unsigned long pyxis_readl(unsigned long addr)
{
return *(vuip) (addr + PYXIS_DENSE_MEM);
}
-extern inline void __writel(unsigned int b, unsigned long addr)
+__EXTERN_INLINE unsigned long pyxis_readq(unsigned long addr)
+{
+ return *(vulp) (addr + PYXIS_DENSE_MEM);
+}
+
+__EXTERN_INLINE void pyxis_writel(unsigned int b, unsigned long addr)
{
*(vuip) (addr + PYXIS_DENSE_MEM) = b;
}
-#endif /* BWIO_ENABLED */
+__EXTERN_INLINE void pyxis_writeq(unsigned long b, unsigned long addr)
+{
+ *(vulp) (addr + PYXIS_DENSE_MEM) = b;
+}
-#define readl(a) __readl((unsigned long)(a))
-#define writel(v,a) __writel((v),(unsigned long)(a))
+/* Find the DENSE memory area for a given bus address. */
+__EXTERN_INLINE unsigned long pyxis_dense_mem(unsigned long addr)
+{
+ return PYXIS_DENSE_MEM;
+}
+
+#undef vucp
+#undef vusp
+#undef vip
#undef vuip
+#undef vulp
-extern unsigned long pyxis_init (unsigned long mem_start,
- unsigned long mem_end);
+#ifdef __WANT_IO_DEF
-#endif /* __KERNEL__ */
+#ifdef CONFIG_ALPHA_RUFFIAN
+#define virt_to_bus pyxis_ruffian_virt_to_bus
+#define bus_to_virt pyxis_ruffian_bus_to_virt
+#else
+#define virt_to_bus pyxis_virt_to_bus
+#define bus_to_virt pyxis_bus_to_virt
+#endif
-/*
- * Data structure for handling PYXIS machine checks:
- */
-struct el_PYXIS_sysdata_mcheck {
- u_long coma_gcr;
- u_long coma_edsr;
- u_long coma_ter;
- u_long coma_elar;
- u_long coma_ehar;
- u_long coma_ldlr;
- u_long coma_ldhr;
- u_long coma_base0;
- u_long coma_base1;
- u_long coma_base2;
- u_long coma_cnfg0;
- u_long coma_cnfg1;
- u_long coma_cnfg2;
- u_long epic_dcsr;
- u_long epic_pear;
- u_long epic_sear;
- u_long epic_tbr1;
- u_long epic_tbr2;
- u_long epic_pbr1;
- u_long epic_pbr2;
- u_long epic_pmr1;
- u_long epic_pmr2;
- u_long epic_harx1;
- u_long epic_harx2;
- u_long epic_pmlt;
- u_long epic_tag0;
- u_long epic_tag1;
- u_long epic_tag2;
- u_long epic_tag3;
- u_long epic_tag4;
- u_long epic_tag5;
- u_long epic_tag6;
- u_long epic_tag7;
- u_long epic_data0;
- u_long epic_data1;
- u_long epic_data2;
- u_long epic_data3;
- u_long epic_data4;
- u_long epic_data5;
- u_long epic_data6;
- u_long epic_data7;
-};
+#ifdef BWIO_ENABLED
+# define __inb pyxis_bw_inb
+# define __inw pyxis_bw_inw
+# define __inl pyxis_bw_inl
+# define __outb pyxis_bw_outb
+# define __outw pyxis_bw_outw
+# define __outl pyxis_bw_outl
+# define __readb pyxis_bw_readb
+# define __readw pyxis_bw_readw
+# define __writeb pyxis_bw_writeb
+# define __writew pyxis_bw_writew
+# define __readl pyxis_bw_readl
+# define __readq pyxis_bw_readq
+# define __writel pyxis_bw_writel
+# define __writeq pyxis_bw_writeq
+#else
+# define __inb pyxis_inb
+# define __inw pyxis_inw
+# define __inl pyxis_inl
+# define __outb pyxis_outb
+# define __outw pyxis_outw
+# define __outl pyxis_outl
+# ifdef CONFIG_ALPHA_SRM_SETUP
+# define __readb pyxis_srm_readb
+# define __readw pyxis_srm_readw
+# define __writeb pyxis_srm_writeb
+# define __writew pyxis_srm_writew
+# else
+# define __readb pyxis_readb
+# define __readw pyxis_readw
+# define __writeb pyxis_writeb
+# define __writew pyxis_writew
+# endif
+# define __readl pyxis_readl
+# define __readq pyxis_readq
+# define __writel pyxis_writel
+# define __writeq pyxis_writeq
+#endif /* BWIO */
+
+#define dense_mem pyxis_dense_mem
+
+#ifdef BWIO_ENABLED
+# define inb(port) __inb((port))
+# define inw(port) __inw((port))
+# define inl(port) __inl((port))
+# define outb(x, port) __outb((x),(port))
+# define outw(x, port) __outw((x),(port))
+# define outl(x, port) __outl((x),(port))
+# define readb(addr) __readb((addr))
+# define readw(addr) __readw((addr))
+# define writeb(b, addr) __writeb((b),(addr))
+# define writew(b, addr) __writew((b),(addr))
+#else
+# define inb(port) \
+ (__builtin_constant_p((port))?__inb(port):_inb(port))
+# define outb(x, port) \
+ (__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
+#endif /* BWIO */
+
+#define readl(a) __readl((unsigned long)(a))
+#define readq(a) __readq((unsigned long)(a))
+#define writel(v,a) __writel((v),(unsigned long)(a))
+#define writeq(v,a) __writeq((v),(unsigned long)(a))
+
+#endif /* __WANT_IO_DEF */
-#define RTC_PORT(x) (0x70 + (x))
-#define RTC_ADDR(x) (0x80 | (x))
-#define RTC_ALWAYS_BCD 0
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
#endif /* __ALPHA_PYXIS__H__ */
diff --git a/include/asm-alpha/t2.h b/include/asm-alpha/core_t2.h
index 524d6f765..1f0984b38 100644
--- a/include/asm-alpha/t2.h
+++ b/include/asm-alpha/core_t2.h
@@ -3,6 +3,8 @@
#include <linux/config.h>
#include <linux/types.h>
+#include <asm/compiler.h>
+
/*
* T2 is the internal name for the core logic chipset which provides
@@ -17,29 +19,29 @@
*
*/
-#define BYTE_ENABLE_SHIFT 5
-#define TRANSFER_LENGTH_SHIFT 3
-#define MEM_R1_MASK 0x03ffffff /* Mem sparse space region 1 mask is 26 bits */
-
-#ifdef CONFIG_ALPHA_SRM_SETUP
-/* if we are using the SRM PCI setup, we'll need to use variables instead */
-#define T2_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
-#define T2_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
+#define T2_MEM_R1_MASK 0x03ffffff /* Mem sparse region 1 mask is 26 bits */
-extern unsigned int T2_DMA_WIN_BASE;
-extern unsigned int T2_DMA_WIN_SIZE;
+#define T2_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
+#define T2_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
-#else /* SRM_SETUP */
-#define T2_DMA_WIN_BASE (1024*1024*1024)
-#define T2_DMA_WIN_SIZE (1024*1024*1024)
-#endif /* SRM_SETUP */
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM_SETUP)
+#define T2_DMA_WIN_BASE alpha_mv.dma_win_base
+#define T2_DMA_WIN_SIZE alpha_mv.dma_win_size
+#else
+#define T2_DMA_WIN_BASE T2_DMA_WIN_BASE_DEFAULT
+#define T2_DMA_WIN_SIZE T2_DMA_WIN_SIZE_DEFAULT
+#endif
/* GAMMA-SABLE is a SABLE with EV5-based CPUs */
-#ifdef CONFIG_ALPHA_GAMMA
-# define GAMMA_BIAS 0x8000000000UL
-#else /* GAMMA */
-# define GAMMA_BIAS 0x0000000000UL
-#endif /* GAMMA */
+#define _GAMMA_BIAS 0x8000000000UL
+
+#if defined(CONFIG_ALPHA_GENERIC)
+#define GAMMA_BIAS alpha_mv.sys.t2.gamma_bias
+#elif defined(CONFIG_ALPHA_GAMMA)
+#define GAMMA_BIAS _GAMMA_BIAS
+#else
+#define GAMMA_BIAS 0
+#endif
/*
* Memory spaces:
@@ -70,7 +72,7 @@ extern unsigned int T2_DMA_WIN_SIZE;
#define T2_HAE_3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000240UL)
#define T2_HAE_4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000260UL)
-#define HAE_ADDRESS T2_HAE_1
+#define T2_HAE_ADDRESS T2_HAE_1
/* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to
3.8fff.ffff
@@ -105,344 +107,17 @@ extern unsigned int T2_DMA_WIN_SIZE;
*
*
*/
-#define CPU0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x380000000L)
-#define CPU1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x381000000L)
-#define CPU2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x382000000L)
-#define CPU3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x383000000L)
-#define MEM0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x388000000L)
-#define MEM1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x389000000L)
-#define MEM2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38a000000L)
-#define MEM3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38b000000L)
-
-#ifdef __KERNEL__
-
-/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-extern inline unsigned long virt_to_bus(void * address)
-{
- return virt_to_phys(address) + T2_DMA_WIN_BASE;
-}
-
-extern inline void * bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address - T2_DMA_WIN_BASE);
-}
-
-/*
- * I/O functions:
- *
- * T2 (the core logic PCI/memory support chipset for the SABLE
- * series of processors uses a sparse address mapping scheme to
- * get at PCI memory and I/O.
- */
-
-#define vuip volatile unsigned int *
-
-extern inline unsigned int __inb(unsigned long addr)
-{
- long result = *(vuip) ((addr << 5) + T2_IO + 0x00);
- result >>= (addr & 3) * 8;
- return 0xffUL & result;
-}
-
-extern inline void __outb(unsigned char b, unsigned long addr)
-{
- unsigned int w;
-
- asm ("insbl %2,%1,%0" : "r="(w) : "ri"(addr & 0x3), "r"(b));
- *(vuip) ((addr << 5) + T2_IO + 0x00) = w;
- mb();
-}
-
-extern inline unsigned int __inw(unsigned long addr)
-{
- long result = *(vuip) ((addr << 5) + T2_IO + 0x08);
- result >>= (addr & 3) * 8;
- return 0xffffUL & result;
-}
-
-extern inline void __outw(unsigned short b, unsigned long addr)
-{
- unsigned int w;
-
- asm ("inswl %2,%1,%0" : "r="(w) : "ri"(addr & 0x3), "r"(b));
- *(vuip) ((addr << 5) + T2_IO + 0x08) = w;
- mb();
-}
-
-extern inline unsigned int __inl(unsigned long addr)
-{
- return *(vuip) ((addr << 5) + T2_IO + 0x18);
-}
-
-extern inline void __outl(unsigned int b, unsigned long addr)
-{
- *(vuip) ((addr << 5) + T2_IO + 0x18) = b;
- mb();
-}
+#define T2_CPU0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x380000000L)
+#define T2_CPU1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x381000000L)
+#define T2_CPU2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x382000000L)
+#define T2_CPU3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x383000000L)
+#define T2_MEM0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x388000000L)
+#define T2_MEM1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x389000000L)
+#define T2_MEM2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38a000000L)
+#define T2_MEM3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38b000000L)
/*
- * Memory functions. 64-bit and 32-bit accesses are done through
- * dense memory space, everything else through sparse space.
- *
- * For reading and writing 8 and 16 bit quantities we need to
- * go through one of the three sparse address mapping regions
- * and use the HAE_MEM CSR to provide some bits of the address.
- * The following few routines use only sparse address region 1
- * which gives 1Gbyte of accessible space which relates exactly
- * to the amount of PCI memory mapping *into* system address space.
- * See p 6-17 of the specification but it looks something like this:
- *
- * 21164 Address:
- *
- * 3 2 1
- * 9876543210987654321098765432109876543210
- * 1ZZZZ0.PCI.QW.Address............BBLL
- *
- * ZZ = SBZ
- * BB = Byte offset
- * LL = Transfer length
- *
- * PCI Address:
- *
- * 3 2 1
- * 10987654321098765432109876543210
- * HHH....PCI.QW.Address........ 00
- *
- * HHH = 31:29 HAE_MEM CSR
- *
- */
-#ifdef CONFIG_ALPHA_SRM_SETUP
-
-extern unsigned long t2_sm_base;
-
-extern inline unsigned long __readb(unsigned long addr)
-{
- unsigned long result, shift, work;
-
- if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
- work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
- else
- if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
- work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
- else
- {
-#if 0
- printk("__readb: address 0x%lx not covered by HAE\n", addr);
-#endif
- return 0x0ffUL;
- }
- shift = (addr & 0x3) << 3;
- result = *(vuip) work;
- result >>= shift;
- return 0x0ffUL & result;
-}
-
-extern inline unsigned long __readw(unsigned long addr)
-{
- unsigned long result, shift, work;
-
- if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
- work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
- else
- if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
- work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
- else
- {
-#if 0
- printk("__readw: address 0x%lx not covered by HAE\n", addr);
-#endif
- return 0x0ffffUL;
- }
- shift = (addr & 0x3) << 3;
- result = *(vuip) work;
- result >>= shift;
- return 0x0ffffUL & result;
-}
-
-/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
-extern inline unsigned long __readl(unsigned long addr)
-{
- unsigned long result, work;
-
- if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
- work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
- else
- if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
- work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
- else
- {
-#if 0
- printk("__readl: address 0x%lx not covered by HAE\n", addr);
-#endif
- return 0x0ffffffffUL;
- }
- result = *(vuip) work;
- return 0xffffffffUL & result;
-}
-
-extern inline void __writeb(unsigned char b, unsigned long addr)
-{
- unsigned long work;
-
- if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
- work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
- else
- if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
- work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x00);
- else
- {
-#if 0
- printk("__writeb: address 0x%lx not covered by HAE\n", addr);
-#endif
- return;
- }
- *(vuip) work = b * 0x01010101;
-}
-
-extern inline void __writew(unsigned short b, unsigned long addr)
-{
- unsigned long work;
-
- if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
- work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
- else
- if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
- work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x08);
- else
- {
-#if 0
- printk("__writew: address 0x%lx not covered by HAE\n", addr);
-#endif
- return;
- }
- *(vuip) work = b * 0x00010001;
-}
-
-/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
-extern inline void __writel(unsigned int b, unsigned long addr)
-{
- unsigned long work;
-
- if ((addr >= t2_sm_base) && (addr <= (t2_sm_base + MEM_R1_MASK)))
- work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
- else
- if ((addr >= 512*1024) && (addr < 1024*1024)) /* check HOLE */
- work = (((addr & MEM_R1_MASK) << 5) + T2_SPARSE_MEM + 0x18);
- {
-#if 0
- printk("__writel: address 0x%lx not covered by HAE\n", addr);
-#endif
- return;
- }
- *(vuip) work = b;
-}
-
-#else /* SRM_SETUP */
-
-extern inline unsigned long __readb(unsigned long addr)
-{
- unsigned long result, shift, msb;
-
- shift = (addr & 0x3) * 8 ;
- msb = addr & 0xE0000000 ;
- addr &= MEM_R1_MASK ;
- if (msb != hae.cache) {
- set_hae(msb);
- }
- result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) ;
- result >>= shift;
- return 0xffUL & result;
-}
-
-extern inline unsigned long __readw(unsigned long addr)
-{
- unsigned long result, shift, msb;
-
- shift = (addr & 0x3) * 8;
- msb = addr & 0xE0000000 ;
- addr &= MEM_R1_MASK ;
- if (msb != hae.cache) {
- set_hae(msb);
- }
- result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
- result >>= shift;
- return 0xffffUL & result;
-}
-
-/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
-extern inline unsigned long __readl(unsigned long addr)
-{
- unsigned long result, msb;
-
- msb = addr & 0xE0000000 ;
- addr &= MEM_R1_MASK ;
- if (msb != hae.cache) {
- set_hae(msb);
- }
- result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
- return 0xffffffffUL & result;
-}
-
-extern inline void __writeb(unsigned char b, unsigned long addr)
-{
- unsigned long msb ;
-
- msb = addr & 0xE0000000 ;
- addr &= MEM_R1_MASK ;
- if (msb != hae.cache) {
- set_hae(msb);
- }
- *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = b * 0x01010101;
-}
-
-extern inline void __writew(unsigned short b, unsigned long addr)
-{
- unsigned long msb ;
-
- msb = addr & 0xE0000000 ;
- addr &= MEM_R1_MASK ;
- if (msb != hae.cache) {
- set_hae(msb);
- }
- *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = b * 0x00010001;
-}
-
-/* on SABLE with T2, we must use SPARSE memory even for 32-bit access */
-extern inline void __writel(unsigned int b, unsigned long addr)
-{
- unsigned long msb ;
-
- msb = addr & 0xE0000000 ;
- addr &= MEM_R1_MASK ;
- if (msb != hae.cache) {
- set_hae(msb);
- }
- *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
-}
-
-#endif /* SRM_SETUP */
-
-#define inb(port) \
-(__builtin_constant_p((port))?__inb(port):_inb(port))
-
-#define outb(x, port) \
-(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
-
-#define readl(a) __readl((unsigned long)(a))
-#define writel(v,a) __writel((v),(unsigned long)(a))
-
-#undef vuip
-
-extern unsigned long t2_init (unsigned long mem_start,
- unsigned long mem_end);
-
-#endif /* __KERNEL__ */
-
-/*
* Sable CPU Module CSRS
*
* These are CSRs for hardware other than the CPU chip on the CPU module.
@@ -452,22 +127,22 @@ extern unsigned long t2_init (unsigned long mem_start,
*/
struct sable_cpu_csr {
-unsigned long bcc; long fill_00[3]; /* Backup Cache Control */
-unsigned long bcce; long fill_01[3]; /* Backup Cache Correctable Error */
-unsigned long bccea; long fill_02[3]; /* B-Cache Corr Err Address Latch */
-unsigned long bcue; long fill_03[3]; /* B-Cache Uncorrectable Error */
-unsigned long bcuea; long fill_04[3]; /* B-Cache Uncorr Err Addr Latch */
-unsigned long dter; long fill_05[3]; /* Duplicate Tag Error */
-unsigned long cbctl; long fill_06[3]; /* CBus Control */
-unsigned long cbe; long fill_07[3]; /* CBus Error */
-unsigned long cbeal; long fill_08[3]; /* CBus Error Addr Latch low */
-unsigned long cbeah; long fill_09[3]; /* CBus Error Addr Latch high */
-unsigned long pmbx; long fill_10[3]; /* Processor Mailbox */
-unsigned long ipir; long fill_11[3]; /* Inter-Processor Int Request */
-unsigned long sic; long fill_12[3]; /* System Interrupt Clear */
-unsigned long adlk; long fill_13[3]; /* Address Lock (LDxL/STxC) */
-unsigned long madrl; long fill_14[3]; /* CBus Miss Address */
-unsigned long rev; long fill_15[3]; /* CMIC Revision */
+ unsigned long bcc; long fill_00[3]; /* Backup Cache Control */
+ unsigned long bcce; long fill_01[3]; /* Backup Cache Correctable Error */
+ unsigned long bccea; long fill_02[3]; /* B-Cache Corr Err Address Latch */
+ unsigned long bcue; long fill_03[3]; /* B-Cache Uncorrectable Error */
+ unsigned long bcuea; long fill_04[3]; /* B-Cache Uncorr Err Addr Latch */
+ unsigned long dter; long fill_05[3]; /* Duplicate Tag Error */
+ unsigned long cbctl; long fill_06[3]; /* CBus Control */
+ unsigned long cbe; long fill_07[3]; /* CBus Error */
+ unsigned long cbeal; long fill_08[3]; /* CBus Error Addr Latch low */
+ unsigned long cbeah; long fill_09[3]; /* CBus Error Addr Latch high */
+ unsigned long pmbx; long fill_10[3]; /* Processor Mailbox */
+ unsigned long ipir; long fill_11[3]; /* Inter-Processor Int Request */
+ unsigned long sic; long fill_12[3]; /* System Interrupt Clear */
+ unsigned long adlk; long fill_13[3]; /* Address Lock (LDxL/STxC) */
+ unsigned long madrl; long fill_14[3]; /* CBus Miss Address */
+ unsigned long rev; long fill_15[3]; /* CMIC Revision */
};
/*
@@ -495,7 +170,7 @@ struct el_t2_procdata_mcheck {
unsigned long elfmc_biu_stat; /* BIU Status. */
	unsigned long elfmc_biu_addr; /* BIU Address. */
unsigned long elfmc_biu_ctl; /* BIU Control. */
- unsigned long elfmc_fill_syndrome; /* For correcting ECC errors. */
+ unsigned long elfmc_fill_syndrome; /* For correcting ECC errors. */
unsigned long elfmc_fill_addr;/* Cache block which was being read. */
unsigned long elfmc_va; /* Effective VA of fault or miss. */
unsigned long elfmc_bc_tag; /* Backup Cache Tag Probe Results. */
@@ -507,10 +182,10 @@ struct el_t2_procdata_mcheck {
struct el_t2_logout_header {
unsigned int elfl_size; /* size in bytes of logout area. */
- int elfl_sbz1:31; /* Should be zero. */
- char elfl_retry:1; /* Retry flag. */
- unsigned int elfl_procoffset; /* Processor-specific offset. */
- unsigned int elfl_sysoffset; /* Offset of system-specific. */
+ int elfl_sbz1:31; /* Should be zero. */
+ char elfl_retry:1; /* Retry flag. */
+ unsigned int elfl_procoffset; /* Processor-specific offset. */
+ unsigned int elfl_sysoffset; /* Offset of system-specific. */
unsigned int elfl_error_type; /* PAL error type code. */
unsigned int elfl_frame_rev; /* PAL Frame revision. */
};
@@ -554,7 +229,7 @@ struct el_t2_data_memory {
/*
- * Sable other cpu error frame - sable pfms section 3.43
+ * Sable other CPU error frame - Sable PFMS section 3.43
*/
struct el_t2_data_other_cpu {
short elco_cpuid; /* CPU ID */
@@ -578,7 +253,7 @@ struct el_t2_data_other_cpu {
};
/*
- * Sable other cpu error frame - sable pfms section 3.44
+ * Sable T2 error frame - Sable PFMS section 3.44
*/
struct el_t2_data_t2 {
struct el_t2_frame_header elct_hdr; /* ID$T2-FRAME */
@@ -647,8 +322,370 @@ struct el_t2_frame_corrected {
};
-#define RTC_PORT(x) (0x70 + (x))
-#define RTC_ADDR(x) (0x80 | (x))
-#define RTC_ALWAYS_BCD 0
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
+/*
+ * Translate physical memory address as seen on (PCI) bus into
+ * a kernel virtual address and vv.
+ */
+
+__EXTERN_INLINE unsigned long t2_virt_to_bus(void * address)
+{
+ return virt_to_phys(address) + T2_DMA_WIN_BASE;
+}
+
+__EXTERN_INLINE void * t2_bus_to_virt(unsigned long address)
+{
+ return phys_to_virt(address - T2_DMA_WIN_BASE);
+}
+
+/*
+ * I/O functions:
+ *
+ * T2 (the core logic PCI/memory support chipset for the SABLE
+ * series of processors) uses a sparse address mapping scheme to
+ * get at PCI memory and I/O.
+ */
+
+#define vip volatile int *
+#define vuip volatile unsigned int *
+
+__EXTERN_INLINE unsigned int t2_inb(unsigned long addr)
+{
+ long result = *(vip) ((addr << 5) + T2_IO + 0x00);
+ return __kernel_extbl(result, addr & 3);
+}
+
+__EXTERN_INLINE void t2_outb(unsigned char b, unsigned long addr)
+{
+ unsigned long w;
+
+ w = __kernel_insbl(b, addr & 3);
+ *(vuip) ((addr << 5) + T2_IO + 0x00) = w;
+ mb();
+}
+
+__EXTERN_INLINE unsigned int t2_inw(unsigned long addr)
+{
+ long result = *(vip) ((addr << 5) + T2_IO + 0x08);
+ return __kernel_extwl(result, addr & 3);
+}
+
+__EXTERN_INLINE void t2_outw(unsigned short b, unsigned long addr)
+{
+ unsigned int w;
+
+ w = __kernel_inswl(b, addr & 3);
+ *(vuip) ((addr << 5) + T2_IO + 0x08) = w;
+ mb();
+}
+
+__EXTERN_INLINE unsigned int t2_inl(unsigned long addr)
+{
+ return *(vuip) ((addr << 5) + T2_IO + 0x18);
+}
+
+__EXTERN_INLINE void t2_outl(unsigned int b, unsigned long addr)
+{
+ *(vuip) ((addr << 5) + T2_IO + 0x18) = b;
+ mb();
+}
+
+
+/*
+ * Memory functions. 64-bit and 32-bit accesses are done through
+ * dense memory space, everything else through sparse space.
+ *
+ * For reading and writing 8 and 16 bit quantities we need to
+ * go through one of the three sparse address mapping regions
+ * and use the HAE_MEM CSR to provide some bits of the address.
+ * The following few routines use only sparse address region 1
+ * which gives 1 Gbyte of accessible space, matching exactly the
+ * amount of PCI memory mapped *into* system address space.
+ * See p 6-17 of the specification, but it looks something like this:
+ *
+ * 21164 Address:
+ *
+ * 3 2 1
+ * 9876543210987654321098765432109876543210
+ * 1ZZZZ0.PCI.QW.Address............BBLL
+ *
+ * ZZ = SBZ
+ * BB = Byte offset
+ * LL = Transfer length
+ *
+ * PCI Address:
+ *
+ * 3 2 1
+ * 10987654321098765432109876543210
+ * HHH....PCI.QW.Address........ 00
+ *
+ * HHH = 31:29 HAE_MEM CSR
+ *
+ */
+
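As a concrete illustration of the encoding above, here is a hedged sketch (the address and the helper name are made up, not from the patch) of the decode that t2_readb() below performs for a byte load through sparse region 1:

/* Hypothetical example only: decode bus address 0x2000004e. */
static inline unsigned long t2_readb_example(void)
{
	unsigned long addr = 0x2000004e;
	unsigned long msb  = addr & 0xE0000000;            /* 0x20000000 -> HAE_MEM bits 31:29 */
	unsigned long off  = (addr & T2_MEM_R1_MASK) << 5; /* QW address field */
	long lw;

	set_hae(msb);                                      /* HHH bits via the HAE_MEM CSR */
	lw = *(vip) (off + T2_SPARSE_MEM + 0x00);          /* +0x00: byte transfer length */
	return __kernel_extbl(lw, addr & 3);               /* byte lane 2 of the longword */
}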
+__EXTERN_INLINE unsigned long t2_srm_base(unsigned long addr)
+{
+ if ((addr >= alpha_mv.sm_base_r1
+ && addr <= alpha_mv.sm_base_r1 + T2_MEM_R1_MASK)
+ || (addr >= 512*1024 && addr < 1024*1024)) {
+ return ((addr & T2_MEM_R1_MASK) << 5) + T2_SPARSE_MEM;
+ }
+#if 0
+ printk("T2: address 0x%lx not covered by HAE\n", addr);
+#endif
+ return 0;
+}
+
+__EXTERN_INLINE unsigned long t2_srm_readb(unsigned long addr)
+{
+ unsigned long result, work;
+
+ if ((work = t2_srm_base(addr)) == 0)
+ return 0xff;
+ work += 0x00; /* add transfer length */
+
+ result = *(vip) work;
+ return __kernel_extbl(result, addr & 3);
+}
+
+__EXTERN_INLINE unsigned long t2_srm_readw(unsigned long addr)
+{
+ unsigned long result, work;
+
+ if ((work = t2_srm_base(addr)) == 0)
+ return 0xffff;
+ work += 0x08; /* add transfer length */
+
+ result = *(vip) work;
+ return __kernel_extwl(result, addr & 3);
+}
+
+/* On SABLE with T2, we must use SPARSE memory even for 32-bit access ... */
+__EXTERN_INLINE unsigned long t2_srm_readl(unsigned long addr)
+{
+ unsigned long work;
+
+ if ((work = t2_srm_base(addr)) == 0)
+ return 0xffffffff;
+ work += 0x18; /* add transfer length */
+
+ return *(vuip) work;
+}
+
+/* ... which makes me wonder why we advertise we have DENSE memory at all.
+ Anyway, guess that means we should emulate 64-bit access as two cycles. */
+__EXTERN_INLINE unsigned long t2_srm_readq(unsigned long addr)
+{
+ unsigned long work, r0, r1;
+
+ if ((work = t2_srm_base(addr)) == 0)
+ return ~0UL;
+ work += 0x18; /* add transfer length */
+
+ r0 = *(vuip) work;
+ r1 = *(vuip) (work + (4 << 5));
+ return r1 << 32 | r0;
+}
+
+__EXTERN_INLINE void t2_srm_writeb(unsigned char b, unsigned long addr)
+{
+ unsigned long work = t2_srm_base(addr);
+ if (work) {
+ work += 0x00; /* add transfer length */
+ *(vuip) work = b * 0x01010101;
+ }
+}
+
+__EXTERN_INLINE void t2_srm_writew(unsigned short b, unsigned long addr)
+{
+ unsigned long work = t2_srm_base(addr);
+ if (work) {
+ work += 0x08; /* add transfer length */
+ *(vuip) work = b * 0x00010001;
+ }
+}
+
+/* On SABLE with T2, we must use SPARSE memory even for 32-bit access ... */
+__EXTERN_INLINE void t2_srm_writel(unsigned int b, unsigned long addr)
+{
+ unsigned long work = t2_srm_base(addr);
+ if (work) {
+ work += 0x18; /* add transfer length */
+ *(vuip) work = b;
+ }
+}
+
+/* ... which makes me wonder why we advertise we have DENSE memory at all.
+ Anyway, guess that means we should emulate 64-bit access as two cycles. */
+__EXTERN_INLINE void t2_srm_writeq(unsigned long b, unsigned long addr)
+{
+ unsigned long work = t2_srm_base(addr);
+ if (work) {
+ work += 0x18; /* add transfer length */
+ *(vuip) work = b;
+ *(vuip) (work + (4 << 5)) = b >> 32;
+ }
+}
+
+__EXTERN_INLINE unsigned long t2_readb(unsigned long addr)
+{
+ unsigned long result, msb;
+
+ msb = addr & 0xE0000000 ;
+ addr &= T2_MEM_R1_MASK ;
+ set_hae(msb);
+
+ result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00) ;
+ return __kernel_extbl(result, addr & 3);
+}
+
+__EXTERN_INLINE unsigned long t2_readw(unsigned long addr)
+{
+ unsigned long result, msb;
+
+ msb = addr & 0xE0000000 ;
+ addr &= T2_MEM_R1_MASK ;
+ set_hae(msb);
+
+ result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
+ return __kernel_extwl(result, addr & 3);
+}
+
+/* On SABLE with T2, we must use SPARSE memory even for 32-bit access. */
+__EXTERN_INLINE unsigned long t2_readl(unsigned long addr)
+{
+ unsigned long msb;
+
+ msb = addr & 0xE0000000 ;
+ addr &= T2_MEM_R1_MASK ;
+ set_hae(msb);
+
+ return *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
+}
+
+__EXTERN_INLINE unsigned long t2_readq(unsigned long addr)
+{
+ unsigned long r0, r1, work, msb;
+
+ msb = addr & 0xE0000000 ;
+ addr &= T2_MEM_R1_MASK ;
+ set_hae(msb);
+
+ work = (addr << 5) + T2_SPARSE_MEM + 0x18;
+ r0 = *(vuip)(work);
+ r1 = *(vuip)(work + (4 << 5));
+ return r1 << 32 | r0;
+}
+
+__EXTERN_INLINE void t2_writeb(unsigned char b, unsigned long addr)
+{
+ unsigned long msb ;
+
+ msb = addr & 0xE0000000 ;
+ addr &= T2_MEM_R1_MASK ;
+ set_hae(msb);
+
+ *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = b * 0x01010101;
+}
+
+__EXTERN_INLINE void t2_writew(unsigned short b, unsigned long addr)
+{
+ unsigned long msb ;
+
+ msb = addr & 0xE0000000 ;
+ addr &= T2_MEM_R1_MASK ;
+ set_hae(msb);
+
+ *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = b * 0x00010001;
+}
+
+/* On SABLE with T2, we must use SPARSE memory even for 32-bit access. */
+__EXTERN_INLINE void t2_writel(unsigned int b, unsigned long addr)
+{
+ unsigned long msb ;
+
+ msb = addr & 0xE0000000 ;
+ addr &= T2_MEM_R1_MASK ;
+ set_hae(msb);
+
+ *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
+}
+
+__EXTERN_INLINE void t2_writeq(unsigned long b, unsigned long addr)
+{
+ unsigned long msb, work;
+
+ msb = addr & 0xE0000000 ;
+ addr &= T2_MEM_R1_MASK ;
+ set_hae(msb);
+
+ work = (addr << 5) + T2_SPARSE_MEM + 0x18;
+ *(vuip)work = b;
+ *(vuip)(work + (4 << 5)) = b >> 32;
+}
+
+/* Find the DENSE memory area for a given bus address. */
+
+__EXTERN_INLINE unsigned long t2_dense_mem(unsigned long addr)
+{
+ return T2_DENSE_MEM;
+}
+
+#undef vip
+#undef vuip
+
+#ifdef __WANT_IO_DEF
+
+#define virt_to_bus t2_virt_to_bus
+#define bus_to_virt t2_bus_to_virt
+#define __inb t2_inb
+#define __inw t2_inw
+#define __inl t2_inl
+#define __outb t2_outb
+#define __outw t2_outw
+#define __outl t2_outl
+
+#ifdef CONFIG_ALPHA_SRM_SETUP
+#define __readb t2_srm_readb
+#define __readw t2_srm_readw
+#define __readl t2_srm_readl
+#define __readq t2_srm_readq
+#define __writeb t2_srm_writeb
+#define __writew t2_srm_writew
+#define __writel t2_srm_writel
+#define __writeq t2_srm_writeq
+#else
+#define __readb t2_readb
+#define __readw t2_readw
+#define __readl t2_readl
+#define __readq t2_readq
+#define __writeb t2_writeb
+#define __writew t2_writew
+#define __writel t2_writel
+#define __writeq t2_writeq
+#endif
+
+#define dense_mem t2_dense_mem
+
+#define inb(port) \
+(__builtin_constant_p((port))?__inb(port):_inb(port))
+
+#define outb(x, port) \
+(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
+
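The __builtin_constant_p() dispatch above keeps the fast path inline when the port number is a compile-time constant and falls back to the out-of-line helpers otherwise; illustrative call sites (some_port is a made-up variable):

	inb(0x3f8);	/* constant port: folds to the t2_inb() inline */
	inb(some_port);	/* variable port: calls the external _inb() */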
+#endif /* __WANT_IO_DEF */
+
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
#endif /* __ALPHA_T2__H__ */
diff --git a/include/asm-alpha/tsunami.h b/include/asm-alpha/core_tsunami.h
index 2c6be4e23..5da9c08bb 100644
--- a/include/asm-alpha/tsunami.h
+++ b/include/asm-alpha/core_tsunami.h
@@ -3,6 +3,7 @@
#include <linux/config.h>
#include <linux/types.h>
+#include <asm/compiler.h>
/*
* TSUNAMI/TYPHOON are the internal names for the core logic chipset which
@@ -15,22 +16,18 @@
*
*/
-#define BYTE_ENABLE_SHIFT 5
-#define TRANSFER_LENGTH_SHIFT 3
-
-#ifdef CONFIG_ALPHA_SRM_SETUP
-/* if we are using the SRM PCI setup, we'll need to use variables instead */
#define TSUNAMI_DMA_WIN_BASE_DEFAULT (1024*1024*1024)
#define TSUNAMI_DMA_WIN_SIZE_DEFAULT (1024*1024*1024)
-extern unsigned int TSUNAMI_DMA_WIN_BASE;
-extern unsigned int TSUNAMI_DMA_WIN_SIZE;
-
-#else /* SRM_SETUP */
-#define TSUNAMI_DMA_WIN_BASE (1024*1024*1024)
-#define TSUNAMI_DMA_WIN_SIZE (1024*1024*1024)
-#endif /* SRM_SETUP */
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM_SETUP)
+#define TSUNAMI_DMA_WIN_BASE alpha_mv.dma_win_base
+#define TSUNAMI_DMA_WIN_SIZE alpha_mv.dma_win_size
+#else
+#define TSUNAMI_DMA_WIN_BASE TSUNAMI_DMA_WIN_BASE_DEFAULT
+#define TSUNAMI_DMA_WIN_SIZE TSUNAMI_DMA_WIN_SIZE_DEFAULT
+#endif
+/* XXX: Do we need to conditionalize on this? */
#ifdef USE_48_BIT_KSEG
#define TS_BIAS 0x80000000000UL
#else
@@ -287,20 +284,30 @@ union TPchipPERRMASK {
#define TSUNAMI_PCI1_IO (IDENT_ADDR + TS_BIAS + 0x3FC000000UL)
#define TSUNAMI_PCI1_CONF (IDENT_ADDR + TS_BIAS + 0x3FE000000UL)
-#define HAE_ADDRESS 0
+/*
+ * Data structure for handling TSUNAMI machine checks:
+ */
+struct el_TSUNAMI_sysdata_mcheck {
+};
+
#ifdef __KERNEL__
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
/*
* Translate physical memory address as seen on (PCI) bus into
* a kernel virtual address and vv.
*/
-extern inline unsigned long virt_to_bus(void * address)
+__EXTERN_INLINE unsigned long tsunami_virt_to_bus(void * address)
{
return virt_to_phys(address) + TSUNAMI_DMA_WIN_BASE;
}
-extern inline void * bus_to_virt(unsigned long address)
+__EXTERN_INLINE void * tsunami_bus_to_virt(unsigned long address)
{
return phys_to_virt(address - TSUNAMI_DMA_WIN_BASE);
}
@@ -315,138 +322,132 @@ extern inline void * bus_to_virt(unsigned long address)
/* HACK ALERT! HACK ALERT! */
/* HACK ALERT! HACK ALERT! */
-/* only using PCI bus 0 for now in all routines */
+/* Only using PCI bus 0 for now in all routines. */
+
+#define TSUNAMI_IACK_SC TSUNAMI_PCI0_IACK_SC
/* HACK ALERT! HACK ALERT! */
/* HACK ALERT! HACK ALERT! */
-
+#define vucp volatile unsigned char *
+#define vusp volatile unsigned short *
#define vuip volatile unsigned int *
+#define vulp volatile unsigned long *
-extern inline unsigned int __inb(unsigned long addr)
+__EXTERN_INLINE unsigned int tsunami_inb(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldbu %0,%1"
- : "=r" (result)
- : "m" (*(unsigned char *)(addr+TSUNAMI_PCI0_IO)));
-
- return result;
+ return __kernel_ldbu(*(vucp)(addr + TSUNAMI_PCI0_IO));
}
-extern inline void __outb(unsigned char b, unsigned long addr)
+__EXTERN_INLINE void tsunami_outb(unsigned char b, unsigned long addr)
{
- __asm__ __volatile__ (
- "stb %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned char *)(addr+TSUNAMI_PCI0_IO)), "r" (b));
+ __kernel_stb(b, *(vucp)(addr + TSUNAMI_PCI0_IO));
+ mb();
}
-extern inline unsigned int __inw(unsigned long addr)
+__EXTERN_INLINE unsigned int tsunami_inw(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldwu %0,%1"
- : "=r" (result)
- : "m" (*(unsigned short *)(addr+TSUNAMI_PCI0_IO)));
-
- return result;
+ return __kernel_ldwu(*(vusp)(addr+TSUNAMI_PCI0_IO));
}
-extern inline void __outw(unsigned short b, unsigned long addr)
+__EXTERN_INLINE void tsunami_outw(unsigned short b, unsigned long addr)
{
- __asm__ __volatile__ (
- "stw %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned short *)(addr+TSUNAMI_PCI0_IO)), "r" (b));
+ __kernel_stw(b, *(vusp)(addr+TSUNAMI_PCI0_IO));
+ mb();
}
-extern inline unsigned int __inl(unsigned long addr)
+__EXTERN_INLINE unsigned int tsunami_inl(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldl %0,%1"
- : "=r" (result)
- : "m" (*(unsigned int *)(addr+TSUNAMI_PCI0_IO)));
-
- return result;
+ return *(vuip)(addr+TSUNAMI_PCI0_IO);
}
-extern inline void __outl(unsigned int b, unsigned long addr)
+__EXTERN_INLINE void tsunami_outl(unsigned int b, unsigned long addr)
{
- __asm__ __volatile__ (
- "stl %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned int *)(addr+TSUNAMI_PCI0_IO)), "r" (b));
+ *(vuip)(addr+TSUNAMI_PCI0_IO) = b;
+ mb();
}
/*
 * Memory functions. All accesses are done through linear space.
*/
-extern inline unsigned long __readb(unsigned long addr)
+__EXTERN_INLINE unsigned long tsunami_readb(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldbu %0,%1"
- : "=r" (result)
- : "m" (*(unsigned char *)(addr+TSUNAMI_PCI0_MEM)));
-
- return result;
+ return __kernel_ldbu(*(vucp)(addr+TSUNAMI_PCI0_MEM));
}
-extern inline unsigned long __readw(unsigned long addr)
+__EXTERN_INLINE unsigned long tsunami_readw(unsigned long addr)
{
- register unsigned long result;
-
- __asm__ __volatile__ (
- "ldwu %0,%1"
- : "=r" (result)
- : "m" (*(unsigned short *)(addr+TSUNAMI_PCI0_MEM)));
+ return __kernel_ldwu(*(vusp)(addr+TSUNAMI_PCI0_MEM));
+}
- return result;
+__EXTERN_INLINE unsigned long tsunami_readl(unsigned long addr)
+{
+ return *(vuip)(addr+TSUNAMI_PCI0_MEM);
}
-extern inline unsigned long __readl(unsigned long addr)
+__EXTERN_INLINE unsigned long tsunami_readq(unsigned long addr)
{
- register unsigned long result;
+ return *(vulp)(addr+TSUNAMI_PCI0_MEM);
+}
- __asm__ __volatile__ (
- "ldl %0,%1"
- : "=r" (result)
- : "m" (*(unsigned int *)(addr+TSUNAMI_PCI0_MEM)));
+__EXTERN_INLINE void tsunami_writeb(unsigned char b, unsigned long addr)
+{
+ __kernel_stb(b, *(vucp)(addr+TSUNAMI_PCI0_MEM));
+ mb();
+}
- return result;
+__EXTERN_INLINE void tsunami_writew(unsigned short b, unsigned long addr)
+{
+ __kernel_stw(b, *(vusp)(addr+TSUNAMI_PCI0_MEM));
+ mb();
}
-extern inline void __writeb(unsigned char b, unsigned long addr)
+__EXTERN_INLINE void tsunami_writel(unsigned int b, unsigned long addr)
{
- __asm__ __volatile__ (
- "stb %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned char *)(addr+TSUNAMI_PCI0_MEM)), "r" (b));
+ *(vuip)(addr+TSUNAMI_PCI0_MEM) = b;
+ mb();
}
-extern inline void __writew(unsigned short b, unsigned long addr)
+__EXTERN_INLINE void tsunami_writeq(unsigned long b, unsigned long addr)
{
- __asm__ __volatile__ (
- "stw %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned short *)(addr+TSUNAMI_PCI0_MEM)), "r" (b));
+ *(vulp)(addr+TSUNAMI_PCI0_MEM) = b;
+ mb();
}
-extern inline void __writel(unsigned int b, unsigned long addr)
+/* Find the DENSE memory area for a given bus address. */
+
+__EXTERN_INLINE unsigned long tsunami_dense_mem(unsigned long addr)
{
- __asm__ __volatile__ (
- "stl %1,%0\n\t"
- "mb"
- : : "m" (*(unsigned int *)(addr+TSUNAMI_PCI0_MEM)), "r" (b));
+ return TSUNAMI_PCI0_MEM;
}
+#undef vucp
+#undef vusp
+#undef vuip
+#undef vulp
+
+#ifdef __WANT_IO_DEF
+
+#define virt_to_bus tsunami_virt_to_bus
+#define bus_to_virt tsunami_bus_to_virt
+
+#define __inb tsunami_inb
+#define __inw tsunami_inw
+#define __inl tsunami_inl
+#define __outb tsunami_outb
+#define __outw tsunami_outw
+#define __outl tsunami_outl
+#define __readb tsunami_readb
+#define __readw tsunami_readw
+#define __writeb tsunami_writeb
+#define __writew tsunami_writew
+#define __readl tsunami_readl
+#define __readq tsunami_readq
+#define __writel tsunami_writel
+#define __writeq tsunami_writeq
+#define dense_mem tsunami_dense_mem
+
#define inb(port) __inb((port))
#define inw(port) __inw((port))
#define inl(port) __inl((port))
@@ -458,26 +459,20 @@ extern inline void __writel(unsigned int b, unsigned long addr)
#define readb(a) __readb((unsigned long)(a))
#define readw(a) __readw((unsigned long)(a))
#define readl(a) __readl((unsigned long)(a))
+#define readq(a) __readq((unsigned long)(a))
#define writeb(v,a) __writeb((v),(unsigned long)(a))
#define writew(v,a) __writew((v),(unsigned long)(a))
#define writel(v,a) __writel((v),(unsigned long)(a))
+#define writeq(v,a) __writeq((v),(unsigned long)(a))
-#undef vuip
+#endif /* __WANT_IO_DEF */
-extern unsigned long tsunami_init (unsigned long mem_start,
- unsigned long mem_end);
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
#endif /* __KERNEL__ */
-/*
- * Data structure for handling TSUNAMI machine checks:
- */
-struct el_TSUNAMI_sysdata_mcheck {
-};
-
-#define RTC_PORT(x) (0x70 + (x))
-#define RTC_ADDR(x) (0x80 | (x))
-#define RTC_ALWAYS_BCD 0
-
#endif /* __ALPHA_TSUNAMI__H__ */
diff --git a/include/asm-alpha/delay.h b/include/asm-alpha/delay.h
index df73028c9..87e69f657 100644
--- a/include/asm-alpha/delay.h
+++ b/include/asm-alpha/delay.h
@@ -35,13 +35,4 @@ extern __inline__ void udelay(unsigned long usecs)
__delay(usecs);
}
-/*
- * 64-bit integers means we don't have to worry about overflow as
- * on some other architectures..
- */
-extern __inline__ unsigned long muldiv(unsigned long a, unsigned long b, unsigned long c)
-{
- return (a*b)/c;
-}
-
#endif /* defined(__ALPHA_DELAY_H) */
diff --git a/include/asm-alpha/dma.h b/include/asm-alpha/dma.h
index d8077298d..686921599 100644
--- a/include/asm-alpha/dma.h
+++ b/include/asm-alpha/dma.h
@@ -19,8 +19,7 @@
#define _ASM_DMA_H
#include <linux/config.h>
-
-#include <asm/io.h> /* need byte IO */
+#include <asm/io.h>
#define dma_outb outb
#define dma_inb inb
@@ -75,7 +74,6 @@
#define MAX_DMA_CHANNELS 8
-#ifdef CONFIG_ALPHA_XL
/* The maximum address that we can perform a DMA transfer to on Alpha XL,
due to a hardware SIO (PCI<->ISA bus bridge) chip limitation, is 64MB.
See <asm/apecs.h> for more info.
@@ -86,12 +84,18 @@
We MUST coordinate the maximum with <asm/apecs.h> for consistency.
For now, this limit is set to 48Mb...
*/
-#define MAX_DMA_ADDRESS (0xfffffc0003000000UL)
-#else /* CONFIG_ALPHA_XL */
-/* The maximum address that we can perform a DMA transfer to on normal
- Alpha platforms */
-#define MAX_DMA_ADDRESS (~0UL)
-#endif /* CONFIG_ALPHA_XL */
+#define ALPHA_XL_MAX_DMA_ADDRESS (0xfffffc0003000000UL)
+#define ALPHA_MAX_DMA_ADDRESS (~0UL)
+
+#ifdef CONFIG_ALPHA_GENERIC
+# define MAX_DMA_ADDRESS (alpha_mv.max_dma_address)
+#else
+# ifdef CONFIG_ALPHA_XL
+# define MAX_DMA_ADDRESS ALPHA_XL_MAX_DMA_ADDRESS
+# else
+# define MAX_DMA_ADDRESS ALPHA_MAX_DMA_ADDRESS
+# endif
+#endif
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
diff --git a/include/asm-alpha/elf.h b/include/asm-alpha/elf.h
index 89657ba78..8d614e38f 100644
--- a/include/asm-alpha/elf.h
+++ b/include/asm-alpha/elf.h
@@ -99,7 +99,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
}
/* This yields a mask that user programs can use to figure out what
- instruction set this cpu supports. This is trivial on Alpha,
+ instruction set this CPU supports. This is trivial on Alpha,
but not so on other machines. */
#define ELF_HWCAP \
diff --git a/include/asm-alpha/floppy.h b/include/asm-alpha/floppy.h
index 39c1e491c..2f847ddb0 100644
--- a/include/asm-alpha/floppy.h
+++ b/include/asm-alpha/floppy.h
@@ -53,13 +53,24 @@ static int FDC2 = -1;
#define FLOPPY_MOTOR_MASK 0xf0
/*
- * Most Alphas have no problems with floppy DMA crossing 64k borders. Sigh...
+ * Most Alphas have no problems with floppy DMA crossing 64k borders;
+ * the XL is the exception. It is also the only platform with a DMA
+ * limit, so we use that limit as the test in the generic kernel.
*/
-#ifdef CONFIG_ALPHA_XL
-#define CROSS_64KB(a,s) \
- ((unsigned long)(a)/0x10000 != ((unsigned long)(a) + (s) - 1) / 0x10000)
-#else /* CONFIG_ALPHA_XL */
-#define CROSS_64KB(a,s) (0)
-#endif /* CONFIG_ALPHA_XL */
+
+#define __CROSS_64KB(a,s) \
+({ unsigned long __s64 = (unsigned long)(a); \
+ unsigned long __e64 = __s64 + (unsigned long)(s) - 1; \
+ (__s64 ^ __e64) & ~0xfffful; })
+
+#ifdef CONFIG_ALPHA_GENERIC
+# define CROSS_64KB(a,s) (__CROSS_64KB(a,s) && ~alpha_mv.max_dma_address)
+#else
+# ifdef CONFIG_ALPHA_XL
+# define CROSS_64KB(a,s) __CROSS_64KB(a,s)
+# else
+# define CROSS_64KB(a,s) (0)
+# endif
+#endif
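The XOR test in __CROSS_64KB() flags a crossing whenever the first and last byte of the transfer differ in any bit above bit 15; two made-up buffers show the arithmetic:

/* 0x20 bytes at 0xfff0 end at 0x1000f:
 *	(0xfff0 ^ 0x1000f) & ~0xffff == 0x10000	-> crosses, must bounce
 * 0x100 bytes at 0x10000 end at 0x100ff:
 *	(0x10000 ^ 0x100ff) & ~0xffff == 0	-> stays in one 64KB page
 */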
#endif /* __ASM_ALPHA_FLOPPY_H */
diff --git a/include/asm-alpha/hardirq.h b/include/asm-alpha/hardirq.h
index 556e744d2..27abc50fa 100644
--- a/include/asm-alpha/hardirq.h
+++ b/include/asm-alpha/hardirq.h
@@ -4,6 +4,7 @@
#include <linux/tasks.h>
extern unsigned int local_irq_count[NR_CPUS];
+#define in_interrupt() (local_irq_count[smp_processor_id()] + local_bh_count[smp_processor_id()] != 0)
#ifndef __SMP__
diff --git a/include/asm-alpha/hwrpb.h b/include/asm-alpha/hwrpb.h
index 6909f0b6b..5aaf94223 100644
--- a/include/asm-alpha/hwrpb.h
+++ b/include/asm-alpha/hwrpb.h
@@ -1,5 +1,5 @@
-#ifndef _HWRPB_H
-#define _HWRPB_H
+#ifndef __ALPHA_HWRPB_H
+#define __ALPHA_HWRPB_H
#define INIT_HWRPB ((struct hwrpb_struct *) 0x10000000)
@@ -34,6 +34,7 @@
#define ST_DEC_AXPPCI_33 11 /* NoName system type */
#define ST_DEC_TLASER 12 /* Turbolaser systype */
#define ST_DEC_2100_A50 13 /* Avanti systype */
+#define ST_DEC_MUSTANG 14 /* Mustang systype */
#define ST_DEC_ALCOR 15 /* Alcor (EV5) systype */
#define ST_DEC_1000 17 /* Mikasa systype */
#define ST_DEC_EB64 18 /* EB64 systype */
@@ -92,7 +93,7 @@ struct percpu_struct {
unsigned long halt_pv;
unsigned long halt_reason;
unsigned long res;
- unsigned long ipc_buffer[21];
+ char ipc_buffer[168];
unsigned long palcode_avail[16];
unsigned long compatibility;
};
@@ -184,4 +185,6 @@ struct hwrpb_struct {
unsigned long dsr_offset; /* "Dynamic System Recognition Data Block Table" */
};
-#endif
+extern struct hwrpb_struct *hwrpb;
+
+#endif /* __ALPHA_HWRPB_H */
diff --git a/include/asm-alpha/init.h b/include/asm-alpha/init.h
index 9ce618965..7d769dfcd 100644
--- a/include/asm-alpha/init.h
+++ b/include/asm-alpha/init.h
@@ -7,12 +7,6 @@
__arginit __init; \
__arginit
-#if __GNUC__ >= 2 && __GNUC_MINOR__ >= 8
-#define __initlocaldata __initdata
-#else
-#define __initlocaldata
-#endif
-
/* For assembly routines */
#define __INIT .section .text.init,"ax"
#define __FINIT .previous
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
index d2668b973..61da10c8e 100644
--- a/include/asm-alpha/io.h
+++ b/include/asm-alpha/io.h
@@ -2,29 +2,17 @@
#define __ALPHA_IO_H
#include <linux/config.h>
-
#include <asm/system.h>
+#include <asm/machvec.h>
-/* We don't use IO slowdowns on the alpha, but.. */
+/* We don't use IO slowdowns on the Alpha, but.. */
#define __SLOW_DOWN_IO do { } while (0)
#define SLOW_DOWN_IO do { } while (0)
/*
- * The hae (hardware address extension) register is used to
- * access high IO addresses. To avoid doing an external cycle
- * every time we need to set the hae, we have a hae cache in
- * memory. The kernel entry code makes sure that the hae is
- * preserved across interrupts, so it is safe to set the hae
- * once and then depend on it staying the same in kernel code.
- */
-extern struct hae {
- unsigned long cache;
- unsigned long *reg;
-} hae;
-
-/*
* Virtual -> physical identity mapping starts at this offset
*/
+/* XXX: Do we need to conditionalize on this? */
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR (0xffff800000000000UL)
#else
@@ -40,25 +28,34 @@ extern struct hae {
* register not being up-to-date with respect to the hardware
* value.
*/
-extern inline void set_hae(unsigned long new_hae)
+static inline void __set_hae(unsigned long new_hae)
{
unsigned long ipl = swpipl(7);
- hae.cache = new_hae;
- *hae.reg = new_hae;
+
+ alpha_mv.hae_cache = new_hae;
+ *alpha_mv.hae_register = new_hae;
mb();
- new_hae = *hae.reg; /* read to make sure it was written */
+
+ /* Re-read to make sure it was written. */
+ new_hae = *alpha_mv.hae_register;
setipl(ipl);
}
+static inline void set_hae(unsigned long new_hae)
+{
+ if (new_hae != alpha_mv.hae_cache)
+ __set_hae(new_hae);
+}
+
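This hoists the old per-caller "if (msb != hae.cache)" test into set_hae() itself, so the sparse-space routines in the core_* headers can now call it unconditionally; a typical caller (cf. t2_readb()/pyxis_readb() in this patch) now reads:

	msb = addr & 0xE0000000;
	set_hae(msb);	/* writes the chip register only when
			   msb != alpha_mv.hae_cache */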
/*
* Change virtual addresses to physical addresses and vv.
*/
-extern inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void * address)
{
return 0xffffffffUL & (unsigned long) address;
}
-extern inline void * phys_to_virt(unsigned long address)
+static inline void * phys_to_virt(unsigned long address)
{
return (void *) (address + IDENT_ADDR);
}
@@ -78,24 +75,77 @@ extern void _sethae (unsigned long addr); /* cached version */
/*
* There are different chipsets to interface the Alpha CPUs to the world.
*/
-#if defined(CONFIG_ALPHA_LCA)
-# include <asm/lca.h> /* get chip-specific definitions */
-#elif defined(CONFIG_ALPHA_APECS)
-# include <asm/apecs.h> /* get chip-specific definitions */
+
+#ifdef CONFIG_ALPHA_GENERIC
+
+/* In a generic kernel, we always go through the machine vector. */
+
+# define virt_to_bus(a) alpha_mv.mv_virt_to_bus(a)
+# define bus_to_virt(a) alpha_mv.mv_bus_to_virt(a)
+
+# define __inb alpha_mv.mv_inb
+# define __inw alpha_mv.mv_inw
+# define __inl alpha_mv.mv_inl
+# define __outb alpha_mv.mv_outb
+# define __outw alpha_mv.mv_outw
+# define __outl alpha_mv.mv_outl
+
+# define __readb(a) alpha_mv.mv_readb((unsigned long)(a))
+# define __readw(a) alpha_mv.mv_readw((unsigned long)(a))
+# define __readl(a) alpha_mv.mv_readl((unsigned long)(a))
+# define __readq(a) alpha_mv.mv_readq((unsigned long)(a))
+# define __writeb(v,a) alpha_mv.mv_writeb((v),(unsigned long)(a))
+# define __writew(v,a) alpha_mv.mv_writew((v),(unsigned long)(a))
+# define __writel(v,a) alpha_mv.mv_writel((v),(unsigned long)(a))
+# define __writeq(v,a) alpha_mv.mv_writeq((v),(unsigned long)(a))
+
+# define inb __inb
+# define inw __inw
+# define inl __inl
+# define outb __outb
+# define outw __outw
+# define outl __outl
+
+# define readb __readb
+# define readw __readw
+# define readl __readl
+# define readq __readq
+# define writeb __writeb
+# define writew __writew
+# define writel __writel
+# define writeq __writeq
+
+# define dense_mem(a) alpha_mv.mv_dense_mem(a)
+
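For reference, a hedged sketch of the machine-vector slots the macros above assume; the authoritative layout lives in <asm/machvec.h>, so the field order and exact types here are illustrative only:

struct alpha_machine_vector {
	unsigned long hae_cache;		/* used by set_hae() above */
	unsigned long *hae_register;
	unsigned long dma_win_base, dma_win_size;
	unsigned long max_dma_address;
	unsigned int (*mv_inb)(unsigned long port);
	void (*mv_outb)(unsigned char b, unsigned long port);
	unsigned long (*mv_readb)(unsigned long addr);
	void (*mv_writeb)(unsigned char b, unsigned long addr);
	/* ... one slot per __in*, __out*, __read*, __write* macro above */
};
extern struct alpha_machine_vector alpha_mv;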
+#else
+
+/* Control how and what gets defined within the core logic headers. */
+#define __WANT_IO_DEF
+
+#if defined(CONFIG_ALPHA_APECS)
+# include <asm/core_apecs.h>
#elif defined(CONFIG_ALPHA_CIA)
-# include <asm/cia.h> /* get chip-specific definitions */
-#elif defined(CONFIG_ALPHA_T2)
-# include <asm/t2.h> /* get chip-specific definitions */
+# include <asm/core_cia.h>
+#elif defined(CONFIG_ALPHA_LCA)
+# include <asm/core_lca.h>
+#elif defined(CONFIG_ALPHA_MCPCIA)
+# include <asm/core_mcpcia.h>
#elif defined(CONFIG_ALPHA_PYXIS)
-# include <asm/pyxis.h> /* get chip-specific definitions */
+# include <asm/core_pyxis.h>
+#elif defined(CONFIG_ALPHA_T2)
+# include <asm/core_t2.h>
#elif defined(CONFIG_ALPHA_TSUNAMI)
-# include <asm/tsunami.h> /* get chip-specific definitions */
-#elif defined(CONFIG_ALPHA_MCPCIA)
-# include <asm/mcpcia.h> /* get chip-specific definitions */
-#else
+# include <asm/core_tsunami.h>
+#elif defined(CONFIG_ALPHA_JENSEN)
# include <asm/jensen.h>
+#else
+#error "What system is this?"
#endif
+#undef __WANT_IO_DEF
+
+#endif /* GENERIC */
+
/*
* The convention used for inb/outb etc. is that names starting with
* two underscores are the inline versions, names starting with a
@@ -114,9 +164,11 @@ extern void _outl (unsigned int l,unsigned long port);
extern unsigned long _readb(unsigned long addr);
extern unsigned long _readw(unsigned long addr);
extern unsigned long _readl(unsigned long addr);
+extern unsigned long _readq(unsigned long addr);
extern void _writeb(unsigned char b, unsigned long addr);
extern void _writew(unsigned short b, unsigned long addr);
extern void _writel(unsigned int b, unsigned long addr);
+extern void _writeq(unsigned long b, unsigned long addr);
/*
* The platform header files may define some of these macros to use
@@ -169,12 +221,12 @@ extern void _writel(unsigned int b, unsigned long addr);
* On the alpha, we have the whole physical address space mapped at all
* times, so "ioremap()" and "iounmap()" do not need to do anything.
*/
-extern inline void * ioremap(unsigned long offset, unsigned long size)
+static inline void * ioremap(unsigned long offset, unsigned long size)
{
return (void *) offset;
}
-extern inline void iounmap(void *addr)
+static inline void iounmap(void *addr)
{
}
@@ -187,6 +239,9 @@ extern inline void iounmap(void *addr)
#ifndef readl
# define readl(a) _readl((unsigned long)(a))
#endif
+#ifndef readq
+# define readq(a) _readq((unsigned long)(a))
+#endif
#ifndef writeb
# define writeb(v,a) _writeb((v),(unsigned long)(a))
#endif
@@ -196,19 +251,29 @@ extern inline void iounmap(void *addr)
#ifndef writel
# define writel(v,a) _writel((v),(unsigned long)(a))
#endif
+#ifndef writeq
+# define writeq(v,a) _writeq((v),(unsigned long)(a))
+#endif
#ifdef __KERNEL__
/*
* String version of IO memory access ops:
*/
-extern void _memcpy_fromio(void *, unsigned long, unsigned long);
-extern void _memcpy_toio(unsigned long, void *, unsigned long);
-extern void _memset_io(unsigned long, int, unsigned long);
+extern void _memcpy_fromio(void *, unsigned long, long);
+extern void _memcpy_toio(unsigned long, void *, long);
+extern void _memset_c_io(unsigned long, unsigned long, long);
-#define memcpy_fromio(to,from,len) _memcpy_fromio((to),(unsigned long)(from),(len))
-#define memcpy_toio(to,from,len) _memcpy_toio((unsigned long)(to),(from),(len))
-#define memset_io(addr,c,len) _memset_io((unsigned long)(addr),(c),(len))
+#define memcpy_fromio(to,from,len) \
+ _memcpy_fromio((to),(unsigned long)(from),(len))
+#define memcpy_toio(to,from,len) \
+ _memcpy_toio((unsigned long)(to),(from),(len))
+#define memset_io(addr,c,len) \
+ _memset_c_io((unsigned long)(addr),0x0101010101010101UL*(u8)(c),(len))
+
+#define __HAVE_ARCH_MEMSETW_IO
+#define memsetw_io(addr,c,len) \
+ _memset_c_io((unsigned long)(addr),0x0001000100010001UL*(u16)(c),(len))
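The multiply replicates the fill value across a whole quadword, which is what lets a single _memset_c_io() back both the byte and the 16-bit variants; concrete expansions (hypothetical calls):

/* memset_io(p, 0xab, n) passes the fill pattern
 *	0x0101010101010101UL * (u8)0xab    == 0xababababababababUL
 * memsetw_io(p, 0x1234, n) passes
 *	0x0001000100010001UL * (u16)0x1234 == 0x1234123412341234UL
 */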
/*
* String versions of in/out ops:
@@ -223,13 +288,15 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
/*
* XXX - We don't have csum_partial_copy_fromio() yet, so we cheat here and
* just copy it. The net code will then do the checksum later. Presently
- * only used by some shared memory 8390 ethernet cards anyway.
+ * only used by some shared memory 8390 Ethernet cards anyway.
*/
-#define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len))
+#define eth_io_copy_and_sum(skb,src,len,unused) \
+ memcpy_fromio((skb)->data,(src),(len))
-static inline int check_signature(unsigned long io_addr,
- const unsigned char *signature, int length)
+static inline int
+check_signature(unsigned long io_addr, const unsigned char *signature,
+ int length)
{
int retval = 0;
do {
@@ -244,6 +311,29 @@ out:
return retval;
}
-#endif /* __KERNEL__ */
+/*
+ * The Alpha Jensen hardware, for some rather strange reason, puts
+ * the RTC clock at 0x170 instead of 0x70, probably due to some
+ * misguided idea about using 0x70 for NMI stuff.
+ *
+ * These defines will override the defaults when doing RTC queries.
+ */
+#ifdef CONFIG_ALPHA_GENERIC
+# define RTC_PORT(x) ((x) + alpha_mv.rtc_port)
+# define RTC_ADDR(x) ((x) | alpha_mv.rtc_addr)
+#else
+# ifdef CONFIG_ALPHA_JENSEN
+# define RTC_PORT(x) (0x170+(x))
+# define RTC_ADDR(x) (x)
+# else
+# define RTC_PORT(x) (0x70 + (x))
+# define RTC_ADDR(x) (0x80 | (x))
+# endif
#endif
+
+#define RTC_ALWAYS_BCD 0
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_IO_H */
diff --git a/include/asm-alpha/irq.h b/include/asm-alpha/irq.h
index 7140a1437..7f8853c55 100644
--- a/include/asm-alpha/irq.h
+++ b/include/asm-alpha/irq.h
@@ -10,18 +10,24 @@
#include <linux/linkage.h>
#include <linux/config.h>
-#if defined(CONFIG_ALPHA_CABRIOLET) || \
+#if defined(CONFIG_ALPHA_GENERIC)
+
+/* Here NR_IRQS is not exact, but rather an upper bound. This is used
+   in many places throughout the kernel to size static arrays. That's
+   OK; we'll use alpha_mv.nr_irqs when we want the real thing. */
+
+# define NR_IRQS 64
+
+#elif defined(CONFIG_ALPHA_CABRIOLET) || \
defined(CONFIG_ALPHA_EB66P) || \
defined(CONFIG_ALPHA_EB164) || \
defined(CONFIG_ALPHA_PC164) || \
- defined(CONFIG_ALPHA_LX164)
-
+ defined(CONFIG_ALPHA_LX164)
# define NR_IRQS 35
#elif defined(CONFIG_ALPHA_EB66) || \
defined(CONFIG_ALPHA_EB64P) || \
- defined(CONFIG_ALPHA_MIKASA)
-
+ defined(CONFIG_ALPHA_MIKASA)
# define NR_IRQS 32
#elif defined(CONFIG_ALPHA_ALCOR) || \
@@ -29,29 +35,52 @@
defined(CONFIG_ALPHA_MIATA) || \
defined(CONFIG_ALPHA_RUFFIAN) || \
defined(CONFIG_ALPHA_NORITAKE)
-
# define NR_IRQS 48
#elif defined(CONFIG_ALPHA_SABLE) || \
defined(CONFIG_ALPHA_SX164)
-
# define NR_IRQS 40
#elif defined(CONFIG_ALPHA_DP264) || \
defined(CONFIG_ALPHA_RAWHIDE)
-
# define NR_IRQS 64
#elif defined(CONFIG_ALPHA_TAKARA)
-
# define NR_IRQS 20
#else /* everyone else */
-
# define NR_IRQS 16
+#endif
+
+/*
+ * PROBE_MASK is the bitset of irqs that we consider for autoprobing.
+ */
+
+/* The normal mask includes all the IRQs except the timer. */
+#define _PROBE_MASK(nr_irqs) (((1UL << (nr_irqs & 63)) - 1) & ~1UL)
+/* Mask out unused timer irq 0 and RTC irq 8. */
+#define P2K_PROBE_MASK (_PROBE_MASK(16) & ~0x101UL)
+
+/* Mask out unused timer irq 0, "irqs" 20-30, and the EISA cascade. */
+#define ALCOR_PROBE_MASK (_PROBE_MASK(48) & ~0xfff000000001UL)
+
+/* Leave timer irq 0 in the mask. */
+#define RUFFIAN_PROBE_MASK (_PROBE_MASK(48) | 1UL)
+
+#if defined(CONFIG_ALPHA_GENERIC)
+# define PROBE_MASK alpha_mv.irq_probe_mask
+#elif defined(CONFIG_ALPHA_P2K)
+# define PROBE_MASK P2K_PROBE_MASK
+#elif defined(CONFIG_ALPHA_ALCOR) || defined(CONFIG_ALPHA_XLT)
+# define PROBE_MASK ALCOR_PROBE_MASK
+#elif defined(CONFIG_ALPHA_RUFFIAN)
+# define PROBE_MASK RUFFIAN_PROBE_MASK
+#else
+# define PROBE_MASK _PROBE_MASK(NR_IRQS)
#endif
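For example, the masks expand to these compile-time constants (plain arithmetic on the definitions above):

/* _PROBE_MASK(16)	== 0xfffe		bits 1..15 (timer irq 0 dropped)
 * P2K_PROBE_MASK	== 0xfefe		bit 8 (RTC) dropped as well
 * ALCOR_PROBE_MASK	== 0x000ffffffffe	bit 0 and bits 36..47 cleared
 */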
+
static __inline__ int irq_cannonicalize(int irq)
{
/*
@@ -64,4 +93,4 @@ static __inline__ int irq_cannonicalize(int irq)
extern void disable_irq(unsigned int);
extern void enable_irq(unsigned int);
-#endif
+#endif /* _ALPHA_IRQ_H */
diff --git a/include/asm-alpha/jensen.h b/include/asm-alpha/jensen.h
index 7a3b07851..75f99ea33 100644
--- a/include/asm-alpha/jensen.h
+++ b/include/asm-alpha/jensen.h
@@ -1,6 +1,8 @@
#ifndef __ALPHA_JENSEN_H
#define __ALPHA_JENSEN_H
+#include <asm/compiler.h>
+
/*
* Defines for the AlphaPC EISA IO and memory address space.
*/
@@ -66,6 +68,14 @@
*/
#define EISA_IO (IDENT_ADDR + 0x300000000UL)
+
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
/*
* Change virtual addresses to bus addresses and vv.
*
@@ -73,10 +83,16 @@
* as the bus address, but this is not necessarily true on
* other alpha hardware.
*/
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
+__EXTERN_INLINE unsigned long jensen_virt_to_bus(void * address)
+{
+ return virt_to_phys(address);
+}
+
+__EXTERN_INLINE void * jensen_bus_to_virt(unsigned long address)
+{
+ return phys_to_virt(address);
+}
-#define HAE_ADDRESS EISA_HAE
/*
* Handle the "host address register". This needs to be set
@@ -86,16 +102,19 @@
*
* HAE isn't needed for the local IO operations, though.
*/
-#define __HAE_MASK 0x1ffffff
-extern inline void __set_hae(unsigned long addr)
+
+#define JENSEN_HAE_ADDRESS EISA_HAE
+#define JENSEN_HAE_MASK 0x1ffffff
+
+__EXTERN_INLINE void jensen_set_hae(unsigned long addr)
{
/* hae on the Jensen is bits 31:25 shifted right */
addr >>= 25;
- if (addr != hae.cache)
+ if (addr != alpha_mv.hae_cache)
set_hae(addr);
}
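A quick worked value for the shift above (the address is made up): bits 31:25 of the bus address become the HAE value, and the low 25 bits (JENSEN_HAE_MASK) then address within the selected window, as jensen_readb() below shows.

	/* jensen_set_hae(0x06000000): 0x06000000 >> 25 == 3, so HAE
	   window 3 is selected before the sparse-space access. */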
-#ifdef __KERNEL__
+#define vuip volatile unsigned int *
/*
* IO functions
@@ -108,36 +127,31 @@ extern inline void __set_hae(unsigned long addr)
* gone in the PCI version. I hope I can get DEC suckered^H^H^H^H^H^H^H^H
* convinced that I need one of the newer machines.
*/
-extern inline unsigned int __local_inb(unsigned long addr)
+
+static inline unsigned int jensen_local_inb(unsigned long addr)
{
- long result = *(volatile int *) ((addr << 9) + EISA_VL82C106);
- return 0xffUL & result;
+ return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
}
-extern inline void __local_outb(unsigned char b, unsigned long addr)
+static inline void jensen_local_outb(unsigned char b, unsigned long addr)
{
- *(volatile unsigned int *) ((addr << 9) + EISA_VL82C106) = b;
+ *(vuip)((addr << 9) + EISA_VL82C106) = b;
mb();
}
-extern unsigned int _bus_inb(unsigned long addr);
-
-extern inline unsigned int __bus_inb(unsigned long addr)
+static inline unsigned int jensen_bus_inb(unsigned long addr)
{
long result;
- __set_hae(0);
- result = *(volatile int *) ((addr << 7) + EISA_IO + 0x00);
- result >>= (addr & 3) * 8;
- return 0xffUL & result;
+ jensen_set_hae(0);
+ result = *(volatile int *)((addr << 7) + EISA_IO + 0x00);
+ return __kernel_extbl(result, addr & 3);
}
-extern void _bus_outb(unsigned char b, unsigned long addr);
-
-extern inline void __bus_outb(unsigned char b, unsigned long addr)
+static inline void jensen_bus_outb(unsigned char b, unsigned long addr)
{
- __set_hae(0);
- *(volatile unsigned int *) ((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
+ jensen_set_hae(0);
+ *(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
mb();
}
@@ -146,111 +160,165 @@ extern inline void __bus_outb(unsigned char b, unsigned long addr)
* operations that result in operations across inline functions.
* Which is why this is a macro.
*/
-#define __is_local(addr) ( \
+
+#define jensen_is_local(addr) ( \
/* keyboard */ (addr == 0x60 || addr == 0x64) || \
/* RTC */ (addr == 0x170 || addr == 0x171) || \
/* mb COM2 */ (addr >= 0x2f8 && addr <= 0x2ff) || \
/* mb LPT1 */ (addr >= 0x3bc && addr <= 0x3be) || \
/* mb COM1 */ (addr >= 0x3f8 && addr <= 0x3ff))
-extern inline unsigned int __inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_inb(unsigned long addr)
{
- if (__is_local(addr))
- return __local_inb(addr);
- return _bus_inb(addr);
+ if (jensen_is_local(addr))
+ return jensen_local_inb(addr);
+ else
+ return jensen_bus_inb(addr);
}
-extern inline void __outb(unsigned char b, unsigned long addr)
+__EXTERN_INLINE void jensen_outb(unsigned char b, unsigned long addr)
{
- if (__is_local(addr))
- __local_outb(b, addr);
+ if (jensen_is_local(addr))
+ jensen_local_outb(b, addr);
else
- _bus_outb(b, addr);
+ jensen_bus_outb(b, addr);
}
-extern inline unsigned int __inw(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_inw(unsigned long addr)
{
long result;
- __set_hae(0);
+ jensen_set_hae(0);
result = *(volatile int *) ((addr << 7) + EISA_IO + 0x20);
result >>= (addr & 3) * 8;
return 0xffffUL & result;
}
-extern inline unsigned int __inl(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_inl(unsigned long addr)
{
- __set_hae(0);
- return *(volatile unsigned int *) ((addr << 7) + EISA_IO + 0x60);
+ jensen_set_hae(0);
+ return *(vuip) ((addr << 7) + EISA_IO + 0x60);
}
-extern inline void __outw(unsigned short b, unsigned long addr)
+__EXTERN_INLINE void jensen_outw(unsigned short b, unsigned long addr)
{
- __set_hae(0);
- *(volatile unsigned int *) ((addr << 7) + EISA_IO + 0x20) = b * 0x00010001;
+ jensen_set_hae(0);
+ *(vuip) ((addr << 7) + EISA_IO + 0x20) = b * 0x00010001;
mb();
}
-extern inline void __outl(unsigned int b, unsigned long addr)
+__EXTERN_INLINE void jensen_outl(unsigned int b, unsigned long addr)
{
- __set_hae(0);
- *(volatile unsigned int *) ((addr << 7) + EISA_IO + 0x60) = b;
+ jensen_set_hae(0);
+ *(vuip) ((addr << 7) + EISA_IO + 0x60) = b;
mb();
}
/*
* Memory functions.
*/
-extern inline unsigned long __readb(unsigned long addr)
+
+__EXTERN_INLINE unsigned long jensen_readb(unsigned long addr)
{
long result;
- __set_hae(addr);
- addr &= __HAE_MASK;
+ jensen_set_hae(addr);
+ addr &= JENSEN_HAE_MASK;
result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x00);
result >>= (addr & 3) * 8;
return 0xffUL & result;
}
-extern inline unsigned long __readw(unsigned long addr)
+__EXTERN_INLINE unsigned long jensen_readw(unsigned long addr)
{
long result;
- __set_hae(addr);
- addr &= __HAE_MASK;
+ jensen_set_hae(addr);
+ addr &= JENSEN_HAE_MASK;
result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x20);
result >>= (addr & 3) * 8;
return 0xffffUL & result;
}
-extern inline unsigned long __readl(unsigned long addr)
+__EXTERN_INLINE unsigned long jensen_readl(unsigned long addr)
+{
+ jensen_set_hae(addr);
+ addr &= JENSEN_HAE_MASK;
+ return *(vuip) ((addr << 7) + EISA_MEM + 0x60);
+}
+
+__EXTERN_INLINE unsigned long jensen_readq(unsigned long addr)
{
- __set_hae(addr);
- addr &= __HAE_MASK;
- return *(volatile unsigned int *) ((addr << 7) + EISA_MEM + 0x60);
+ unsigned long r0, r1;
+
+ jensen_set_hae(addr);
+ addr &= JENSEN_HAE_MASK;
+ addr = (addr << 7) + EISA_MEM + 0x60;
+ r0 = *(vuip) (addr);
+ r1 = *(vuip) (addr + (4 << 7));
+ return r1 << 32 | r0;
}
-extern inline void __writeb(unsigned short b, unsigned long addr)
+__EXTERN_INLINE void jensen_writeb(unsigned char b, unsigned long addr)
{
- __set_hae(addr);
- addr &= __HAE_MASK;
- *(volatile unsigned int *) ((addr << 7) + EISA_MEM + 0x00) = b * 0x01010101;
+ jensen_set_hae(addr);
+ addr &= JENSEN_HAE_MASK;
+ *(vuip) ((addr << 7) + EISA_MEM + 0x00) = b * 0x01010101;
}
-extern inline void __writew(unsigned short b, unsigned long addr)
+__EXTERN_INLINE void jensen_writew(unsigned short b, unsigned long addr)
{
- __set_hae(addr);
- addr &= __HAE_MASK;
- *(volatile unsigned int *) ((addr << 7) + EISA_MEM + 0x20) = b * 0x00010001;
+ jensen_set_hae(addr);
+ addr &= JENSEN_HAE_MASK;
+ *(vuip) ((addr << 7) + EISA_MEM + 0x20) = b * 0x00010001;
}
-extern inline void __writel(unsigned int b, unsigned long addr)
+__EXTERN_INLINE void jensen_writel(unsigned int b, unsigned long addr)
{
- __set_hae(addr);
- addr &= __HAE_MASK;
- *(volatile unsigned int *) ((addr << 7) + EISA_MEM + 0x60) = b;
+ jensen_set_hae(addr);
+ addr &= JENSEN_HAE_MASK;
+ *(vuip) ((addr << 7) + EISA_MEM + 0x60) = b;
}
+__EXTERN_INLINE void jensen_writeq(unsigned long b, unsigned long addr)
+{
+ jensen_set_hae(addr);
+ addr &= JENSEN_HAE_MASK;
+ addr = (addr << 7) + EISA_MEM + 0x60;
+ *(vuip) (addr) = b;
+ *(vuip) (addr + (4 << 7)) = b >> 32;
+}
+
+/* Find the DENSE memory area for a given bus address.
+ Whee, there is none. */
+
+__EXTERN_INLINE unsigned long jensen_dense_mem(unsigned long addr)
+{
+ return 0;
+}
+
+#undef vuip
+
+#ifdef __WANT_IO_DEF
+
+#define virt_to_bus jensen_virt_to_bus
+#define bus_to_virt jensen_bus_to_virt
+#define __inb jensen_inb
+#define __inw jensen_inw
+#define __inl jensen_inl
+#define __outb jensen_outb
+#define __outw jensen_outw
+#define __outl jensen_outl
+#define __readb jensen_readb
+#define __readw jensen_readw
+#define __writeb jensen_writeb
+#define __writew jensen_writew
+#define __readl jensen_readl
+#define __readq jensen_readq
+#define __writel jensen_writel
+#define __writeq jensen_writeq
+#define dense_mem jensen_dense_mem
+
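These defines only bite when the includer asks for them. A sketch of the intended compile-time binding for a Jensen-only kernel, assuming asm/io.h is the place that requests the definitions:

	#define __WANT_IO_DEF
	#include <asm/jensen.h>		/* __inb now resolves to jensen_inb, etc. */
	#undef __WANT_IO_DEF
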
/*
* The above have so much overhead that it probably doesn't make
* sense to have them inlined (better icache behaviour).
@@ -261,17 +329,13 @@ extern inline void __writel(unsigned int b, unsigned long addr)
#define outb(x, port) \
(__builtin_constant_p((port))?__outb((x),(port)):_outb((x),(port)))
-#endif /* __KERNEL__ */
-
-/*
- * The Alpha Jensen hardware for some rather strange reason puts
- * the RTC clock at 0x170 instead of 0x70. Probably due to some
- * misguided idea about using 0x70 for NMI stuff.
- *
- * These defines will override the defaults when doing RTC queries
- */
-#define RTC_PORT(x) (0x170+(x))
-#define RTC_ADDR(x) (x)
-#define RTC_ALWAYS_BCD 0
+#endif /* __WANT_IO_DEF */
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_JENSEN_H */
diff --git a/include/asm-alpha/linux_logo.h b/include/asm-alpha/linux_logo.h
new file mode 100644
index 000000000..c7918fb97
--- /dev/null
+++ b/include/asm-alpha/linux_logo.h
@@ -0,0 +1,47 @@
+/* $Id: linux_logo.h,v 1.3 1998/06/29 19:36:17 geert Exp $
+ * include/asm-alpha/linux_logo.h: This is a linux logo
+ * to be displayed on boot.
+ *
+ * Copyright (C) 1996 Larry Ewing (lewing@isc.tamu.edu)
+ * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *
+ * You can put anything here, but:
+ * LINUX_LOGO_COLORS has to be less than 224
+ * image size has to be 80x80
+ * values have to start from 0x20
+ * (i.e. RGB(linux_logo_red[0],
+ * linux_logo_green[0],
+ * linux_logo_blue[0]) is color 0x20)
+ * BW image has to be 80x80 as well, with MS bit
+ * on the left
+ * Serial_console ascii image can be any size,
+ * but should contain %s to display the version
+ */
+
+#include <linux/init.h>
+#include <linux/version.h>
+
+#define linux_logo_banner "Linux/AXP version " UTS_RELEASE
+
+#define LINUX_LOGO_COLORS 221
+
+#ifdef INCLUDE_LINUX_LOGO_DATA
+
+#define INCLUDE_LINUX_LOGO16
+
+#include <linux/linux_logo.h>
+
+#else
+
+/* prototypes only */
+extern unsigned char linux_logo_red[];
+extern unsigned char linux_logo_green[];
+extern unsigned char linux_logo_blue[];
+extern unsigned char linux_logo[];
+extern unsigned char linux_logo_bw[];
+extern unsigned char linux_logo16_red[];
+extern unsigned char linux_logo16_green[];
+extern unsigned char linux_logo16_blue[];
+extern unsigned char linux_logo16[];
+
+#endif
diff --git a/include/asm-alpha/machvec.h b/include/asm-alpha/machvec.h
new file mode 100644
index 000000000..e3d247a9a
--- /dev/null
+++ b/include/asm-alpha/machvec.h
@@ -0,0 +1,118 @@
+#ifndef __ALPHA_MACHVEC_H
+#define __ALPHA_MACHVEC_H 1
+
+#include <linux/config.h>
+#include <linux/types.h>
+
+
+/* The following structure vectors all of the I/O and IRQ manipulation
+ from the generic kernel to the hardware specific backend. */
+
+struct task_struct;
+struct mm_struct;
+struct pt_regs;
+struct vm_area_struct;
+
+struct alpha_machine_vector
+{
+ /* This "belongs" down below with the rest of the runtime
+ variables, but it is convenient for entry.S if these
+ two slots are at the beginning of the struct. */
+ unsigned long hae_cache;
+ unsigned long *hae_register;
+
+ unsigned long max_dma_address;
+ unsigned int nr_irqs;
+ unsigned int rtc_port, rtc_addr;
+ unsigned int max_asn;
+ unsigned long mmu_context_mask;
+ unsigned long irq_probe_mask;
+ unsigned long iack_sc;
+
+ unsigned long (*mv_virt_to_bus)(void *);
+ void * (*mv_bus_to_virt)(unsigned long);
+
+ unsigned int (*mv_inb)(unsigned long);
+ unsigned int (*mv_inw)(unsigned long);
+ unsigned int (*mv_inl)(unsigned long);
+
+ void (*mv_outb)(unsigned char, unsigned long);
+ void (*mv_outw)(unsigned short, unsigned long);
+ void (*mv_outl)(unsigned int, unsigned long);
+
+ unsigned long (*mv_readb)(unsigned long);
+ unsigned long (*mv_readw)(unsigned long);
+ unsigned long (*mv_readl)(unsigned long);
+ unsigned long (*mv_readq)(unsigned long);
+
+ void (*mv_writeb)(unsigned char, unsigned long);
+ void (*mv_writew)(unsigned short, unsigned long);
+ void (*mv_writel)(unsigned int, unsigned long);
+ void (*mv_writeq)(unsigned long, unsigned long);
+
+ unsigned long (*mv_dense_mem)(unsigned long);
+
+ int (*pci_read_config_byte)(u8, u8, u8, u8 *value);
+ int (*pci_read_config_word)(u8, u8, u8, u16 *value);
+ int (*pci_read_config_dword)(u8, u8, u8, u32 *value);
+
+ int (*pci_write_config_byte)(u8, u8, u8, u8 value);
+ int (*pci_write_config_word)(u8, u8, u8, u16 value);
+ int (*pci_write_config_dword)(u8, u8, u8, u32 value);
+
+ void (*mv_get_mmu_context)(struct task_struct *);
+ void (*mv_flush_tlb_current)(struct mm_struct *);
+ void (*mv_flush_tlb_other)(struct mm_struct *);
+ void (*mv_flush_tlb_current_page)(struct mm_struct * mm,
+ struct vm_area_struct *vma,
+ unsigned long addr);
+
+ void (*update_irq_hw)(unsigned long, unsigned long, int);
+ void (*ack_irq)(unsigned long);
+ void (*device_interrupt)(unsigned long vector, struct pt_regs *regs);
+ void (*machine_check)(u64 vector, u64 la, struct pt_regs *regs);
+
+ void (*init_arch)(unsigned long *, unsigned long *);
+ void (*init_irq)(void);
+ void (*init_pit)(void);
+ void (*pci_fixup)(void);
+ void (*kill_arch)(int, char *);
+
+ const char *vector_name;
+
+ /* System specific parameters. */
+ union {
+ struct {
+ unsigned long gru_int_req_bits;
+ } cia;
+
+ struct {
+ unsigned long gamma_bias;
+ } t2;
+ } sys;
+
+ /* Runtime variables it is handy to keep close. */
+ unsigned long dma_win_base;
+ unsigned long dma_win_size;
+ unsigned long sm_base_r1, sm_base_r2, sm_base_r3;
+};
+
+extern struct alpha_machine_vector alpha_mv;
+
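Each platform is expected to supply one fully populated vector. A sketch using the GNU labeled-element initializer syntax of the day; the op names are from this diff, but the scalar values are illustrative, not Jensen's actual parameters:

	struct alpha_machine_vector jensen_mv = {
		vector_name:	"Jensen",
		nr_irqs:	16,			/* illustrative */
		mv_inb:		jensen_inb,
		mv_outb:	jensen_outb,
		mv_readb:	jensen_readb,
		mv_writeb:	jensen_writeb,
		/* ... remaining ops bound the same way ... */
	};
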
+#ifdef CONFIG_ALPHA_GENERIC
+extern int alpha_using_srm;
+extern int alpha_use_srm_setup;
+#else
+#ifdef CONFIG_ALPHA_SRM
+#define alpha_using_srm 1
+#else
+#define alpha_using_srm 0
+#endif
+#if defined(CONFIG_ALPHA_SRM_SETUP)
+#define alpha_use_srm_setup 1
+#else
+#define alpha_use_srm_setup 0
+#endif
+#endif /* GENERIC */
+
+#endif /* __ALPHA_MACHVEC_H */
diff --git a/include/asm-alpha/md.h b/include/asm-alpha/md.h
new file mode 100644
index 000000000..6c9b8222a
--- /dev/null
+++ b/include/asm-alpha/md.h
@@ -0,0 +1,13 @@
+/* $Id: md.h,v 1.1 1997/12/15 15:11:48 jj Exp $
+ * md.h: High speed xor_block operation for RAID4/5
+ *
+ */
+
+#ifndef __ASM_MD_H
+#define __ASM_MD_H
+
+/* #define HAVE_ARCH_XORBLOCK */
+
+#define MD_XORBLOCK_ALIGNMENT sizeof(long)
+
+#endif /* __ASM_MD_H */
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 0d4ab6b21..af97b6740 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -9,6 +9,7 @@
#include <linux/config.h>
#include <asm/system.h>
+#include <asm/machvec.h>
/*
* The maximum ASN's the processor supports. On the EV4 this is 63
@@ -32,11 +33,17 @@
* work correctly and can thus not be used (explaining the lack of PAL-code
* support).
*/
-#ifdef CONFIG_ALPHA_EV5
-#define MAX_ASN 127
+#define EV4_MAX_ASN 63
+#define EV5_MAX_ASN 127
+
+#ifdef CONFIG_ALPHA_GENERIC
+# define MAX_ASN (alpha_mv.max_asn)
#else
-#define MAX_ASN 63
-#define BROKEN_ASN 1
+# ifdef CONFIG_ALPHA_EV4
+# define MAX_ASN EV4_MAX_ASN
+# else
+# define MAX_ASN EV5_MAX_ASN
+# endif
#endif
#ifdef __SMP__
@@ -78,26 +85,21 @@ extern unsigned long asn_cache;
* force a new asn for any other processes the next time they want to
* run.
*/
-extern inline void
-get_new_mmu_context(struct task_struct *p, struct mm_struct *mm)
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __MMU_EXTERN_INLINE
+#endif
+
+extern void get_new_mmu_context(struct task_struct *p, struct mm_struct *mm);
+
+__EXTERN_INLINE void ev4_get_mmu_context(struct task_struct *p)
{
- unsigned long asn = asn_cache;
-
- if ((asn & HARDWARE_ASN_MASK) < MAX_ASN)
- ++asn;
- else {
- tbiap();
- imb();
- asn = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
- }
- asn_cache = asn;
- mm->context = asn; /* full version + asn */
- p->tss.asn = asn & HARDWARE_ASN_MASK; /* just asn */
+ /* As described, ASN's are broken. */
}
-extern inline void get_mmu_context(struct task_struct *p)
+__EXTERN_INLINE void ev5_get_mmu_context(struct task_struct *p)
{
-#ifndef BROKEN_ASN
struct mm_struct * mm = p->mm;
if (mm) {
@@ -106,27 +108,81 @@ extern inline void get_mmu_context(struct task_struct *p)
if ((mm->context ^ asn) & ~HARDWARE_ASN_MASK)
get_new_mmu_context(p, mm);
}
-#endif
}
+#ifdef CONFIG_ALPHA_GENERIC
+# define get_mmu_context (alpha_mv.mv_get_mmu_context)
+#else
+# ifdef CONFIG_ALPHA_EV4
+# define get_mmu_context ev4_get_mmu_context
+# else
+# define get_mmu_context ev5_get_mmu_context
+# endif
+#endif
+
extern inline void init_new_context(struct mm_struct *mm)
{
mm->context = 0;
}
-#define destroy_context(mm) do { } while(0)
+extern inline void destroy_context(struct mm_struct *mm)
+{
+ /* Nothing to do. */
+}
+
/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
- * Ideally this would be an extern inline function, but reload_context
- * is declared in pgtable.h, which includes this file. :-(
+ * Force a context reload. This is needed when we change the page
+ * table pointer or when we update the ASN of the current process.
*/
-#define activate_context(tsk) \
- do { \
- get_mmu_context(tsk); \
- reload_context(tsk); \
- } while (0)
+#if defined(CONFIG_ALPHA_GENERIC)
+#define MASK_CONTEXT(tss) \
+ ((struct thread_struct *)((unsigned long)(tss) & alpha_mv.mmu_context_mask))
+#elif defined(CONFIG_ALPHA_DP264)
+#define MASK_CONTEXT(tss) \
+ ((struct thread_struct *)((unsigned long)(tss) & 0xfffffffffful))
+#else
+#define MASK_CONTEXT(tss) (tss)
+#endif
+
+__EXTERN_INLINE struct thread_struct *
+__reload_tss(struct thread_struct *tss)
+{
+ register struct thread_struct *a0 __asm__("$16");
+ register struct thread_struct *v0 __asm__("$0");
+
+ a0 = MASK_CONTEXT(tss);
+
+ __asm__ __volatile__(
+ "call_pal %2" : "=r"(v0), "=r"(a0)
+ : "i"(PAL_swpctx), "r"(a0)
+ : "$1", "$16", "$22", "$23", "$24", "$25");
+
+ return v0;
+}
+
+__EXTERN_INLINE void
+reload_context(struct task_struct *task)
+{
+ __reload_tss(&task->tss);
+}
+
+/*
+ * After we have set current->mm to a new value, this activates the
+ * context for the new mm so we see the new mappings.
+ */
+
+__EXTERN_INLINE void
+activate_context(struct task_struct *task)
+{
+ get_mmu_context(task);
+ reload_context(task);
+}
+
+#ifdef __MMU_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __MMU_EXTERN_INLINE
#endif
+#endif /* __ALPHA_MMU_CONTEXT_H */
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
index e2b61494a..aa2a67ae3 100644
--- a/include/asm-alpha/page.h
+++ b/include/asm-alpha/page.h
@@ -19,54 +19,51 @@
*/
static inline void clear_page(unsigned long page)
{
- unsigned long count;
- __asm__ __volatile__(
- ".align 4\n"
- "1:\n\t"
- "stq $31,0(%1)\n\t"
- "stq $31,8(%1)\n\t"
- "stq $31,16(%1)\n\t"
- "stq $31,24(%1)\n\t"
- "subq %0,1,%0\n\t"
- "stq $31,32(%1)\n\t"
- "stq $31,40(%1)\n\t"
- "stq $31,48(%1)\n\t"
- "stq $31,56(%1)\n\t"
- "addq $1,64,$1\n\t"
- "bne %0,1b"
- :"=r" (count),"=r" (page)
- :"0" (PAGE_SIZE/64), "1" (page));
+ unsigned long count = PAGE_SIZE/64;
+ unsigned long *ptr = (unsigned long *)page;
+
+ do {
+ ptr[0] = 0;
+ ptr[1] = 0;
+ ptr[2] = 0;
+ ptr[3] = 0;
+ count--;
+ ptr[4] = 0;
+ ptr[5] = 0;
+ ptr[6] = 0;
+ ptr[7] = 0;
+ ptr += 8;
+ } while (count);
}
-static inline void copy_page(unsigned long to, unsigned long from)
+static inline void copy_page(unsigned long _to, unsigned long _from)
{
- unsigned long count;
- __asm__ __volatile__(
- ".align 4\n"
- "1:\n\t"
- "ldq $0,0(%1)\n\t"
- "ldq $1,8(%1)\n\t"
- "ldq $2,16(%1)\n\t"
- "ldq $3,24(%1)\n\t"
- "ldq $4,32(%1)\n\t"
- "ldq $5,40(%1)\n\t"
- "ldq $6,48(%1)\n\t"
- "ldq $7,56(%1)\n\t"
- "subq %0,1,%0\n\t"
- "addq %1,64,%1\n\t"
- "stq $0,0(%2)\n\t"
- "stq $1,8(%2)\n\t"
- "stq $2,16(%2)\n\t"
- "stq $3,24(%2)\n\t"
- "stq $4,32(%2)\n\t"
- "stq $5,40(%2)\n\t"
- "stq $6,48(%2)\n\t"
- "stq $7,56(%2)\n\t"
- "addq %2,64,%2\n\t"
- "bne %0,1b"
- :"=r" (count), "=r" (from), "=r" (to)
- :"0" (PAGE_SIZE/64), "1" (from), "2" (to)
- :"$0","$1","$2","$3","$4","$5","$6","$7");
+ unsigned long count = PAGE_SIZE/64;
+ unsigned long *to = (unsigned long *)_to;
+ unsigned long *from = (unsigned long *)_from;
+
+ do {
+ unsigned long a,b,c,d,e,f,g,h;
+ a = from[0];
+ b = from[1];
+ c = from[2];
+ d = from[3];
+ e = from[4];
+ f = from[5];
+ g = from[6];
+ h = from[7];
+ count--;
+ from += 8;
+ to[0] = a;
+ to[1] = b;
+ to[2] = c;
+ to[3] = d;
+ to[4] = e;
+ to[5] = f;
+ to[6] = g;
+ to[7] = h;
+ to += 8;
+ } while (count);
}
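Both helpers assume whole, page-aligned pages. A hypothetical caller, where src stands for an existing page-aligned page:

	unsigned long dst = __get_free_page(GFP_KERNEL);
	if (dst)
		copy_page(dst, src);	/* src must be page-aligned too */
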
#ifdef STRICT_MM_TYPECHECKS
diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h
new file mode 100644
index 000000000..2dd28c3cc
--- /dev/null
+++ b/include/asm-alpha/pci.h
@@ -0,0 +1,48 @@
+#ifndef __ALPHA_PCI_H
+#define __ALPHA_PCI_H
+
+#include <linux/config.h>
+#include <linux/pci.h>
+
+
+/*
+ * The following structure is used to manage multiple PCI busses.
+ *
+ * XXX: We should solve this problem in an architecture-independent
+ * way, rather than hacking something up here.
+ */
+
+struct linux_hose_info {
+ struct pci_bus pci_bus;
+ struct linux_hose_info *next;
+ unsigned long pci_io_space;
+ unsigned long pci_mem_space;
+ unsigned long pci_config_space;
+ unsigned long pci_sparse_space;
+ unsigned int pci_first_busno;
+ unsigned int pci_last_busno;
+ unsigned int pci_hose_index;
+};
+
+/* This is indexed by a pseudo-PCI bus number to obtain the real deal. */
+extern struct linux_hose_info *bus2hose[256];
+
+/* Create a handle that is OR-ed into the reported I/O space address
+ for a device. We use this later to find the bus a device lives on. */
+
+#if defined(CONFIG_ALPHA_GENERIC) \
+ || defined(CONFIG_ALPHA_MCPCIA) \
+ /* || defined(CONFIG_ALPHA_TSUNAMI) */
+
+#define PCI_HANDLE(bus) ((bus2hose[bus]->pci_hose_index & 3UL) << 32)
+#define DEV_IS_ON_PRIMARY(dev) \
+ (bus2hose[(dev)->bus->number]->pci_first_busno == (dev)->bus->number)
+
+#else
+
+#define PCI_HANDLE(bus) 0
+#define DEV_IS_ON_PRIMARY(dev) ((dev)->bus->number == 0)
+
+#endif /* Multiple busses */
+
+#endif /* __ALPHA_PCI_H */
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index 3467e6103..3cbbaa17a 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -3,17 +3,20 @@
/*
* This file contains the functions and defines necessary to modify and use
- * the alpha page table tree.
+ * the Alpha page table tree.
*
- * This hopefully works with any standard alpha page-size, as defined
+ * This hopefully works with any standard Alpha page-size, as defined
* in <asm/page.h> (currently 8192).
*/
#include <linux/config.h>
#include <asm/system.h>
+#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu_context.h>
+#include <asm/machvec.h>
-/* Caches aren't brain-dead on the alpha. */
+
+/* Caches aren't brain-dead on the Alpha. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(mm, start, end) do { } while (0)
@@ -22,60 +25,87 @@
#define flush_icache_range(start, end) do { } while (0)
/*
- * Force a context reload. This is needed when we
- * change the page table pointer or when we update
- * the ASN of the current process.
- */
-static inline void reload_context(struct task_struct *task)
-{
- __asm__ __volatile__(
- "bis %0,%0,$16\n\t"
-#ifdef CONFIG_ALPHA_DP264
- "zap $16,0xe0,$16\n\t"
-#endif /* DP264 */
- "call_pal %1"
- : /* no outputs */
- : "r" (&task->tss), "i" (PAL_swpctx)
- : "$0", "$1", "$16", "$22", "$23", "$24", "$25");
-}
-
-/*
* Use a few helper functions to hide the ugly broken ASN
- * numbers on early alpha's (ev4 and ev45)
+ * numbers on early Alphas (ev4 and ev45)
*/
-#ifdef BROKEN_ASN
-#define flush_tlb_current(x) tbiap()
-#define flush_tlb_other(x) do { } while (0)
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __MMU_EXTERN_INLINE
+#endif
-#else
+__EXTERN_INLINE void
+ev4_flush_tlb_current(struct mm_struct *mm)
+{
+ tbiap();
+}
-extern void get_new_asn_and_reload(struct task_struct *, struct mm_struct *);
+__EXTERN_INLINE void
+ev4_flush_tlb_other(struct mm_struct *mm)
+{
+}
-#define flush_tlb_current(mm) get_new_asn_and_reload(current, mm)
-#define flush_tlb_other(mm) do { (mm)->context = 0; } while (0)
+__EXTERN_INLINE void
+ev5_flush_tlb_current(struct mm_struct *mm)
+{
+ mm->context = 0;
+ get_new_mmu_context(current, mm);
+ reload_context(current);
+}
-#endif
+__EXTERN_INLINE void
+ev5_flush_tlb_other(struct mm_struct *mm)
+{
+ mm->context = 0;
+}
/*
* Flush just one page in the current TLB set.
* We need to be very careful about the icache here, there
* is no way to invalidate a specific icache page..
*/
-static inline void flush_tlb_current_page(struct mm_struct * mm,
- struct vm_area_struct *vma,
- unsigned long addr)
+
+__EXTERN_INLINE void
+ev4_flush_tlb_current_page(struct mm_struct * mm,
+ struct vm_area_struct *vma,
+ unsigned long addr)
{
-#ifdef BROKEN_ASN
tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
-#else
+}
+
+__EXTERN_INLINE void
+ev5_flush_tlb_current_page(struct mm_struct * mm,
+ struct vm_area_struct *vma,
+ unsigned long addr)
+{
if (vma->vm_flags & VM_EXEC)
- flush_tlb_current(mm);
+ ev5_flush_tlb_current(mm);
else
tbi(2, addr);
-#endif
}
+
+#ifdef CONFIG_ALPHA_GENERIC
+# define flush_tlb_current alpha_mv.mv_flush_tlb_current
+# define flush_tlb_other alpha_mv.mv_flush_tlb_other
+# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
+#else
+# ifdef CONFIG_ALPHA_EV4
+# define flush_tlb_current ev4_flush_tlb_current
+# define flush_tlb_other ev4_flush_tlb_other
+# define flush_tlb_current_page ev4_flush_tlb_current_page
+# else
+# define flush_tlb_current ev5_flush_tlb_current
+# define flush_tlb_other ev5_flush_tlb_other
+# define flush_tlb_current_page ev5_flush_tlb_current_page
+# endif
+#endif
+
+#ifdef __MMU_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __MMU_EXTERN_INLINE
+#endif
+
/*
* Flush current user mapping.
*/
@@ -125,8 +155,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
}
/*
- * Flush a specified range of user mapping: on the
- * alpha we flush the whole user tlb
+ * Flush a specified range of user mapping: on the
+ * Alpha we flush the whole user tlb.
*/
static inline void flush_tlb_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
@@ -156,7 +186,7 @@ extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);
#endif /* __SMP__ */
-/* Certain architectures need to do special things when pte's
+/* Certain architectures need to do special things when PTEs
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
@@ -173,20 +203,22 @@ extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);
#define PGDIR_MASK (~(PGDIR_SIZE-1))
/*
- * entries per page directory level: the alpha is three-level, with
+ * Entries per page directory level: the Alpha is three-level, with
* all levels having a one-page page table.
*
- * The PGD is special: the last entry is reserved for self-mapping.
+ * The PGD is special: the last entry is reserved for self-mapping.
*/
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD ((1UL << (PAGE_SHIFT-3))-1)
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-/* the no. of pointers that fit on a page: this will go away */
+/* Number of pointers that fit on a page: this will go away. */
#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))
#define VMALLOC_START 0xFFFFFE0000000000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+#define VMALLOC_END (~0UL)
/*
* OSF/1 PAL-code-imposed page table bits
@@ -206,7 +238,7 @@ extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);
#define _PAGE_ACCESSED 0x40000
/*
- * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly
+ * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly
* by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
* Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
* the KRE/URE bits to watch for it. That way we don't need to overload the
@@ -240,7 +272,7 @@ extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);
#define _PAGE_S(x) _PAGE_NORMAL(x)
/*
- * The hardware can handle write-only mappings, but as the alpha
+ * The hardware can handle write-only mappings, but as the Alpha
* architecture does byte-wide writes with a read-modify-write
* sequence, it's not practical to have write-without-read privs.
* Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
@@ -269,7 +301,7 @@ extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);
* BAD_PAGETABLE is used when we need a bogus page-table, while
* BAD_PAGE is used for a bogus page.
*
- * ZERO_PAGE is a global shared page that is always zero: used
+ * ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern pte_t __bad_page(void);
@@ -295,7 +327,7 @@ extern unsigned long __zero_page(void);
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
/*
- * Conversion functions: convert a page and protection to a page entry,
+ * Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
@@ -401,132 +433,182 @@ extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
* used to allocate a kernel page table - this turns on ASN bits
* if any.
*/
-extern inline void pte_free_kernel(pte_t * pte)
+#ifndef __SMP__
+extern struct pgtable_cache_struct {
+ unsigned long *pgd_cache;
+ unsigned long *pte_cache;
+ unsigned long pgtable_cache_sz;
+} quicklists;
+#else
+#include <asm/smp.h>
+#define quicklists cpu_data[smp_processor_id()]
+#endif
+#define pgd_quicklist (quicklists.pgd_cache)
+#define pmd_quicklist ((unsigned long *)0)
+#define pte_quicklist (quicklists.pte_cache)
+#define pgtable_cache_size (quicklists.pgtable_cache_sz)
+
+extern __inline__ pgd_t *get_pgd_slow(void)
{
- free_page((unsigned long) pte);
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
+
+ if (ret) {
+ init = pgd_offset(&init_mm, 0);
+ memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return ret;
}
-extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
+extern __inline__ pgd_t *get_pgd_fast(void)
{
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
- if (pmd_none(*pmd)) {
- pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
- if (pmd_none(*pmd)) {
- if (page) {
- pmd_set(pmd, page);
- return page + address;
- }
- pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
- return NULL;
- }
- free_page((unsigned long) page);
- }
- if (pmd_bad(*pmd)) {
- printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
- pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + address;
+ unsigned long *ret;
+
+ if((ret = pgd_quicklist) != NULL) {
+ pgd_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ } else
+ ret = (unsigned long *)get_pgd_slow();
+ return (pgd_t *)ret;
}
-extern inline void pmd_free_kernel(pmd_t * pmd)
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
- free_page((unsigned long) pmd);
+ *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ pgtable_cache_size++;
}
-extern inline pmd_t * pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
+extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
- address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
- if (pgd_none(*pgd)) {
- pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
- if (pgd_none(*pgd)) {
- if (page) {
- pgd_set(pgd, page);
- return page + address;
- }
- pgd_set(pgd, BAD_PAGETABLE);
- return NULL;
- }
- free_page((unsigned long) page);
+ free_page((unsigned long)pgd);
+}
+
+extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long address_premasked);
+
+extern __inline__ pmd_t *get_pmd_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
}
- if (pgd_bad(*pgd)) {
- printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
- pgd_set(pgd, BAD_PAGETABLE);
- return NULL;
+ return (pmd_t *)ret;
+}
+
+extern __inline__ void free_pmd_fast(pmd_t *pmd)
+{
+ *(unsigned long *)pmd = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pmd;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pmd_slow(pmd_t *pmd)
+{
+ free_page((unsigned long)pmd);
+}
+
+extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
+
+extern __inline__ pte_t *get_pte_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
}
- return (pmd_t *) pgd_page(*pgd) + address;
+ return (pte_t *)ret;
}
-extern inline void pte_free(pte_t * pte)
+extern __inline__ void free_pte_fast(pte_t *pte)
{
- free_page((unsigned long) pte);
+ *(unsigned long *)pte = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pte;
+ pgtable_cache_size++;
}
+extern __inline__ void free_pte_slow(pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+extern void __bad_pte(pmd_t *pmd);
+extern void __bad_pmd(pgd_t *pgd);
+
+#define pte_free_kernel(pte) free_pte_fast(pte)
+#define pte_free(pte) free_pte_fast(pte)
+#define pmd_free_kernel(pmd) free_pmd_fast(pmd)
+#define pmd_free(pmd) free_pmd_fast(pmd)
+#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_alloc() get_pgd_fast()
+
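The quicklists above are LIFO freelists threaded through the first word of each free page; word 1 of a freed page table is known to be clear, which is why the pop path does ret[0] = ret[1] to scrub the link. In outline (page is an unsigned long *; a sketch rather than the literal code):

	/* push a freed table: link to old head, page becomes head */
	page[0] = (unsigned long) pte_quicklist;
	pte_quicklist = page;

	/* pop: unlink, then restore word 0 from the known-zero word 1 */
	page = pte_quicklist;
	pte_quicklist = (unsigned long *) page[0];
	page[0] = page[1];
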
extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
if (pmd_none(*pmd)) {
- pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
- if (pmd_none(*pmd)) {
- if (page) {
- pmd_set(pmd, page);
- return page + address;
- }
- pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
- return NULL;
- }
- free_page((unsigned long) page);
+ pte_t *page = get_pte_fast();
+
+ if (!page)
+ return get_pte_slow(pmd, address);
+ pmd_set(pmd, page);
+ return page + address;
}
if (pmd_bad(*pmd)) {
- printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
- pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
+ __bad_pte(pmd);
return NULL;
}
return (pte_t *) pmd_page(*pmd) + address;
}
-extern inline void pmd_free(pmd_t * pmd)
-{
- free_page((unsigned long) pmd);
-}
-
extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
if (pgd_none(*pgd)) {
- pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
- if (pgd_none(*pgd)) {
- if (page) {
- pgd_set(pgd, page);
- return page + address;
- }
- pgd_set(pgd, BAD_PAGETABLE);
- return NULL;
- }
- free_page((unsigned long) page);
+ pmd_t *page = get_pmd_fast();
+
+ if (!page)
+ return get_pmd_slow(pgd, address);
+ pgd_set(pgd, page);
+ return page + address;
}
if (pgd_bad(*pgd)) {
- printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
- pgd_set(pgd, BAD_PAGETABLE);
+ __bad_pmd(pgd);
return NULL;
}
return (pmd_t *) pgd_page(*pgd) + address;
}
-extern inline void pgd_free(pgd_t * pgd)
-{
- free_page((unsigned long) pgd);
-}
+#define pte_alloc_kernel pte_alloc
+#define pmd_alloc_kernel pmd_alloc
+
+extern int do_check_pgt_cache(int, int);
-extern inline pgd_t * pgd_alloc(void)
+extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
- return (pgd_t *) get_free_page(GFP_KERNEL);
+ struct task_struct * p;
+ pgd_t *pgd;
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (!p->mm)
+ continue;
+ *pgd_offset(p->mm,address) = entry;
+ }
+ read_unlock(&tasklist_lock);
+ for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+ pgd[(address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)] = entry;
}
extern pgd_t swapper_pg_dir[1024];
/*
- * The alpha doesn't have any external MMU info: the kernel page
+ * The Alpha doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
*/
extern inline void update_mmu_cache(struct vm_area_struct * vma,
@@ -535,8 +617,8 @@ extern inline void update_mmu_cache(struct vm_area_struct * vma,
}
/*
- * Non-present pages: high 24 bits are offset, next 8 bits type,
- * low 32 bits zero..
+ * Non-present pages: high 24 bits are offset, next 8 bits type,
+ * low 32 bits zero.
*/
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }
@@ -548,4 +630,7 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define module_map vmalloc
#define module_unmap vfree
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define PageSkip(page) (0)
+
#endif /* _ALPHA_PGTABLE_H */
diff --git a/include/asm-alpha/posix_types.h b/include/asm-alpha/posix_types.h
index b98290660..3cbf6d573 100644
--- a/include/asm-alpha/posix_types.h
+++ b/include/asm-alpha/posix_types.h
@@ -34,6 +34,8 @@ typedef struct {
int val[2];
} __kernel_fsid_t;
+#ifdef __KERNEL__
+
#ifndef __GNUC__
#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
@@ -109,4 +111,6 @@ static __inline__ void __FD_ZERO(__kernel_fd_set *p)
#endif /* __GNUC__ */
+#endif /* __KERNEL__ */
+
#endif /* _ALPHA_POSIX_TYPES_H */
diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h
index c36876ca8..ee6613b6a 100644
--- a/include/asm-alpha/processor.h
+++ b/include/asm-alpha/processor.h
@@ -45,14 +45,21 @@ struct thread_struct {
unsigned long pal_flags;
unsigned long res1, res2;
- /* the fields below are Linux-specific: */
- /* bit 1..5: IEEE_TRAP_ENABLE bits (see fpu.h) */
- /* bit 6..8: UAC bits (see sysinfo.h) */
- /* bit 17..21: IEEE_STATUS_MASK bits (see fpu.h) */
- /* bit 63: die_if_kernel recursion lock */
+ /*
+ * The fields below are Linux-specific:
+ *
+ * bit 1..5: IEEE_TRAP_ENABLE bits (see fpu.h)
+ * bit 6..8: UAC bits (see sysinfo.h)
+ * bit 17..21: IEEE_STATUS_MASK bits (see fpu.h)
+ * bit 63: die_if_kernel recursion lock
+ */
unsigned long flags;
- /* perform syscall argument validation (get/set_fs) */
+
+ /* Perform syscall argument validation (get/set_fs). */
mm_segment_t fs;
+
+ /* Breakpoint handling for ptrace. */
+ long debugreg[8];
};
#define INIT_MMAP { &init_mm, 0xfffffc0000000000, 0xfffffc0010000000, \
@@ -75,13 +82,23 @@ struct thread_struct {
* holds provided the thread blocked through a call to schedule() ($15
* is the frame pointer in schedule() and $15 is saved at offset 48 by
* entry.S:do_switch_stack).
+ *
+ * Under heavy swap load I've seen this lose in an ugly way.  So do
+ * some extra sanity checking on the ranges we expect these pointers
+ * to be in so that we can fail gracefully. This is just for ps after
+ * all. -- r~
*/
extern inline unsigned long thread_saved_pc(struct thread_struct *t)
{
- unsigned long fp;
-
- fp = ((unsigned long*)t->ksp)[6];
- return *(unsigned long*)fp;
+ unsigned long fp, sp = t->ksp, base = (unsigned long)t;
+
+ if (sp > base && sp+6*8 < base + 16*1024) {
+ fp = ((unsigned long*)sp)[6];
+ if (fp > sp && fp < base + 16*1024)
+ return *(unsigned long *)fp;
+ }
+
+ return 0;
}
/*
diff --git a/include/asm-alpha/serial.h b/include/asm-alpha/serial.h
index 565b59dd0..7b2d9ee95 100644
--- a/include/asm-alpha/serial.h
+++ b/include/asm-alpha/serial.h
@@ -37,7 +37,7 @@
#ifdef CONFIG_SERIAL_MANY_PORTS
-#define EXTRA_SERIAL_PORT_DEFNS \
+#define EXTRA_SERIAL_PORT_DEFNS \
{ 0, BASE_BAUD, 0x1A0, 9, FOURPORT_FLAGS }, /* ttyS4 */ \
{ 0, BASE_BAUD, 0x1A8, 9, FOURPORT_FLAGS }, /* ttyS5 */ \
{ 0, BASE_BAUD, 0x1B0, 9, FOURPORT_FLAGS }, /* ttyS6 */ \
diff --git a/include/asm-alpha/siginfo.h b/include/asm-alpha/siginfo.h
index b32ceb1f9..c2304b041 100644
--- a/include/asm-alpha/siginfo.h
+++ b/include/asm-alpha/siginfo.h
@@ -43,6 +43,7 @@ typedef struct siginfo {
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
+ uid_t _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
diff --git a/include/asm-alpha/signal.h b/include/asm-alpha/signal.h
index 80e27df1a..501f35ea5 100644
--- a/include/asm-alpha/signal.h
+++ b/include/asm-alpha/signal.h
@@ -76,7 +76,7 @@ typedef unsigned long sigset_t;
/*
* SA_FLAGS values:
*
- * SA_ONSTACK is not currently supported, but will allow sigaltstack(2).
+ * SA_ONSTACK indicates that a registered stack_t will be used.
* SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
* SA_RESTART flag to get restarting signals (which were the default long ago)
* SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
@@ -100,6 +100,16 @@ typedef unsigned long sigset_t;
#define SA_NOMASK SA_NODEFER
#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK 1
+#define SS_DISABLE 2
+
+#define MINSIGSTKSZ 4096
+#define SIGSTKSZ 16384
+
+
#ifdef __KERNEL__
/*
* These values of sa_flags are used only by the kernel as part of the
@@ -164,6 +174,15 @@ typedef struct sigaltstack {
size_t ss_size;
} stack_t;
+/* sigstack(2) is deprecated, and will be withdrawn in a future version
+ of the X/Open CAE Specification. Use sigaltstack instead. It is only
+ implemented here for OSF/1 compatibility. */
+
+struct sigstack {
+ void *ss_sp;
+ int ss_onstack;
+};
+
#ifdef __KERNEL__
#include <asm/sigcontext.h>
#endif
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index 811abc297..90bfec2e2 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -1,8 +1,6 @@
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
-#define cpu_logical_map(cpu) (cpu)
-
#ifdef __SMP__
#include <linux/tasks.h>
@@ -10,6 +8,9 @@
struct cpuinfo_alpha {
unsigned long loops_per_sec;
unsigned int next;
+ unsigned long *pgd_cache;
+ unsigned long *pte_cache;
+ unsigned long pgtable_cache_sz;
};
extern struct cpuinfo_alpha cpu_data[NR_CPUS];
@@ -44,7 +45,8 @@ extern __volatile__ int cpu_number_map[NR_CPUS];
__r0; \
})
-#define smp_processor_id() hard_smp_processor_id()
+#define smp_processor_id() hard_smp_processor_id()
+#define cpu_logical_map(cpu) (cpu)
#endif /* __SMP__ */
diff --git a/include/asm-alpha/smp_lock.h b/include/asm-alpha/smp_lock.h
deleted file mode 100644
index bd04c6fb2..000000000
--- a/include/asm-alpha/smp_lock.h
+++ /dev/null
@@ -1,120 +0,0 @@
-#ifndef __ALPHA_SMPLOCK_H
-#define __ALPHA_SMPLOCK_H
-
-#ifndef __SMP__
-
-#define lock_kernel() do { } while(0)
-#define unlock_kernel() do { } while(0)
-#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
-#define reacquire_kernel_lock(task, cpu, depth) do { } while (0)
-
-#else
-
-#include <asm/system.h>
-#include <asm/current.h>
-#include <asm/bitops.h>
-#include <asm/hardirq.h>
-
-#define kernel_lock_held() \
- (klock_info.kernel_flag && (klock_info.akp == smp_processor_id()))
-
-/* Release global kernel lock and global interrupt lock */
-#define release_kernel_lock(task, cpu, depth) \
-do { \
- if ((depth = (task)->lock_depth) != 0) { \
- __cli(); \
- (task)->lock_depth = 0; \
- klock_info.akp = NO_PROC_ID; \
- klock_info.kernel_flag = 0; \
- mb(); \
- } \
- release_irqlock(cpu); \
- __sti(); \
-} while (0)
-
-#if 1
-#define DEBUG_KERNEL_LOCK
-#else
-#undef DEBUG_KERNEL_LOCK
-#endif
-
-#ifdef DEBUG_KERNEL_LOCK
-extern void ___lock_kernel(klock_info_t *klip, int cpu, long ipl);
-#else /* DEBUG_KERNEL_LOCK */
-static inline void ___lock_kernel(klock_info_t *klip, int cpu, long ipl)
-{
- long regx;
-
- __asm__ __volatile__(
- "1: ldl_l %1,%0;"
- " blbs %1,6f;"
- " or %1,1,%1;"
- " stl_c %1,%0;"
- " beq %1,6f;"
- "4: mb\n"
- ".section .text2,\"ax\"\n"
- "6: mov %4,$16;"
- " call_pal %3;"
- "7: ldl %1,%0;"
- " blbs %1,7b;"
- " bis $31,7,$16;"
- " call_pal %3;"
- " br 1b\n"
- ".previous"
- : "=m,=m" (__dummy_lock(klip)), "=&r,=&r" (regx)
- : "0,0" (__dummy_lock(klip)), "i,i" (PAL_swpipl), "i,r" (ipl)
- : "$0", "$1", "$16", "$22", "$23", "$24", "$25", "memory"
- );
-}
-#endif /* DEBUG_KERNEL_LOCK */
-
-#define reacquire_kernel_lock(task, cpu, depth) \
-do { \
- if (depth) { \
- long ipl; \
- klock_info_t *klip = &klock_info; \
- __save_and_cli(ipl); \
- ___lock_kernel(klip, cpu, ipl); \
- klip->akp = cpu; \
- (task)->lock_depth = depth; \
- __restore_flags(ipl); \
- } \
-} while (0)
-
-/* The following acquire and release the master kernel global lock,
- * the idea is that the usage of this mechanism becomes less and less
- * as time goes on, to the point where they are no longer needed at all
- * and can thus disappear.
- */
-
-#define lock_kernel() \
-if (current->lock_depth > 0) { \
- ++current->lock_depth; \
-} else { \
- long ipl; \
- int cpu = smp_processor_id(); \
- klock_info_t *klip = &klock_info; \
- __save_and_cli(ipl); \
- ___lock_kernel(klip, cpu, ipl); \
- klip->akp = cpu; \
- current->lock_depth = 1; \
- __restore_flags(ipl); \
-}
-
-/* Release kernel global lock. */
-#define unlock_kernel() \
-if (current->lock_depth > 1) { \
- --current->lock_depth; \
-} else { \
- long ipl; \
- __save_and_cli(ipl); \
- klock_info.akp = NO_PROC_ID; \
- klock_info.kernel_flag = KLOCK_CLEAR; \
- mb(); \
- current->lock_depth = 0; \
- __restore_flags(ipl); \
-}
-
-#endif /* __SMP__ */
-
-#endif /* __ALPHA_SMPLOCK_H */
diff --git a/include/asm-alpha/smplock.h b/include/asm-alpha/smplock.h
new file mode 100644
index 000000000..e62326a10
--- /dev/null
+++ b/include/asm-alpha/smplock.h
@@ -0,0 +1,49 @@
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include <linux/interrupt.h>
+#include <asm/spinlock.h>
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+ if (task->lock_depth >= 0) \
+ spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+ if (!++current->lock_depth)
+ spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+ if (--current->lock_depth < 0)
+ spin_unlock(&kernel_flag);
+}
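The counter makes the lock recursive: lock_depth is -1 while the lock is not held (the initial value is implied by the logic above), so only the outermost pair ever touches the spinlock:

	lock_kernel();		/* depth -1 -> 0: spin_lock(&kernel_flag) */
	lock_kernel();		/* depth  0 -> 1: recursion, no spin */
	unlock_kernel();	/* depth  1 -> 0: still held */
	unlock_kernel();	/* depth  0 -> -1: spin_unlock(&kernel_flag) */
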
diff --git a/include/asm-alpha/string.h b/include/asm-alpha/string.h
index 4b83c8291..11495a0b6 100644
--- a/include/asm-alpha/string.h
+++ b/include/asm-alpha/string.h
@@ -4,10 +4,8 @@
#ifdef __KERNEL__
/*
- * GCC of any recent vintage doesn't do stupid things with bcopy. Of
- * EGCS-devel vintage, it knows all about expanding memcpy inline.
- * For things other than EGCS-devel but still recent, GCC will expand
- * __builtin_memcpy as a simple call to memcpy.
+ * GCC of any recent vintage doesn't do stupid things with bcopy.
+ * EGCS 1.1 knows all about expanding memcpy inline, others don't.
*
* Similarly for a memset with data = 0.
*/
@@ -16,15 +14,15 @@
/* For backward compatibility with modules. Unused otherwise. */
extern void * __memcpy(void *, const void *, size_t);
-#if __GNUC__ > 2 || __GNUC_MINOR__ >= 8
+#if __GNUC__ > 2 || __GNUC_MINOR__ >= 91
#define memcpy __builtin_memcpy
#endif
#define __HAVE_ARCH_MEMSET
-extern void * __constant_c_memset(void *, unsigned long, long);
-extern void * __memset(void *, char, size_t);
+extern void * __constant_c_memset(void *, unsigned long, size_t);
+extern void * __memset(void *, int, size_t);
-#if __GNUC__ > 2 || __GNUC_MINOR__ >= 8
+#if __GNUC__ > 2 || __GNUC_MINOR__ >= 91
#define memset(s, c, n) \
(__builtin_constant_p(c) \
? (__builtin_constant_p(n) && (c) == 0 \
@@ -46,6 +44,20 @@ extern void * __memset(void *, char, size_t);
#define __HAVE_ARCH_STRRCHR
#define __HAVE_ARCH_STRLEN
+/* The following routine is like memset except that it writes 16-bit
+ aligned values. The DEST and COUNT parameters must be even for
+ correct operation. */
+
+#define __HAVE_ARCH_MEMSETW
+extern void * __memsetw(void *dest, unsigned short, size_t count);
+
+#define memsetw(s, c, n) \
+(__builtin_constant_p(c) \
+ ? __constant_c_memset((s),0x0001000100010001UL*(unsigned short)(c),(n)) \
+ : __memsetw((s),(c),(n)))
+
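A hypothetical use, with vram standing for a mapped text buffer; per the comment above, the count is in bytes and must be even:

	unsigned short blank = 0x0720;		/* space, grey on black */
	memsetw(vram, blank, 80 * 25 * 2);	/* fill an 80x25 text screen */
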
+extern int strcasecmp(const char *, const char *);
+
#endif /* __KERNEL__ */
#endif /* __ALPHA_STRING_H__ */
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 72b91317e..a4f18c347 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -100,7 +100,7 @@ extern void wripir(unsigned long);
#define switch_to(prev,next) do { \
current = next; \
- alpha_switch_to((unsigned long) &current->tss - 0xfffffc0000000000); \
+ alpha_switch_to((unsigned long) &current->tss - IDENT_ADDR); \
} while (0)
extern void alpha_switch_to(unsigned long pctxp);
@@ -108,6 +108,9 @@ extern void alpha_switch_to(unsigned long pctxp);
#define mb() \
__asm__ __volatile__("mb": : :"memory")
+#define wmb() \
+__asm__ __volatile__("wmb": : :"memory")
+
#define imb() \
__asm__ __volatile__ ("call_pal %0" : : "i" (PAL_imb) : "memory")
diff --git a/include/asm-alpha/timex.h b/include/asm-alpha/timex.h
new file mode 100644
index 000000000..c0bfb799c
--- /dev/null
+++ b/include/asm-alpha/timex.h
@@ -0,0 +1,11 @@
+/*
+ * linux/include/asm-alpha/timex.h
+ *
+ * ALPHA architecture timex specifications
+ */
+#ifndef _ASMALPHA_TIMEX_H
+#define _ASMALPHA_TIMEX_H
+
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+
+#endif
diff --git a/include/asm-alpha/uaccess.h b/include/asm-alpha/uaccess.h
index 5ab23c062..665acc313 100644
--- a/include/asm-alpha/uaccess.h
+++ b/include/asm-alpha/uaccess.h
@@ -161,7 +161,7 @@ struct __large_struct { unsigned long buf[100]; };
: "m"(__m(addr)), "1"(__gu_err))
#ifdef __HAVE_CPU_BWX
-/* Those lucky bastards with ev56 and later cpus can do byte/word moves. */
+/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */
#define __get_user_16(addr) \
__asm__("1: ldwu %0,%2\n" \
@@ -275,7 +275,7 @@ __asm__ __volatile__("1: stl %r2,%1\n" \
: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#ifdef __HAVE_CPU_BWX
-/* Those lucky bastards with ev56 and later cpus can do byte/word moves. */
+/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */
#define __put_user_16(x,addr) \
__asm__ __volatile__("1: stw %r2,%1\n" \
diff --git a/include/asm-alpha/unaligned.h b/include/asm-alpha/unaligned.h
index 35da3d5ae..8017f6bfa 100644
--- a/include/asm-alpha/unaligned.h
+++ b/include/asm-alpha/unaligned.h
@@ -17,11 +17,24 @@
extern void bad_unaligned_access_length(void);
/*
+ * EGCS 1.1 knows about arbitrary unaligned loads. Define some
+ * packed structures to talk about such things with.
+ */
+
+struct __una_u64 { __u64 x __attribute__((packed)); };
+struct __una_u32 { __u32 x __attribute__((packed)); };
+struct __una_u16 { __u16 x __attribute__((packed)); };
+
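The packed attribute tells the compiler the field may sit at any byte offset, so on Alpha it typically emits the ldq_u/extract sequence itself. A sketch of the EGCS-path access (the helper name is hypothetical):

	unsigned long una_load_u64(const void *p)
	{
		return ((const struct __una_u64 *) p)->x;
	}
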
+/*
* Elemental unaligned loads
*/
extern inline unsigned long __uldq(const unsigned long * r11)
{
+#if __GNUC__ > 2 || __GNUC_MINOR__ >= 91
+ const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
+ return ptr->x;
+#else
unsigned long r1,r2;
__asm__("ldq_u %0,%3\n\t"
"ldq_u %1,%4\n\t"
@@ -32,10 +45,15 @@ extern inline unsigned long __uldq(const unsigned long * r11)
"m" (*r11),
"m" (*(const unsigned long *)(7+(char *) r11)));
return r1 | r2;
+#endif
}
extern inline unsigned long __uldl(const unsigned int * r11)
{
+#if __GNUC__ > 2 || __GNUC_MINOR__ >= 91
+ const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
+ return ptr->x;
+#else
unsigned long r1,r2;
__asm__("ldq_u %0,%3\n\t"
"ldq_u %1,%4\n\t"
@@ -46,10 +64,15 @@ extern inline unsigned long __uldl(const unsigned int * r11)
"m" (*r11),
"m" (*(const unsigned long *)(3+(char *) r11)));
return r1 | r2;
+#endif
}
extern inline unsigned long __uldw(const unsigned short * r11)
{
+#if __GNUC__ > 2 || __GNUC_MINOR__ >= 91
+ const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
+ return ptr->x;
+#else
unsigned long r1,r2;
__asm__("ldq_u %0,%3\n\t"
"ldq_u %1,%4\n\t"
@@ -60,6 +83,7 @@ extern inline unsigned long __uldw(const unsigned short * r11)
"m" (*r11),
"m" (*(const unsigned long *)(1+(char *) r11)));
return r1 | r2;
+#endif
}
/*
@@ -68,6 +92,10 @@ extern inline unsigned long __uldw(const unsigned short * r11)
extern inline void __ustq(unsigned long r5, unsigned long * r11)
{
+#if __GNUC__ > 2 || __GNUC_MINOR__ >= 91
+ struct __una_u64 *ptr = (struct __una_u64 *) r11;
+ ptr->x = r5;
+#else
unsigned long r1,r2,r3,r4;
__asm__("ldq_u %3,%1\n\t"
@@ -84,10 +112,15 @@ extern inline void __ustq(unsigned long r5, unsigned long * r11)
"=m" (*(unsigned long *)(7+(char *) r11)),
"=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
:"r" (r5), "r" (r11));
+#endif
}
extern inline void __ustl(unsigned long r5, unsigned int * r11)
{
+#if __GNUC__ > 2 || __GNUC_MINOR__ >= 91
+ struct __una_u32 *ptr = (struct __una_u32 *) r11;
+ ptr->x = r5;
+#else
unsigned long r1,r2,r3,r4;
__asm__("ldq_u %3,%1\n\t"
@@ -104,10 +137,15 @@ extern inline void __ustl(unsigned long r5, unsigned int * r11)
"=m" (*(unsigned long *)(3+(char *) r11)),
"=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
:"r" (r5), "r" (r11));
+#endif
}
extern inline void __ustw(unsigned long r5, unsigned short * r11)
{
+#if __GNUC__ > 2 || __GNUC_MINOR__ >= 91
+ struct __una_u16 *ptr = (struct __una_u16 *) r11;
+ ptr->x = r5;
+#else
unsigned long r1,r2,r3,r4;
__asm__("ldq_u %3,%1\n\t"
@@ -124,6 +162,7 @@ extern inline void __ustw(unsigned long r5, unsigned short * r11)
"=m" (*(unsigned long *)(1+(char *) r11)),
"=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
:"r" (r5), "r" (r11));
+#endif
}
extern inline unsigned long __get_unaligned(const void *ptr, size_t size)
diff --git a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h
index e5c3636f7..6fcf82aff 100644
--- a/include/asm-alpha/unistd.h
+++ b/include/asm-alpha/unistd.h
@@ -113,7 +113,7 @@
#define __NR_osf_old_sigblock 109 /* not implemented */
#define __NR_osf_old_sigsetmask 110 /* not implemented */
#define __NR_sigsuspend 111
-#define __NR_osf_sigstack 112 /* not implemented */
+#define __NR_osf_sigstack 112
#define __NR_recvmsg 113
#define __NR_sendmsg 114
#define __NR_osf_old_vtrace 115 /* not implemented */
@@ -205,7 +205,7 @@
#define __NR_getpgid 233
#define __NR_getsid 234
-#define __NR_osf_sigaltstack 235 /* not implemented */
+#define __NR_sigaltstack 235
#define __NR_osf_waitid 236 /* not implemented */
#define __NR_osf_priocntlset 237 /* not implemented */
#define __NR_osf_sigsendset 238 /* not implemented */
@@ -304,52 +304,188 @@
#define __NR_getrusage 364
#define __NR_wait4 365
#define __NR_adjtimex 366
-
+#define __NR_getcwd 367
+#define __NR_capget 368
+#define __NR_capset 369
#if defined(__LIBRARY__) && defined(__GNUC__)
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
+#define _syscall_return(type) \
+ return (_sc_err ? errno = _sc_ret, _sc_ret = -1L : 0), (type) _sc_ret
+
+#define _syscall_clobbers \
+ "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", \
+ "$22", "$23", "$24", "$25", "$27", "$28" \
+
#define _syscall0(type, name) \
type name(void) \
{ \
- extern long syscall (int, ...); \
- return syscall(__NR_##name)); \
+ long _sc_ret, _sc_err; \
+ { \
+ register long _sc_0 __asm__("$0"); \
+ register long _sc_19 __asm__("$19"); \
+ \
+ _sc_0 = __NR_##name; \
+ __asm__("callsys # %0 %1 %2" \
+ : "=r"(_sc_0), "=r"(_sc_19) \
+ : "0"(_sc_0) \
+ : _syscall_clobbers); \
+ _sc_ret = _sc_0, _sc_err = _sc_19; \
+ } \
+ _syscall_return(type); \
}
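As a usage sketch, a user program could generate a stub for the newly added getcwd with the two-argument variant defined below (the stub's return type is a guess):

	#define __LIBRARY__
	#include <asm/unistd.h>

	_syscall2(long, getcwd, char *, buf, unsigned long, size)
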
#define _syscall1(type,name,type1,arg1) \
type name(type1 arg1) \
{ \
- extern long syscall (int, ...); \
- return syscall(__NR_##name, arg1); \
+ long _sc_ret, _sc_err; \
+ { \
+ register long _sc_0 __asm__("$0"); \
+ register long _sc_16 __asm__("$16"); \
+ register long _sc_19 __asm__("$19"); \
+ \
+ _sc_0 = __NR_##name; \
+ _sc_16 = (long) (arg1); \
+ __asm__("callsys # %0 %1 %2 %3" \
+ : "=r"(_sc_0), "=r"(_sc_19) \
+ : "0"(_sc_0), "r"(_sc_16) \
+ : _syscall_clobbers); \
+ _sc_ret = _sc_0, _sc_err = _sc_19; \
+ } \
+ _syscall_return(type); \
}
#define _syscall2(type,name,type1,arg1,type2,arg2) \
type name(type1 arg1,type2 arg2) \
{ \
- extern long syscall (int, ...); \
- return syscall(__NR_##name, arg1, arg2); \
+ long _sc_ret, _sc_err; \
+ { \
+ register long _sc_0 __asm__("$0"); \
+ register long _sc_16 __asm__("$16"); \
+ register long _sc_17 __asm__("$17"); \
+ register long _sc_19 __asm__("$19"); \
+ \
+ _sc_0 = __NR_##name; \
+ _sc_16 = (long) (arg1); \
+ _sc_17 = (long) (arg2); \
+ __asm__("callsys # %0 %1 %2 %3 %4" \
+ : "=r"(_sc_0), "=r"(_sc_19) \
+ : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17) \
+ : _syscall_clobbers); \
+ _sc_ret = _sc_0, _sc_err = _sc_19; \
+ } \
+ _syscall_return(type); \
}
#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
type name(type1 arg1,type2 arg2,type3 arg3) \
{ \
- extern long syscall (int, ...); \
- return syscall(__NR_##name, arg1, arg2, arg3); \
+ long _sc_ret, _sc_err; \
+ { \
+ register long _sc_0 __asm__("$0"); \
+ register long _sc_16 __asm__("$16"); \
+ register long _sc_17 __asm__("$17"); \
+ register long _sc_18 __asm__("$18"); \
+ register long _sc_19 __asm__("$19"); \
+ \
+ _sc_0 = __NR_##name; \
+ _sc_16 = (long) (arg1); \
+ _sc_17 = (long) (arg2); \
+ _sc_18 = (long) (arg3); \
+ __asm__("callsys # %0 %1 %2 %3 %4 %5" \
+ : "=r"(_sc_0), "=r"(_sc_19) \
+ : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
+ "r"(_sc_18) \
+ : _syscall_clobbers); \
+ _sc_ret = _sc_0, _sc_err = _sc_19; \
+ } \
+ _syscall_return(type); \
}
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
- extern long syscall (int, ...); \
- return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
+ long _sc_ret, _sc_err; \
+ { \
+ register long _sc_0 __asm__("$0"); \
+ register long _sc_16 __asm__("$16"); \
+ register long _sc_17 __asm__("$17"); \
+ register long _sc_18 __asm__("$18"); \
+ register long _sc_19 __asm__("$19"); \
+ \
+ _sc_0 = __NR_##name; \
+ _sc_16 = (long) (arg1); \
+ _sc_17 = (long) (arg2); \
+ _sc_18 = (long) (arg3); \
+ _sc_19 = (long) (arg4); \
+ __asm__("callsys # %0 %1 %2 %3 %4 %5 %6" \
+ : "=r"(_sc_0), "=r"(_sc_19) \
+ : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
+ "r"(_sc_18), "1"(_sc_19) \
+ : _syscall_clobbers); \
+ _sc_ret = _sc_0, _sc_err = _sc_19; \
+ } \
+ _syscall_return(type); \
}
#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
- extern long syscall (int, ...); \
- return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
+type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
+{ \
+ long _sc_ret, _sc_err; \
+ { \
+ register long _sc_0 __asm__("$0"); \
+ register long _sc_16 __asm__("$16"); \
+ register long _sc_17 __asm__("$17"); \
+ register long _sc_18 __asm__("$18"); \
+ register long _sc_19 __asm__("$19"); \
+ register long _sc_20 __asm__("$20"); \
+ \
+ _sc_0 = __NR_##name; \
+ _sc_16 = (long) (arg1); \
+ _sc_17 = (long) (arg2); \
+ _sc_18 = (long) (arg3); \
+ _sc_19 = (long) (arg4); \
+ _sc_20 = (long) (arg5); \
+ __asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7" \
+ : "=r"(_sc_0), "=r"(_sc_19) \
+ : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
+ "r"(_sc_18), "1"(_sc_19), "r"(_sc_20) \
+ : _syscall_clobbers); \
+ _sc_ret = _sc_0, _sc_err = _sc_19; \
+ } \
+ _syscall_return(type); \
+}
+
+#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
+ type5,arg5,type6,arg6) \
+type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, type6 arg6)\
+{ \
+ long _sc_ret, _sc_err; \
+ { \
+ register long _sc_0 __asm__("$0"); \
+ register long _sc_16 __asm__("$16"); \
+ register long _sc_17 __asm__("$17"); \
+ register long _sc_18 __asm__("$18"); \
+ register long _sc_19 __asm__("$19"); \
+ register long _sc_20 __asm__("$20"); \
+ register long _sc_21 __asm__("$21"); \
+ \
+ _sc_0 = __NR_##name; \
+ _sc_16 = (long) (arg1); \
+ _sc_17 = (long) (arg2); \
+ _sc_18 = (long) (arg3); \
+ _sc_19 = (long) (arg4); \
+ _sc_20 = (long) (arg5); \
+ _sc_21 = (long) (arg6); \
+ __asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7 %8" \
+ : "=r"(_sc_0), "=r"(_sc_19) \
+ : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
+ "r"(_sc_18), "1"(_sc_19), "r"(_sc_20), "r"(_sc_21) \
+ : _syscall_clobbers); \
+ _sc_ret = _sc_0, _sc_err = _sc_19; \
+ } \
+ _syscall_return(type); \
}
#endif /* __LIBRARY__ && __GNUC__ */
diff --git a/include/asm-alpha/vga.h b/include/asm-alpha/vga.h
new file mode 100644
index 000000000..733fe41c7
--- /dev/null
+++ b/include/asm-alpha/vga.h
@@ -0,0 +1,56 @@
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ */
+
+#ifndef _LINUX_ASM_VGA_H_
+#define _LINUX_ASM_VGA_H_
+
+#include <asm/io.h>
+
+#define VT_BUF_HAVE_RW
+#define VT_BUF_HAVE_MEMSETW
+#define VT_BUF_HAVE_MEMCPYF
+
+extern inline void scr_writew(u16 val, u16 *addr)
+{
+ if ((long) addr < 0)
+ *addr = val;
+ else
+ writew(val, (unsigned long) addr);
+}
+
+extern inline u16 scr_readw(u16 *addr)
+{
+ if ((long) addr < 0)
+ return *addr;
+ else
+ return readw((unsigned long) addr);
+}
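The (long) addr < 0 test works because Alpha kernel (KSEG) pointers have the sign bit set, while VGA bus handles are small positive values. A hedged sketch, with the allocation and the conventional text-mode base as illustrative assumptions:

	u16 *vc_mem  = kmalloc(80 * 25 * 2, GFP_KERNEL);	/* KSEG: negative as long */
	u16 *vga_mem = (u16 *) VGA_MAP_MEM(0xb8000);		/* bus handle: positive */

	scr_writew(0x0720, vc_mem);	/* takes the direct-store branch */
	scr_writew(0x0720, vga_mem);	/* takes the writew() branch */
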
+
+extern inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
+{
+ if ((long)s < 0)
+ memsetw(s, c, count);
+ else
+ memsetw_io(s, c, count);
+}
+
+extern inline void scr_memcpyw_from(u16 *d, u16 *s, unsigned int count)
+{
+ memcpy_fromio(d, s, count);
+}
+
+extern inline void scr_memcpyw_to(u16 *d, u16 *s, unsigned int count)
+{
+ memcpy_toio(d, s, count);
+}
+
+
+#define vga_readb readb
+#define vga_writeb writeb
+
+#define VGA_MAP_MEM(x) (x)
+
+#endif