author    Ralf Baechle <ralf@linux-mips.org>    2000-02-04 07:40:19 +0000
committer Ralf Baechle <ralf@linux-mips.org>    2000-02-04 07:40:19 +0000
commit    33263fc5f9ac8e8cb2b22d06af3ce5ac1dd815e4 (patch)
tree      2d1b86a40bef0958a68cf1a2eafbeb0667a70543 /include/asm-alpha
parent    216f5f51aa02f8b113aa620ebc14a9631a217a00 (diff)
Merge with Linux 2.3.32.
Diffstat (limited to 'include/asm-alpha')
-rw-r--r--  include/asm-alpha/cache.h         |   2
-rw-r--r--  include/asm-alpha/core_irongate.h | 563
-rw-r--r--  include/asm-alpha/core_tsunami.h  |   2
-rw-r--r--  include/asm-alpha/div64.h         |   4
-rw-r--r--  include/asm-alpha/dma.h           |   3
-rw-r--r--  include/asm-alpha/floppy.h        |   2
-rw-r--r--  include/asm-alpha/fpu.h           |  23
-rw-r--r--  include/asm-alpha/hardirq.h       |  16
-rw-r--r--  include/asm-alpha/hw_irq.h        |  91
-rw-r--r--  include/asm-alpha/hwrpb.h         |   5
-rw-r--r--  include/asm-alpha/io.h            |  12
-rw-r--r--  include/asm-alpha/irq.h           |  41
-rw-r--r--  include/asm-alpha/machvec.h       |   4
-rw-r--r--  include/asm-alpha/mmu_context.h   |   2
-rw-r--r--  include/asm-alpha/page.h          |   4
-rw-r--r--  include/asm-alpha/pgalloc.h       | 337
-rw-r--r--  include/asm-alpha/pgtable.h       | 375
-rw-r--r--  include/asm-alpha/posix_types.h   |   5
-rw-r--r--  include/asm-alpha/processor.h     |  17
-rw-r--r--  include/asm-alpha/resource.h      |   7
-rw-r--r--  include/asm-alpha/sfp-machine.h   |  84
-rw-r--r--  include/asm-alpha/system.h        | 136
-rw-r--r--  include/asm-alpha/vga.h           |  14
23 files changed, 1306 insertions, 443 deletions
diff --git a/include/asm-alpha/cache.h b/include/asm-alpha/cache.h
index b7662d726..5e46073a8 100644
--- a/include/asm-alpha/cache.h
+++ b/include/asm-alpha/cache.h
@@ -10,6 +10,4 @@
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
#define SMP_CACHE_BYTES L1_CACHE_BYTES
-#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
-
#endif
diff --git a/include/asm-alpha/core_irongate.h b/include/asm-alpha/core_irongate.h
new file mode 100644
index 000000000..be1964f8d
--- /dev/null
+++ b/include/asm-alpha/core_irongate.h
@@ -0,0 +1,563 @@
+#ifndef __ALPHA_IRONGATE__H__
+#define __ALPHA_IRONGATE__H__
+
+#include <linux/types.h>
+#include <asm/compiler.h>
+
+/*
+ * IRONGATE is the internal name for the AMD-751 K7 core logic chipset
+ * which provides memory controller and PCI access for NAUTILUS-based
+ * EV6 (21264) systems.
+ *
+ * This file is based on:
+ *
+ * IronGate management library, (c) 1999 Alpha Processor, Inc.
+ * Begun 19 January 1999 by Stig Telfer, Alpha Processor, Inc.
+ */
+
+/*
+ * The 21264 supports, and internally recognizes, a 44-bit physical
+ * address space that is divided equally between memory address space
+ * and I/O address space. Memory address space resides in the lower
+ * half of the physical address space (PA[43]=0) and I/O address space
+ * resides in the upper half of the physical address space (PA[43]=1).
+ *
+ */
+
+/* Eh? Not offset from memory? */
+#define IRONGATE_DMA_WIN_BASE (0U)
+#define IRONGATE_DMA_WIN_SIZE (0U)
+
+/*
+ * Irongate CSR map. Some of the CSRs are 8 or 16 bits, but all access
+ * through the routines given is 32-bit.
+ *
+ * The first 0x40 bytes are standard as per the PCI spec.
+ */
+
+typedef volatile __u32 igcsr32;
+
+typedef struct {
+ igcsr32 dev_vendor; /* 0x00 - device ID, vendor ID */
+ igcsr32 stat_cmd; /* 0x04 - status, command */
+ igcsr32 class; /* 0x08 - class code, rev ID */
+ igcsr32 latency; /* 0x0C - header type, PCI latency */
+ igcsr32 bar0; /* 0x10 - BAR0 - AGP */
+ igcsr32 bar1; /* 0x14 - BAR1 - GART */
+ igcsr32 bar2; /* 0x18 - Power Management reg block */
+
+ igcsr32 rsrvd0[6]; /* 0x1C-0x33 reserved */
+
+ igcsr32 capptr; /* 0x34 - Capabilities pointer */
+
+ igcsr32 rsrvd1[2]; /* 0x38-0x3F reserved */
+
+ igcsr32 bacsr10; /* 0x40 - base address chip selects */
+ igcsr32 bacsr32; /* 0x44 - base address chip selects */
+ igcsr32 bacsr54; /* 0x48 - base address chip selects */
+
+ igcsr32 rsrvd2[1]; /* 0x4C-0x4F reserved */
+
+ igcsr32 drammap; /* 0x50 - address mapping control */
+ igcsr32 dramtm; /* 0x54 - timing, driver strength */
+ igcsr32 dramms; /* 0x58 - ECC, mode/status */
+
+ igcsr32 rsrvd3[1]; /* 0x5C-0x5F reserved */
+
+ igcsr32 biu0; /* 0x60 - bus interface unit */
+ igcsr32 biusip; /* 0x64 - Serial initialisation pkt */
+
+ igcsr32 rsrvd4[2]; /* 0x68-0x6F reserved */
+
+ igcsr32 mro; /* 0x70 - memory request optimiser */
+
+ igcsr32 rsrvd5[3]; /* 0x74-0x7F reserved */
+
+ igcsr32 whami; /* 0x80 - who am I */
+ igcsr32 pciarb; /* 0x84 - PCI arbitration control */
+ igcsr32 pcicfg; /* 0x88 - PCI config status */
+
+ igcsr32 rsrvd6[5]; /* 0x8C-0x9F reserved */
+
+ /* AGP (bus 1) control registers */
+ igcsr32 agpcap; /* 0xA0 - AGP Capability Identifier */
+ igcsr32 agpstat; /* 0xA4 - AGP status register */
+ igcsr32 agpcmd; /* 0xA8 - AGP control register */
+ igcsr32 agpva; /* 0xAC - AGP Virtual Address Space */
+ igcsr32 agpmode; /* 0xB0 - AGP/GART mode control */
+} Irongate0;
+
+/* Bitfield and mask register definitions */
+
+/* Device, vendor IDs - offset 0x00 */
+
+typedef union {
+ igcsr32 i; /* integer value of CSR */
+ struct {
+ unsigned v : 16;
+ unsigned d : 16;
+ } r; /* structured interpretation */
+} ig_dev_vendor_t;
+
+
+/* Status, command registers - offset 0x04 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned command;
+ unsigned status;
+ } s;
+ struct {
+ /* command register fields */
+ unsigned iospc : 1; /* always reads zero */
+ unsigned memspc : 1; /* PCI memory space accesses? */
+ unsigned iten : 1; /* always 1: can be bus initiator */
+ unsigned scmon : 1; /* always 0 special cycles not chckd */
+ unsigned mwic : 1; /* always 0 - no mem write & invalid */
+ unsigned vgaps : 1; /* always 0 - palette rds not special */
+ unsigned per : 1; /* parity error resp: always 0 */
+ unsigned step : 1; /* address/data stepping : always 0 */
+ unsigned serre : 1; /* 1 = sys err output driver enable */
+ unsigned fbbce : 1; /* fast back-back cycle : always 0 */
+ unsigned zero1 : 6; /* must be zero */
+
+ /* status register fields */
+ unsigned zero2 : 4; /* must be zero */
+ unsigned cl : 1; /* config space capa list: always 1 */
+ unsigned pci66 : 1; /* 66 MHz PCI support - always 0 */
+ unsigned udf : 1; /* user defined features - always 0 */
+ unsigned fbbc : 1; /* back-back transactions - always 0 */
+ unsigned ppe : 1; /* PCI parity error detected (0) */
+ unsigned devsel : 2; /* DEVSEL timing (always 01) */
+ unsigned sta : 1; /* signalled target abort (0) */
+ unsigned rta : 1; /* recvd target abort */
+ unsigned ria : 1; /* recvd initiator abort */
+ unsigned serr : 1; /* SERR has been asserted */
+ unsigned dpe : 1; /* DRAM parity error (0) */
+ } r;
+} ig_stat_cmd_t;
+
+
+/* Revision ID, Programming interface, subclass, baseclass - offset 0x08 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ /* revision ID */
+ unsigned step : 4; /* stepping Revision ID */
+ unsigned die : 4; /* die Revision ID */
+ unsigned pif : 8; /* programming interface (0x00) */
+ unsigned sub : 8; /* subclass code (0x00) */
+ unsigned base: 8; /* baseclass code (0x06) */
+ } r;
+} ig_class_t;
+
+
+/* Latency Timer, PCI Header type - offset 0x0C */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned zero1:8; /* reserved */
+ unsigned lat : 8; /* latency in PCI bus clocks */
+ unsigned hdr : 8; /* PCI header type */
+ unsigned zero2:8; /* reserved */
+ } r;
+} ig_latency_t;
+
+
+/* Base Address Register 0 - offset 0x10 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned mem : 1; /* Reg pts to memory (always 0) */
+ unsigned type: 2; /* 32 bit register = 0b00 */
+ unsigned pref: 1; /* graphics mem prefetchable=1 */
+ unsigned baddrl : 21; /* 32M = minimum alloc -> all zero */
+ unsigned size : 6; /* size requirements for AGP */
+ unsigned zero : 1; /* reserved=0 */
+ } r;
+} ig_bar0_t;
+
+
+/* Base Address Register 1 - offset 0x14 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned mem : 1; /* BAR0 maps to memory -> 0 */
+ unsigned type : 2; /* BAR1 is 32-bit -> 0b00 */
+ unsigned pref : 1; /* graphics mem prefetchable=1 */
+ unsigned baddrl : 8; /* 4K alloc for AGP CSRs -> 0b00 */
+ unsigned baddrh : 20; /* base addr of AGP CSRs A[30:11] */
+ } r;
+} ig_bar1_t;
+
+
+/* Base Address Register 2 - offset 0x18 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned io : 1; /* BAR2 maps to I/O space -> 1 */
+ unsigned zero1: 1; /* reserved */
+ unsigned addr : 22; /* BAR2[31:10] - PM2_BLK base */
+ unsigned zero2: 8; /* reserved */
+ } r;
+} ig_bar2_t;
+
+
+/* Capabilities Pointer - offset 0x34 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned cap : 8; /* =0xA0, offset of AGP ctrl regs */
+ unsigned zero: 24; /* reserved */
+ } r;
+} ig_capptr_t;
+
+
+/* Base Address Chip Select Register 1,0 - offset 0x40 */
+/* Base Address Chip Select Register 3,2 - offset 0x44 */
+/* Base Address Chip Select Register 5,4 - offset 0x48 */
+
+typedef union {
+
+ igcsr32 i;
+ struct {
+ /* lower bank */
+ unsigned en0 : 1; /* memory bank enabled */
+ unsigned mask0 : 6; /* Address mask for A[28:23] */
+ unsigned base0 : 9; /* Bank Base Address A[31:23] */
+
+ /* upper bank */
+ unsigned en1 : 1; /* memory bank enabled */
+ unsigned mask1 : 6; /* Address mask for A[28:23] */
+ unsigned base1 : 9; /* Bank Base Address A[31:23] */
+ } r;
+} ig_bacsr_t, ig_bacsr10_t, ig_bacsr32_t, ig_bacsr54_t;
+
+
+/* SDRAM Address Mapping Control Register - offset 0x50 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned z1 : 1; /* reserved */
+ unsigned bnks0: 1; /* 0->2 banks in chip select 0 */
+ unsigned am0 : 1; /* row/column addressing */
+ unsigned z2 : 1; /* reserved */
+
+ unsigned z3 : 1; /* reserved */
+ unsigned bnks1: 1; /* 0->2 banks in chip select 1 */
+ unsigned am1 : 1; /* row/column addressing */
+ unsigned z4 : 1; /* reserved */
+
+ unsigned z5 : 1; /* reserved */
+ unsigned bnks2: 1; /* 0->2 banks in chip select 2 */
+ unsigned am2 : 1; /* row/column addressing */
+ unsigned z6 : 1; /* reserved */
+
+ unsigned z7 : 1; /* reserved */
+ unsigned bnks3: 1; /* 0->2 banks in chip select 3 */
+ unsigned am3 : 1; /* row/column addressing */
+ unsigned z8 : 1; /* reserved */
+
+ unsigned z9 : 1; /* reserved */
+ unsigned bnks4: 1; /* 0->2 banks in chip select 4 */
+ unsigned am4 : 1; /* row/column addressing */
+ unsigned z10 : 1; /* reserved */
+
+ unsigned z11 : 1; /* reserved */
+ unsigned bnks5: 1; /* 0->2 banks in chip select 5 */
+ unsigned am5 : 1; /* row/column addressing */
+ unsigned z12 : 1; /* reserved */
+
+ unsigned rsrvd: 8; /* reserved */
+ } r;
+} ig_drammap_t;
+
+
+/* DRAM timing and driver strength register - offset 0x54 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ /* DRAM timing parameters */
+ unsigned trcd : 2;
+ unsigned tcl : 2;
+ unsigned tras: 3;
+ unsigned trp : 2;
+ unsigned trc : 3;
+ unsigned icl: 2;
+ unsigned ph : 2;
+
+ /* Chipselect driver strength */
+ unsigned adra : 1;
+ unsigned adrb : 1;
+ unsigned ctrl : 3;
+ unsigned dqm : 1;
+ unsigned cs : 1;
+ unsigned clk: 1;
+ unsigned rsrvd:8;
+ } r;
+} ig_dramtm_t;
+
+
+/* DRAM Mode / Status and ECC Register - offset 0x58 */
+
+typedef union {
+ igcsr32 i;
+ struct {
+ unsigned chipsel : 6; /* failing ECC chip select */
+ unsigned zero1 : 2; /* always reads zero */
+ unsigned status : 2; /* ECC Detect logic status */
+ unsigned zero2 : 6; /* always reads zero */
+
+ unsigned cycles : 2; /* cycles per refresh, see table */
+ unsigned en : 1; /* ECC enable */
+ unsigned r : 1; /* Large burst enable (=0) */
+ unsigned bre : 1; /* Burst refresh enable */
+ unsigned zero3 : 2; /* reserved = 0 */
+ unsigned mwe : 1; /* Enable writes to DRAM mode reg */
+ unsigned type : 1; /* SDRAM = 0, default */
+ unsigned sdraminit : 1; /* SDRAM init - set params first! */
+ unsigned zero4 : 6; /* reserved = 0 */
+ } r;
+} ig_dramms_t;
+
+
+/*
+ * Memory spaces:
+ */
+
+/* ??? the following probably needs fixing */
+/* Irongate is consistent with a subset of the Tsunami memory map */
+/* XXX: Do we need to conditionalize on this? */
+#ifdef USE_48_BIT_KSEG
+#define IRONGATE_BIAS 0x80000000000UL
+#else
+#define IRONGATE_BIAS 0x10000000000UL
+#endif
+
+
+#define IRONGATE_MEM (IDENT_ADDR | IRONGATE_BIAS | 0x000000000UL)
+#define IRONGATE_IACK_SC (IDENT_ADDR | IRONGATE_BIAS | 0x1F8000000UL)
+#define IRONGATE_IO (IDENT_ADDR | IRONGATE_BIAS | 0x1FC000000UL)
+#define IRONGATE_CONF (IDENT_ADDR | IRONGATE_BIAS | 0x1FE000000UL)
+
+
+#define IRONGATE0 ((Irongate0 *) IRONGATE_CONF)
+
+/*
+ * Data structure for handling IRONGATE machine checks:
+ * This is the standard OSF logout frame
+ */
+
+#define SCB_Q_SYSERR 0x620 /* OSF definitions */
+#define SCB_Q_PROCERR 0x630
+#define SCB_Q_SYSMCHK 0x660
+#define SCB_Q_PROCMCHK 0x670
+
+struct el_IRONGATE_sysdata_mcheck {
+ __u32 FrameSize; /* Bytes, including this field */
+ __u32 FrameFlags; /* <31> = Retry, <30> = Second Error */
+ __u32 CpuOffset; /* Offset to CPU-specific info */
+ __u32 SystemOffset; /* Offset to system-specific info */
+ __u32 MCHK_Code;
+ __u32 MCHK_Frame_Rev;
+ __u64 I_STAT;
+ __u64 DC_STAT;
+ __u64 C_ADDR;
+ __u64 DC1_SYNDROME;
+ __u64 DC0_SYNDROME;
+ __u64 C_STAT;
+ __u64 C_STS;
+ __u64 RESERVED0;
+ __u64 EXC_ADDR;
+ __u64 IER_CM;
+ __u64 ISUM;
+ __u64 MM_STAT;
+ __u64 PAL_BASE;
+ __u64 I_CTL;
+ __u64 PCTX;
+};
+
+
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
+/*
+ * Translate physical memory address as seen on (PCI) bus into
+ * a kernel virtual address and vv.
+ */
+
+__EXTERN_INLINE unsigned long irongate_virt_to_bus(void * address)
+{
+ return virt_to_phys(address) + IRONGATE_DMA_WIN_BASE;
+}
+
+__EXTERN_INLINE void * irongate_bus_to_virt(unsigned long address)
+{
+ return phys_to_virt(address - IRONGATE_DMA_WIN_BASE);
+}
+
+/*
+ * I/O functions:
+ *
+ * IRONGATE (AMD-751) PCI/memory support chip for the EV6 (21264) and
+ * K7 can only use linear accesses to get at PCI memory and I/O spaces.
+ */
+
+#define vucp volatile unsigned char *
+#define vusp volatile unsigned short *
+#define vuip volatile unsigned int *
+#define vulp volatile unsigned long *
+
+__EXTERN_INLINE unsigned int irongate_inb(unsigned long addr)
+{
+ return __kernel_ldbu(*(vucp)(addr + IRONGATE_IO));
+}
+
+__EXTERN_INLINE void irongate_outb(unsigned char b, unsigned long addr)
+{
+ __kernel_stb(b, *(vucp)(addr + IRONGATE_IO));
+ mb();
+}
+
+__EXTERN_INLINE unsigned int irongate_inw(unsigned long addr)
+{
+ return __kernel_ldwu(*(vusp)(addr + IRONGATE_IO));
+}
+
+__EXTERN_INLINE void irongate_outw(unsigned short b, unsigned long addr)
+{
+ __kernel_stw(b, *(vusp)(addr + IRONGATE_IO));
+ mb();
+}
+
+__EXTERN_INLINE unsigned int irongate_inl(unsigned long addr)
+{
+ return *(vuip)(addr + IRONGATE_IO);
+}
+
+__EXTERN_INLINE void irongate_outl(unsigned int b, unsigned long addr)
+{
+ *(vuip)(addr + IRONGATE_IO) = b;
+ mb();
+}
+
+/*
+ * Memory functions. All accesses are done through linear space.
+ */
+
+__EXTERN_INLINE unsigned long irongate_readb(unsigned long addr)
+{
+ return __kernel_ldbu(*(vucp)addr);
+}
+
+__EXTERN_INLINE unsigned long irongate_readw(unsigned long addr)
+{
+ return __kernel_ldwu(*(vusp)addr);
+}
+
+__EXTERN_INLINE unsigned long irongate_readl(unsigned long addr)
+{
+ return *(vuip)addr;
+}
+
+__EXTERN_INLINE unsigned long irongate_readq(unsigned long addr)
+{
+ return *(vulp)addr;
+}
+
+__EXTERN_INLINE void irongate_writeb(unsigned char b, unsigned long addr)
+{
+ __kernel_stb(b, *(vucp)addr);
+}
+
+__EXTERN_INLINE void irongate_writew(unsigned short b, unsigned long addr)
+{
+ __kernel_stw(b, *(vusp)addr);
+}
+
+__EXTERN_INLINE void irongate_writel(unsigned int b, unsigned long addr)
+{
+ *(vuip)addr = b;
+}
+
+__EXTERN_INLINE void irongate_writeq(unsigned long b, unsigned long addr)
+{
+ *(vulp)addr = b;
+}
+
+__EXTERN_INLINE unsigned long irongate_ioremap(unsigned long addr)
+{
+ return addr + IRONGATE_MEM;
+}
+
+__EXTERN_INLINE int irongate_is_ioaddr(unsigned long addr)
+{
+ return addr >= IRONGATE_MEM;
+}
+
+#undef vucp
+#undef vusp
+#undef vuip
+#undef vulp
+
+#ifdef __WANT_IO_DEF
+
+#define virt_to_bus irongate_virt_to_bus
+#define bus_to_virt irongate_bus_to_virt
+
+#define __inb irongate_inb
+#define __inw irongate_inw
+#define __inl irongate_inl
+#define __outb irongate_outb
+#define __outw irongate_outw
+#define __outl irongate_outl
+#define __readb irongate_readb
+#define __readw irongate_readw
+#define __writeb irongate_writeb
+#define __writew irongate_writew
+#define __readl irongate_readl
+#define __readq irongate_readq
+#define __writel irongate_writel
+#define __writeq irongate_writeq
+#define __ioremap irongate_ioremap
+#define __is_ioaddr irongate_is_ioaddr
+
+#define inb(port) __inb((port))
+#define inw(port) __inw((port))
+#define inl(port) __inl((port))
+#define outb(v, port) __outb((v),(port))
+#define outw(v, port) __outw((v),(port))
+#define outl(v, port) __outl((v),(port))
+
+#define __raw_readb(a) __readb((unsigned long)(a))
+#define __raw_readw(a) __readw((unsigned long)(a))
+#define __raw_readl(a) __readl((unsigned long)(a))
+#define __raw_readq(a) __readq((unsigned long)(a))
+#define __raw_writeb(v,a) __writeb((v),(unsigned long)(a))
+#define __raw_writew(v,a) __writew((v),(unsigned long)(a))
+#define __raw_writel(v,a) __writel((v),(unsigned long)(a))
+#define __raw_writeq(v,a) __writeq((v),(unsigned long)(a))
+
+#endif /* __WANT_IO_DEF */
+
+#ifdef __IO_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __IO_EXTERN_INLINE
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __ALPHA_IRONGATE__H__ */
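
The accessor routines above show the practical consequence of linear-only addressing: ioremap is a constant offset and every read or write is a plain volatile load or store, with no sparse-space address shifting as on older Alpha core logic. A minimal user-space sketch of the same pattern (all names and constants here are illustrative stand-ins, not the real kernel values):

#include <stdio.h>

static unsigned int fake_window[0x800];            /* stands in for the PCI window */
#define FAKE_MEM_BIAS ((unsigned long)fake_window) /* stands in for IRONGATE_MEM */

/* cf. irongate_ioremap: mapping is just "bus address + constant bias" */
static unsigned long fake_ioremap(unsigned long bus_addr)
{
        return bus_addr + FAKE_MEM_BIAS;
}

/* cf. irongate_readl/irongate_writel: plain volatile accesses */
static unsigned int fake_readl(unsigned long addr)
{
        return *(volatile unsigned int *)addr;
}

static void fake_writel(unsigned int v, unsigned long addr)
{
        *(volatile unsigned int *)addr = v;
}

int main(void)
{
        unsigned long va = fake_ioremap(0x100);      /* "map" bus offset 0x100 */
        fake_writel(0xdeadbeef, va);
        printf("read back: %#x\n", fake_readl(va));  /* 0xdeadbeef */
        return 0;
}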
diff --git a/include/asm-alpha/core_tsunami.h b/include/asm-alpha/core_tsunami.h
index 14bc80e2d..5bfa4d658 100644
--- a/include/asm-alpha/core_tsunami.h
+++ b/include/asm-alpha/core_tsunami.h
@@ -291,6 +291,8 @@ union TPchipPERRMASK {
#define TSUNAMI_IO_BIAS TSUNAMI_IO(0)
#define TSUNAMI_MEM_BIAS TSUNAMI_MEM(0)
+/* The IO address space is larger than 0xffff */
+#define TSUNAMI_IO_SPACE (TSUNAMI_CONF(0) - TSUNAMI_IO(0))
/*
* Data structure for handling TSUNAMI machine checks:
diff --git a/include/asm-alpha/div64.h b/include/asm-alpha/div64.h
index 6260adb75..080dcd480 100644
--- a/include/asm-alpha/div64.h
+++ b/include/asm-alpha/div64.h
@@ -7,8 +7,8 @@
*/
#define do_div(n,base) ({ \
int __res; \
- __res = ((unsigned long) n) % (unsigned) base; \
- n = ((unsigned long) n) / (unsigned) base; \
+ __res = ((unsigned long) (n)) % (unsigned) (base); \
+ (n) = ((unsigned long) (n)) / (unsigned) (base); \
__res; })
#endif
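
The parenthesization fix above matters because do_div's arguments are substituted textually: with an expression as the base, the old expansion binds "%" tighter than the expression's own operators. A small host-side demonstration using the same GNU statement-expression idiom (do_div_old and do_div_new are illustrative names for the before/after forms):

#include <stdio.h>

#define do_div_old(n,base) ({ \
        int __res; \
        __res = ((unsigned long) n) % (unsigned) base; \
        n = ((unsigned long) n) / (unsigned) base; \
        __res; })

#define do_div_new(n,base) ({ \
        int __res; \
        __res = ((unsigned long) (n)) % (unsigned) (base); \
        (n) = ((unsigned long) (n)) / (unsigned) (base); \
        __res; })

int main(void)
{
        unsigned long a = 100, b = 100;
        /* old: "% (unsigned) 9 + 1" parses as "(a % 9) + 1" */
        int r_old = do_div_old(a, 9 + 1);
        int r_new = do_div_new(b, 9 + 1);
        printf("old: rem=%d quot=%lu\n", r_old, a);  /* rem=2 quot=12 */
        printf("new: rem=%d quot=%lu\n", r_new, b);  /* rem=0 quot=10 */
        return 0;
}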
diff --git a/include/asm-alpha/dma.h b/include/asm-alpha/dma.h
index 28762e674..e6d667144 100644
--- a/include/asm-alpha/dma.h
+++ b/include/asm-alpha/dma.h
@@ -91,6 +91,7 @@
*/
#define ALPHA_XL_MAX_DMA_ADDRESS (IDENT_ADDR+0x3000000UL)
#define ALPHA_RUFFIAN_MAX_DMA_ADDRESS (IDENT_ADDR+0x1000000UL)
+#define ALPHA_NAUTILUS_MAX_DMA_ADDRESS (IDENT_ADDR+0x1000000UL)
#define ALPHA_MAX_DMA_ADDRESS (~0UL)
#ifdef CONFIG_ALPHA_GENERIC
@@ -100,6 +101,8 @@
# define MAX_DMA_ADDRESS ALPHA_XL_MAX_DMA_ADDRESS
# elif defined(CONFIG_ALPHA_RUFFIAN)
# define MAX_DMA_ADDRESS ALPHA_RUFFIAN_MAX_DMA_ADDRESS
+# elif defined(CONFIG_ALPHA_NAUTILUS)
+# define MAX_DMA_ADDRESS ALPHA_NAUTILUS_MAX_DMA_ADDRESS
# else
# define MAX_DMA_ADDRESS ALPHA_MAX_DMA_ADDRESS
# endif
diff --git a/include/asm-alpha/floppy.h b/include/asm-alpha/floppy.h
index 8203920b9..a08aa973c 100644
--- a/include/asm-alpha/floppy.h
+++ b/include/asm-alpha/floppy.h
@@ -64,7 +64,7 @@ static int FDC2 = -1;
#ifdef CONFIG_ALPHA_GENERIC
# define CROSS_64KB(a,s) (__CROSS_64KB(a,s) && ~alpha_mv.max_dma_address)
#else
-# if defined(CONFIG_ALPHA_XL) || defined(CONFIG_ALPHA_RUFFIAN)
+# if defined(CONFIG_ALPHA_XL) || defined(CONFIG_ALPHA_RUFFIAN) || defined(CONFIG_ALPHA_NAUTILUS)
# define CROSS_64KB(a,s) __CROSS_64KB(a,s)
# else
# define CROSS_64KB(a,s) (0)
diff --git a/include/asm-alpha/fpu.h b/include/asm-alpha/fpu.h
index 5e56e7db2..118941b3c 100644
--- a/include/asm-alpha/fpu.h
+++ b/include/asm-alpha/fpu.h
@@ -4,6 +4,8 @@
/*
* Alpha floating-point control register defines:
*/
+#define FPCR_DNOD (1UL<<47) /* denorm INV trap disable */
+#define FPCR_DNZ (1UL<<48) /* denorms to zero */
#define FPCR_INVD (1UL<<49) /* invalid op disable (opt.) */
#define FPCR_DZED (1UL<<50) /* division by zero disable (opt.) */
#define FPCR_OVFD (1UL<<51) /* overflow disable (optional) */
@@ -42,9 +44,16 @@
#define IEEE_TRAP_ENABLE_OVF (1UL<<3) /* overflow */
#define IEEE_TRAP_ENABLE_UNF (1UL<<4) /* underflow */
#define IEEE_TRAP_ENABLE_INE (1UL<<5) /* inexact */
+#define IEEE_TRAP_ENABLE_DNO (1UL<<6) /* denorm */
#define IEEE_TRAP_ENABLE_MASK (IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE |\
IEEE_TRAP_ENABLE_OVF | IEEE_TRAP_ENABLE_UNF |\
- IEEE_TRAP_ENABLE_INE)
+ IEEE_TRAP_ENABLE_INE | IEEE_TRAP_ENABLE_DNO)
+
+/* Denorm and Underflow flushing */
+#define IEEE_MAP_DMZ (1UL<<12) /* Map denorm inputs to zero */
+#define IEEE_MAP_UMZ (1UL<<13) /* Map underflowed outputs to zero */
+
+#define IEEE_MAP_MASK (IEEE_MAP_DMZ | IEEE_MAP_UMZ)
/* status bits coming from fpcr: */
#define IEEE_STATUS_INV (1UL<<17)
@@ -52,12 +61,16 @@
#define IEEE_STATUS_OVF (1UL<<19)
#define IEEE_STATUS_UNF (1UL<<20)
#define IEEE_STATUS_INE (1UL<<21)
+#define IEEE_STATUS_DNO (1UL<<22)
#define IEEE_STATUS_MASK (IEEE_STATUS_INV | IEEE_STATUS_DZE | \
IEEE_STATUS_OVF | IEEE_STATUS_UNF | \
- IEEE_STATUS_INE)
+ IEEE_STATUS_INE | IEEE_STATUS_DNO)
+
+#define IEEE_SW_MASK (IEEE_TRAP_ENABLE_MASK | IEEE_STATUS_MASK | IEEE_MAP_MASK)
-#define IEEE_SW_MASK (IEEE_TRAP_ENABLE_MASK | IEEE_STATUS_MASK)
+#define IEEE_CURRENT_RM_SHIFT 32
+#define IEEE_CURRENT_RM_MASK (3UL<<IEEE_CURRENT_RM_SHIFT)
#define IEEE_STATUS_TO_EXCSUM_SHIFT 16
@@ -78,6 +91,7 @@ ieee_swcr_to_fpcr(unsigned long sw)
| IEEE_TRAP_ENABLE_DZE
| IEEE_TRAP_ENABLE_OVF)) << 48;
fp |= (~sw & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE)) << 57;
+ fp |= (~sw & IEEE_TRAP_ENABLE_DNO) << 41;
return fp;
}
@@ -90,6 +104,7 @@ ieee_fpcr_to_swcr(unsigned long fp)
| IEEE_TRAP_ENABLE_DZE
| IEEE_TRAP_ENABLE_OVF);
sw |= (~fp >> 57) & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE);
+ sw |= (~fp >> 41) & IEEE_TRAP_ENABLE_DNO;
return sw;
}
@@ -124,6 +139,8 @@ static inline void wrfpcr(unsigned long val)
extern unsigned long alpha_read_fp_reg (unsigned long reg);
extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
+extern unsigned long alpha_read_fp_reg_s (unsigned long reg);
+extern void alpha_write_fp_reg_s (unsigned long reg, unsigned long val);
#endif /* __KERNEL__ */
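
The shift constants in ieee_swcr_to_fpcr/ieee_fpcr_to_swcr encode where each software bit lands in the hardware FPCR, and the ~sw inversion reflects that the FPCR carries trap *disable* bits while the software word carries trap *enables*. The new "<< 41" line moves software bit 6 (DNO) onto FPCR bit 47 (DNOD); a quick stand-alone check of that arithmetic, using the bit positions from the surrounding definitions:

#include <stdio.h>

#define IEEE_TRAP_ENABLE_INV (1UL<<1)   /* software: invalid-op trap enable */
#define IEEE_TRAP_ENABLE_DNO (1UL<<6)   /* software: denorm trap enable */
#define FPCR_INVD            (1UL<<49)  /* hardware: invalid-op trap disable */
#define FPCR_DNOD            (1UL<<47)  /* hardware: denorm trap disable */

int main(void)
{
        /* bit 1 + 48 = bit 49, bit 6 + 41 = bit 47 */
        printf("%d %d\n",
               (IEEE_TRAP_ENABLE_INV << 48) == FPCR_INVD,
               (IEEE_TRAP_ENABLE_DNO << 41) == FPCR_DNOD);  /* 1 1 */
        return 0;
}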
diff --git a/include/asm-alpha/hardirq.h b/include/asm-alpha/hardirq.h
index 31cdbac06..67544e13d 100644
--- a/include/asm-alpha/hardirq.h
+++ b/include/asm-alpha/hardirq.h
@@ -28,8 +28,8 @@ extern int __local_irq_count;
#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu) ((void) 0)
-#define hardirq_enter(cpu, irq) (local_irq_count(cpu)++)
-#define hardirq_exit(cpu, irq) (local_irq_count(cpu)--)
+#define irq_enter(cpu, irq) (local_irq_count(cpu)++)
+#define irq_exit(cpu, irq) (local_irq_count(cpu)--)
#define synchronize_irq() barrier()
@@ -52,13 +52,16 @@ static inline void release_irqlock(int cpu)
}
}
-static inline void hardirq_enter(int cpu, int irq)
+static inline void irq_enter(int cpu, int irq)
{
++local_irq_count(cpu);
atomic_inc(&global_irq_count);
+
+ while (spin_is_locked(&global_irq_lock))
+ barrier();
}
-static inline void hardirq_exit(int cpu, int irq)
+static inline void irq_exit(int cpu, int irq)
{
atomic_dec(&global_irq_count);
--local_irq_count(cpu);
@@ -66,11 +69,10 @@ static inline void hardirq_exit(int cpu, int irq)
static inline int hardirq_trylock(int cpu)
{
- return (!atomic_read(&global_irq_count)
- && !spin_is_locked(&global_irq_lock));
+ return !local_irq_count(cpu) && !spin_is_locked(&global_irq_lock);
}
-#define hardirq_endlock(cpu) ((void)0)
+#define hardirq_endlock(cpu) do { } while (0)
extern void synchronize_irq(void);
diff --git a/include/asm-alpha/hw_irq.h b/include/asm-alpha/hw_irq.h
new file mode 100644
index 000000000..c6af57999
--- /dev/null
+++ b/include/asm-alpha/hw_irq.h
@@ -0,0 +1,91 @@
+#ifndef _ALPHA_HW_IRQ_H
+#define _ALPHA_HW_IRQ_H
+/*
+ * linux/arch/alpha/kernel/irq.h
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 1998 Richard Henderson
+ *
+ * This file contains declarations and inline functions for interfacing
+ * with the IRQ handling routines in irq.c.
+ */
+
+#include <linux/config.h>
+
+#define STANDARD_INIT_IRQ_PROLOG \
+ outb(0, DMA1_RESET_REG); \
+ outb(0, DMA2_RESET_REG); \
+ outb(0, DMA1_CLR_MASK_REG); \
+ outb(0, DMA2_CLR_MASK_REG)
+
+extern unsigned long _alpha_irq_masks[2];
+#define alpha_irq_mask _alpha_irq_masks[0]
+
+extern void common_ack_irq(unsigned long irq);
+extern void isa_device_interrupt(unsigned long vector, struct pt_regs * regs);
+extern void srm_device_interrupt(unsigned long vector, struct pt_regs * regs);
+
+extern void handle_irq(int irq, int ack, struct pt_regs * regs);
+
+#define RTC_IRQ 8
+#ifdef CONFIG_RTC
+#define TIMER_IRQ 0 /* timer is the pit */
+#else
+#define TIMER_IRQ RTC_IRQ /* timer is the rtc */
+#endif
+
+/*
+ * PROBE_MASK is the bitset of irqs that we consider for autoprobing.
+ */
+
+/* NOTE: we only handle the first 64 IRQs in this code. */
+
+/* The normal mask includes all the IRQs except timer IRQ 0. */
+#define _PROBE_MASK(nr_irqs) \
+ (((nr_irqs > 63) ? ~0UL : ((1UL << (nr_irqs & 63)) - 1)) & ~1UL)
+
+/* Mask out unused timer irq 0 and RTC irq 8. */
+#define P2K_PROBE_MASK (_PROBE_MASK(16) & ~0x101UL)
+
+/* Mask out unused timer irq 0, "irqs" 20-30, and the EISA cascade. */
+#define ALCOR_PROBE_MASK (_PROBE_MASK(48) & ~0xfff000000001UL)
+
+/* Leave timer IRQ 0 in the mask. */
+#define RUFFIAN_PROBE_MASK (_PROBE_MASK(48) | 1UL)
+
+/* Do not probe/enable beyond the PCI devices. */
+#define TSUNAMI_PROBE_MASK _PROBE_MASK(48)
+
+#if defined(CONFIG_ALPHA_GENERIC)
+# define PROBE_MASK alpha_mv.irq_probe_mask
+#elif defined(CONFIG_ALPHA_P2K)
+# define PROBE_MASK P2K_PROBE_MASK
+#elif defined(CONFIG_ALPHA_ALCOR) || defined(CONFIG_ALPHA_XLT)
+# define PROBE_MASK ALCOR_PROBE_MASK
+#elif defined(CONFIG_ALPHA_RUFFIAN)
+# define PROBE_MASK RUFFIAN_PROBE_MASK
+#elif defined(CONFIG_ALPHA_DP264)
+# define PROBE_MASK TSUNAMI_PROBE_MASK
+#else
+# define PROBE_MASK _PROBE_MASK(NR_IRQS)
+#endif
+
+
+extern char _stext;
+static inline void alpha_do_profile (unsigned long pc)
+{
+ if (prof_buffer && current->pid) {
+ pc -= (unsigned long) &_stext;
+ pc >>= prof_shift;
+ /*
+ * Don't ignore out-of-bounds PC values silently,
+ * put them into the last histogram slot, so if
+ * present, they will show up as a sharp peak.
+ */
+ if (pc > prof_len - 1)
+ pc = prof_len - 1;
+ atomic_inc((atomic_t *)&prof_buffer[pc]);
+ }
+}
+
+#endif
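
Since the probe masks are plain bit arithmetic, the P2K case can be checked by hand: _PROBE_MASK(16) sets bits 1-15 (0xfffe), and clearing 0x101 additionally removes RTC irq 8, leaving 0xfefe. A stand-alone check:

#include <stdio.h>

#define _PROBE_MASK(nr_irqs) \
        (((nr_irqs > 63) ? ~0UL : ((1UL << (nr_irqs & 63)) - 1)) & ~1UL)
#define P2K_PROBE_MASK (_PROBE_MASK(16) & ~0x101UL)

int main(void)
{
        printf("%#lx\n", _PROBE_MASK(16));  /* 0xfffe: irqs 1-15, timer irq 0 out */
        printf("%#lx\n", P2K_PROBE_MASK);   /* 0xfefe: RTC irq 8 also out */
        return 0;
}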
diff --git a/include/asm-alpha/hwrpb.h b/include/asm-alpha/hwrpb.h
index 69823af35..ec710c9c8 100644
--- a/include/asm-alpha/hwrpb.h
+++ b/include/asm-alpha/hwrpb.h
@@ -55,11 +55,16 @@
#define ST_DEC_TSUNAMI 34 /* Tsunami systype */
#define ST_DEC_WILDFIRE 35 /* Wildfire systype */
#define ST_DEC_CUSCO 36 /* CUSCO systype */
+#define ST_DEC_EIGER 37 /* Eiger systype */
/* UNOFFICIAL!!! */
#define ST_UNOFFICIAL_BIAS 100
#define ST_DTI_RUFFIAN 101 /* RUFFIAN systype */
+/* Alpha Processor, Inc. systems */
+#define ST_API_BIAS 200 /* Offset for API systems */
+#define ST_API_NAUTILUS (ST_API_BIAS + 1) /* Nautilus systype */
+
struct pcb_struct {
unsigned long ksp;
unsigned long usp;
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
index 8505e47c8..e8ed0baa1 100644
--- a/include/asm-alpha/io.h
+++ b/include/asm-alpha/io.h
@@ -132,20 +132,22 @@ extern void _sethae (unsigned long addr); /* cached version */
# include <asm/core_apecs.h>
#elif defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
+#elif defined(CONFIG_ALPHA_IRONGATE)
+# include <asm/core_irongate.h>
+#elif defined(CONFIG_ALPHA_JENSEN)
+# include <asm/jensen.h>
#elif defined(CONFIG_ALPHA_LCA)
# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
# include <asm/core_mcpcia.h>
+#elif defined(CONFIG_ALPHA_POLARIS)
+# include <asm/core_polaris.h>
#elif defined(CONFIG_ALPHA_PYXIS)
# include <asm/core_pyxis.h>
#elif defined(CONFIG_ALPHA_T2)
# include <asm/core_t2.h>
#elif defined(CONFIG_ALPHA_TSUNAMI)
# include <asm/core_tsunami.h>
-#elif defined(CONFIG_ALPHA_JENSEN)
-# include <asm/jensen.h>
-#elif defined(CONFIG_ALPHA_POLARIS)
-# include <asm/core_polaris.h>
#else
#error "What system is this?"
#endif
@@ -224,6 +226,8 @@ extern void _writeq(unsigned long b, unsigned long addr);
# define outl_p outl
#endif
+#define IO_SPACE_LIMIT 0xffff
+
#else
/* Userspace declarations. */
diff --git a/include/asm-alpha/irq.h b/include/asm-alpha/irq.h
index ad1c917a3..f9b4d9f12 100644
--- a/include/asm-alpha/irq.h
+++ b/include/asm-alpha/irq.h
@@ -16,7 +16,7 @@
many places throughout the kernel to size static arrays. That's ok,
we'll use alpha_mv.nr_irqs when we want the real thing. */
-# define NR_IRQS 64
+# define NR_IRQS 128
#elif defined(CONFIG_ALPHA_CABRIOLET) || \
defined(CONFIG_ALPHA_EB66P) || \
@@ -43,45 +43,17 @@
# define NR_IRQS 40
#elif defined(CONFIG_ALPHA_DP264) || \
- defined(CONFIG_ALPHA_RAWHIDE)
+ defined(CONFIG_ALPHA_EIGER)
# define NR_IRQS 64
-#elif defined(CONFIG_ALPHA_TAKARA)
-# define NR_IRQS 20
+#elif defined(CONFIG_ALPHA_RAWHIDE) || \
+ defined(CONFIG_ALPHA_TAKARA)
+# define NR_IRQS 128
#else /* everyone else */
# define NR_IRQS 16
#endif
-/*
- * PROBE_MASK is the bitset of irqs that we consider for autoprobing.
- */
-
-/* The normal mask includes all the IRQs except the timer. */
-#define _PROBE_MASK(nr_irqs) (((1UL << (nr_irqs & 63)) - 1) & ~1UL)
-
-/* Mask out unused timer irq 0 and RTC irq 8. */
-#define P2K_PROBE_MASK (_PROBE_MASK(16) & ~0x101UL)
-
-/* Mask out unused timer irq 0, "irqs" 20-30, and the EISA cascade. */
-#define ALCOR_PROBE_MASK (_PROBE_MASK(48) & ~0xfff000000001UL)
-
-/* Leave timer irq 0 in the mask. */
-#define RUFFIAN_PROBE_MASK (_PROBE_MASK(48) | 1UL)
-
-#if defined(CONFIG_ALPHA_GENERIC)
-# define PROBE_MASK alpha_mv.irq_probe_mask
-#elif defined(CONFIG_ALPHA_P2K)
-# define PROBE_MASK P2K_PROBE_MASK
-#elif defined(CONFIG_ALPHA_ALCOR) || defined(CONFIG_ALPHA_XLT)
-# define PROBE_MASK ALCOR_PROBE_MASK
-#elif defined(CONFIG_ALPHA_RUFFIAN)
-# define PROBE_MASK RUFFIAN_PROBE_MASK
-#else
-# define PROBE_MASK _PROBE_MASK(NR_IRQS)
-#endif
-
-
static __inline__ int irq_cannonicalize(int irq)
{
/*
@@ -95,9 +67,6 @@ extern void disable_irq(unsigned int);
extern void disable_irq_nosync(unsigned int);
extern void enable_irq(unsigned int);
-extern void irq_enter(int cpu, int irq);
-extern void irq_exit(int cpu, int irq);
-
struct pt_regs;
extern void (*perf_irq)(unsigned long, struct pt_regs *);
diff --git a/include/asm-alpha/machvec.h b/include/asm-alpha/machvec.h
index 14be8d58b..2c2f82bbd 100644
--- a/include/asm-alpha/machvec.h
+++ b/include/asm-alpha/machvec.h
@@ -78,11 +78,11 @@ struct alpha_machine_vector
void (*device_interrupt)(unsigned long vector, struct pt_regs *regs);
void (*machine_check)(u64 vector, u64 la, struct pt_regs *regs);
- void (*init_arch)(unsigned long *, unsigned long *);
+ void (*init_arch)(void);
void (*init_irq)(void);
void (*init_pit)(void);
void (*init_pci)(void);
- void (*kill_arch)(int, char *);
+ void (*kill_arch)(int);
u8 (*pci_swizzle)(struct pci_dev *, u8 *);
int (*pci_map_irq)(struct pci_dev *, u8, u8);
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 64d550938..18e316751 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -160,6 +160,8 @@ ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, long cpu)
current->thread.ptbr
= ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
+
+ __reload_thread(&current->thread);
}
__EXTERN_INLINE void
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
index 8011c5859..17c61130a 100644
--- a/include/asm-alpha/page.h
+++ b/include/asm-alpha/page.h
@@ -19,7 +19,7 @@
* results in clearer kernel profiles as we see _who_ is
* doing page clearing or copying.
*/
-static inline void clear_page(unsigned long page)
+static inline void clear_page(void * page)
{
unsigned long count = PAGE_SIZE/64;
unsigned long *ptr = (unsigned long *)page;
@@ -38,7 +38,7 @@ static inline void clear_page(unsigned long page)
} while (count);
}
-static inline void copy_page(unsigned long _to, unsigned long _from)
+static inline void copy_page(void * _to, void * _from)
{
unsigned long count = PAGE_SIZE/64;
unsigned long *to = (unsigned long *)_to;
diff --git a/include/asm-alpha/pgalloc.h b/include/asm-alpha/pgalloc.h
new file mode 100644
index 000000000..5ea0193be
--- /dev/null
+++ b/include/asm-alpha/pgalloc.h
@@ -0,0 +1,337 @@
+#ifndef _ALPHA_PGALLOC_H
+#define _ALPHA_PGALLOC_H
+
+#include <linux/config.h>
+
+/* Caches aren't brain-dead on the Alpha. */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_range(mm, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr) do { } while (0)
+#define flush_page_to_ram(page) do { } while (0)
+#define flush_icache_range(start, end) do { } while (0)
+
+/*
+ * Use a few helper functions to hide the ugly broken ASN
+ * numbers on early Alphas (ev4 and ev45)
+ */
+
+#ifndef __EXTERN_INLINE
+#define __EXTERN_INLINE extern inline
+#define __MMU_EXTERN_INLINE
+#endif
+
+__EXTERN_INLINE void
+ev4_flush_tlb_current(struct mm_struct *mm)
+{
+ tbiap();
+}
+
+__EXTERN_INLINE void
+ev4_flush_tlb_other(struct mm_struct *mm)
+{
+}
+
+extern void ev5_flush_tlb_current(struct mm_struct *mm);
+
+__EXTERN_INLINE void
+ev5_flush_tlb_other(struct mm_struct *mm)
+{
+ mm->context = 0;
+}
+
+/*
+ * Flush just one page in the current TLB set.
+ * We need to be very careful about the icache here, there
+ * is no way to invalidate a specific icache page..
+ */
+
+__EXTERN_INLINE void
+ev4_flush_tlb_current_page(struct mm_struct * mm,
+ struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
+}
+
+__EXTERN_INLINE void
+ev5_flush_tlb_current_page(struct mm_struct * mm,
+ struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ if (vma->vm_flags & VM_EXEC)
+ ev5_flush_tlb_current(mm);
+ else
+ tbi(2, addr);
+}
+
+
+#ifdef CONFIG_ALPHA_GENERIC
+# define flush_tlb_current alpha_mv.mv_flush_tlb_current
+# define flush_tlb_other alpha_mv.mv_flush_tlb_other
+# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
+#else
+# ifdef CONFIG_ALPHA_EV4
+# define flush_tlb_current ev4_flush_tlb_current
+# define flush_tlb_other ev4_flush_tlb_other
+# define flush_tlb_current_page ev4_flush_tlb_current_page
+# else
+# define flush_tlb_current ev5_flush_tlb_current
+# define flush_tlb_other ev5_flush_tlb_other
+# define flush_tlb_current_page ev5_flush_tlb_current_page
+# endif
+#endif
+
+#ifdef __MMU_EXTERN_INLINE
+#undef __EXTERN_INLINE
+#undef __MMU_EXTERN_INLINE
+#endif
+
+/*
+ * Flush current user mapping.
+ */
+static inline void flush_tlb(void)
+{
+ flush_tlb_current(current->mm);
+}
+
+#ifndef __SMP__
+/*
+ * Flush everything (kernel mapping may also have
+ * changed due to vmalloc/vfree)
+ */
+static inline void flush_tlb_all(void)
+{
+ tbia();
+}
+
+/*
+ * Flush a specified user mapping
+ */
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (mm != current->mm)
+ flush_tlb_other(mm);
+ else
+ flush_tlb_current(mm);
+}
+
+/*
+ * Page-granular tlb flush.
+ *
+ * do a tbisd (type = 2) normally, and a tbis (type = 3)
+ * if it is an executable mapping. We want to avoid the
+ * itlb flush, because that potentially also does a
+ * icache flush.
+ */
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct mm_struct * mm = vma->vm_mm;
+
+ if (mm != current->mm)
+ flush_tlb_other(mm);
+ else
+ flush_tlb_current_page(mm, vma, addr);
+}
+
+/*
+ * Flush a specified range of user mapping: on the
+ * Alpha we flush the whole user tlb.
+ */
+static inline void flush_tlb_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ flush_tlb_mm(mm);
+}
+
+#else /* __SMP__ */
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);
+
+#endif /* __SMP__ */
+
+/*
+ * Allocate and free page tables. The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASN bits
+ * if any.
+ */
+#ifndef __SMP__
+extern struct pgtable_cache_struct {
+ unsigned long *pgd_cache;
+ unsigned long *pte_cache;
+ unsigned long pgtable_cache_sz;
+} quicklists;
+#else
+#include <asm/smp.h>
+#define quicklists cpu_data[smp_processor_id()]
+#endif
+#define pgd_quicklist (quicklists.pgd_cache)
+#define pmd_quicklist ((unsigned long *)0)
+#define pte_quicklist (quicklists.pte_cache)
+#define pgtable_cache_size (quicklists.pgtable_cache_sz)
+
+extern __inline__ pgd_t *get_pgd_slow(void)
+{
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
+
+ if (ret) {
+ init = pgd_offset(&init_mm, 0UL);
+ memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+
+ pgd_val(ret[PTRS_PER_PGD])
+ = pte_val(mk_pte(mem_map + MAP_NR(ret), PAGE_KERNEL));
+ }
+ return ret;
+}
+
+extern __inline__ pgd_t *get_pgd_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = pgd_quicklist) != NULL) {
+ pgd_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ } else
+ ret = (unsigned long *)get_pgd_slow();
+ return (pgd_t *)ret;
+}
+
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
+{
+ *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pgd_slow(pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+
+extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long address_premasked);
+
+extern __inline__ pmd_t *get_pmd_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ }
+ return (pmd_t *)ret;
+}
+
+extern __inline__ void free_pmd_fast(pmd_t *pmd)
+{
+ *(unsigned long *)pmd = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pmd;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pmd_slow(pmd_t *pmd)
+{
+ free_page((unsigned long)pmd);
+}
+
+extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
+
+extern __inline__ pte_t *get_pte_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ }
+ return (pte_t *)ret;
+}
+
+extern __inline__ void free_pte_fast(pte_t *pte)
+{
+ *(unsigned long *)pte = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pte;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pte_slow(pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+extern void __bad_pte(pmd_t *pmd);
+extern void __bad_pmd(pgd_t *pgd);
+
+#define pte_free_kernel(pte) free_pte_fast(pte)
+#define pte_free(pte) free_pte_fast(pte)
+#define pmd_free_kernel(pmd) free_pmd_fast(pmd)
+#define pmd_free(pmd) free_pmd_fast(pmd)
+#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_alloc() get_pgd_fast()
+
+extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
+{
+ address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+ if (pmd_none(*pmd)) {
+ pte_t *page = get_pte_fast();
+
+ if (!page)
+ return get_pte_slow(pmd, address);
+ pmd_set(pmd, page);
+ return page + address;
+ }
+ if (pmd_bad(*pmd)) {
+ __bad_pte(pmd);
+ return NULL;
+ }
+ return (pte_t *) pmd_page(*pmd) + address;
+}
+
+extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
+{
+ address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+ if (pgd_none(*pgd)) {
+ pmd_t *page = get_pmd_fast();
+
+ if (!page)
+ return get_pmd_slow(pgd, address);
+ pgd_set(pgd, page);
+ return page + address;
+ }
+ if (pgd_bad(*pgd)) {
+ __bad_pmd(pgd);
+ return NULL;
+ }
+ return (pmd_t *) pgd_page(*pgd) + address;
+}
+
+#define pte_alloc_kernel pte_alloc
+#define pmd_alloc_kernel pmd_alloc
+
+extern int do_check_pgt_cache(int, int);
+
+extern inline void set_pgdir(unsigned long address, pgd_t entry)
+{
+ struct task_struct * p;
+ pgd_t *pgd;
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (!p->mm)
+ continue;
+ *pgd_offset(p->mm,address) = entry;
+ }
+ read_unlock(&tasklist_lock);
+ for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+ pgd[(address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)] = entry;
+}
+
+#endif /* _ALPHA_PGALLOC_H */
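
The get_*_fast/free_*_fast routines above implement a quicklist: a freed page's first word doubles as the next pointer of an intrusive singly linked free list, making allocation and free O(1) with no separate bookkeeping structure. A user-space sketch of the pattern (names and the page size are illustrative; calloc stands in for the slow-path page allocator):

#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SIZE 8192

static unsigned long *quicklist;   /* head of the free list */
static unsigned long cache_size;

static void *get_page_fast(void)
{
        unsigned long *ret = quicklist;
        if (ret) {
                quicklist = (unsigned long *)*ret;  /* pop: next link lives in word 0 */
                ret[0] = ret[1];                    /* scrub the link, as the kernel code does */
                cache_size--;
                return ret;
        }
        return calloc(1, FAKE_PAGE_SIZE);           /* slow path: really allocate */
}

static void free_page_fast(void *page)
{
        *(unsigned long *)page = (unsigned long)quicklist;  /* push onto the list */
        quicklist = page;
        cache_size++;
}

int main(void)
{
        void *a = get_page_fast();
        free_page_fast(a);
        void *b = get_page_fast();   /* recycled from the quicklist */
        printf("recycled: %d, still cached: %lu\n", a == b, cache_size);  /* 1, 0 */
        return 0;
}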
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index cc59d36df..a627d50ba 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -9,165 +9,11 @@
* in <asm/page.h> (currently 8192).
*/
#include <linux/config.h>
-#include <linux/spinlock.h> /* For the task lock */
-#include <asm/system.h>
+#include <asm/page.h>
#include <asm/processor.h> /* For TASK_SIZE */
-#include <asm/mmu_context.h>
#include <asm/machvec.h>
-
-/* Caches aren't brain-dead on the Alpha. */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_range(mm, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr) do { } while (0)
-#define flush_page_to_ram(page) do { } while (0)
-#define flush_icache_range(start, end) do { } while (0)
-
-/*
- * Use a few helper functions to hide the ugly broken ASN
- * numbers on early Alphas (ev4 and ev45)
- */
-
-#ifndef __EXTERN_INLINE
-#define __EXTERN_INLINE extern inline
-#define __MMU_EXTERN_INLINE
-#endif
-
-__EXTERN_INLINE void
-ev4_flush_tlb_current(struct mm_struct *mm)
-{
- tbiap();
-}
-
-__EXTERN_INLINE void
-ev4_flush_tlb_other(struct mm_struct *mm)
-{
-}
-
-extern void ev5_flush_tlb_current(struct mm_struct *mm);
-
-__EXTERN_INLINE void
-ev5_flush_tlb_other(struct mm_struct *mm)
-{
- mm->context = 0;
-}
-
-/*
- * Flush just one page in the current TLB set.
- * We need to be very careful about the icache here, there
- * is no way to invalidate a specific icache page..
- */
-
-__EXTERN_INLINE void
-ev4_flush_tlb_current_page(struct mm_struct * mm,
- struct vm_area_struct *vma,
- unsigned long addr)
-{
- tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
-}
-
-__EXTERN_INLINE void
-ev5_flush_tlb_current_page(struct mm_struct * mm,
- struct vm_area_struct *vma,
- unsigned long addr)
-{
- if (vma->vm_flags & VM_EXEC)
- ev5_flush_tlb_current(mm);
- else
- tbi(2, addr);
-}
-
-
-#ifdef CONFIG_ALPHA_GENERIC
-# define flush_tlb_current alpha_mv.mv_flush_tlb_current
-# define flush_tlb_other alpha_mv.mv_flush_tlb_other
-# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
-#else
-# ifdef CONFIG_ALPHA_EV4
-# define flush_tlb_current ev4_flush_tlb_current
-# define flush_tlb_other ev4_flush_tlb_other
-# define flush_tlb_current_page ev4_flush_tlb_current_page
-# else
-# define flush_tlb_current ev5_flush_tlb_current
-# define flush_tlb_other ev5_flush_tlb_other
-# define flush_tlb_current_page ev5_flush_tlb_current_page
-# endif
-#endif
-
-#ifdef __MMU_EXTERN_INLINE
-#undef __EXTERN_INLINE
-#undef __MMU_EXTERN_INLINE
-#endif
-
-/*
- * Flush current user mapping.
- */
-static inline void flush_tlb(void)
-{
- flush_tlb_current(current->mm);
-}
-
-#ifndef __SMP__
-/*
- * Flush everything (kernel mapping may also have
- * changed due to vmalloc/vfree)
- */
-static inline void flush_tlb_all(void)
-{
- tbia();
-}
-
-/*
- * Flush a specified user mapping
- */
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
- if (mm != current->mm)
- flush_tlb_other(mm);
- else
- flush_tlb_current(mm);
-}
-
-/*
- * Page-granular tlb flush.
- *
- * do a tbisd (type = 2) normally, and a tbis (type = 3)
- * if it is an executable mapping. We want to avoid the
- * itlb flush, because that potentially also does a
- * icache flush.
- */
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
-{
- struct mm_struct * mm = vma->vm_mm;
-
- if (mm != current->mm)
- flush_tlb_other(mm);
- else
- flush_tlb_current_page(mm, vma, addr);
-}
-
-/*
- * Flush a specified range of user mapping: on the
- * Alpha we flush the whole user tlb.
- */
-static inline void flush_tlb_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- flush_tlb_mm(mm);
-}
-
-#else /* __SMP__ */
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);
-
-#endif /* __SMP__ */
-
/* Certain architectures need to do special things when PTEs
* within a page table are directly modified. Thus, the following
* hook is made available.
@@ -293,7 +139,7 @@ extern unsigned long __zero_page(void);
#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
-#define ZERO_PAGE(vaddr) (PAGE_OFFSET+0x30A000)
+#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(ZERO_PGE))
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))
@@ -330,8 +176,14 @@ extern unsigned long __zero_page(void);
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
-{ pte_t pte; pte_val(pte) = ((page-PAGE_OFFSET) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }
+#define mk_pte(page, pgprot) \
+({ \
+ pte_t pte; \
+ \
+ pte_val(pte) = ((unsigned long)(page - mem_map) << 32) | \
+ pgprot_val(pgprot); \
+ pte; \
+})
extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpage) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }
@@ -345,8 +197,8 @@ extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
-extern inline unsigned long pte_page(pte_t pte)
-{ return PAGE_OFFSET + ((pte_val(pte) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
+#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> 32)))
+#define pte_page(x) (mem_map+pte_pagenr(x))
extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
@@ -368,6 +220,8 @@ extern inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & ~_PFN_MASK) != _P
extern inline int pgd_present(pgd_t pgd) { return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }
+#define page_address(page) ((page)->virtual)
+
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
@@ -395,10 +249,8 @@ extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; retu
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/* to find an entry in a page-table-directory. */
-extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
-{
- return mm->pgd + ((address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1));
-}
+#define __pgd_offset(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pgd_offset(mm, address) ((mm)->pgd+__pgd_offset(address))
/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
@@ -412,186 +264,6 @@ extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
}
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
-#ifndef __SMP__
-extern struct pgtable_cache_struct {
- unsigned long *pgd_cache;
- unsigned long *pte_cache;
- unsigned long pgtable_cache_sz;
-} quicklists;
-#else
-#include <asm/smp.h>
-#define quicklists cpu_data[smp_processor_id()]
-#endif
-#define pgd_quicklist (quicklists.pgd_cache)
-#define pmd_quicklist ((unsigned long *)0)
-#define pte_quicklist (quicklists.pte_cache)
-#define pgtable_cache_size (quicklists.pgtable_cache_sz)
-
-extern __inline__ pgd_t *get_pgd_slow(void)
-{
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
-
- if (ret) {
- init = pgd_offset(&init_mm, 0);
- memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-
- pgd_val(ret[PTRS_PER_PGD])
- = pte_val(mk_pte((unsigned long)ret, PAGE_KERNEL));
- }
- return ret;
-}
-
-extern __inline__ pgd_t *get_pgd_fast(void)
-{
- unsigned long *ret;
-
- if((ret = pgd_quicklist) != NULL) {
- pgd_quicklist = (unsigned long *)(*ret);
- ret[0] = ret[1];
- pgtable_cache_size--;
- } else
- ret = (unsigned long *)get_pgd_slow();
- return (pgd_t *)ret;
-}
-
-extern __inline__ void free_pgd_fast(pgd_t *pgd)
-{
- *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
- pgd_quicklist = (unsigned long *) pgd;
- pgtable_cache_size++;
-}
-
-extern __inline__ void free_pgd_slow(pgd_t *pgd)
-{
- free_page((unsigned long)pgd);
-}
-
-extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long address_premasked);
-
-extern __inline__ pmd_t *get_pmd_fast(void)
-{
- unsigned long *ret;
-
- if((ret = (unsigned long *)pte_quicklist) != NULL) {
- pte_quicklist = (unsigned long *)(*ret);
- ret[0] = ret[1];
- pgtable_cache_size--;
- }
- return (pmd_t *)ret;
-}
-
-extern __inline__ void free_pmd_fast(pmd_t *pmd)
-{
- *(unsigned long *)pmd = (unsigned long) pte_quicklist;
- pte_quicklist = (unsigned long *) pmd;
- pgtable_cache_size++;
-}
-
-extern __inline__ void free_pmd_slow(pmd_t *pmd)
-{
- free_page((unsigned long)pmd);
-}
-
-extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
-
-extern __inline__ pte_t *get_pte_fast(void)
-{
- unsigned long *ret;
-
- if((ret = (unsigned long *)pte_quicklist) != NULL) {
- pte_quicklist = (unsigned long *)(*ret);
- ret[0] = ret[1];
- pgtable_cache_size--;
- }
- return (pte_t *)ret;
-}
-
-extern __inline__ void free_pte_fast(pte_t *pte)
-{
- *(unsigned long *)pte = (unsigned long) pte_quicklist;
- pte_quicklist = (unsigned long *) pte;
- pgtable_cache_size++;
-}
-
-extern __inline__ void free_pte_slow(pte_t *pte)
-{
- free_page((unsigned long)pte);
-}
-
-extern void __bad_pte(pmd_t *pmd);
-extern void __bad_pmd(pgd_t *pgd);
-
-#define pte_free_kernel(pte) free_pte_fast(pte)
-#define pte_free(pte) free_pte_fast(pte)
-#define pmd_free_kernel(pmd) free_pmd_fast(pmd)
-#define pmd_free(pmd) free_pmd_fast(pmd)
-#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc() get_pgd_fast()
-
-extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
-{
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
- if (pmd_none(*pmd)) {
- pte_t *page = get_pte_fast();
-
- if (!page)
- return get_pte_slow(pmd, address);
- pmd_set(pmd, page);
- return page + address;
- }
- if (pmd_bad(*pmd)) {
- __bad_pte(pmd);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + address;
-}
-
-extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
-{
- address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
- if (pgd_none(*pgd)) {
- pmd_t *page = get_pmd_fast();
-
- if (!page)
- return get_pmd_slow(pgd, address);
- pgd_set(pgd, page);
- return page + address;
- }
- if (pgd_bad(*pgd)) {
- __bad_pmd(pgd);
- return NULL;
- }
- return (pmd_t *) pgd_page(*pgd) + address;
-}
-
-#define pte_alloc_kernel pte_alloc
-#define pmd_alloc_kernel pmd_alloc
-
-extern int do_check_pgt_cache(int, int);
-
-extern inline void set_pgdir(unsigned long address, pgd_t entry)
-{
- struct task_struct * p;
- pgd_t *pgd;
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- if (!p->mm)
- continue;
- *pgd_offset(p->mm,address) = entry;
- }
- read_unlock(&tasklist_lock);
- for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
- pgd[(address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)] = entry;
-}
-
extern pgd_t swapper_pg_dir[1024];
/*
@@ -610,9 +282,11 @@ extern inline void update_mmu_cache(struct vm_area_struct * vma,
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }
-#define SWP_TYPE(entry) (((entry) >> 32) & 0xff)
-#define SWP_OFFSET(entry) ((entry) >> 40)
-#define SWP_ENTRY(type,offset) pte_val(mk_swap_pte((type),(offset)))
+#define SWP_TYPE(x) (((x).val >> 32) & 0xff)
+#define SWP_OFFSET(x) ((x).val >> 40)
+#define SWP_ENTRY(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })
+#define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define swp_entry_to_pte(x) ((pte_t) { (x).val })
#define module_map vmalloc
#define module_unmap vfree
@@ -624,4 +298,11 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define io_remap_page_range(start, busaddr, size, prot) \
remap_page_range(start, virt_to_phys(__ioremap(busaddr)), size, prot)
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+
#endif /* _ALPHA_PGTABLE_H */
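
The reworked mk_pte/pte_page pair above stores the page frame number as a mem_map index in the pte's upper 32 bits, with the protection bits below, so the two macros are exact inverses of each other. A simplified stand-alone model (the struct, array size, and protection value 0x111 are all illustrative):

#include <stdio.h>

struct page { int dummy; };
static struct page mem_map[16];

/* cf. mk_pte: mem_map index in bits 63:32, protection bits below */
#define mk_pte_val(page, prot) \
        (((unsigned long)((page) - mem_map) << 32) | (prot))
#define pte_pagenr(v)  ((unsigned long)((v) >> 32))
#define pte_page(v)    (mem_map + pte_pagenr(v))

int main(void)
{
        unsigned long pte = mk_pte_val(&mem_map[5], 0x111UL);
        printf("pfn=%lu prot=%#lx roundtrip=%d\n",
               pte_pagenr(pte), pte & 0xffffffffUL,
               pte_page(pte) == &mem_map[5]);  /* pfn=5 prot=0x111 roundtrip=1 */
        return 0;
}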
diff --git a/include/asm-alpha/posix_types.h b/include/asm-alpha/posix_types.h
index 50967ef96..357a44704 100644
--- a/include/asm-alpha/posix_types.h
+++ b/include/asm-alpha/posix_types.h
@@ -12,6 +12,7 @@ typedef unsigned int __kernel_ino_t;
typedef unsigned int __kernel_mode_t;
typedef unsigned int __kernel_nlink_t;
typedef long __kernel_off_t;
+typedef long __kernel_loff_t;
typedef int __kernel_pid_t;
typedef int __kernel_ipc_pid_t;
typedef unsigned int __kernel_uid_t;
@@ -26,10 +27,6 @@ typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;
typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
-#ifdef __GNUC__
-typedef long long __kernel_loff_t;
-#endif
-
typedef struct {
int val[2];
} __kernel_fsid_t;
diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h
index 9f55ea0c3..092f96fab 100644
--- a/include/asm-alpha/processor.h
+++ b/include/asm-alpha/processor.h
@@ -127,13 +127,18 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
#define forget_segments() do { } while (0)
unsigned long get_wchan(struct task_struct *p);
-/*
-* See arch/alpha/kernel/ptrace.c for details.
-*/
-#define PT_REG(reg) (PAGE_SIZE - sizeof(struct pt_regs) \
- + (long)&((struct pt_regs *)0)->reg)
+
+/* See arch/alpha/kernel/ptrace.c for details. */
+#define PT_REG(reg) (PAGE_SIZE*2 - sizeof(struct pt_regs) \
+ + (long)&((struct pt_regs *)0)->reg)
+
+#define SW_REG(reg) (PAGE_SIZE*2 - sizeof(struct pt_regs) \
+ - sizeof(struct switch_stack) \
+ + (long)&((struct switch_stack *)0)->reg)
+
#define KSTK_EIP(tsk) \
- (*(unsigned long *)(PT_REG(pc) + PAGE_SIZE + (unsigned long)(tsk)))
+ (*(unsigned long *)(PT_REG(pc) + (unsigned long)(tsk)))
+
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
/* NOTE: The task struct and the stack go together! */
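
The doubled constant reflects the note just above: the task struct and its kernel stack share a single 2*PAGE_SIZE block, with the saved pt_regs frame at the very top, so each saved register sits at a fixed offset from the task pointer. A sketch of the offset arithmetic with toy sizes (Alpha's real pt_regs is much larger than this three-register stand-in):

#include <stdio.h>
#include <stddef.h>

#define FAKE_PAGE_SIZE 8192
struct fake_pt_regs { unsigned long r0, pc, ps; };  /* illustrative frame */

/* cf. PT_REG(): offset of a saved register from the task pointer */
#define PT_REG(reg) (FAKE_PAGE_SIZE*2 - sizeof(struct fake_pt_regs) \
                     + offsetof(struct fake_pt_regs, reg))

int main(void)
{
        /* the frame sits at the top of the 16K block, so the saved pc
         * is found just below the block's end */
        printf("saved pc at task + %zu (block size %d)\n",
               PT_REG(pc), FAKE_PAGE_SIZE*2);  /* 16368 of 16384 */
        return 0;
}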
diff --git a/include/asm-alpha/resource.h b/include/asm-alpha/resource.h
index 234cbfa2f..0f2ddd20c 100644
--- a/include/asm-alpha/resource.h
+++ b/include/asm-alpha/resource.h
@@ -18,6 +18,13 @@
#define RLIM_NLIMITS 10
+/*
+ * SuS says limits have to be unsigned. Fine, it's unsigned, but
+ * we retain the old value for compatibility, especially with DU.
+ * When you run into the 2^63 barrier, you call me.
+ */
+#define RLIM_INFINITY 0x7ffffffffffffffful
+
#ifdef __KERNEL__
#define INIT_RLIMITS \
diff --git a/include/asm-alpha/sfp-machine.h b/include/asm-alpha/sfp-machine.h
new file mode 100644
index 000000000..8adc0e74c
--- /dev/null
+++ b/include/asm-alpha/sfp-machine.h
@@ -0,0 +1,84 @@
+/* Machine-dependent software floating-point definitions.
+ Alpha kernel version.
+ Copyright (C) 1997,1998,1999 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson (rth@cygnus.com),
+ Jakub Jelinek (jakub@redhat.com) and
+ David S. Miller (davem@redhat.com).
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If
+ not, write to the Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+#ifndef _SFP_MACHINE_H
+#define _SFP_MACHINE_H
+
+#define _FP_W_TYPE_SIZE 64
+#define _FP_W_TYPE unsigned long
+#define _FP_WS_TYPE signed long
+#define _FP_I_TYPE long
+
+#define _FP_MUL_MEAT_S(R,X,Y) \
+ _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S,R,X,Y)
+#define _FP_MUL_MEAT_D(R,X,Y) \
+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_Q(R,X,Y) \
+ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
+#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv(D,R,X,Y)
+#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y)
+
+#define _FP_NANFRAC_S _FP_QNANBIT_S
+#define _FP_NANFRAC_D _FP_QNANBIT_D
+#define _FP_NANFRAC_Q _FP_QNANBIT_Q
+#define _FP_NANSIGN_S 1
+#define _FP_NANSIGN_D 1
+#define _FP_NANSIGN_Q 1
+
+#define _FP_KEEPNANFRACP 1
+
+/* Alpha Architecture Handbook, 4.7.10.4 says that
+ * we should prefer any type of NaN in Fb, then Fa.
+ */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
+ do { \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R,X); \
+ R##_c = FP_CLS_NAN; \
+ } while (0)
+
+/* Obtain the current rounding mode. */
+#define FP_ROUNDMODE mode
+#define FP_RND_NEAREST (FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT)
+#define FP_RND_ZERO (FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT)
+#define FP_RND_PINF (FPCR_DYN_PLUS >> FPCR_DYN_SHIFT)
+#define FP_RND_MINF (FPCR_DYN_MINUS >> FPCR_DYN_SHIFT)
+
+/* Exception flags. */
+#define FP_EX_INVALID IEEE_TRAP_ENABLE_INV
+#define FP_EX_OVERFLOW IEEE_TRAP_ENABLE_OVF
+#define FP_EX_UNDERFLOW IEEE_TRAP_ENABLE_UNF
+#define FP_EX_DIVZERO IEEE_TRAP_ENABLE_DZE
+#define FP_EX_INEXACT IEEE_TRAP_ENABLE_INE
+#define FP_EX_DENORM IEEE_TRAP_ENABLE_DNO
+
+#define FP_DENORM_ZERO (fpcw & IEEE_MAP_DMZ)
+
+#define FP_HANDLE_EXCEPTIONS return _fex
+
+/* We always write the results. */
+#define FP_INHIBIT_RESULTS 0
+
+#endif
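FP_ROUNDMODE above expands to a bare `mode`, so the soft-fp caller is
expected to supply that variable with the FPCR dynamic rounding field
already shifted into the low bits, matching the FP_RND_* encodings. A
hedged sketch of how a caller might derive it (rdfpcr() and FPCR_DYN_MASK
are assumed to come from asm/fpu.h):

	/* Assumption for illustration: read the 21264 FPCR and isolate
	 * the dynamic rounding mode so that it compares equal to
	 * FP_RND_NEAREST, FP_RND_ZERO, etc. as defined above. */
	unsigned long fpcr = rdfpcr();
	unsigned long mode = (fpcr & FPCR_DYN_MASK) >> FPCR_DYN_SHIFT;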
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 880f0f4bb..d298c51db 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -86,6 +86,30 @@ struct el_common_EV5_uncorrectable_mcheck {
unsigned long ld_lock; /* Contents of EV5 LD_LOCK register*/
};
+struct el_common_EV6_mcheck {
+ unsigned int FrameSize; /* Bytes, including this field */
+ unsigned int FrameFlags; /* <31> = Retry, <30> = Second Error */
+ unsigned int CpuOffset; /* Offset to CPU-specific info */
+ unsigned int SystemOffset; /* Offset to system-specific info */
+ unsigned int MCHK_Code;
+ unsigned int MCHK_Frame_Rev;
+ unsigned long I_STAT; /* EV6 Internal Processor Registers */
+ unsigned long DC_STAT; /* (See the 21264 Spec) */
+ unsigned long C_ADDR;
+ unsigned long DC1_SYNDROME;
+ unsigned long DC0_SYNDROME;
+ unsigned long C_STAT;
+ unsigned long C_STS;
+ unsigned long RESERVED0;
+ unsigned long EXC_ADDR;
+ unsigned long IER_CM;
+ unsigned long ISUM;
+ unsigned long MM_STAT;
+ unsigned long PAL_BASE;
+ unsigned long I_CTL;
+ unsigned long PCTX;
+};
+
extern void halt(void) __attribute__((noreturn));
#define prepare_to_switch() do { } while(0)
@@ -293,12 +317,11 @@ extern void __global_restore_flags(unsigned long flags);
#define tbia() __tbi(-2, /* no second argument */)
/*
- * Give prototypes to shut up gcc.
+ * Atomic exchange.
*/
-extern __inline__ unsigned long xchg_u32(volatile int *m, unsigned long val);
-extern __inline__ unsigned long xchg_u64(volatile long *m, unsigned long val);
-extern __inline__ unsigned long xchg_u32(volatile int *m, unsigned long val)
+extern __inline__ unsigned long
+__xchg_u32(volatile int *m, unsigned long val)
{
unsigned long dummy;
@@ -317,7 +340,8 @@ extern __inline__ unsigned long xchg_u32(volatile int *m, unsigned long val)
return val;
}
-extern __inline__ unsigned long xchg_u64(volatile long * m, unsigned long val)
+extern __inline__ unsigned long
+__xchg_u64(volatile long *m, unsigned long val)
{
unsigned long dummy;
@@ -336,33 +360,109 @@ extern __inline__ unsigned long xchg_u64(volatile long * m, unsigned long val)
return val;
}
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid xchg().
- *
- * This only works if the compiler isn't horribly bad at optimizing.
- * gcc-2.5.8 reportedly can't handle this, but as that doesn't work
- * too well on the alpha anyway..
- */
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid xchg(). */
extern void __xchg_called_with_bad_pointer(void);
static __inline__ unsigned long
-__xchg(unsigned long x, volatile void * ptr, int size)
+__xchg(volatile void *ptr, unsigned long x, int size)
{
switch (size) {
case 4:
- return xchg_u32(ptr, x);
+ return __xchg_u32(ptr, x);
case 8:
- return xchg_u64(ptr, x);
+ return __xchg_u64(ptr, x);
}
__xchg_called_with_bad_pointer();
return x;
}
-#define xchg(ptr,x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr,x) \
+ ({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+ })
+
#define tas(ptr) (xchg((ptr),1))
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+extern __inline__ unsigned long
+__cmpxchg_u32(volatile int *m, int old, int new)
+{
+ unsigned long prev, cmp;
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%2\n"
+ " cmpeq %0,%3,%1\n"
+ " beq %1,2f\n"
+ " mov %4,%1\n"
+ " stl_c %1,%2\n"
+ " beq %1,3f\n"
+ "2: mb\n"
+ ".section .text2,\"ax\"\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r"(prev), "=&r"(cmp), "=m"(*m)
+ : "r"((long) old), "r"(new), "m"(*m));
+
+ return prev;
+}
+
+extern __inline__ unsigned long
+__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
+{
+ unsigned long prev, cmp;
+
+ __asm__ __volatile__(
+ "1: ldq_l %0,%2\n"
+ " cmpeq %0,%3,%1\n"
+ " beq %1,2f\n"
+ " mov %4,%1\n"
+ " stq_c %1,%2\n"
+ " beq %1,3f\n"
+ "2: mb\n"
+ ".section .text2,\"ax\"\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r"(prev), "=&r"(cmp), "=m"(*m)
+ : "r"((long) old), "r"(new), "m"(*m));
+
+ return prev;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+ case 8:
+ return __cmpxchg_u64(ptr, old, new);
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr,o,n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
+
#endif /* __ASSEMBLY__ */
#endif
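The header comment spells out the cmpxchg contract: success means the
returned value equals the expected old value. A minimal caller sketch, not
part of the patch, showing the conventional retry loop built on it, plus
tas() as a trivial test-and-set lock (both function names are
illustrative):

	/* Canonical cmpxchg retry loop; the sizeof dispatch in __cmpxchg
	 * selects __cmpxchg_u64 here because the target is a long. */
	static inline long atomic_add_return_long(volatile long *p, long inc)
	{
		long old, new;
		do {
			old = *p;
			new = old + inc;
		} while (cmpxchg(p, old, new) != old);
		return new;
	}

	/* tas() is xchg(ptr, 1): a nonzero return means the lock was
	 * already held, so spin until we observe it free. */
	static inline void trivial_spin_lock(volatile int *lock)
	{
		while (tas(lock))
			;
	}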
diff --git a/include/asm-alpha/vga.h b/include/asm-alpha/vga.h
index 17e164742..44b6abcc0 100644
--- a/include/asm-alpha/vga.h
+++ b/include/asm-alpha/vga.h
@@ -11,6 +11,7 @@
#define VT_BUF_HAVE_RW
#define VT_BUF_HAVE_MEMSETW
+#define VT_BUF_HAVE_MEMCPYW
#define VT_BUF_HAVE_MEMCPYF
extern inline void scr_writew(u16 val, u16 *addr)
@@ -37,15 +38,10 @@ extern inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
memsetw(s, c, count);
}
-extern inline void scr_memcpyw_from(u16 *d, const u16 *s, unsigned int count)
-{
- memcpy_fromio(d, s, count);
-}
-
-extern inline void scr_memcpyw_to(u16 *d, const u16 *s, unsigned int count)
-{
- memcpy_toio(d, s, count);
-}
+/* Do not trust that the usage will be correct; analyze the arguments. */
+extern void scr_memcpyw(u16 *d, const u16 *s, unsigned int count);
+#define scr_memcpyw_from scr_memcpyw
+#define scr_memcpyw_to scr_memcpyw
/* ??? These are currently only used for downloading character sets. As
such, they don't need memory barriers. Is this all they are intended