Diffstat (limited to 'include/asm-alpha')
-rw-r--r--  include/asm-alpha/atomic.h            |  43
-rw-r--r--  include/asm-alpha/bitops.h            | 153
-rw-r--r--  include/asm-alpha/core_irongate.h     | 275
-rw-r--r--  include/asm-alpha/elf.h               |   2
-rw-r--r--  include/asm-alpha/fcntl.h             |   9
-rw-r--r--  include/asm-alpha/resource.h          |   4
-rw-r--r--  include/asm-alpha/semaphore-helper.h  |   2
-rw-r--r--  include/asm-alpha/spinlock.h          |  26
-rw-r--r--  include/asm-alpha/system.h            |  58
-rw-r--r--  include/asm-alpha/termios.h           |   1
-rw-r--r--  include/asm-alpha/uaccess.h           |  28
11 files changed, 247 insertions, 354 deletions
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index cc8b6f278..4e8d0c410 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -1,8 +1,6 @@
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H
-#include <linux/config.h>
-
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc...
@@ -11,11 +9,13 @@
* than regular operations.
*/
-#ifdef CONFIG_SMP
+
+/*
+ * Counter is volatile to make sure gcc doesn't try to be clever
+ * and move things around on us. We need to use _exactly_ the address
+ * the user gave us, not some alias that contains the same information.
+ */
typedef struct { volatile int counter; } atomic_t;
-#else
-typedef struct { int counter; } atomic_t;
-#endif
#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
@@ -23,19 +23,12 @@ typedef struct { int counter; } atomic_t;
#define atomic_set(v,i) ((v)->counter = (i))
/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
-
-/*
* To get proper branch prediction for the main line, we must branch
* forward to code at the end of this object's .text section, then
* branch back to restart the operation.
*/
-extern __inline__ void atomic_add(int i, atomic_t * v)
+static __inline__ void atomic_add(int i, atomic_t * v)
{
unsigned long temp;
__asm__ __volatile__(
@@ -46,11 +39,11 @@ extern __inline__ void atomic_add(int i, atomic_t * v)
".subsection 2\n"
"2: br 1b\n"
".previous"
- :"=&r" (temp), "=m" (__atomic_fool_gcc(v))
- :"Ir" (i), "m" (__atomic_fool_gcc(v)));
+ :"=&r" (temp), "=m" (v->counter)
+ :"Ir" (i), "m" (v->counter));
}
-extern __inline__ void atomic_sub(int i, atomic_t * v)
+static __inline__ void atomic_sub(int i, atomic_t * v)
{
unsigned long temp;
__asm__ __volatile__(
@@ -61,14 +54,14 @@ extern __inline__ void atomic_sub(int i, atomic_t * v)
".subsection 2\n"
"2: br 1b\n"
".previous"
- :"=&r" (temp), "=m" (__atomic_fool_gcc(v))
- :"Ir" (i), "m" (__atomic_fool_gcc(v)));
+ :"=&r" (temp), "=m" (v->counter)
+ :"Ir" (i), "m" (v->counter));
}
/*
* Same as above, but return the result value
*/
-extern __inline__ long atomic_add_return(int i, atomic_t * v)
+static __inline__ long atomic_add_return(int i, atomic_t * v)
{
long temp, result;
__asm__ __volatile__(
@@ -81,12 +74,12 @@ extern __inline__ long atomic_add_return(int i, atomic_t * v)
".subsection 2\n"
"2: br 1b\n"
".previous"
- :"=&r" (temp), "=m" (__atomic_fool_gcc(v)), "=&r" (result)
- :"Ir" (i), "m" (__atomic_fool_gcc(v)));
+ :"=&r" (temp), "=m" (v->counter), "=&r" (result)
+ :"Ir" (i), "m" (v->counter) : "memory");
return result;
}
-extern __inline__ long atomic_sub_return(int i, atomic_t * v)
+static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
long temp, result;
__asm__ __volatile__(
@@ -99,8 +92,8 @@ extern __inline__ long atomic_sub_return(int i, atomic_t * v)
".subsection 2\n"
"2: br 1b\n"
".previous"
- :"=&r" (temp), "=m" (__atomic_fool_gcc(v)), "=&r" (result)
- :"Ir" (i), "m" (__atomic_fool_gcc(v)));
+ :"=&r" (temp), "=m" (v->counter), "=&r" (result)
+ :"Ir" (i), "m" (v->counter) : "memory");
return result;
}
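A minimal usage sketch of the atomic_t interface above, assuming a 2.4-era kernel source file; the 'users' counter and the get_ref()/put_ref() helpers are invented for illustration and are not part of the patch:

    #include <asm/atomic.h>

    static atomic_t users = ATOMIC_INIT(0);    /* static initialisation */

    static void get_ref(void)
    {
            atomic_add(1, &users);             /* no value returned */
    }

    static int put_ref(void)
    {
            /* atomic_sub_return() hands back the new count, so the
               caller can tell when the last reference was dropped. */
            return atomic_sub_return(1, &users) == 0;
    }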
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index 69bfdcaf1..649abd02d 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -1,6 +1,9 @@
#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H
+#include <linux/config.h>
+#include <linux/kernel.h>
+
/*
* Copyright 1994, Linus Torvalds.
*/
@@ -17,14 +20,19 @@
* bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
*/
+#define BITOPS_NO_BRANCH
+
extern __inline__ void set_bit(unsigned long nr, volatile void * addr)
{
+#ifndef BITOPS_NO_BRANCH
unsigned long oldbit;
+#endif
unsigned long temp;
unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
+#ifndef BITOPS_NO_BRANCH
__asm__ __volatile__(
- "1: ldl_l %0,%1\n"
+ "1: ldl_l %0,%4\n"
" and %0,%3,%2\n"
" bne %2,2f\n"
" xor %0,%3,%0\n"
@@ -36,16 +44,57 @@ extern __inline__ void set_bit(unsigned long nr, volatile void * addr)
".previous"
:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
:"Ir" (1UL << (nr & 31)), "m" (*m));
+#else
+ __asm__ __volatile__(
+ "1: ldl_l %0,%3\n"
+ " bis %0,%2,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ :"=&r" (temp), "=m" (*m)
+ :"Ir" (1UL << (nr & 31)), "m" (*m));
+#endif
}
+/*
+ * WARNING: non-atomic version.
+ */
+extern __inline__ void __set_bit(unsigned long nr, volatile void * addr)
+{
+ unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
+ /*
+ * Asm and C produce the same thing, so let
+ * the compiler do its good work.
+ */
+#if 0
+ int tmp;
+
+ __asm__ __volatile__(
+ "ldl %0,%3\n\t"
+ "bis %0,%2,%0\n\t"
+ "stl %0,%1"
+ : "=&r" (tmp), "=m" (*m)
+ : "Ir" (1UL << (nr & 31)), "m" (*m));
+#else
+ *m |= 1UL << (nr & 31);
+#endif
+}
+
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
extern __inline__ void clear_bit(unsigned long nr, volatile void * addr)
{
+#ifndef BITOPS_NO_BRANCH
unsigned long oldbit;
+#endif
unsigned long temp;
unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
+#ifndef BITOPS_NO_BRANCH
__asm__ __volatile__(
- "1: ldl_l %0,%1\n"
+ "1: ldl_l %0,%4\n"
" and %0,%3,%2\n"
" beq %2,2f\n"
" xor %0,%3,%0\n"
@@ -57,6 +106,18 @@ extern __inline__ void clear_bit(unsigned long nr, volatile void * addr)
".previous"
:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
:"Ir" (1UL << (nr & 31)), "m" (*m));
+#else
+ __asm__ __volatile__(
+ "1: ldl_l %0,%3\n"
+ " and %0,%2,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ :"=&r" (temp), "=m" (*m)
+ :"Ir" (~(1UL << (nr & 31))), "m" (*m));
+#endif
}
extern __inline__ void change_bit(unsigned long nr, volatile void * addr)
@@ -65,12 +126,12 @@ extern __inline__ void change_bit(unsigned long nr, volatile void * addr)
unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
__asm__ __volatile__(
- "1: ldl_l %0,%1\n"
+ "1: ldl_l %0,%3\n"
" xor %0,%2,%0\n"
" stl_c %0,%1\n"
- " beq %0,3f\n"
+ " beq %0,2f\n"
".subsection 2\n"
- "3: br 1b\n"
+ "2: br 1b\n"
".previous"
:"=&r" (temp), "=m" (*m)
:"Ir" (1UL << (nr & 31)), "m" (*m));
@@ -84,18 +145,43 @@ extern __inline__ int test_and_set_bit(unsigned long nr,
unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
__asm__ __volatile__(
- "1: ldl_l %0,%1\n"
+ "1: ldl_l %0,%4\n"
" and %0,%3,%2\n"
" bne %2,2f\n"
" xor %0,%3,%0\n"
" stl_c %0,%1\n"
" beq %0,3f\n"
+#ifdef CONFIG_SMP
" mb\n"
+#endif
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
+ :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");
+
+ return oldbit != 0;
+}
+
+/*
+ * WARNING: non-atomic version.
+ */
+extern __inline__ int __test_and_set_bit(unsigned long nr,
+ volatile void * addr)
+{
+ unsigned long oldbit;
+ unsigned long temp;
+ unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
+
+ __asm__ __volatile__(
+ " ldl %0,%4\n"
+ " and %0,%3,%2\n"
+ " bne %2,1f\n"
+ " xor %0,%3,%0\n"
+ " stl %0,%1\n"
+ "1:\n"
+ :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
:"Ir" (1UL << (nr & 31)), "m" (*m));
return oldbit != 0;
@@ -109,18 +195,43 @@ extern __inline__ int test_and_clear_bit(unsigned long nr,
unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
__asm__ __volatile__(
- "1: ldl_l %0,%1\n"
+ "1: ldl_l %0,%4\n"
" and %0,%3,%2\n"
" beq %2,2f\n"
" xor %0,%3,%0\n"
" stl_c %0,%1\n"
" beq %0,3f\n"
+#ifdef CONFIG_SMP
" mb\n"
+#endif
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
+ :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");
+
+ return oldbit != 0;
+}
+
+/*
+ * WARNING: non-atomic version.
+ */
+extern __inline__ int __test_and_clear_bit(unsigned long nr,
+ volatile void * addr)
+{
+ unsigned long oldbit;
+ unsigned long temp;
+ unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
+
+ __asm__ __volatile__(
+ " ldl %0,%4\n"
+ " and %0,%3,%2\n"
+ " beq %2,1f\n"
+ " xor %0,%3,%0\n"
+ " stl %0,%1\n"
+ "1:\n"
+ :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
:"Ir" (1UL << (nr & 31)), "m" (*m));
return oldbit != 0;
@@ -134,17 +245,19 @@ extern __inline__ int test_and_change_bit(unsigned long nr,
unsigned int * m = ((unsigned int *) addr) + (nr >> 5);
__asm__ __volatile__(
- "1: ldl_l %0,%1\n"
+ "1: ldl_l %0,%4\n"
" and %0,%3,%2\n"
" xor %0,%3,%0\n"
" stl_c %0,%1\n"
" beq %0,3f\n"
+#ifdef CONFIG_SMP
" mb\n"
+#endif
".subsection 2\n"
"3: br 1b\n"
".previous"
:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
- :"Ir" (1UL << (nr & 31)), "m" (*m));
+ :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");
return oldbit != 0;
}
@@ -175,13 +288,10 @@ extern inline unsigned long ffz_b(unsigned long x)
extern inline unsigned long ffz(unsigned long word)
{
-#if 0 && defined(__alpha_cix__)
- /* Swine architects -- a year after they publish v3 of the
- handbook, in the 21264 data sheet they quietly change CIX
- to FIX and remove the spiffy counting instructions. */
+#if defined(__alpha_cix__) && defined(__alpha_fix__)
/* Whee. EV6 can calculate it directly. */
unsigned long result;
- __asm__("ctlz %1,%0" : "=r"(result) : "r"(~word));
+ __asm__("cttz %1,%0" : "=r"(result) : "r"(~word));
return result;
#else
unsigned long bits, qofs, bofs;
@@ -214,10 +324,7 @@ extern inline int ffs(int word)
* of bits set) of a N-bit word
*/
-#if 0 && defined(__alpha_cix__)
-/* Swine architects -- a year after they publish v3 of the handbook, in
- the 21264 data sheet they quietly change CIX to FIX and remove the
- spiffy counting instructions. */
+#if defined(__alpha_cix__) && defined(__alpha_fix__)
/* Whee. EV6 can calculate it directly. */
extern __inline__ unsigned long hweight64(unsigned long w)
{
@@ -285,16 +392,16 @@ found_middle:
#ifdef __KERNEL__
-#define ext2_set_bit test_and_set_bit
-#define ext2_clear_bit test_and_clear_bit
+#define ext2_set_bit __test_and_set_bit
+#define ext2_clear_bit __test_and_clear_bit
#define ext2_test_bit test_bit
#define ext2_find_first_zero_bit find_first_zero_bit
#define ext2_find_next_zero_bit find_next_zero_bit
/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) __set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
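A sketch contrasting the atomic helpers with the new non-atomic "__" variants that the ext2/minix macros now use; both bitmaps and mark_both() are invented for illustration, assuming a kernel source file:

    #include <asm/bitops.h>

    static unsigned long shared_map;     /* may be updated from several CPUs */
    static unsigned long private_map;    /* already serialised by an outer lock */

    static void mark_both(unsigned long nr)
    {
            set_bit(nr, &shared_map);     /* ldl_l/stl_c loop, safe vs. other CPUs */
            __set_bit(nr, &private_map);  /* plain load/or/store; caller must lock */

            if (test_and_clear_bit(nr, &shared_map))
                    ;                     /* old value was 1; it is 0 again now */
    }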
diff --git a/include/asm-alpha/core_irongate.h b/include/asm-alpha/core_irongate.h
index c97283d89..e2c9fa968 100644
--- a/include/asm-alpha/core_irongate.h
+++ b/include/asm-alpha/core_irongate.h
@@ -83,247 +83,24 @@ typedef struct {
igcsr32 agpmode; /* 0xB0 - AGP/GART mode control */
} Irongate0;
-/* Bitfield and mask register definitions */
-
-/* Device, vendor IDs - offset 0x00 */
-
-typedef union {
- igcsr32 i; /* integer value of CSR */
- struct {
- unsigned v : 16;
- unsigned d : 16;
- } r; /* structured interpretation */
-} ig_dev_vendor_t;
-
-
-/* Status, command registers - offset 0x04 */
-
-typedef union {
- igcsr32 i;
- struct {
- unsigned command;
- unsigned status;
- } s;
- struct {
- /* command register fields */
- unsigned iospc : 1; /* always reads zero */
- unsigned memspc : 1; /* PCI memory space accesses? */
- unsigned iten : 1; /* always 1: can be bus initiator */
- unsigned scmon : 1; /* always 0 special cycles not chckd */
- unsigned mwic : 1; /* always 0 - no mem write & invalid */
- unsigned vgaps : 1; /* always 0 - palette rds not special */
- unsigned per : 1; /* parity error resp: always 0 */
- unsigned step : 1; /* address/data stepping : always 0 */
- unsigned serre : 1; /* 1 = sys err output driver enable */
- unsigned fbbce : 1; /* fast back-back cycle : always 0 */
- unsigned zero1 : 6; /* must be zero */
-
- /* status register fields */
- unsigned zero2 : 4; /* must be zero */
- unsigned cl : 1; /* config space capa list: always 1 */
- unsigned pci66 : 1; /* 66 MHz PCI support - always 0 */
- unsigned udf : 1; /* user defined features - always 0 */
- unsigned fbbc : 1; /* back-back transactions - always 0 */
- unsigned ppe : 1; /* PCI parity error detected (0) */
- unsigned devsel : 2; /* DEVSEL timing (always 01) */
- unsigned sta : 1; /* signalled target abort (0) */
- unsigned rta : 1; /* recvd target abort */
- unsigned ria : 1; /* recvd initiator abort */
- unsigned serr : 1; /* SERR has been asserted */
- unsigned dpe : 1; /* DRAM parity error (0) */
- } r;
-} ig_stat_cmd_t;
-
-
-/* Revision ID, Programming interface, subclass, baseclass - offset 0x08 */
-
-typedef union {
- igcsr32 i;
- struct {
- /* revision ID */
- unsigned step : 4; /* stepping Revision ID */
- unsigned die : 4; /* die Revision ID */
- unsigned pif : 8; /* programming interface (0x00) */
- unsigned sub : 8; /* subclass code (0x00) */
- unsigned base: 8; /* baseclass code (0x06) */
- } r;
-} ig_class_t;
-
-
-/* Latency Timer, PCI Header type - offset 0x0C */
-
-typedef union {
- igcsr32 i;
- struct {
- unsigned zero1:8; /* reserved */
- unsigned lat : 8; /* latency in PCI bus clocks */
- unsigned hdr : 8; /* PCI header type */
- unsigned zero2:8; /* reserved */
- } r;
-} ig_latency_t;
-
-
-/* Base Address Register 0 - offset 0x10 */
-
-typedef union {
- igcsr32 i;
- struct {
- unsigned mem : 1; /* Reg pts to memory (always 0) */
- unsigned type: 2; /* 32 bit register = 0b00 */
- unsigned pref: 1; /* graphics mem prefetchable=1 */
- unsigned baddrl : 21; /* 32M = minimum alloc -> all zero */
- unsigned size : 6; /* size requirements for AGP */
- unsigned zero : 1; /* reserved=0 */
- } r;
-} ig_bar0_t;
-
-
-/* Base Address Register 1 - offset 0x14 */
-
-typedef union {
- igcsr32 i;
- struct {
- unsigned mem : 1; /* BAR0 maps to memory -> 0 */
- unsigned type : 2; /* BAR1 is 32-bit -> 0b00 */
- unsigned pref : 1; /* graphics mem prefetchable=1 */
- unsigned baddrl : 8; /* 4K alloc for AGP CSRs -> 0b00 */
- unsigned baddrh : 20; /* base addr of AGP CSRs A[30:11] */
- } r;
-} ig_bar1_t;
-
-
-/* Base Address Register 2 - offset 0x18 */
-
-typedef union {
- igcsr32 i;
- struct {
- unsigned io : 1; /* BAR2 maps to I/O space -> 1 */
- unsigned zero1: 1; /* reserved */
- unsigned addr : 22; /* BAR2[31:10] - PM2_BLK base */
- unsigned zero2: 8; /* reserved */
- } r;
-} ig_bar2_t;
-
-
-/* Capabilities Pointer - offset 0x34 */
-
-typedef union {
- igcsr32 i;
- struct {
- unsigned cap : 8; /* =0xA0, offset of AGP ctrl regs */
- unsigned zero: 24; /* reserved */
- } r;
-} ig_capptr_t;
-
-
-/* Base Address Chip Select Register 1,0 - offset 0x40 */
-/* Base Address Chip Select Register 3,2 - offset 0x44 */
-/* Base Address Chip Select Register 5,4 - offset 0x48 */
-
-typedef union {
-
- igcsr32 i;
- struct {
- /* lower bank */
- unsigned en0 : 1; /* memory bank enabled */
- unsigned mask0 : 6; /* Address mask for A[28:23] */
- unsigned base0 : 9; /* Bank Base Address A[31:23] */
-
- /* upper bank */
- unsigned en1 : 1; /* memory bank enabled */
- unsigned mask1 : 6; /* Address mask for A[28:23] */
- unsigned base1 : 9; /* Bank Base Address A[31:23] */
- } r;
-} ig_bacsr_t, ig_bacsr10_t, ig_bacsr32_t, ig_bacsr54_t;
-
-
-/* SDRAM Address Mapping Control Register - offset 0x50 */
-
-typedef union {
- igcsr32 i;
- struct {
- unsigned z1 : 1; /* reserved */
- unsigned bnks0: 1; /* 0->2 banks in chip select 0 */
- unsigned am0 : 1; /* row/column addressing */
- unsigned z2 : 1; /* reserved */
-
- unsigned z3 : 1; /* reserved */
- unsigned bnks1: 1; /* 0->2 banks in chip select 1 */
- unsigned am1 : 1; /* row/column addressing */
- unsigned z4 : 1; /* reserved */
-
- unsigned z5 : 1; /* reserved */
- unsigned bnks2: 1; /* 0->2 banks in chip select 2 */
- unsigned am2 : 1; /* row/column addressing */
- unsigned z6 : 1; /* reserved */
-
- unsigned z7 : 1; /* reserved */
- unsigned bnks3: 1; /* 0->2 banks in chip select 3 */
- unsigned am3 : 1; /* row/column addressing */
- unsigned z8 : 1; /* reserved */
-
- unsigned z9 : 1; /* reserved */
- unsigned bnks4: 1; /* 0->2 banks in chip select 4 */
- unsigned am4 : 1; /* row/column addressing */
- unsigned z10 : 1; /* reserved */
-
- unsigned z11 : 1; /* reserved */
- unsigned bnks5: 1; /* 0->2 banks in chip select 5 */
- unsigned am5 : 1; /* row/column addressing */
- unsigned z12 : 1; /* reserved */
-
- unsigned rsrvd: 8; /* reserved */
- } r;
-} ig_drammap_t;
-
-
-/* DRAM timing and driver strength register - offset 0x54 */
-
-typedef union {
- igcsr32 i;
- struct {
- /* DRAM timing parameters */
- unsigned trcd : 2;
- unsigned tcl : 2;
- unsigned tras: 3;
- unsigned trp : 2;
- unsigned trc : 3;
- unsigned icl: 2;
- unsigned ph : 2;
-
- /* Chipselect driver strength */
- unsigned adra : 1;
- unsigned adrb : 1;
- unsigned ctrl : 3;
- unsigned dqm : 1;
- unsigned cs : 1;
- unsigned clk: 1;
- unsigned rsrvd:8;
- } r;
-} ig_dramtm_t;
-
-
-/* DRAM Mode / Status and ECC Register - offset 0x58 */
-
-typedef union {
- igcsr32 i;
- struct {
- unsigned chipsel : 6; /* failing ECC chip select */
- unsigned zero1 : 2; /* always reads zero */
- unsigned status : 2; /* ECC Detect logic status */
- unsigned zero2 : 6; /* always reads zero */
-
- unsigned cycles : 2; /* cycles per refresh, see table */
- unsigned en : 1; /* ECC enable */
- unsigned r : 1; /* Large burst enable (=0) */
- unsigned bre : 1; /* Burst refresh enable */
- unsigned zero3 : 2; /* reserved = 0 */
- unsigned mwe : 1; /* Enable writes to DRAM mode reg */
- unsigned type : 1; /* SDRAM = 0, default */
- unsigned sdraminit : 1; /* SDRAM init - set params first! */
- unsigned zero4 : 6; /* reserved = 0 */
- } r;
-} ig_dramms_t;
+
+typedef struct {
+
+ igcsr32 dev_vendor; /* 0x00 - Device and Vendor IDs */
+ igcsr32 stat_cmd; /* 0x04 - Status and Command regs */
+ igcsr32 class; /* 0x08 - subclass, baseclass etc */
+ igcsr32 htype; /* 0x0C - header type (at 0x0E) */
+ igcsr32 rsrvd0[2]; /* 0x10-0x17 reserved */
+ igcsr32 busnos; /* 0x18 - Primary, secondary bus nos */
+ igcsr32 io_baselim_regs; /* 0x1C - IO base, IO lim, AGP status */
+ igcsr32 mem_baselim; /* 0x20 - memory base, memory lim */
+ igcsr32 pfmem_baselim; /* 0x24 - prefetchable base, lim */
+ igcsr32 rsrvd1[2]; /* 0x28-0x2F reserved */
+ igcsr32 io_baselim; /* 0x30 - IO base, IO limit */
+ igcsr32 rsrvd2[2]; /* 0x34-0x3B - reserved */
+ igcsr32 interrupt; /* 0x3C - interrupt, PCI bridge ctrl */
+
+} Irongate1;
/*
@@ -343,7 +120,21 @@ typedef union {
#define IRONGATE_IO (IDENT_ADDR | IRONGATE_BIAS | 0x1FC000000UL)
#define IRONGATE_CONF (IDENT_ADDR | IRONGATE_BIAS | 0x1FE000000UL)
-#define IRONGATE0 ((Irongate0 *) IRONGATE_CONF)
+/*
+ * PCI Configuration space accesses are formed like so:
+ *
+ * 0x1FE << 24 | : 2 2 2 2 1 1 1 1 : 1 1 1 1 1 1 0 0 : 0 0 0 0 0 0 0 0 :
+ *               : 3 2 1 0 9 8 7 6 : 5 4 3 2 1 0 9 8 : 7 6 5 4 3 2 1 0 :
+ *                 ---bus number---  --device-- -fun-  ---register----
+ */
+
+#define IGCSR(dev,fun,reg) ( IRONGATE_CONF | \
+ ((dev)<<11) | \
+ ((fun)<<8) | \
+ (reg) )
+
+#define IRONGATE0 ((Irongate0 *) IGCSR(0, 0, 0))
+#define IRONGATE1 ((Irongate1 *) IGCSR(1, 0, 0))
/*
* Data structure for handling IRONGATE machine checks:
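A small sketch of how the new IGCSR() macro packs a bus-0, type-0 configuration address (device in bits 15:11, function in bits 10:8, register in bits 7:0). The igcsr() function and the bare 0x1FE000000 base are stand-ins for illustration only; the real IRONGATE_CONF also carries IDENT_ADDR | IRONGATE_BIAS:

    #include <stdio.h>

    /* Offset part only; the real macro ORs in the sparse-space base too. */
    static unsigned long igcsr(unsigned int dev, unsigned int fun, unsigned int reg)
    {
            return 0x1FE000000UL | (dev << 11) | (fun << 8) | reg;
    }

    int main(void)
    {
            /* IRONGATE1 sits at device 1, function 0, register 0. */
            printf("%#lx\n", igcsr(1, 0, 0));     /* prints 0x1fe000800 */
            return 0;
    }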
diff --git a/include/asm-alpha/elf.h b/include/asm-alpha/elf.h
index d031ef3c2..7d5df3a44 100644
--- a/include/asm-alpha/elf.h
+++ b/include/asm-alpha/elf.h
@@ -127,7 +127,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#ifdef __KERNEL__
#define SET_PERSONALITY(EX, IBCS2) \
- set_personality((EX).e_flags & EF_ALPHA_32BIT \
+ set_personality(((EX).e_flags & EF_ALPHA_32BIT) \
? PER_LINUX_32BIT : (IBCS2) ? PER_SVR4 : PER_LINUX)
#endif
diff --git a/include/asm-alpha/fcntl.h b/include/asm-alpha/fcntl.h
index 314e3addb..86a73667f 100644
--- a/include/asm-alpha/fcntl.h
+++ b/include/asm-alpha/fcntl.h
@@ -48,13 +48,19 @@
#define F_EXLCK 16 /* or 3 */
#define F_SHLCK 32 /* or 4 */
+#define F_INPROGRESS 16
+
/* operations for bsd flock(), also used by the kernel implementation */
#define LOCK_SH 1 /* shared lock */
#define LOCK_EX 2 /* exclusive lock */
#define LOCK_NB 4 /* or'd with one of the above to prevent
blocking */
#define LOCK_UN 8 /* remove lock */
-
+#define LOCK_MAND 32 /* This is a mandatory flock */
+#define LOCK_READ 64 /* ... Which allows concurrent read operations */
+#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
+#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
+
struct flock {
short l_type;
short l_whence;
@@ -66,5 +72,6 @@ struct flock {
#ifdef __KERNEL__
#define flock64 flock
#endif
+#define F_LINUX_SPECIFIC_BASE 1024
#endif
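A hedged userspace sketch of the new mandatory-flock bits; whether LOCK_MAND is honoured depends on the kernel and filesystem, and lock_shared_read() is invented for the example:

    #include <sys/file.h>
    #include <fcntl.h>
    #include <unistd.h>

    #ifndef LOCK_MAND                  /* values from the header above */
    #define LOCK_MAND 32
    #define LOCK_READ 64
    #endif

    /* Take a mandatory lock that still allows concurrent readers. */
    static int lock_shared_read(const char *path)
    {
            int fd = open(path, O_RDONLY);

            if (fd < 0)
                    return -1;
            if (flock(fd, LOCK_MAND | LOCK_READ) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;   /* caller unlocks with flock(fd, LOCK_UN) and closes */
    }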
diff --git a/include/asm-alpha/resource.h b/include/asm-alpha/resource.h
index 0f2ddd20c..686b9558d 100644
--- a/include/asm-alpha/resource.h
+++ b/include/asm-alpha/resource.h
@@ -15,8 +15,9 @@
#define RLIMIT_AS 7 /* address space limit(?) */
#define RLIMIT_NPROC 8 /* max number of processes */
#define RLIMIT_MEMLOCK 9 /* max locked-in-memory address space */
+#define RLIMIT_LOCKS 10 /* maximum file locks held */
-#define RLIM_NLIMITS 10
+#define RLIM_NLIMITS 11
/*
* SuS says limits have to be unsigned. Fine, it's unsigned, but
@@ -39,6 +40,7 @@
{LONG_MAX, LONG_MAX}, /* RLIMIT_AS */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_NPROC */ \
{LONG_MAX, LONG_MAX}, /* RLIMIT_MEMLOCK */ \
+ {LONG_MAX, LONG_MAX}, /* RLIMIT_LOCKS */ \
}
#endif /* __KERNEL__ */
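A userspace sketch of the new RLIMIT_LOCKS slot; the fallback #define mirrors the value above, and main() is illustrative only:

    #include <stdio.h>
    #include <sys/resource.h>

    #ifndef RLIMIT_LOCKS
    #define RLIMIT_LOCKS 10            /* value from the header above */
    #endif

    int main(void)
    {
            struct rlimit rl;

            if (getrlimit(RLIMIT_LOCKS, &rl) == 0)
                    printf("file locks: cur=%lu max=%lu\n",
                           (unsigned long)rl.rlim_cur,
                           (unsigned long)rl.rlim_max);

            rl.rlim_cur = 256;         /* illustrative cap */
            setrlimit(RLIMIT_LOCKS, &rl);
            return 0;
    }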
diff --git a/include/asm-alpha/semaphore-helper.h b/include/asm-alpha/semaphore-helper.h
index 2812510db..52d8fb5f4 100644
--- a/include/asm-alpha/semaphore-helper.h
+++ b/include/asm-alpha/semaphore-helper.h
@@ -37,7 +37,7 @@ waking_non_zero(struct semaphore *sem)
".subsection 2\n"
"3: br 1b\n"
".previous"
- : "=r"(ret), "=r"(tmp), "=m"(__atomic_fool_gcc(&sem->waking))
+ : "=r"(ret), "=r"(tmp), "=m"(sem->waking.counter)
: "0"(0));
return ret > 0;
diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h
index c14eb0909..64e05d17b 100644
--- a/include/asm-alpha/spinlock.h
+++ b/include/asm-alpha/spinlock.h
@@ -5,8 +5,8 @@
#include <linux/kernel.h>
#include <asm/current.h>
-#define DEBUG_SPINLOCK 1
-#define DEBUG_RWLOCK 1
+#define DEBUG_SPINLOCK 0
+#define DEBUG_RWLOCK 0
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
@@ -38,9 +38,6 @@ typedef struct {
#define spin_is_locked(x) ((x)->lock != 0)
#define spin_unlock_wait(x) ({ do { barrier(); } while ((x)->lock); })
-typedef struct { unsigned long a[100]; } __dummy_lock_t;
-#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
-
#if DEBUG_SPINLOCK
extern void spin_unlock(spinlock_t * lock);
extern void debug_spin_lock(spinlock_t * lock, const char *, int);
@@ -83,8 +80,8 @@ static inline void spin_lock(spinlock_t * lock)
" blbs %0,2b\n"
" br 1b\n"
".previous"
- : "=r" (tmp), "=m" (__dummy_lock(lock))
- : "m"(__dummy_lock(lock)));
+ : "=r" (tmp), "=m" (lock->lock)
+ : "m"(lock->lock) : "memory");
}
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
@@ -119,9 +116,8 @@ static inline void write_lock(rwlock_t * lock)
" bne %1,6b\n"
" br 1b\n"
".previous"
- : "=m" (__dummy_lock(lock)), "=&r" (regx)
- : "0" (__dummy_lock(lock))
- );
+ : "=m" (*(volatile int *)lock), "=&r" (regx)
+ : "0" (*(volatile int *)lock) : "memory");
}
static inline void read_lock(rwlock_t * lock)
@@ -140,9 +136,8 @@ static inline void read_lock(rwlock_t * lock)
" blbs %1,6b\n"
" br 1b\n"
".previous"
- : "=m" (__dummy_lock(lock)), "=&r" (regx)
- : "m" (__dummy_lock(lock))
- );
+ : "=m" (*(volatile int *)lock), "=&r" (regx)
+ : "m" (*(volatile int *)lock) : "memory");
}
#endif /* DEBUG_RWLOCK */
@@ -156,6 +151,7 @@ static inline void read_unlock(rwlock_t * lock)
{
long regx;
__asm__ __volatile__(
+ " mb\n"
"1: ldl_l %1,%0\n"
" addl %1,2,%1\n"
" stl_c %1,%0\n"
@@ -163,8 +159,8 @@ static inline void read_unlock(rwlock_t * lock)
".subsection 2\n"
"6: br 1b\n"
".previous"
- : "=m" (__dummy_lock(lock)), "=&r" (regx)
- : "m" (__dummy_lock(lock)));
+ : "=m" (*(volatile int *)lock), "=&r" (regx)
+ : "m" (*(volatile int *)lock) : "memory");
}
#endif /* _ALPHA_SPINLOCK_H */
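A sketch of the kind of critical section these routines protect, assuming the usual <linux/spinlock.h> wrapper in a 2.4 kernel; list_lock, shared_count and bump_count() are invented for illustration. The new "memory" clobber is what keeps the compiler from caching shared_count across the lock:

    #include <linux/spinlock.h>

    static spinlock_t list_lock = SPIN_LOCK_UNLOCKED;
    static int shared_count;

    static void bump_count(void)
    {
            spin_lock(&list_lock);     /* spins in the asm loop above */
            shared_count++;            /* reloaded, not cached, thanks to the clobber */
            spin_unlock(&list_lock);
    }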
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 6328750e1..b97d0c5b6 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -137,12 +137,19 @@ __asm__ __volatile__("mb": : :"memory")
#define wmb() \
__asm__ __volatile__("wmb": : :"memory")
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#endif
+
#define set_mb(var, value) \
do { var = value; mb(); } while (0)
-#define set_rmb(var, value) \
-do { var = value; rmb(); } while (0)
-
#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)
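A sketch of where the new smp_* barriers sit in a producer/consumer pair; on SMP they emit real memory-barrier instructions, on UP they collapse to barrier(). The data/flag variables and both functions are invented for illustration:

    #include <linux/kernel.h>    /* barrier() */
    #include <asm/system.h>

    static int data;
    static int flag;

    static void producer(void)
    {
            data = 42;
            smp_wmb();           /* order the data store before the flag store */
            flag = 1;
    }

    static int consumer(void)
    {
            if (!flag)
                    return -1;
            smp_rmb();           /* order the flag read before the data read */
            return data;
    }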
@@ -284,11 +291,11 @@ extern int __min_ipl;
#define getipl() (rdps() & 7)
#define setipl(ipl) ((void) swpipl(ipl))
-#define __cli() setipl(IPL_MAX)
-#define __sti() setipl(IPL_MIN)
+#define __cli() do { setipl(IPL_MAX); barrier(); } while(0)
+#define __sti() do { barrier(); setipl(IPL_MIN); } while(0)
#define __save_flags(flags) ((flags) = rdps())
-#define __save_and_cli(flags) ((flags) = swpipl(IPL_MAX))
-#define __restore_flags(flags) setipl(flags)
+#define __save_and_cli(flags) do { (flags) = swpipl(IPL_MAX); barrier(); } while(0)
+#define __restore_flags(flags) do { barrier(); setipl(flags); barrier(); } while(0)
#define local_irq_save(flags) __save_and_cli(flags)
#define local_irq_restore(flags) __restore_flags(flags)
@@ -344,6 +351,8 @@ extern void __global_restore_flags(unsigned long flags);
/*
* Atomic exchange.
+ * Since it can be used to implement critical sections,
+ * it must clobber "memory" (also for interrupts in UP).
*/
extern __inline__ unsigned long
@@ -352,16 +361,18 @@ __xchg_u32(volatile int *m, unsigned long val)
unsigned long dummy;
__asm__ __volatile__(
- "1: ldl_l %0,%2\n"
+ "1: ldl_l %0,%4\n"
" bis $31,%3,%1\n"
" stl_c %1,%2\n"
" beq %1,2f\n"
+#ifdef CONFIG_SMP
" mb\n"
+#endif
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (val), "=&r" (dummy), "=m" (*m)
- : "rI" (val), "m" (*m));
+ : "rI" (val), "m" (*m) : "memory");
return val;
}
@@ -372,16 +383,18 @@ __xchg_u64(volatile long *m, unsigned long val)
unsigned long dummy;
__asm__ __volatile__(
- "1: ldq_l %0,%2\n"
+ "1: ldq_l %0,%4\n"
" bis $31,%3,%1\n"
" stq_c %1,%2\n"
" beq %1,2f\n"
+#ifdef CONFIG_SMP
" mb\n"
+#endif
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (val), "=&r" (dummy), "=m" (*m)
- : "rI" (val), "m" (*m));
+ : "rI" (val), "m" (*m) : "memory");
return val;
}
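As the comment a couple of hunks above notes, xchg() is used to build critical sections, which is why the "memory" clobber is added here. A crude test-and-set lock is the classic pattern; 'busy' and the two helpers are invented for illustration, and real code would use spinlock_t instead:

    #include <asm/system.h>

    static volatile int busy;

    static void crude_lock(void)
    {
            while (xchg(&busy, 1) != 0)
                    ;                  /* spin until we observe the old value 0 */
    }

    static void crude_unlock(void)
    {
            xchg(&busy, 0);            /* the clobber keeps earlier stores inside */
    }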
@@ -416,6 +429,11 @@ __xchg(volatile void *ptr, unsigned long x, int size)
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
+ *
+ * The memory barrier should be placed in SMP only when we actually
+ * make the change. If we don't change anything (so if the returned
+ * prev is equal to old) then we aren't acquiring anything new and
+ * we don't need any memory barrier as far as I can tell.
*/
#define __HAVE_ARCH_CMPXCHG 1
@@ -426,18 +444,21 @@ __cmpxchg_u32(volatile int *m, int old, int new)
unsigned long prev, cmp;
__asm__ __volatile__(
- "1: ldl_l %0,%2\n"
+ "1: ldl_l %0,%5\n"
" cmpeq %0,%3,%1\n"
" beq %1,2f\n"
" mov %4,%1\n"
" stl_c %1,%2\n"
" beq %1,3f\n"
- "2: mb\n"
+#ifdef CONFIG_SMP
+ " mb\n"
+#endif
+ "2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r"(prev), "=&r"(cmp), "=m"(*m)
- : "r"((long) old), "r"(new), "m"(*m));
+ : "r"((long) old), "r"(new), "m"(*m) : "memory");
return prev;
}
@@ -448,18 +469,21 @@ __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
unsigned long prev, cmp;
__asm__ __volatile__(
- "1: ldq_l %0,%2\n"
+ "1: ldq_l %0,%5\n"
" cmpeq %0,%3,%1\n"
" beq %1,2f\n"
" mov %4,%1\n"
" stq_c %1,%2\n"
" beq %1,3f\n"
- "2: mb\n"
+#ifdef CONFIG_SMP
+ " mb\n"
+#endif
+ "2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r"(prev), "=&r"(cmp), "=m"(*m)
- : "r"((long) old), "r"(new), "m"(*m));
+ : "r"((long) old), "r"(new), "m"(*m) : "memory");
return prev;
}
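A sketch of the usual compare-and-swap retry loop on top of cmpxchg(), illustrating the point of the new comment: when nothing is stored, no barrier is taken. atomic_max() and its argument are invented for the example:

    #include <asm/system.h>

    /* Raise *p to at least 'val', atomically. */
    static int atomic_max(volatile int *p, int val)
    {
            int old, prev;

            do {
                    old = *p;
                    if (old >= val)
                            return old;               /* nothing stored, no barrier */
                    prev = cmpxchg(p, old, val);      /* stores only if *p is still 'old' */
            } while (prev != old);

            return val;
    }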
diff --git a/include/asm-alpha/termios.h b/include/asm-alpha/termios.h
index 2c8b5288a..670576b84 100644
--- a/include/asm-alpha/termios.h
+++ b/include/asm-alpha/termios.h
@@ -71,6 +71,7 @@ struct termio {
#define N_SLIP 1
#define N_MOUSE 2
#define N_PPP 3
+#define N_STRIP 4
#define N_AX25 5
#define N_X25 6 /* X.25 async */
#define N_6PACK 7
diff --git a/include/asm-alpha/uaccess.h b/include/asm-alpha/uaccess.h
index 98c446942..eeb5b8540 100644
--- a/include/asm-alpha/uaccess.h
+++ b/include/asm-alpha/uaccess.h
@@ -79,24 +79,6 @@ extern inline int verify_area(int type, const void * addr, unsigned long size)
__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
/*
- * The "xxx_ret" versions return constant specified in third argument, if
- * something bad happens. These macros can be optimized for the
- * case of just returning from the function xxx_ret is used.
- */
-
-#define put_user_ret(x,ptr,ret) ({ \
-if (put_user(x,ptr)) return ret; })
-
-#define get_user_ret(x,ptr,ret) ({ \
-if (get_user(x,ptr)) return ret; })
-
-#define __put_user_ret(x,ptr,ret) ({ \
-if (__put_user(x,ptr)) return ret; })
-
-#define __get_user_ret(x,ptr,ret) ({ \
-if (__get_user(x,ptr)) return ret; })
-
-/*
* The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
* encode the bits we need for resolving the exception. See the
* more extensive comments with fixup_inline_exception below for
@@ -417,16 +399,6 @@ copy_from_user(void *to, const void *from, long n)
return __copy_tofrom_user(to, from, n, from);
}
-#define copy_to_user_ret(to,from,n,retval) ({ \
-if (copy_to_user(to,from,n)) \
- return retval; \
-})
-
-#define copy_from_user_ret(to,from,n,retval) ({ \
-if (copy_from_user(to,from,n)) \
- return retval; \
-})
-
extern void __do_clear_user(void);
extern inline long
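With the *_ret macros gone, callers open-code the failure check. A sketch of the resulting pattern, where struct my_args and do_something() are invented for illustration:

    #include <linux/errno.h>
    #include <asm/uaccess.h>

    struct my_args { int a; int b; };

    static long do_something(const void *uptr)
    {
            struct my_args args;

            if (copy_from_user(&args, uptr, sizeof(args)))
                    return -EFAULT;    /* some bytes could not be copied */

            return args.a + args.b;
    }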