author    Ralf Baechle <ralf@linux-mips.org>  2000-02-23 00:40:54 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-02-23 00:40:54 +0000
commit    529c593ece216e4aaffd36bd940cb94f1fa63129 (patch)
tree      78f1c0b805f5656aa7b0417a043c5346f700a2cf /include/asm-alpha
parent    0bd079751d25808d1972baee5c4eaa1db2227257 (diff)
Merge with 2.3.43. I did ignore all modifications to the qlogicisp.c
driver due to the Origin A64 hacks.
Diffstat (limited to 'include/asm-alpha')
-rw-r--r--  include/asm-alpha/bitops.h         |  16
-rw-r--r--  include/asm-alpha/core_apecs.h     |  82
-rw-r--r--  include/asm-alpha/core_cia.h       |  31
-rw-r--r--  include/asm-alpha/core_irongate.h  |  23
-rw-r--r--  include/asm-alpha/core_lca.h       |  29
-rw-r--r--  include/asm-alpha/core_mcpcia.h    |  25
-rw-r--r--  include/asm-alpha/core_polaris.h   |  21
-rw-r--r--  include/asm-alpha/core_pyxis.h     |  33
-rw-r--r--  include/asm-alpha/core_t2.h        |  20
-rw-r--r--  include/asm-alpha/core_tsunami.h   |  37
-rw-r--r--  include/asm-alpha/hardirq.h        |   2
-rw-r--r--  include/asm-alpha/io.h             |  35
-rw-r--r--  include/asm-alpha/jensen.h         |  20
-rw-r--r--  include/asm-alpha/machvec.h        |   5
-rw-r--r--  include/asm-alpha/mmu_context.h    |   4
-rw-r--r--  include/asm-alpha/pci.h            | 106
-rw-r--r--  include/asm-alpha/pgalloc.h        |  12
-rw-r--r--  include/asm-alpha/scatterlist.h    |  15
-rw-r--r--  include/asm-alpha/semaphore.h      | 299
-rw-r--r--  include/asm-alpha/smp.h            |   3
-rw-r--r--  include/asm-alpha/softirq.h        | 125
-rw-r--r--  include/asm-alpha/types.h          |  31
22 files changed, 480 insertions(+), 494 deletions(-)
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index c9e7e7aee..c590d9e51 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -76,8 +76,8 @@ extern __inline__ void change_bit(unsigned long nr, volatile void * addr)
:"Ir" (1UL << (nr & 31)), "m" (*m));
}
-extern __inline__ unsigned long test_and_set_bit(unsigned long nr,
- volatile void * addr)
+extern __inline__ int test_and_set_bit(unsigned long nr,
+ volatile void * addr)
{
unsigned long oldbit;
unsigned long temp;
@@ -101,8 +101,8 @@ extern __inline__ unsigned long test_and_set_bit(unsigned long nr,
return oldbit != 0;
}
-extern __inline__ unsigned long test_and_clear_bit(unsigned long nr,
- volatile void * addr)
+extern __inline__ int test_and_clear_bit(unsigned long nr,
+ volatile void * addr)
{
unsigned long oldbit;
unsigned long temp;
@@ -126,8 +126,8 @@ extern __inline__ unsigned long test_and_clear_bit(unsigned long nr,
return oldbit != 0;
}
-extern __inline__ unsigned long test_and_change_bit(unsigned long nr,
- volatile void * addr)
+extern __inline__ int test_and_change_bit(unsigned long nr,
+ volatile void * addr)
{
unsigned long oldbit;
unsigned long temp;
@@ -149,9 +149,9 @@ extern __inline__ unsigned long test_and_change_bit(unsigned long nr,
return oldbit != 0;
}
-extern __inline__ unsigned long test_bit(int nr, volatile void * addr)
+extern __inline__ int test_bit(int nr, volatile void * addr)
{
- return 1UL & (((const int *) addr)[nr >> 5] >> (nr & 31));
+ return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}
/*
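For illustration (not part of the patch): narrowing the return type of the test_and_* routines to int matches how callers actually use the result, as a boolean. A minimal sketch, where the `resource_busy' flag word is an assumption made up for the example:

	#include <asm/bitops.h>

	static unsigned long resource_busy;	/* bit 0 guards a hypothetical resource */

	static int try_claim_resource(void)
	{
		/* Atomically sets bit 0; a nonzero return means it was already set. */
		if (test_and_set_bit(0, &resource_busy))
			return 0;		/* somebody else holds it */
		return 1;			/* claimed */
	}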
diff --git a/include/asm-alpha/core_apecs.h b/include/asm-alpha/core_apecs.h
index f9796c1d5..927aa2ea7 100644
--- a/include/asm-alpha/core_apecs.h
+++ b/include/asm-alpha/core_apecs.h
@@ -1,7 +1,6 @@
#ifndef __ALPHA_APECS__H__
#define __ALPHA_APECS__H__
-#include <linux/config.h>
#include <linux/types.h>
#include <asm/compiler.h>
@@ -66,19 +65,10 @@
for most other things they are identical. It didn't seem reasonable to
make the AVANTI support pay for the limitations of the XL. It is true,
however, that an XL kernel will run on an AVANTI without problems.
-*/
-#define APECS_XL_DMA_WIN1_BASE (64UL*1024*1024)
-#define APECS_XL_DMA_WIN1_SIZE (64UL*1024*1024)
-#define APECS_XL_DMA_WIN1_SIZE_PARANOID (48UL*1024*1024)
-#define APECS_XL_DMA_WIN2_BASE (1UL*1024*1024*1024)
-#define APECS_XL_DMA_WIN2_SIZE (1UL*1024*1024*1024)
-
-
-/* These are for normal APECS family machines, AVANTI/MUSTANG/EB64/PC64. */
-
-#define APECS_DMA_WIN_BASE (1UL*1024*1024*1024)
-#define APECS_DMA_WIN_SIZE (1UL*1024*1024*1024)
+ %%% All of this should be obviated by the ability to route
+ everything through the iommu.
+*/
/*
* 21071-DA Control and Status registers.
@@ -370,64 +360,6 @@ struct el_apecs_procdata
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-
-/*
- * NOTE: we fudge the window 1 maximum as 48Mb instead of 64Mb, to prevent
- * virt_to_bus() from returning an address in the first window, for a
- * data area that goes beyond the 64Mb first DMA window. Sigh...
- * This MUST match with <asm/dma.h> MAX_DMA_ADDRESS for consistency, but
- * we can't just use that here, because of header file looping... :-(
- */
-
-__EXTERN_INLINE unsigned long apecs_virt_to_bus(void * address)
-{
- unsigned long paddr = virt_to_phys(address);
- return paddr + APECS_DMA_WIN_BASE;
-}
-
-static inline unsigned long apecs_xl_virt_to_bus(void * address)
-{
- unsigned long paddr = virt_to_phys(address);
- if (paddr < APECS_XL_DMA_WIN1_SIZE_PARANOID)
- return paddr + APECS_XL_DMA_WIN1_BASE;
- else
- return paddr + APECS_XL_DMA_WIN2_BASE;
-}
-
-__EXTERN_INLINE void * apecs_bus_to_virt(unsigned long address)
-{
- /*
- * This check is a sanity check but also ensures that bus
- * address 0 maps to virtual address 0 which is useful to
- * detect null "pointers" (the NCR driver is much simpler if
- * NULL pointers are preserved).
- */
- if (address < APECS_DMA_WIN_BASE)
- return 0;
- return phys_to_virt(address - APECS_DMA_WIN_BASE);
-}
-
-static inline void * apecs_xl_bus_to_virt(unsigned long address)
-{
- /*
- * This check is a sanity check but also ensures that bus
- * address 0 maps to virtual address 0 which is useful to
- * detect null "pointers" (the NCR driver is much simpler if
- * NULL pointers are preserved).
- */
- if (address < APECS_XL_DMA_WIN1_BASE)
- return 0;
- else if (address < (APECS_XL_DMA_WIN1_BASE + APECS_XL_DMA_WIN1_SIZE))
- address -= APECS_XL_DMA_WIN1_BASE;
- else /* should be more checking here, maybe? */
- address -= APECS_XL_DMA_WIN2_BASE;
- return phys_to_virt(address);
-}
-
-/*
* I/O functions:
*
* Unlike Jensen, the APECS machines have no concept of local
@@ -579,14 +511,6 @@ __EXTERN_INLINE int apecs_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#ifdef CONFIG_ALPHA_XL
-#define virt_to_bus apecs_xl_virt_to_bus
-#define bus_to_virt apecs_xl_bus_to_virt
-#else
-#define virt_to_bus apecs_virt_to_bus
-#define bus_to_virt apecs_bus_to_virt
-#endif
-
#define __inb apecs_inb
#define __inw apecs_inw
#define __inl apecs_inl
diff --git a/include/asm-alpha/core_cia.h b/include/asm-alpha/core_cia.h
index 34b2f03b8..418aa3492 100644
--- a/include/asm-alpha/core_cia.h
+++ b/include/asm-alpha/core_cia.h
@@ -77,20 +77,6 @@
#define CIA_MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
#define CIA_MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
-#define CIA_DMA_WIN_BASE (1UL*1024*1024*1024)
-#define CIA_DMA_WIN_SIZE (2UL*1024*1024*1024)
-
-/* Window 0 at 1GB size 1GB mapping to 0. */
-#define CIA_DMA_WIN0_BASE_DEFAULT (1UL*1024*1024*1024)
-#define CIA_DMA_WIN0_SIZE_DEFAULT (1UL*1024*1024*1024)
-#define CIA_DMA_WIN0_TRAN_DEFAULT (0UL)
-
-/* Window 1 at 2GB size 1GB mapping to 1GB. */
-#define CIA_DMA_WIN1_BASE_DEFAULT (2UL*1024*1024*1024)
-#define CIA_DMA_WIN1_SIZE_DEFAULT (1UL*1024*1024*1024)
-#define CIA_DMA_WIN1_TRAN_DEFAULT (1UL*1024*1024*1024)
-
-
/*
* 21171-CA Control and Status Registers (p4-1)
*/
@@ -289,21 +275,6 @@ struct el_CIA_sysdata_mcheck {
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-
-__EXTERN_INLINE unsigned long cia_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + CIA_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * cia_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address - CIA_DMA_WIN_BASE);
-}
-
-/*
* I/O functions:
*
* CIA (the 2117x PCI/memory support chipset for the EV5 (21164)
@@ -491,8 +462,6 @@ __EXTERN_INLINE int cia_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus cia_virt_to_bus
-#define bus_to_virt cia_bus_to_virt
#define __inb cia_inb
#define __inw cia_inw
#define __inl cia_inl
diff --git a/include/asm-alpha/core_irongate.h b/include/asm-alpha/core_irongate.h
index ae9aaaa85..5c30feb37 100644
--- a/include/asm-alpha/core_irongate.h
+++ b/include/asm-alpha/core_irongate.h
@@ -22,13 +22,8 @@
* and I/O address space. Memory address space resides in the lower
* half of the physical address space (PA[43]=0) and I/O address space
* resides in the upper half of the physical address space (PA[43]=1).
- *
*/
-#define IRONGATE_DMA_WIN_BASE (0UL)
-#define IRONGATE_DMA_WIN_SIZE (0UL)
-
-
/*
* Irongate CSR map. Some of the CSRs are 8 or 16 bits, but all access
* through the routines given is 32-bit.
@@ -393,21 +388,6 @@ struct el_IRONGATE_sysdata_mcheck {
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-
-__EXTERN_INLINE unsigned long irongate_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + IRONGATE_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * irongate_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address - IRONGATE_DMA_WIN_BASE);
-}
-
-/*
* I/O functions:
*
* IRONGATE (AMD-751) PCI/memory support chip for the EV6 (21264) and
@@ -513,9 +493,6 @@ __EXTERN_INLINE int irongate_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus irongate_virt_to_bus
-#define bus_to_virt irongate_bus_to_virt
-
#define __inb irongate_inb
#define __inw irongate_inw
#define __inl irongate_inl
diff --git a/include/asm-alpha/core_lca.h b/include/asm-alpha/core_lca.h
index 644512cba..6ba2bbd83 100644
--- a/include/asm-alpha/core_lca.h
+++ b/include/asm-alpha/core_lca.h
@@ -55,10 +55,6 @@
* ugh).
*/
-#define LCA_DMA_WIN_BASE (1UL*1024*1024*1024)
-#define LCA_DMA_WIN_SIZE (1UL*1024*1024*1024)
-
-
/*
* Memory Controller registers:
*/
@@ -209,29 +205,6 @@ union el_lca {
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-
-__EXTERN_INLINE unsigned long lca_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + LCA_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * lca_bus_to_virt(unsigned long address)
-{
- /*
- * This check is a sanity check but also ensures that bus
- * address 0 maps to virtual address 0 which is useful to
- * detect null "pointers" (the NCR driver is much simpler if
- * NULL pointers are preserved).
- */
- if (address < LCA_DMA_WIN_BASE)
- return 0;
- return phys_to_virt(address - LCA_DMA_WIN_BASE);
-}
-
-/*
* I/O functions:
*
* Unlike Jensen, the Noname machines have no concept of local
@@ -387,8 +360,6 @@ __EXTERN_INLINE int lca_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus lca_virt_to_bus
-#define bus_to_virt lca_bus_to_virt
#define __inb lca_inb
#define __inw lca_inw
#define __inl lca_inl
diff --git a/include/asm-alpha/core_mcpcia.h b/include/asm-alpha/core_mcpcia.h
index 577704129..fb100e508 100644
--- a/include/asm-alpha/core_mcpcia.h
+++ b/include/asm-alpha/core_mcpcia.h
@@ -72,13 +72,10 @@
*
*/
-#define MCPCIA_MEM_MASK 0x07ffffff /* SPARSE Mem region mask is 27 bits */
-
-#define MCPCIA_DMA_WIN_BASE (2UL*1024*1024*1024)
-#define MCPCIA_DMA_WIN_SIZE (2UL*1024*1024*1024)
-
#define MCPCIA_MID(m) ((unsigned long)(m) << 33)
+#define MCPCIA_MEM_MASK 0x07ffffff /* SPARSE Mem region mask is 27 bits */
+
/*
* Memory spaces:
*/
@@ -198,21 +195,6 @@ struct el_MCPCIA_uncorrected_frame_mcheck {
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-
-__EXTERN_INLINE unsigned long mcpcia_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + MCPCIA_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * mcpcia_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address - MCPCIA_DMA_WIN_BASE);
-}
-
-/*
* I/O functions:
*
* MCPCIA, the RAWHIDE family PCI/memory support chipset for the EV5 (21164)
@@ -452,9 +434,6 @@ __EXTERN_INLINE void mcpcia_writeq(unsigned long b, unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus mcpcia_virt_to_bus
-#define bus_to_virt mcpcia_bus_to_virt
-
#define __inb mcpcia_inb
#define __inw mcpcia_inw
#define __inl mcpcia_inl
diff --git a/include/asm-alpha/core_polaris.h b/include/asm-alpha/core_polaris.h
index da53edc22..626b24a22 100644
--- a/include/asm-alpha/core_polaris.h
+++ b/include/asm-alpha/core_polaris.h
@@ -36,14 +36,6 @@
#define POLARIS_W_CMD (POLARIS_DENSE_CONFIG_BASE+4)
#define POLARIS_W_STATUS (POLARIS_DENSE_CONFIG_BASE+6)
-/* No HAE address. Polaris has no concept of an HAE, since it
- * supports transfers of all sizes in dense space.
- */
-
-#define POLARIS_DMA_WIN_BASE 0x80000000UL /* fixed, 2G @ 2G */
-#define POLARIS_DMA_WIN_SIZE 0x80000000UL /* fixed, 2G @ 2G */
-
-
/*
* Data structure for handling POLARIS machine checks:
*/
@@ -61,16 +53,6 @@ struct el_POLARIS_sysdata_mcheck {
#define __IO_EXTERN_INLINE
#endif
-__EXTERN_INLINE unsigned long polaris_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + POLARIS_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * polaris_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address - POLARIS_DMA_WIN_BASE);
-}
-
/*
* I/O functions:
*
@@ -188,9 +170,6 @@ __EXTERN_INLINE int polaris_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus polaris_virt_to_bus
-#define bus_to_virt polaris_bus_to_virt
-
#define __inb polaris_inb
#define __inw polaris_inw
#define __inl polaris_inl
diff --git a/include/asm-alpha/core_pyxis.h b/include/asm-alpha/core_pyxis.h
index 2850a949c..18b523ef4 100644
--- a/include/asm-alpha/core_pyxis.h
+++ b/include/asm-alpha/core_pyxis.h
@@ -71,20 +71,6 @@
#define PYXIS_MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
#define PYXIS_MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
-#define PYXIS_DMA_WIN_BASE (1UL*1024*1024*1024)
-#define PYXIS_DMA_WIN_SIZE (2UL*1024*1024*1024)
-
-/* Window 0 at 1GB size 1GB mapping 0 */
-#define PYXIS_DMA_WIN0_BASE_DEFAULT (1UL*1024*1024*1024)
-#define PYXIS_DMA_WIN0_SIZE_DEFAULT (1UL*1024*1024*1024)
-#define PYXIS_DMA_WIN0_TRAN_DEFAULT (0UL)
-
-/* Window 0 at 2GB size 1GB mapping 1GB */
-#define PYXIS_DMA_WIN1_BASE_DEFAULT (2UL*1024*1024*1024)
-#define PYXIS_DMA_WIN1_SIZE_DEFAULT (1UL*1024*1024*1024)
-#define PYXIS_DMA_WIN1_TRAN_DEFAULT (1UL*1024*1024*1024)
-
-
/*
* General Registers
*/
@@ -272,22 +258,6 @@ struct el_PYXIS_sysdata_mcheck {
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-
-__EXTERN_INLINE unsigned long pyxis_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + PYXIS_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * pyxis_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address - PYXIS_DMA_WIN_BASE);
-}
-
-
-/*
* I/O functions:
*
* PYXIS, the 21174 PCI/memory support chipset for the EV56 (21164A)
@@ -430,9 +400,6 @@ __EXTERN_INLINE int pyxis_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus pyxis_virt_to_bus
-#define bus_to_virt pyxis_bus_to_virt
-
#define __inb pyxis_inb
#define __inw pyxis_inw
#define __inl pyxis_inl
diff --git a/include/asm-alpha/core_t2.h b/include/asm-alpha/core_t2.h
index 933be1714..d11506cd5 100644
--- a/include/asm-alpha/core_t2.h
+++ b/include/asm-alpha/core_t2.h
@@ -21,9 +21,6 @@
#define T2_MEM_R1_MASK 0x03ffffff /* Mem sparse region 1 mask is 26 bits */
-#define T2_DMA_WIN_BASE (1UL*1024*1024*1024)
-#define T2_DMA_WIN_SIZE (1UL*1024*1024*1024)
-
/* GAMMA-SABLE is a SABLE with EV5-based CPUs */
#define _GAMMA_BIAS 0x8000000000UL
@@ -322,21 +319,6 @@ struct el_t2_frame_corrected {
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-
-__EXTERN_INLINE unsigned long t2_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + T2_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * t2_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address - T2_DMA_WIN_BASE);
-}
-
-/*
* I/O functions:
*
* T2 (the core logic PCI/memory support chipset for the SABLE
@@ -534,8 +516,6 @@ __EXTERN_INLINE int t2_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus t2_virt_to_bus
-#define bus_to_virt t2_bus_to_virt
#define __inb t2_inb
#define __inw t2_inw
#define __inl t2_inl
diff --git a/include/asm-alpha/core_tsunami.h b/include/asm-alpha/core_tsunami.h
index 5bfa4d658..ffbcc5c34 100644
--- a/include/asm-alpha/core_tsunami.h
+++ b/include/asm-alpha/core_tsunami.h
@@ -15,26 +15,6 @@
*
*/
-/*
- * We must actually use 2 windows to direct-map the 2GB space, because
- * of an "idiot-syncracy" of the CYPRESS chip used on DS20 and others.
- * It may respond to a PCI bus address in the last 1MB of the 4GB
- * address range, and that is where real memory may appear.
- *
- * Sigh...
- */
-#define TSUNAMI_DMA_WIN_BASE (1UL*1024*1024*1024)
-#define TSUNAMI_DMA_WIN_SIZE (2UL*1024*1024*1024)
-
-#define TSUNAMI_DMA_WIN0_BASE_DEFAULT (1UL*1024*1024*1024)
-#define TSUNAMI_DMA_WIN0_SIZE_DEFAULT (1UL*1024*1024*1024)
-#define TSUNAMI_DMA_WIN0_TRAN_DEFAULT (0UL)
-
-#define TSUNAMI_DMA_WIN1_BASE_DEFAULT (2UL*1024*1024*1024)
-#define TSUNAMI_DMA_WIN1_SIZE_DEFAULT (1UL*1024*1024*1024)
-#define TSUNAMI_DMA_WIN1_TRAN_DEFAULT (1UL*1024*1024*1024)
-
-
/* XXX: Do we need to conditionalize on this? */
#ifdef USE_48_BIT_KSEG
#define TS_BIAS 0x80000000000UL
@@ -309,20 +289,6 @@ struct el_TSUNAMI_sysdata_mcheck {
#endif
/*
- * Translate physical memory address as seen on (PCI) bus into
- * a kernel virtual address and vv.
- */
-__EXTERN_INLINE unsigned long tsunami_virt_to_bus(void * address)
-{
- return virt_to_phys(address) + TSUNAMI_DMA_WIN_BASE;
-}
-
-__EXTERN_INLINE void * tsunami_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address - TSUNAMI_DMA_WIN_BASE);
-}
-
-/*
* I/O functions:
*
* TSUNAMI, the 21??? PCI/memory support chipset for the EV6 (21264)
@@ -439,9 +405,6 @@ __EXTERN_INLINE void tsunami_writeq(unsigned long b, unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus tsunami_virt_to_bus
-#define bus_to_virt tsunami_bus_to_virt
-
#define __inb tsunami_inb
#define __inw tsunami_inw
#define __inl tsunami_inl
diff --git a/include/asm-alpha/hardirq.h b/include/asm-alpha/hardirq.h
index 67544e13d..86e895b83 100644
--- a/include/asm-alpha/hardirq.h
+++ b/include/asm-alpha/hardirq.h
@@ -23,6 +23,8 @@ extern int __local_irq_count;
(local_irq_count(__cpu) + local_bh_count(__cpu)) != 0; \
})
+#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+
#ifndef __SMP__
#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
index 33c7b0006..59eeb9545 100644
--- a/include/asm-alpha/io.h
+++ b/include/asm-alpha/io.h
@@ -51,7 +51,7 @@ static inline void set_hae(unsigned long new_hae)
/*
* Change virtual addresses to physical addresses and vv.
*/
-static inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(void *address)
{
return (unsigned long)address - IDENT_ADDR;
}
@@ -61,6 +61,36 @@ static inline void * phys_to_virt(unsigned long address)
return (void *) (address + IDENT_ADDR);
}
+/*
+ * Change addresses as seen by the kernel (virtual) to addresses as
+ * seen by a device (bus), and vice versa.
+ *
+ * Note that this only works for a limited range of kernel addresses,
+ * and very well may not span all memory. Consider this interface
+ * deprecated in favour of the mapping functions in <asm/pci.h>.
+ */
+extern unsigned long __direct_map_base;
+extern unsigned long __direct_map_size;
+
+static inline unsigned long virt_to_bus(void *address)
+{
+ unsigned long phys = virt_to_phys(address);
+ unsigned long bus = phys + __direct_map_base;
+ return phys <= __direct_map_size ? bus : 0;
+}
+
+static inline void *bus_to_virt(unsigned long address)
+{
+ void *virt;
+
+ /* This check is a sanity check but also ensures that bus address 0
+ maps to virtual address 0 which is useful to detect null pointers
+ (the NCR driver is much simpler if NULL pointers are preserved). */
+ address -= __direct_map_base;
+ virt = phys_to_virt(address);
+ return (long)address <= 0 ? NULL : virt;
+}
+
#else /* !__KERNEL__ */
/*
@@ -82,9 +112,6 @@ extern void _sethae (unsigned long addr); /* cached version */
/* In a generic kernel, we always go through the machine vector. */
-# define virt_to_bus(a) alpha_mv.mv_virt_to_bus(a)
-# define bus_to_virt(a) alpha_mv.mv_bus_to_virt(a)
-
# define __inb alpha_mv.mv_inb
# define __inw alpha_mv.mv_inw
# define __inl alpha_mv.mv_inl
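A hedged sketch of the new direct-map interface added above (not part of the patch): virt_to_bus() only translates buffers that fall inside the chipset's direct-map window, which is why the comment declares it deprecated in favour of the <asm/pci.h> mapping functions. The kmalloc() buffer below is an assumption for the example:

	#include <linux/malloc.h>
	#include <asm/io.h>

	static dma_addr_t legacy_dma_addr(void **cpu_addr, long size)
	{
		void *buf = kmalloc(size, GFP_ATOMIC);
		if (!buf)
			return 0;
		*cpu_addr = buf;
		/* Returns 0 if the physical address lies outside the
		   __direct_map_base/__direct_map_size window. */
		return virt_to_bus(buf);
	}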
diff --git a/include/asm-alpha/jensen.h b/include/asm-alpha/jensen.h
index 826dfc54d..4ebbb2102 100644
--- a/include/asm-alpha/jensen.h
+++ b/include/asm-alpha/jensen.h
@@ -80,24 +80,6 @@
#endif
/*
- * Change virtual addresses to bus addresses and vv.
- *
- * NOTE! On the Jensen, the physical address is the same
- * as the bus address, but this is not necessarily true on
- * other alpha hardware.
- */
-__EXTERN_INLINE unsigned long jensen_virt_to_bus(void * address)
-{
- return virt_to_phys(address);
-}
-
-__EXTERN_INLINE void * jensen_bus_to_virt(unsigned long address)
-{
- return phys_to_virt(address);
-}
-
-
-/*
* Handle the "host address register". This needs to be set
* to the high 7 bits of the EISA address. This is also needed
* for EISA IO addresses, which are only 16 bits wide (the
@@ -306,8 +288,6 @@ __EXTERN_INLINE int jensen_is_ioaddr(unsigned long addr)
#ifdef __WANT_IO_DEF
-#define virt_to_bus jensen_virt_to_bus
-#define bus_to_virt jensen_bus_to_virt
#define __inb jensen_inb
#define __inw jensen_inw
#define __inl jensen_inl
diff --git a/include/asm-alpha/machvec.h b/include/asm-alpha/machvec.h
index 2c2f82bbd..ccbed15fc 100644
--- a/include/asm-alpha/machvec.h
+++ b/include/asm-alpha/machvec.h
@@ -21,6 +21,7 @@ struct vm_area_struct;
struct linux_hose_info;
struct pci_dev;
struct pci_ops;
+struct pci_controler;
struct alpha_machine_vector
{
@@ -39,8 +40,8 @@ struct alpha_machine_vector
unsigned long min_io_address;
unsigned long min_mem_address;
- unsigned long (*mv_virt_to_bus)(void *);
- void * (*mv_bus_to_virt)(unsigned long);
+ void (*mv_pci_tbi)(struct pci_controler *hose,
+ dma_addr_t start, dma_addr_t end);
unsigned int (*mv_inb)(unsigned long);
unsigned int (*mv_inw)(unsigned long);
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 18e316751..1d150d523 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -22,6 +22,10 @@
#include <asm/io.h>
#endif
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
+
extern inline unsigned long
__reload_thread(struct thread_struct *pcb)
{
diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h
index cc4ecb4bb..f5a9e09b8 100644
--- a/include/asm-alpha/pci.h
+++ b/include/asm-alpha/pci.h
@@ -1,27 +1,49 @@
#ifndef __ALPHA_PCI_H
#define __ALPHA_PCI_H
+#include <linux/spinlock.h>
+#include <asm/scatterlist.h>
#include <asm/machvec.h>
/*
* The following structure is used to manage multiple PCI busses.
*/
+struct pci_dev;
struct pci_bus;
struct resource;
+/* A PCI IOMMU allocation arena. There are typically two of these
+ regions per bus. */
+/* ??? The 8400 has a 32-byte pte entry, and the entire table apparently
+ lives directly on the host bridge (no tlb?). We don't support this
+ machine, but if we ever did, we'd need to parameterize all this quite
+ a bit further. Probably with per-bus operation tables. */
+
+struct pci_iommu_arena
+{
+ spinlock_t lock;
+ unsigned long *ptes;
+ dma_addr_t dma_base;
+ unsigned int size;
+ unsigned int alloc_hint;
+};
+
+/* A controler. Used to manage multiple PCI busses. */
+
struct pci_controler {
- /* Mandated. */
struct pci_controler *next;
struct pci_bus *bus;
struct resource *io_space;
struct resource *mem_space;
- /* Alpha specific. */
unsigned long config_space;
unsigned int index;
unsigned int first_busno;
unsigned int last_busno;
+
+ struct pci_iommu_arena *sg_pci;
+ struct pci_iommu_arena *sg_isa;
};
/* Override the logic in pci_scan_bus for skipping already-configured
@@ -32,5 +54,83 @@ struct pci_controler {
#define PCIBIOS_MIN_IO alpha_mv.min_io_address
#define PCIBIOS_MIN_MEM alpha_mv.min_mem_address
-#endif /* __ALPHA_PCI_H */
+/* IOMMU controls. */
+
+/* Allocate and map a kernel buffer using consistent-mode DMA for a
+ PCI device. Returns a non-NULL cpu-view pointer to the buffer if
+ successful and sets *DMA_ADDRP to the pci-side dma address as well,
+ else DMA_ADDRP is undefined. */
+
+extern void *pci_alloc_consistent(struct pci_dev *, long, dma_addr_t *);
+
+/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must
+ be values that were returned from pci_alloc_consistent. SIZE must
+ be the same as what was passed into pci_alloc_consistent.
+ References to the memory and mappings associated with CPU_ADDR or
+ DMA_ADDR past this call are illegal. */
+
+extern void pci_free_consistent(struct pci_dev *, long, void *, dma_addr_t);
+
+/* Map a single buffer of the indicated size for PCI DMA in streaming
+ mode. The 32-bit PCI bus mastering address to use is returned.
+ Once the device is given the dma address, the device owns this memory
+ until either pci_unmap_single or pci_sync_single is performed. */
+
+extern dma_addr_t pci_map_single(struct pci_dev *, void *, long);
+
+/* Unmap a single streaming mode DMA translation. The DMA_ADDR and
+ SIZE must match what was provided for in a previous pci_map_single
+ call. All other usages are undefined. After this call, reads by
+ the cpu to the buffer are guaranteed to see whatever the device
+ wrote there. */
+extern void pci_unmap_single(struct pci_dev *, dma_addr_t, long);
+
+/* Map a set of buffers described by scatterlist in streaming mode for
+ PCI DMA. This is the scatter-gather version of the above
+ pci_map_single interface. Here the scatter gather list elements
+ are each tagged with the appropriate PCI dma address and length.
+ They are obtained via sg_dma_{address,len}(SG).
+
+ NOTE: An implementation may be able to use a smaller number of DMA
+ address/length pairs than there are SG table elements (for
+ example, via virtual mapping capabilities). The routine returns
+ the number of addr/length pairs actually used, at most nents.
+
+ Device ownership issues as mentioned above for pci_map_single are
+ the same here. */
+
+extern int pci_map_sg(struct pci_dev *, struct scatterlist *, int);
+
+/* Unmap a set of streaming mode DMA translations. Again, cpu read
+ rules concerning calls here are the same as for pci_unmap_single()
+ above. */
+
+extern void pci_unmap_sg(struct pci_dev *, struct scatterlist *, int);
+
+/* Make physical memory consistent for a single streaming mode DMA
+ translation after a transfer.
+
+ If you perform a pci_map_single() but wish to interrogate the
+ buffer using the cpu, yet do not wish to tear down the PCI dma
+ mapping, you must call this function before doing so. At the next
+ point you give the PCI dma address back to the card, the device
+ again owns the buffer. */
+
+extern inline void
+pci_sync_single(struct pci_dev *dev, dma_addr_t dma_addr, long size)
+{
+ /* Nothing to do. */
+}
+
+/* Make physical memory consistent for a set of streaming mode DMA
+ translations after a transfer. The same as pci_sync_single, but
+ for a scatter-gather list; same rules and usage. */
+
+extern inline void
+pci_sync_sg(struct pci_dev *dev, struct scatterlist *sg, int size)
+{
+ /* Nothing to do. */
+}
+
+#endif /* __ALPHA_PCI_H */
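To make the ownership rules above concrete, a minimal usage sketch (not part of the patch; `dev', `data' and the sizes are assumptions for the example):

	#include <asm/pci.h>

	static void dma_usage_sketch(struct pci_dev *dev, void *data, long len)
	{
		dma_addr_t ring_dma, data_dma;
		void *ring;

		/* Long-lived, coherent descriptor memory. */
		ring = pci_alloc_consistent(dev, 4096, &ring_dma);
		if (!ring)
			return;

		/* Streaming mapping: the device owns `data' from here
		   until pci_unmap_single(). */
		data_dma = pci_map_single(dev, data, len);

		/* ... hand ring_dma and data_dma to the hardware ... */

		pci_unmap_single(dev, data_dma, len);
		pci_free_consistent(dev, 4096, ring, ring_dma);
	}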
diff --git a/include/asm-alpha/pgalloc.h b/include/asm-alpha/pgalloc.h
index 5ea0193be..ae10466b8 100644
--- a/include/asm-alpha/pgalloc.h
+++ b/include/asm-alpha/pgalloc.h
@@ -145,6 +145,18 @@ static inline void flush_tlb_range(struct mm_struct *mm,
flush_tlb_mm(mm);
}
+/*
+ * Flush a specified range of user mapping page tables
+ * from TLB.
+ * Although Alpha uses VPTE caches, this can be a nop, as Alpha does
+ * not have finegrained tlb flushing, so it will flush VPTE stuff
+ * during next flush_tlb_range.
+ */
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
#else /* __SMP__ */
extern void flush_tlb_all(void);
diff --git a/include/asm-alpha/scatterlist.h b/include/asm-alpha/scatterlist.h
index 98718797b..b7dad47f3 100644
--- a/include/asm-alpha/scatterlist.h
+++ b/include/asm-alpha/scatterlist.h
@@ -1,13 +1,20 @@
#ifndef _ALPHA_SCATTERLIST_H
#define _ALPHA_SCATTERLIST_H
+#include <linux/types.h>
+
struct scatterlist {
- char * address; /* Location data is to be transferred to */
- char * alt_address; /* Location of actual if address is a
- * dma indirect buffer. NULL otherwise */
- unsigned int length;
+ char *address; /* Source/target vaddr. */
+ char *alt_address; /* Location of actual if address is a
+ dma indirect buffer, else NULL. */
+ dma_addr_t dma_address;
+ unsigned int length;
+ unsigned int dma_length;
};
+#define sg_dma_address(sg) ((sg)->dma_address)
+#define sg_dma_len(sg) ((sg)->dma_length)
+
#define ISA_DMA_THRESHOLD (~0UL)
#endif /* !(_ALPHA_SCATTERLIST_H) */
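The new dma_address/dma_length fields pair with pci_map_sg() from <asm/pci.h>; after mapping, a driver reads the bus-side address and length of each entry through the accessors rather than the cpu-side fields. A sketch under those assumptions (not part of the patch):

	#include <asm/pci.h>
	#include <asm/scatterlist.h>

	static void sg_usage_sketch(struct pci_dev *dev,
				    struct scatterlist *sg, int nents)
	{
		/* The IOMMU may coalesce entries, so the mapped count
		   can be smaller than nents. */
		int i, mapped = pci_map_sg(dev, sg, nents);

		for (i = 0; i < mapped; i++) {
			dma_addr_t addr = sg_dma_address(&sg[i]);
			unsigned int len = sg_dma_len(&sg[i]);
			/* ... program one hardware descriptor with addr/len ... */
		}
		pci_unmap_sg(dev, sg, nents);
	}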
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
index 255888e8a..6cf9873f5 100644
--- a/include/asm-alpha/semaphore.h
+++ b/include/asm-alpha/semaphore.h
@@ -5,7 +5,7 @@
* SMP- and interrupt-safe semaphores..
*
* (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1996 Richard Henderson
+ * (C) Copyright 1996, 2000 Richard Henderson
*/
#include <asm/current.h>
@@ -42,7 +42,7 @@ struct semaphore {
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
-extern inline void sema_init (struct semaphore *sem, int val)
+extern inline void sema_init(struct semaphore *sem, int val)
{
/*
* Logically,
@@ -110,8 +110,8 @@ extern inline void down(struct semaphore * sem)
__asm__ __volatile__ (
"/* semaphore down operation */\n"
"1: ldl_l $24,%1\n"
+ " subl $24,1,$28\n"
" subl $24,1,$24\n"
- " mov $24,$28\n"
" stl_c $28,%1\n"
" beq $28,2f\n"
" blt $24,3f\n"
@@ -146,8 +146,8 @@ extern inline int down_interruptible(struct semaphore * sem)
__asm__ __volatile__ (
"/* semaphore down interruptible operation */\n"
"1: ldl_l $24,%2\n"
+ " subl $24,1,$28\n"
" subl $24,1,$24\n"
- " mov $24,$28\n"
" stl_c $28,%2\n"
" beq $28,2f\n"
" blt $24,3f\n"
@@ -247,11 +247,10 @@ extern inline void up(struct semaphore * sem)
"/* semaphore up operation */\n"
" mb\n"
"1: ldl_l $24,%1\n"
+ " addl $24,1,$28\n"
" addl $24,1,$24\n"
- " mov $24,$28\n"
" stl_c $28,%1\n"
" beq $28,2f\n"
- " mb\n"
" ble $24,3f\n"
"4:\n"
".section .text2,\"ax\"\n"
@@ -266,4 +265,292 @@ extern inline void up(struct semaphore * sem)
: "$24", "$28", "memory");
}
+
+/* rw mutexes (should that be mutices? =) -- throw rw
+ * spinlocks and semaphores together, and this is what we
+ * end up with...
+ *
+ * The lock is initialized to BIAS. This way, a writer
+ * subtracts BIAS and gets 0 for the case of an uncontended
+ * lock. Readers decrement by 1 and see a positive value
+ * when uncontended, negative if there are writers waiting
+ * (in which case it goes to sleep).
+ *
+ * The value 0x01000000 supports up to 128 processors and
+ * lots of processes. BIAS must be chosen such that subtracting
+ * BIAS once per CPU will result in the int remaining
+ * negative.
+ * In terms of fairness, this should result in the lock
+ * flopping back and forth between readers and writers
+ * under heavy use.
+ *
+ * -ben
+ *
+ * Once we start supporting machines with more than 128 CPUs,
+ * we should go for using a 64bit atomic type instead of 32bit
+ * as counter. We shall probably go for bias 0x80000000 then,
+ * so that single sethi can set it.
+ *
+ * -jj
+ */
+
+#define RW_LOCK_BIAS 0x01000000
+
+struct rw_semaphore {
+ int count;
+ /* bit 0 means read bias granted;
+ bit 1 means write bias granted. */
+ unsigned granted;
+ wait_queue_head_t wait;
+ wait_queue_head_t write_bias_wait;
+#if WAITQUEUE_DEBUG
+ long __magic;
+ atomic_t readers;
+ atomic_t writers;
+#endif
+};
+
+#if WAITQUEUE_DEBUG
+#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0)
+#else
+#define __RWSEM_DEBUG_INIT /* */
+#endif
+
+#define __RWSEM_INITIALIZER(name,count) \
+ { (count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
+ __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
+ __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }
+
+#define __DECLARE_RWSEM_GENERIC(name,count) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
+
+#define DECLARE_RWSEM(name) \
+ __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS)
+#define DECLARE_RWSEM_READ_LOCKED(name) \
+ __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS-1)
+#define DECLARE_RWSEM_WRITE_LOCKED(name) \
+ __DECLARE_RWSEM_GENERIC(name, 0)
+
+extern inline void init_rwsem(struct rw_semaphore *sem)
+{
+ sem->count = RW_LOCK_BIAS;
+ sem->granted = 0;
+ init_waitqueue_head(&sem->wait);
+ init_waitqueue_head(&sem->write_bias_wait);
+#if WAITQUEUE_DEBUG
+ sem->__magic = (long)&sem->__magic;
+ atomic_set(&sem->readers, 0);
+ atomic_set(&sem->writers, 0);
+#endif
+}
+
+/* All have custom assembly linkages. */
+extern void __down_read_failed(struct rw_semaphore *sem);
+extern void __down_write_failed(struct rw_semaphore *sem);
+extern void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers);
+
+extern inline void down_read(struct rw_semaphore *sem)
+{
+ /* Given that we have to use particular hard registers to
+ communicate with __down_read_failed anyway, reuse them in
+ the atomic operation as well.
+
+ __down_read_failed takes the semaphore address in $24, the count
+ we read in $25, and its return address in $28. The pv is loaded
+ as usual. The gp is clobbered (in the module case) as usual. */
+
+ /* This little bit of silliness is to get the GP loaded for
+ a function that ordinarily wouldn't. Otherwise we could
+ have it done by the macro directly, which can be optimized
+ by the linker. */
+ register void *pv __asm__("$27");
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ pv = __down_read_failed;
+ __asm__ __volatile__(
+ "/* semaphore down_read operation */\n"
+ "1: ldl_l $24,%1\n"
+ " subl $24,1,$28\n"
+ " subl $24,1,$25\n"
+ " stl_c $28,%1\n"
+ " beq $28,2f\n"
+ " blt $25,3f\n"
+ "4: mb\n"
+ ".section .text2,\"ax\"\n"
+ "2: br 1b\n"
+ "3: lda $24,%1\n"
+ " jsr $28,($27),__down_read_failed\n"
+ " ldgp $29,0($28)\n"
+ " br 4b\n"
+ ".previous"
+ : "=r"(pv)
+ : "m"(sem->count), "r"(pv)
+ : "$24", "$25", "$28", "memory");
+
+#if WAITQUEUE_DEBUG
+ if (sem->granted & 2)
+ BUG();
+ if (atomic_read(&sem->writers))
+ BUG();
+ atomic_inc(&sem->readers);
+#endif
+}
+
+extern inline void down_write(struct rw_semaphore *sem)
+{
+ /* Given that we have to use particular hard registers to
+ communicate with __down_write_failed anyway, reuse them in
+ the atomic operation as well.
+
+ __down_write_failed takes the semaphore address in $24, the count
+ we read in $25, and its return address in $28. The pv is loaded
+ as usual. The gp is clobbered (in the module case) as usual. */
+
+ /* This little bit of silliness is to get the GP loaded for
+ a function that ordinarily wouldn't. Otherwise we could
+ have it done by the macro directly, which can be optimized
+ by the linker. */
+ register void *pv __asm__("$27");
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ pv = __down_write_failed;
+ __asm__ __volatile__(
+ "/* semaphore down_write operation */\n"
+ "1: ldl_l $24,%1\n"
+ " ldah $28,%3($24)\n"
+ " ldah $25,%3($24)\n"
+ " stl_c $28,%1\n"
+ " beq $28,2f\n"
+ " bne $25,3f\n"
+ "4: mb\n"
+ ".section .text2,\"ax\"\n"
+ "2: br 1b\n"
+ "3: lda $24,%1\n"
+ " jsr $28,($27),__down_write_failed\n"
+ " ldgp $29,0($28)\n"
+ " br 4b\n"
+ ".previous"
+ : "=r"(pv)
+ : "m"(sem->count), "r"(pv), "i"(-(RW_LOCK_BIAS >> 16))
+ : "$24", "$25", "$28", "memory");
+
+#if WAITQUEUE_DEBUG
+ if (atomic_read(&sem->writers))
+ BUG();
+ if (atomic_read(&sem->readers))
+ BUG();
+ if (sem->granted & 3)
+ BUG();
+ atomic_inc(&sem->writers);
+#endif
+}
+
+/* When a reader does a release, the only significant case is when
+ there was a writer waiting and we've bumped the count to 0: we
+ must wake the writer up. */
+
+extern inline void up_read(struct rw_semaphore *sem)
+{
+ /* Given that we have to use particular hard registers to
+ communicate with __rwsem_wake anyway, reuse them in
+ the atomic operation as well.
+
+ __rwsem_wake takes the semaphore address in $24, the
+ number of waiting readers in $25, and its return address
+ in $28. The pv is loaded as usual. The gp is clobbered
+ (in the module case) as usual. */
+
+ register void *pv __asm__("$27");
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+ if (sem->granted & 2)
+ BUG();
+ if (atomic_read(&sem->writers))
+ BUG();
+ atomic_dec(&sem->readers);
+#endif
+
+ pv = __rwsem_wake;
+ __asm__ __volatile__(
+ "/* semaphore up_read operation */\n"
+ " mb\n"
+ "1: ldl_l $24,%1\n"
+ " addl $24,1,$28\n"
+ " addl $24,1,$24\n"
+ " stl_c $28,%1\n"
+ " beq $28,2f\n"
+ " beq $24,3f\n"
+ "4:\n"
+ ".section .text2,\"ax\"\n"
+ "2: br 1b\n"
+ "3: lda $24,%1\n"
+ " mov 0,$25\n"
+ " jsr $28,($27),__rwsem_wake\n"
+ " ldgp $29,0($28)\n"
+ " br 4b\n"
+ ".previous"
+ : "=r"(pv)
+ : "m"(sem->count), "r"(pv)
+ : "$24", "$25", "$28", "memory");
+}
+
+/* releasing the writer is easy -- just release it and
+ * wake up any sleepers.
+ */
+extern inline void up_write(struct rw_semaphore *sem)
+{
+ /* Given that we have to use particular hard registers to
+ communicate with __rwsem_wake anyway, reuse them in
+ the atomic operation as well.
+
+ __rwsem_wake takes the semaphore address in $24, the
+ number of waiting readers in $25, and it's return address
+ in $28. The pv is loaded as usual. The gp is clobbered
+ (in the module case) as usual. */
+
+ register void *pv __asm__("$27");
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+ if (sem->granted & 3)
+ BUG();
+ if (atomic_read(&sem->readers))
+ BUG();
+ if (atomic_read(&sem->writers) != 1)
+ BUG();
+ atomic_dec(&sem->writers);
+#endif
+
+ pv = __rwsem_wake;
+ __asm__ __volatile__(
+ "/* semaphore up_write operation */\n"
+ " mb\n"
+ "1: ldl_l $24,%1\n"
+ " ldah $28,%3($24)\n"
+ " stl_c $28,%1\n"
+ " beq $28,2f\n"
+ " blt $24,3f\n"
+ "4:\n"
+ ".section .text2,\"ax\"\n"
+ "2: br 1b\n"
+ "3: ldah $25,%3($24)\n"
+ /* Only do the wake if we're no longer negative. */
+ " blt $25,4b\n"
+ " lda $24,%1\n"
+ " jsr $28,($27),__rwsem_wake\n"
+ " ldgp $29,0($28)\n"
+ " br 4b\n"
+ ".previous"
+ : "=r"(pv)
+ : "m"(sem->count), "r"(pv), "i"(RW_LOCK_BIAS >> 16)
+ : "$24", "$25", "$28", "memory");
+}
+
#endif
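Worked through with concrete counter values (an illustration, not part of the patch), the bias scheme described in the comment above behaves like this:

	#define RW_LOCK_BIAS 0x01000000

	/* unlocked:                  count == RW_LOCK_BIAS
	   one reader (down_read):    count == RW_LOCK_BIAS - 1  -> positive, granted
	   one writer (down_write):   count == 0                 -> granted
	   reader vs. held writer:    count == -1                -> negative, sleeps

	   Worst case, 128 CPUs all subtract BIAS from an unlocked count:
	   RW_LOCK_BIAS - 128*RW_LOCK_BIAS = -127 * 0x01000000 = -0x7f000000,
	   still negative and still representable in a 32-bit int, which is
	   where the 128-processor limit mentioned in the comment comes from. */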
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index e85061736..e342d9bd0 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -42,7 +42,8 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
/* Map from cpu id to sequential logical cpu number. This will only
not be idempotent when cpus failed to come on-line. */
-extern int cpu_number_map[NR_CPUS];
+extern int __cpu_number_map[NR_CPUS];
+#define cpu_number_map(cpu) __cpu_number_map[cpu]
/* The reverse map from sequential logical cpu number to cpu id. */
extern int __cpu_logical_map[NR_CPUS];
diff --git a/include/asm-alpha/softirq.h b/include/asm-alpha/softirq.h
index d49064790..8b2713ed6 100644
--- a/include/asm-alpha/softirq.h
+++ b/include/asm-alpha/softirq.h
@@ -24,132 +24,9 @@ extern inline void cpu_bh_enable(int cpu)
local_bh_count(cpu)--;
}
-extern inline int cpu_bh_trylock(int cpu)
-{
- return local_bh_count(cpu) ? 0 : (local_bh_count(cpu) = 1);
-}
-
-extern inline void cpu_bh_endlock(int cpu)
-{
- local_bh_count(cpu) = 0;
-}
-
#define local_bh_enable() cpu_bh_enable(smp_processor_id())
#define local_bh_disable() cpu_bh_disable(smp_processor_id())
-#define get_active_bhs() (bh_mask & bh_active)
-
-static inline void clear_active_bhs(unsigned long x)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " bic %0,%2,%0\n"
- " stq_c %0,%1\n"
- " beq %0,2f\n"
- ".section .text2,\"ax\"\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (bh_active)
- :"Ir" (x), "m" (bh_active));
-}
-
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
-}
-
-extern inline void remove_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- wmb();
- bh_base[nr] = NULL;
-}
-
-extern inline void mark_bh(int nr)
-{
- set_bit(nr, &bh_active);
-}
-
-#ifdef __SMP__
-
-/*
- * The locking mechanism for base handlers, to prevent re-entrancy,
- * is entirely private to an implementation, it should not be
- * referenced at all outside of this file.
- */
-extern atomic_t global_bh_lock;
-extern atomic_t global_bh_count;
-
-extern void synchronize_bh(void);
-
-static inline void start_bh_atomic(void)
-{
- atomic_inc(&global_bh_lock);
- synchronize_bh();
-}
-
-static inline void end_bh_atomic(void)
-{
- atomic_dec(&global_bh_lock);
-}
-
-/* These are for the irq's testing the lock */
-static inline int softirq_trylock(int cpu)
-{
- if (cpu_bh_trylock(cpu)) {
- if (!test_and_set_bit(0, &global_bh_count)) {
- if (atomic_read(&global_bh_lock) == 0)
- return 1;
- clear_bit(0, &global_bh_count);
- }
- cpu_bh_endlock(cpu);
- }
- return 0;
-}
-
-static inline void softirq_endlock(int cpu)
-{
- cpu_bh_enable(cpu);
- clear_bit(0, &global_bh_count);
-}
-
-#else
-
-extern inline void start_bh_atomic(void)
-{
- local_bh_disable();
-}
-
-extern inline void end_bh_atomic(void)
-{
- local_bh_enable();
-}
-
-/* These are for the irq's testing the lock */
-#define softirq_trylock(cpu) cpu_bh_trylock(cpu)
-#define softirq_endlock(cpu) cpu_bh_endlock(cpu)
-#define synchronize_bh() barrier()
-
-#endif /* SMP */
-
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
- synchronize_bh();
-}
-
-extern inline void enable_bh(int nr)
-{
- if (atomic_dec_and_test(&bh_mask_count[nr]))
- bh_mask |= 1 << nr;
-}
+#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
#endif /* _ALPHA_SOFTIRQ_H */
diff --git a/include/asm-alpha/types.h b/include/asm-alpha/types.h
index 381d5f044..78ef3dda4 100644
--- a/include/asm-alpha/types.h
+++ b/include/asm-alpha/types.h
@@ -25,23 +25,9 @@ typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
-/*
- * There are 32-bit compilers for the alpha out there..
- */
-#if ((~0UL) == 0xffffffff)
-
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
-typedef __signed__ long long __s64;
-typedef unsigned long long __u64;
-#endif
-
-#else
-
typedef __signed__ long __s64;
typedef unsigned long __u64;
-#endif
-
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
@@ -56,22 +42,15 @@ typedef unsigned short u16;
typedef signed int s32;
typedef unsigned int u32;
-/*
- * There are 32-bit compilers for the alpha out there..
- */
-#if ((~0UL) == 0xffffffff)
-
-typedef signed long long s64;
-typedef unsigned long long u64;
-#define BITS_PER_LONG 32
-
-#else
-
typedef signed long s64;
typedef unsigned long u64;
+
#define BITS_PER_LONG 64
-#endif
+/* PCI dma addresses are 32-bits wide. Ignore PCI64 for now, since
+ we'll typically be sending it all through iommu tables anyway. */
+
+typedef u32 dma_addr_t;
#endif /* __KERNEL__ */
#endif /* _ALPHA_TYPES_H */