author     Ralf Baechle <ralf@linux-mips.org>   2000-02-23 00:40:54 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   2000-02-23 00:40:54 +0000
commit     529c593ece216e4aaffd36bd940cb94f1fa63129
tree       78f1c0b805f5656aa7b0417a043c5346f700a2cf /arch/alpha
parent     0bd079751d25808d1972baee5c4eaa1db2227257
Merge with 2.3.43. I did ignore all modifications to the qlogicisp.c
driver due to the Origin A64 hacks.
Diffstat (limited to 'arch/alpha')
-rw-r--r--  arch/alpha/config.in               |    6
-rw-r--r--  arch/alpha/kernel/Makefile         |    4
-rw-r--r--  arch/alpha/kernel/alpha_ksyms.c    |    8
-rw-r--r--  arch/alpha/kernel/core_apecs.c     |   51
-rw-r--r--  arch/alpha/kernel/core_cia.c       |  144
-rw-r--r--  arch/alpha/kernel/core_irongate.c  |    4
-rw-r--r--  arch/alpha/kernel/core_lca.c       |   52
-rw-r--r--  arch/alpha/kernel/core_mcpcia.c    |   47
-rw-r--r--  arch/alpha/kernel/core_polaris.c   |    6
-rw-r--r--  arch/alpha/kernel/core_pyxis.c     |  236
-rw-r--r--  arch/alpha/kernel/core_t2.c        |    4
-rw-r--r--  arch/alpha/kernel/core_tsunami.c   |   75
-rw-r--r--  arch/alpha/kernel/entry.S          |   24
-rw-r--r--  arch/alpha/kernel/irq.c            |   40
-rw-r--r--  arch/alpha/kernel/machvec_impl.h   |    3
-rw-r--r--  arch/alpha/kernel/pci.c            |    1
-rw-r--r--  arch/alpha/kernel/pci_impl.h       |    7
-rw-r--r--  arch/alpha/kernel/pci_iommu.c      |  531
-rw-r--r--  arch/alpha/kernel/proto.h          |   10
-rw-r--r--  arch/alpha/kernel/semaphore.c      |  168
-rw-r--r--  arch/alpha/kernel/setup.c          |   39
-rw-r--r--  arch/alpha/kernel/smp.c            |   50
-rw-r--r--  arch/alpha/kernel/sys_jensen.c     |   11
-rw-r--r--  arch/alpha/kernel/sys_sio.c        |   50
-rw-r--r--  arch/alpha/lib/semaphore.S         |  167
-rw-r--r--  arch/alpha/mm/init.c               |    6
-rw-r--r--  arch/alpha/vmlinux.lds             |    2
27 files changed, 1387 insertions(+), 359 deletions(-)
diff --git a/arch/alpha/config.in b/arch/alpha/config.in
index ce5c0853e..8e44bb0e2 100644
--- a/arch/alpha/config.in
+++ b/arch/alpha/config.in
@@ -55,12 +55,16 @@ choice 'Alpha system type' \
# clear all implied options (don't want default values for those):
unset CONFIG_ALPHA_EV4 CONFIG_ALPHA_EV5 CONFIG_ALPHA_EV6
-unset CONFIG_PCI CONFIG_ALPHA_EISA
+unset CONFIG_PCI CONFIG_ISA CONFIG_ALPHA_EISA
unset CONFIG_ALPHA_LCA CONFIG_ALPHA_APECS CONFIG_ALPHA_CIA
unset CONFIG_ALPHA_T2 CONFIG_ALPHA_PYXIS CONFIG_ALPHA_POLARIS
unset CONFIG_ALPHA_TSUNAMI CONFIG_ALPHA_MCPCIA
unset CONFIG_ALPHA_IRONGATE
+# Most of these machines have ISA slots; not exactly sure which don't,
+# and this doesn't activate hordes of code, so do it always.
+define_bool CONFIG_ISA y
+
if [ "$CONFIG_ALPHA_GENERIC" = "y" ]
then
define_bool CONFIG_PCI y
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 9210ae57c..7cf5dae80 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -26,11 +26,11 @@ O_OBJS += core_apecs.o core_cia.o core_irongate.o core_lca.o core_mcpcia.o \
sys_jensen.o sys_miata.o sys_mikasa.o sys_nautilus.o \
sys_noritake.o sys_rawhide.o sys_ruffian.o sys_rx164.o \
sys_sable.o sys_sio.o sys_sx164.o sys_takara.o sys_rx164.o \
- es1888.o smc37c669.o smc37c93x.o ns87312.o pci.o
+ es1888.o smc37c669.o smc37c93x.o ns87312.o pci.o pci_iommu.o
else
ifdef CONFIG_PCI
-O_OBJS += pci.o
+O_OBJS += pci.o pci_iommu.o
endif
# Core logic support
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index 877926974..15c7afd8c 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -149,6 +149,9 @@ EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL_NOVERS(__down_failed);
EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
EXPORT_SYMBOL_NOVERS(__up_wakeup);
+EXPORT_SYMBOL_NOVERS(__down_read_failed);
+EXPORT_SYMBOL_NOVERS(__down_write_failed);
+EXPORT_SYMBOL_NOVERS(__rwsem_wake);
/*
* SMP-specific symbols.
@@ -161,10 +164,7 @@ EXPORT_SYMBOL(flush_tlb_mm);
EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_range);
EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(cpu_number_map);
-EXPORT_SYMBOL(global_bh_lock);
-EXPORT_SYMBOL(global_bh_count);
-EXPORT_SYMBOL(synchronize_bh);
+EXPORT_SYMBOL(__cpu_number_map);
EXPORT_SYMBOL(global_irq_holder);
EXPORT_SYMBOL(__global_cli);
EXPORT_SYMBOL(__global_sti);
diff --git a/arch/alpha/kernel/core_apecs.c b/arch/alpha/kernel/core_apecs.c
index 04e556f2e..9ea4f53e9 100644
--- a/arch/alpha/kernel/core_apecs.c
+++ b/arch/alpha/kernel/core_apecs.c
@@ -356,22 +356,49 @@ struct pci_ops apecs_pci_ops =
write_dword: apecs_write_config_dword
};
+void
+apecs_pci_tbi(struct pci_controler *hose, dma_addr_t start, dma_addr_t end)
+{
+ wmb();
+ *(vip)APECS_IOC_TBIA = 0;
+ mb();
+}
+
void __init
apecs_init_arch(void)
{
struct pci_controler *hose;
/*
- * Set up the PCI->physical memory translation windows.
- * For now, window 2 is disabled. In the future, we may
- * want to use it to do scatter/gather DMA. Window 1
- * goes at 1 GB and is 1 GB large.
+ * Create our single hose.
*/
- *(vuip)APECS_IOC_PB1R = 1UL << 19 | (APECS_DMA_WIN_BASE & 0xfff00000U);
- *(vuip)APECS_IOC_PM1R = (APECS_DMA_WIN_SIZE - 1) & 0xfff00000U;
+
+ pci_isa_hose = hose = alloc_pci_controler();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->config_space = APECS_CONF;
+ hose->index = 0;
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Window 1 is direct access 1GB at 1GB
+ * Window 2 is scatter-gather 8MB at 8MB (for isa)
+ */
+ hose->sg_isa = iommu_arena_new(0x00800000, 0x00800000, PAGE_SIZE);
+ hose->sg_pci = NULL;
+ __direct_map_base = 0x40000000;
+ __direct_map_size = 0x40000000;
+
+ *(vuip)APECS_IOC_PB1R = __direct_map_base | 0x00080000;
+ *(vuip)APECS_IOC_PM1R = (__direct_map_size - 1) & 0xfff00000U;
*(vuip)APECS_IOC_TB1R = 0;
- *(vuip)APECS_IOC_PB2R = 0U; /* disable window 2 */
+ *(vuip)APECS_IOC_PB2R = hose->sg_isa->dma_base | 0x000c0000;
+ *(vuip)APECS_IOC_PM2R = (hose->sg_isa->size - 1) & 0xfff00000;
+ *(vuip)APECS_IOC_TB2R = virt_to_phys(hose->sg_isa->ptes) >> 1;
+
+ apecs_pci_tbi(hose, 0, -1);
/*
* Finally, clear the HAXR2 register, which gets used
@@ -381,16 +408,6 @@ apecs_init_arch(void)
*/
*(vuip)APECS_IOC_HAXR2 = 0;
mb();
-
- /*
- * Create our single hose.
- */
-
- hose = alloc_pci_controler();
- hose->io_space = &ioport_resource;
- hose->mem_space = &iomem_resource;
- hose->config_space = APECS_CONF;
- hose->index = 0;
}
void
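Every chipset converted in this merge follows the pattern introduced here for APECS: one direct-map window (PCI bus address = physical address + __direct_map_base) plus a scatter-gather window backed by an in-memory pte table, invalidated through the new per-machine pci_tbi hook. As a rough sketch of the translation the two windows programmed above ask the hardware to perform (the helper is hypothetical; the pte format follows mk_iommu_pte() in pci_iommu.c further down):

/*
 * Hypothetical illustration, not chip code: how APECS resolves an
 * incoming PCI bus address with the windows set up above.
 */
static unsigned long
apecs_dma_translate_sketch(struct pci_iommu_arena *sg_isa, unsigned long bus)
{
	/* Window 1: direct access, 1GB at 1GB (TB1R == 0). */
	if (bus >= 0x40000000UL && bus < 0x80000000UL)
		return bus - 0x40000000UL;

	/* Window 2: scatter-gather, 8MB at 8MB.  The chip fetches a
	   pte from the table at TB2R; bit 0 is the valid bit, the
	   rest is the page frame number shifted up by one. */
	if (bus >= 0x00800000UL && bus < 0x01000000UL) {
		unsigned long pte =
			sg_isa->ptes[(bus - sg_isa->dma_base) >> PAGE_SHIFT];
		if (!(pte & 1))
			return ~0UL;	/* invalid -> master abort */
		return ((pte >> 1) << PAGE_SHIFT) | (bus & ~PAGE_MASK);
	}
	return ~0UL;
}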
diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c
index 9f628bff6..36b9a1fa9 100644
--- a/arch/alpha/kernel/core_cia.c
+++ b/arch/alpha/kernel/core_cia.c
@@ -314,12 +314,20 @@ struct pci_ops cia_pci_ops =
write_dword: cia_write_config_dword
};
+void
+cia_pci_tbi(struct pci_controler *hose, dma_addr_t start, dma_addr_t end)
+{
+ wmb();
+ *(vip)CIA_IOC_PCI_TBIA = 3; /* Flush all locked and unlocked. */
+ mb();
+}
+
void __init
cia_init_arch(void)
{
struct pci_controler *hose;
struct resource *hae_mem;
- unsigned int temp;
+ unsigned int temp;
#if DEBUG_DUMP_REGS
temp = *(vuip)CIA_IOC_CIA_REV; mb();
@@ -368,63 +376,11 @@ cia_init_arch(void)
printk("cia_init: W3_BASE was 0x%x\n", temp);
#endif /* DEBUG_DUMP_REGS */
- /*
- * Set up error reporting.
- */
- temp = *(vuip)CIA_IOC_CIA_ERR;
- temp |= 0x180; /* master, target abort */
- *(vuip)CIA_IOC_CIA_ERR = temp;
- mb();
-
- temp = *(vuip)CIA_IOC_CIA_CTRL;
- temp |= 0x400; /* turn on FILL_ERR to get mchecks */
- *(vuip)CIA_IOC_CIA_CTRL = temp;
- mb();
-
- /*
- * Set up the PCI->physical memory translation windows.
- * For now, windows 2 and 3 are disabled. In the future,
- * we may want to use them to do scatter/gather DMA.
- *
- * Window 0 goes at 1 GB and is 1 GB large.
- * Window 1 goes at 2 GB and is 1 GB large.
- */
-
- *(vuip)CIA_IOC_PCI_W0_BASE = CIA_DMA_WIN0_BASE_DEFAULT | 1U;
- *(vuip)CIA_IOC_PCI_W0_MASK = (CIA_DMA_WIN0_SIZE_DEFAULT - 1) &
- 0xfff00000U;
- *(vuip)CIA_IOC_PCI_T0_BASE = CIA_DMA_WIN0_TRAN_DEFAULT >> 2;
-
- *(vuip)CIA_IOC_PCI_W1_BASE = CIA_DMA_WIN1_BASE_DEFAULT | 1U;
- *(vuip)CIA_IOC_PCI_W1_MASK = (CIA_DMA_WIN1_SIZE_DEFAULT - 1) &
- 0xfff00000U;
- *(vuip)CIA_IOC_PCI_T1_BASE = CIA_DMA_WIN1_TRAN_DEFAULT >> 2;
-
- *(vuip)CIA_IOC_PCI_W2_BASE = 0x0;
- *(vuip)CIA_IOC_PCI_W3_BASE = 0x0;
- mb();
-
- /*
- * Next, clear the CIA_CFG register, which gets used
- * for PCI Config Space accesses. That is the way
- * we want to use it, and we do not want to depend on
- * what ARC or SRM might have left behind...
- */
- *((vuip)CIA_IOC_CFG) = 0; mb();
-
- /*
- * Zero the HAEs.
- */
- *((vuip)CIA_IOC_HAE_MEM) = 0; mb();
- *((vuip)CIA_IOC_HAE_MEM); /* read it back. */
- *((vuip)CIA_IOC_HAE_IO) = 0; mb();
- *((vuip)CIA_IOC_HAE_IO); /* read it back. */
-
/*
* Create our single hose.
*/
- hose = alloc_pci_controler();
+ pci_isa_hose = hose = alloc_pci_controler();
hae_mem = alloc_resource();
hose->io_space = &ioport_resource;
@@ -439,6 +395,64 @@ cia_init_arch(void)
if (request_resource(&iomem_resource, hae_mem) < 0)
printk(KERN_ERR "Failed to request HAE_MEM\n");
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Window 0 is scatter-gather 8MB at 8MB (for isa)
+ * Window 1 is scatter-gather 128MB at 1GB
+ * Window 2 is direct access 2GB at 2GB
+ * ??? We ought to scale window 1 with memory.
+ */
+
+ /* NetBSD hints that page tables must be aligned to 32K due
+ to a hardware bug. No description of what models affected. */
+ hose->sg_isa = iommu_arena_new(0x00800000, 0x00800000, 32768);
+ hose->sg_pci = iommu_arena_new(0x40000000, 0x08000000, 32768);
+ __direct_map_base = 0x80000000;
+ __direct_map_size = 0x80000000;
+
+ *(vuip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3;
+ *(vuip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000;
+ *(vuip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;
+
+ *(vuip)CIA_IOC_PCI_W1_BASE = hose->sg_pci->dma_base | 3;
+ *(vuip)CIA_IOC_PCI_W1_MASK = (hose->sg_pci->size - 1) & 0xfff00000;
+ *(vuip)CIA_IOC_PCI_T1_BASE = virt_to_phys(hose->sg_pci->ptes) >> 2;
+
+ *(vuip)CIA_IOC_PCI_W2_BASE = __direct_map_base | 1;
+ *(vuip)CIA_IOC_PCI_W2_MASK = (__direct_map_size - 1) & 0xfff00000;
+ *(vuip)CIA_IOC_PCI_T2_BASE = 0;
+
+ *(vuip)CIA_IOC_PCI_W3_BASE = 0;
+
+ cia_pci_tbi(hose, 0, -1);
+
+ /*
+ * Set up error reporting.
+ */
+ temp = *(vuip)CIA_IOC_CIA_ERR;
+ temp |= 0x180; /* master, target abort */
+ *(vuip)CIA_IOC_CIA_ERR = temp;
+
+ temp = *(vuip)CIA_IOC_CIA_CTRL;
+ temp |= 0x400; /* turn on FILL_ERR to get mchecks */
+ *(vuip)CIA_IOC_CIA_CTRL = temp;
+
+ /*
+ * Next, clear the CIA_CFG register, which gets used
+ * for PCI Config Space accesses. That is the way
+ * we want to use it, and we do not want to depend on
+ * what ARC or SRM might have left behind...
+ */
+ *(vuip)CIA_IOC_CFG = 0;
+
+ /*
+ * Zero the HAEs.
+ */
+ *(vuip)CIA_IOC_HAE_MEM = 0;
+ *(vuip)CIA_IOC_HAE_IO = 0;
+ mb();
}
static inline void
@@ -456,6 +470,8 @@ void
cia_machine_check(unsigned long vector, unsigned long la_ptr,
struct pt_regs * regs)
{
+ int expected;
+
/* Clear the error before any reporting. */
mb();
mb(); /* magic */
@@ -464,5 +480,23 @@ cia_machine_check(unsigned long vector, unsigned long la_ptr,
wrmces(rdmces()); /* reset machine check pending flag. */
mb();
- process_mcheck_info(vector, la_ptr, regs, "CIA", mcheck_expected(0));
+ expected = mcheck_expected(0);
+ if (!expected && vector == 0x660) {
+ struct el_common *com;
+ struct el_common_EV5_uncorrectable_mcheck *ev5;
+ struct el_CIA_sysdata_mcheck *cia;
+
+ com = (void *)la_ptr;
+ ev5 = (void *)(la_ptr + com->proc_offset);
+ cia = (void *)(la_ptr + com->sys_offset);
+
+ if (com->code == 0x202) {
+ printk(KERN_CRIT "CIA PCI machine check: err0=%08x "
+ "err1=%08x err2=%08x\n",
+ (int) cia->pci_err0, (int) cia->pci_err1,
+ (int) cia->pci_err2);
+ expected = 1;
+ }
+ }
+ process_mcheck_info(vector, la_ptr, regs, "CIA", expected);
}
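Both the CIA and PYXIS conversions request the 32K pte-table alignment that the NetBSD hint calls for; the constructor is the one added in pci_iommu.c below. A usage sketch mirroring the call above (the wrapper function is illustrative only):

/* Sketch of the arena constructor's contract (see iommu_arena_new()
   in pci_iommu.c below): one pte per PAGE_SIZE of window, the table
   allocated from bootmem, physically contiguous, and aligned to the
   requested boundary -- 32K here per the NetBSD errata hint. */
static void cia_arena_example(struct pci_controler *hose)
{
	hose->sg_isa = iommu_arena_new(0x00800000,	/* bus base: 8MB */
				       0x00800000,	/* window size: 8MB */
				       32768);		/* pte table alignment */
}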
diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c
index 81615dbdf..5109f07ae 100644
--- a/arch/alpha/kernel/core_irongate.c
+++ b/arch/alpha/kernel/core_irongate.c
@@ -351,4 +351,8 @@ irongate_init_arch(void)
hose->mem_space = &iomem_resource;
hose->config_space = IRONGATE_CONF;
hose->index = 0;
+
+ hose->sg_isa = hose->sg_pci = NULL;
+ __direct_map_base = 0;
+ __direct_map_size = 0xffffffff;
}
diff --git a/arch/alpha/kernel/core_lca.c b/arch/alpha/kernel/core_lca.c
index 266023f77..2bee78a1b 100644
--- a/arch/alpha/kernel/core_lca.c
+++ b/arch/alpha/kernel/core_lca.c
@@ -278,23 +278,51 @@ struct pci_ops lca_pci_ops =
write_dword: lca_write_config_dword
};
+void
+lca_pci_tbi(struct pci_controler *hose, dma_addr_t start, dma_addr_t end)
+{
+ wmb();
+ *(vip)LCA_IOC_TBIA = 0;
+ mb();
+}
+
void __init
lca_init_arch(void)
{
struct pci_controler *hose;
/*
- * Set up the PCI->physical memory translation windows.
- * For now, window 1 is disabled. In the future, we may
- * want to use it to do scatter/gather DMA.
+ * Create our single hose.
+ */
+
+ pci_isa_hose = hose = alloc_pci_controler();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->config_space = LCA_CONF;
+ hose->index = 0;
+
+ /*
+ * Set up the PCI to main memory translation windows.
*
- * Window 0 goes at 1 GB and is 1 GB large.
+ * Window 0 is direct access 1GB at 1GB
+ * Window 1 is scatter-gather 8MB at 8MB (for isa)
*/
- *(vulp)LCA_IOC_W_BASE0 = 1UL << 33 | LCA_DMA_WIN_BASE;
- *(vulp)LCA_IOC_W_MASK0 = LCA_DMA_WIN_SIZE - 1;
+ hose->sg_isa = iommu_arena_new(0x00800000, 0x00800000, PAGE_SIZE);
+ hose->sg_pci = NULL;
+ __direct_map_base = 0x40000000;
+ __direct_map_size = 0x40000000;
+
+ *(vulp)LCA_IOC_W_BASE0 = __direct_map_base | (2UL << 32);
+ *(vulp)LCA_IOC_W_MASK0 = (__direct_map_size - 1) & 0xfff00000;
*(vulp)LCA_IOC_T_BASE0 = 0;
- *(vulp)LCA_IOC_W_BASE1 = 0UL;
+ *(vulp)LCA_IOC_W_BASE1 = hose->sg_isa->dma_base | (3UL << 32);
+ *(vulp)LCA_IOC_W_MASK1 = (hose->sg_isa->size - 1) & 0xfff00000;
+ *(vulp)LCA_IOC_T_BASE1 = virt_to_phys(hose->sg_isa->ptes);
+
+ *(vulp)LCA_IOC_TB_ENA = 0x80;
+
+ lca_pci_tbi(hose, 0, -1);
/*
* Disable PCI parity for now. The NCR53c810 chip has
@@ -302,16 +330,6 @@ lca_init_arch(void)
* data parity errors.
*/
*(vulp)LCA_IOC_PAR_DIS = 1UL<<5;
-
- /*
- * Create our single hose.
- */
-
- hose = alloc_pci_controler();
- hose->io_space = &ioport_resource;
- hose->mem_space = &iomem_resource;
- hose->config_space = LCA_CONF;
- hose->index = 0;
}
/*
diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c
index 847958212..19ceb2241 100644
--- a/arch/alpha/kernel/core_mcpcia.c
+++ b/arch/alpha/kernel/core_mcpcia.c
@@ -293,6 +293,14 @@ struct pci_ops mcpcia_pci_ops =
write_dword: mcpcia_write_config_dword
};
+void
+mcpcia_pci_tbi(struct pci_controler *hose, dma_addr_t start, dma_addr_t end)
+{
+ wmb();
+ BUG();
+ mb();
+}
+
static int __init
mcpcia_probe_hose(int h)
{
@@ -395,31 +403,36 @@ mcpcia_startup_hose(struct pci_controler *hose)
/*
* Set up the PCI->physical memory translation windows.
- * For now, windows 1,2 and 3 are disabled. In the
- * future, we may want to use them to do scatter/
- * gather DMA.
*
- * Window 0 goes at 2 GB and is 2 GB large.
+ * Window 0 is scatter-gather 8MB at 8MB (for isa)
+ * Window 1 is scatter-gather 128MB at 1GB
+ * Window 2 is direct access 2GB at 2GB
+ * ??? We ought to scale window 1 with memory.
*/
- *(vuip)MCPCIA_W0_BASE(mid) = 1U | (MCPCIA_DMA_WIN_BASE & 0xfff00000U);
- *(vuip)MCPCIA_W0_MASK(mid) = (MCPCIA_DMA_WIN_SIZE - 1) & 0xfff00000U;
- *(vuip)MCPCIA_T0_BASE(mid) = 0;
+ hose->sg_isa = iommu_arena_new(0x00800000, 0x00800000, PAGE_SIZE);
+ hose->sg_pci = iommu_arena_new(0x40000000, 0x08000000, PAGE_SIZE);
+ __direct_map_base = 0x80000000;
+ __direct_map_size = 0x80000000;
+
+ *(vuip)MCPCIA_W0_BASE(mid) = hose->sg_isa->dma_base | 3;
+ *(vuip)MCPCIA_W0_MASK(mid) = (hose->sg_isa->size - 1) & 0xfff00000;
+ *(vuip)MCPCIA_T0_BASE(mid) = virt_to_phys(hose->sg_isa->ptes) >> 2;
+
+ *(vuip)MCPCIA_W1_BASE(mid) = hose->sg_pci->dma_base | 3;
+ *(vuip)MCPCIA_W1_MASK(mid) = (hose->sg_pci->size - 1) & 0xfff00000;
+ *(vuip)MCPCIA_T1_BASE(mid) = virt_to_phys(hose->sg_pci->ptes) >> 2;
+
+ *(vuip)MCPCIA_W2_BASE(mid) = __direct_map_base | 1;
+ *(vuip)MCPCIA_W2_MASK(mid) = (__direct_map_size - 1) & 0xfff00000;
+ *(vuip)MCPCIA_T2_BASE(mid) = 0;
- *(vuip)MCPCIA_W1_BASE(mid) = 0x0;
- *(vuip)MCPCIA_W2_BASE(mid) = 0x0;
*(vuip)MCPCIA_W3_BASE(mid) = 0x0;
- *(vuip)MCPCIA_HBASE(mid) = 0x0;
- mb();
+ mcpcia_pci_tbi(hose, 0, -1);
-#if 0
- tmp = *(vuip)MCPCIA_INT_CTL(mid);
- printk("mcpcia_startup_hose: INT_CTL was 0x%x\n", tmp);
- *(vuip)MCPCIA_INT_CTL(mid) = 1U;
+ *(vuip)MCPCIA_HBASE(mid) = 0x0;
mb();
- tmp = *(vuip)MCPCIA_INT_CTL(mid);
-#endif
*(vuip)MCPCIA_HAE_MEM(mid) = 0U;
mb();
diff --git a/arch/alpha/kernel/core_polaris.c b/arch/alpha/kernel/core_polaris.c
index 972e707cb..0164a3e15 100644
--- a/arch/alpha/kernel/core_polaris.c
+++ b/arch/alpha/kernel/core_polaris.c
@@ -197,6 +197,12 @@ polaris_init_arch(void)
hose->mem_space = &iomem_resource;
hose->config_space = POLARIS_DENSE_CONFIG_BASE;
hose->index = 0;
+
+ hose->sg_isa = hose->sg_pci = NULL;
+
+ /* The I/O window is fixed at 2G @ 2G. */
+ __direct_map_base = 0x80000000;
+ __direct_map_size = 0x80000000;
}
static inline void
diff --git a/arch/alpha/kernel/core_pyxis.c b/arch/alpha/kernel/core_pyxis.c
index ee18e1472..cd81d5b91 100644
--- a/arch/alpha/kernel/core_pyxis.c
+++ b/arch/alpha/kernel/core_pyxis.c
@@ -6,20 +6,22 @@
* Code common to all PYXIS core logic chips.
*/
-#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/kernel.h>
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_pyxis.h>
+#undef __EXTERN_INLINE
+
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
+#include <linux/bootmem.h>
#include <asm/ptrace.h>
#include <asm/system.h>
-#define __EXTERN_INLINE inline
-#include <asm/io.h>
-#include <asm/core_pyxis.h>
-#undef __EXTERN_INLINE
-
#include "proto.h"
#include "pci_impl.h"
@@ -284,6 +286,84 @@ struct pci_ops pyxis_pci_ops =
write_dword: pyxis_write_config_dword
};
+void
+pyxis_pci_tbi(struct pci_controler *hose, dma_addr_t start, dma_addr_t end)
+{
+ wmb();
+ *(vip)PYXIS_TBIA = 3; /* Flush all locked and unlocked. */
+ mb();
+}
+
+/*
+ * Pass 1 and 2 have a broken scatter-gather tlb -- it cannot be invalidated.
+ * To work around this problem, we allocate mappings, and put the chip into
+ * DMA loopback mode to read a garbage page. This works by causing TLB
+ * misses, causing old entries to be purged to make room for the new entries
+ * coming in for the garbage page.
+ *
+ * Thanks to NetBSD sources for pointing out this bug. What a pain.
+ */
+
+static unsigned long broken_tbi_addr;
+
+#define BROKEN_TBI_READS 12
+
+static void
+pyxis_broken_pci_tbi(struct pci_controler *hose,
+ dma_addr_t start, dma_addr_t end)
+{
+ unsigned long flags;
+ unsigned long bus_addr;
+ unsigned int ctrl;
+ long i;
+
+ __save_and_cli(flags);
+
+ /* Put the chip into PCI loopback mode. */
+ mb();
+ ctrl = *(vuip)PYXIS_CTRL;
+ *(vuip)PYXIS_CTRL = ctrl | 4;
+ mb();
+
+ /* Read from PCI dense memory space at TBI_ADDR, skipping 64k
+ on each read. This forces SG TLB misses. It appears that
+ the TLB entries are "not quite LRU", meaning that we need
+ to read more times than there are actual tags. */
+
+ bus_addr = broken_tbi_addr;
+ for (i = 0; i < BROKEN_TBI_READS; ++i, bus_addr += 64*1024)
+ pyxis_readl(bus_addr);
+
+ /* Restore normal PCI operation. */
+ mb();
+ *(vuip)PYXIS_CTRL = ctrl;
+ mb();
+
+ __restore_flags(flags);
+}
+
+static void
+pyxis_enable_broken_tbi(struct pci_iommu_arena *arena)
+{
+ void *page;
+ unsigned long *ppte, ofs, pte;
+ long i, npages;
+
+ page = alloc_bootmem_pages(PAGE_SIZE);
+ pte = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1;
+ npages = (BROKEN_TBI_READS + 1) * 64*1024 / PAGE_SIZE;
+
+ ofs = iommu_arena_alloc(arena, npages);
+ ppte = arena->ptes + ofs;
+ for (i = 0; i < npages; ++i)
+ ppte[i] = pte;
+
+ broken_tbi_addr = pyxis_ioremap(arena->dma_base + ofs*PAGE_SIZE);
+ alpha_mv.mv_pci_tbi = pyxis_broken_pci_tbi;
+
+ printk("PYXIS: Enabling broken tbia workaround.\n");
+}
+
void __init
pyxis_init_arch(void)
{
@@ -306,84 +386,100 @@ pyxis_init_arch(void)
*/
temp = *(vuip)PYXIS_ERR_MASK;
temp &= ~4;
- *(vuip)PYXIS_ERR_MASK = temp; mb();
- temp = *(vuip)PYXIS_ERR_MASK; /* re-read to force write */
+ *(vuip)PYXIS_ERR_MASK = temp;
+ mb();
+ *(vuip)PYXIS_ERR_MASK; /* re-read to force write */
+
+ temp = *(vuip)PYXIS_ERR;
+ temp |= 0x180; /* master/target abort */
+ *(vuip)PYXIS_ERR = temp;
+ mb();
+ *(vuip)PYXIS_ERR; /* re-read to force write */
- temp = *(vuip)PYXIS_ERR ;
- temp |= 0x180; /* master/target abort */
- *(vuip)PYXIS_ERR = temp; mb();
- temp = *(vuip)PYXIS_ERR; /* re-read to force write */
+ /*
+ * Create our single hose.
+ */
+
+ hose = alloc_pci_controler();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->config_space = PYXIS_CONF;
+ hose->index = 0;
/*
- * Set up the PCI->physical memory translation windows.
- * For now, windows 2 and 3 are disabled. In the future, we may
- * want to use them to do scatter/gather DMA.
+ * Set up the PCI to main memory translation windows.
+ *
+ * Window 0 is scatter-gather 8MB at 8MB (for isa)
+ * Window 1 is scatter-gather 128MB at 3GB
+ * Window 2 is direct access 1GB at 1GB
+ * Window 3 is direct access 1GB at 2GB
+ * ??? We ought to scale window 1 with memory.
*
- * Window 0 goes at 2 GB and is 1 GB large.
- * Window 1 goes at 3 GB and is 1 GB large.
+ * We must actually use 2 windows to direct-map the 2GB space,
+ * because of an idiot-syncrasy of the CYPRESS chip. It may
+ * respond to a PCI bus address in the last 1MB of the 4GB
+ * address range.
*/
- *(vuip)PYXIS_W0_BASE = PYXIS_DMA_WIN0_BASE_DEFAULT | 1U;
- *(vuip)PYXIS_W0_MASK = (PYXIS_DMA_WIN0_SIZE_DEFAULT - 1) & 0xfff00000U;
- *(vuip)PYXIS_T0_BASE = PYXIS_DMA_WIN0_TRAN_DEFAULT >> 2;
+ /* NetBSD hints that page tables must be aligned to 32K due
+ to a hardware bug. No description of what models affected. */
+ hose->sg_isa = iommu_arena_new(0x00800000, 0x00800000, 32768);
+ hose->sg_pci = iommu_arena_new(0xc0000000, 0x08000000, 32768);
+ __direct_map_base = 0x40000000;
+ __direct_map_size = 0x80000000;
- *(vuip)PYXIS_W1_BASE = PYXIS_DMA_WIN1_BASE_DEFAULT | 1U;
- *(vuip)PYXIS_W1_MASK = (PYXIS_DMA_WIN1_SIZE_DEFAULT - 1) & 0xfff00000U;
- *(vuip)PYXIS_T1_BASE = PYXIS_DMA_WIN1_TRAN_DEFAULT >> 2;
+ *(vuip)PYXIS_W0_BASE = hose->sg_isa->dma_base | 3;
+ *(vuip)PYXIS_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000;
+ *(vuip)PYXIS_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;
- *(vuip)PYXIS_W2_BASE = 0x0;
- *(vuip)PYXIS_W3_BASE = 0x0;
- mb();
+ *(vuip)PYXIS_W1_BASE = hose->sg_pci->dma_base | 3;
+ *(vuip)PYXIS_W1_MASK = (hose->sg_pci->size - 1) & 0xfff00000;
+ *(vuip)PYXIS_T1_BASE = virt_to_phys(hose->sg_pci->ptes) >> 2;
+
+ *(vuip)PYXIS_W2_BASE = 0x40000000 | 1;
+ *(vuip)PYXIS_W2_MASK = (0x40000000 - 1) & 0xfff00000;
+ *(vuip)PYXIS_T2_BASE = 0;
+
+ *(vuip)PYXIS_W3_BASE = 0x80000000 | 1;
+ *(vuip)PYXIS_W3_MASK = (0x40000000 - 1) & 0xfff00000;
+ *(vuip)PYXIS_T3_BASE = 0;
+
+ /* Pass 1 and 2 (ie revision <= 1) have a broken TBIA. See the
+ complete description next to pyxis_broken_pci_tbi for details. */
+ if ((*(vuip)PYXIS_REV & 0xff) <= 1)
+ pyxis_enable_broken_tbi(hose->sg_pci);
+
+ alpha_mv.mv_pci_tbi(hose, 0, -1);
/*
- * Next, clear the PYXIS_CFG register, which gets used
+ * Next, clear the PYXIS_CFG register, which gets used
* for PCI Config Space accesses. That is the way
* we want to use it, and we do not want to depend on
* what ARC or SRM might have left behind...
*/
- {
- unsigned int pyxis_cfg, temp;
- pyxis_cfg = *(vuip)PYXIS_CFG; mb();
- if (pyxis_cfg != 0) {
-#if 1
- printk("PYXIS_init: CFG was 0x%x\n", pyxis_cfg);
-#endif
- *(vuip)PYXIS_CFG = 0; mb();
- temp = *(vuip)PYXIS_CFG; /* re-read to force write */
- }
+ temp = *(vuip)PYXIS_CFG;
+ if (temp != 0) {
+ *(vuip)PYXIS_CFG = 0;
+ mb();
+ *(vuip)PYXIS_CFG; /* re-read to force write */
}
/* Zero the HAE. */
*(vuip)PYXIS_HAE_MEM = 0U; mb();
- *(vuip)PYXIS_HAE_MEM; /* re-read to force write */
+ *(vuip)PYXIS_HAE_MEM; /* re-read to force write */
*(vuip)PYXIS_HAE_IO = 0; mb();
- *(vuip)PYXIS_HAE_IO; /* re-read to force write */
+ *(vuip)PYXIS_HAE_IO; /* re-read to force write */
/*
* Finally, check that the PYXIS_CTRL1 has IOA_BEN set for
* enabling byte/word PCI bus space(s) access.
*/
- {
- unsigned int ctrl1;
- ctrl1 = *(vuip) PYXIS_CTRL1;
- if (!(ctrl1 & 1)) {
-#if 1
- printk("PYXIS_init: enabling byte/word PCI space\n");
-#endif
- *(vuip) PYXIS_CTRL1 = ctrl1 | 1; mb();
- ctrl1 = *(vuip)PYXIS_CTRL1; /* re-read */
- }
+ temp = *(vuip) PYXIS_CTRL1;
+ if (!(temp & 1)) {
+ *(vuip)PYXIS_CTRL1 = temp | 1;
+ mb();
+ *(vuip)PYXIS_CTRL1; /* re-read */
}
-
- /*
- * Create our single hose.
- */
-
- hose = alloc_pci_controler();
- hose->io_space = &ioport_resource;
- hose->mem_space = &iomem_resource;
- hose->config_space = PYXIS_CONF;
- hose->index = 0;
}
static inline void
@@ -401,6 +497,8 @@ void
pyxis_machine_check(unsigned long vector, unsigned long la_ptr,
struct pt_regs * regs)
{
+ int expected;
+
/* Clear the error before reporting anything. */
mb();
mb(); /* magic */
@@ -409,5 +507,23 @@ pyxis_machine_check(unsigned long vector, unsigned long la_ptr,
wrmces(0x7);
mb();
- process_mcheck_info(vector, la_ptr, regs, "PYXIS", mcheck_expected(0));
+ expected = mcheck_expected(0);
+ if (!expected && vector == 0x660) {
+ struct el_common *com;
+ struct el_common_EV5_uncorrectable_mcheck *ev5;
+ struct el_PYXIS_sysdata_mcheck *pyxis;
+
+ com = (void *)la_ptr;
+ ev5 = (void *)(la_ptr + com->proc_offset);
+ pyxis = (void *)(la_ptr + com->sys_offset);
+
+ if (com->code == 0x202) {
+ printk(KERN_CRIT "PYXIS PCI machine check: err0=%08x "
+ "err1=%08x err2=%08x\n",
+ (int) pyxis->pci_err0, (int) pyxis->pci_err1,
+ (int) pyxis->pci_err2);
+ expected = 1;
+ }
+ }
+ process_mcheck_info(vector, la_ptr, regs, "PYXIS", expected);
}
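On sizing the workaround's dummy mappings: pyxis_broken_pci_tbi reads at 64K strides, so pyxis_enable_broken_tbi maps one stride more than the number of probe reads. With the usual 8KB Alpha PAGE_SIZE (an assumption of this sketch), the numbers work out as follows:

/* Worked numbers for the broken-tbia workaround above, assuming
   PAGE_SIZE == 8KB on Alpha. */
#define BROKEN_TBI_READS	12
#define ASSUMED_PAGE_SIZE	8192
static const long probe_span = (BROKEN_TBI_READS + 1) * 64 * 1024;	/* 832KB */
static const long probe_ptes = probe_span / ASSUMED_PAGE_SIZE;		/* 104 ptes */
/* i.e. 104 consecutive sg_pci ptes, all aimed at one garbage bootmem
   page; each flush then costs 12 loopback reads. */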
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
index 079ad445c..d1037f57b 100644
--- a/arch/alpha/kernel/core_t2.c
+++ b/arch/alpha/kernel/core_t2.c
@@ -389,6 +389,10 @@ t2_init_arch(void)
hose->mem_space = &iomem_resource;
hose->config_space = T2_CONF;
hose->index = 0;
+
+ hose->sg_isa = hose->sg_pci = NULL;
+ __direct_map_base = 0x40000000;
+ __direct_map_size = 0x40000000;
}
#define SIC_SEIC (1UL << 33) /* System Event Clear */
diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c
index dab4e1733..0ad001b94 100644
--- a/arch/alpha/kernel/core_tsunami.c
+++ b/arch/alpha/kernel/core_tsunami.c
@@ -100,11 +100,11 @@ mk_conf_addr(struct pci_dev *dev, int where, unsigned long *pci_addr,
"pci_addr=0x%p, type1=0x%p)\n",
bus, device_fn, where, pci_addr, type1));
- if (hose->first_busno == dev->bus->number)
+ if (hose->first_busno == dev->bus->number)
bus = 0;
- *type1 = (bus != 0);
+ *type1 = (bus != 0);
- addr = (bus << 16) | (device_fn << 8) | where;
+ addr = (bus << 16) | (device_fn << 8) | where;
addr |= hose->config_space;
*pci_addr = addr;
@@ -206,6 +206,23 @@ struct pci_ops tsunami_pci_ops =
write_dword: tsunami_write_config_dword
};
+void
+tsunami_pci_tbi(struct pci_controler *hose, dma_addr_t start, dma_addr_t end)
+{
+ tsunami_pchip *pchip = hose->index ? TSUNAMI_pchip1 : TSUNAMI_pchip0;
+
+ wmb();
+
+ /* We can invalidate up to 8 tlb entries in a go. The flush
+ matches against <31:16> in the pci address. */
+ if (((start ^ end) & 0xffff0000) == 0)
+ pchip->tlbiv.csr = (start & 0xffff0000) >> 12;
+ else
+ pchip->tlbia.csr = 0;
+
+ mb();
+}
+
#ifdef NXM_MACHINE_CHECKS_ON_TSUNAMI
static long
tsunami_probe_read(volatile unsigned long *vaddr)
@@ -264,6 +281,8 @@ tsunami_init_one_pchip(tsunami_pchip *pchip, int index)
return;
hose = alloc_pci_controler();
+ if (index == 0)
+ pci_isa_hose = hose;
hose->io_space = alloc_resource();
hose->mem_space = alloc_resource();
@@ -307,27 +326,41 @@ tsunami_init_one_pchip(tsunami_pchip *pchip, int index)
saved_pchip[index].tba[3] = pchip->tba[3].csr;
/*
- * Set up the PCI->physical memory translation windows.
- * For now, windows 1,2 and 3 are disabled. In the future,
- * we may want to use them to do scatter/gather DMA.
+ * Set up the PCI to main memory translation windows.
+ *
+ * Window 0 is scatter-gather 8MB at 8MB (for isa)
+ * Window 1 is scatter-gather 128MB at 3GB
+ * Window 2 is direct access 1GB at 1GB
+ * Window 3 is direct access 1GB at 2GB
+ * ??? We ought to scale window 1 with memory.
*
- * Window 0 goes at 1 GB and is 1 GB large, mapping to 0.
- * Window 1 goes at 2 GB and is 1 GB large, mapping to 1GB.
+ * We must actually use 2 windows to direct-map the 2GB space,
+ * because of an idiot-syncrasy of the CYPRESS chip. It may
+ * respond to a PCI bus address in the last 1MB of the 4GB
+ * address range.
*/
+ hose->sg_isa = iommu_arena_new(0x00800000, 0x00800000, PAGE_SIZE);
+ hose->sg_pci = iommu_arena_new(0xc0000000, 0x08000000, PAGE_SIZE);
+ __direct_map_base = 0x40000000;
+ __direct_map_size = 0x80000000;
- pchip->wsba[0].csr = TSUNAMI_DMA_WIN0_BASE_DEFAULT | 1UL;
- pchip->wsm[0].csr = (TSUNAMI_DMA_WIN0_SIZE_DEFAULT - 1) &
- 0xfff00000UL;
- pchip->tba[0].csr = TSUNAMI_DMA_WIN0_TRAN_DEFAULT;
+ pchip->wsba[0].csr = hose->sg_isa->dma_base | 3;
+ pchip->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000;
+ pchip->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);
- pchip->wsba[1].csr = TSUNAMI_DMA_WIN1_BASE_DEFAULT | 1UL;
- pchip->wsm[1].csr = (TSUNAMI_DMA_WIN1_SIZE_DEFAULT - 1) &
- 0xfff00000UL;
- pchip->tba[1].csr = TSUNAMI_DMA_WIN1_TRAN_DEFAULT;
+ pchip->wsba[1].csr = hose->sg_pci->dma_base | 3;
+ pchip->wsm[1].csr = (hose->sg_pci->size - 1) & 0xfff00000;
+ pchip->tba[1].csr = virt_to_phys(hose->sg_pci->ptes);
- pchip->wsba[2].csr = 0;
- pchip->wsba[3].csr = 0;
- mb();
+ pchip->wsba[2].csr = 0x40000000 | 1;
+ pchip->wsm[2].csr = (0x40000000 - 1) & 0xfff00000;
+ pchip->tba[2].csr = 0;
+
+ pchip->wsba[3].csr = 0x80000000 | 1;
+ pchip->wsm[3].csr = (0x40000000 - 1) & 0xfff00000;
+ pchip->tba[3].csr = 0;
+
+ tsunami_pci_tbi(hose, 0, -1);
}
void __init
@@ -335,7 +368,7 @@ tsunami_init_arch(void)
{
#ifdef NXM_MACHINE_CHECKS_ON_TSUNAMI
extern asmlinkage void entInt(void);
- unsigned long tmp;
+ unsigned long tmp;
/* Ho hum.. init_arch is called before init_IRQ, but we need to be
able to handle machine checks. So install the handler now. */
@@ -426,7 +459,7 @@ tsunami_pci_clr_err(void)
/* TSUNAMI and TYPHOON can have 2, but might only have 1 (DS10) */
if (TSUNAMI_cchip->csc.csr & 1L<<14)
- tsunami_pci_clr_err_1(TSUNAMI_pchip1);
+ tsunami_pci_clr_err_1(TSUNAMI_pchip1);
}
void
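tsunami_pci_tbi above is the first of the new tbi hooks that can flush selectively: the pchip's TLBIV register matches PCI address bits <31:16>, so a single write covers one 64KB-aligned span (eight 8KB pages, hence "up to 8 tlb entries"). A sketch of the decision it makes:

/* Sketch of the granularity test in tsunami_pci_tbi above: selective
   invalidate is only possible when start and end share bits <31:16>,
   i.e. the whole range lies in a single 64KB span; otherwise fall
   back to the full TLBIA. */
static int tsunami_can_flush_selectively(dma_addr_t start, dma_addr_t end)
{
	return ((start ^ end) & 0xffff0000) == 0;
}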
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index af1567fd1..cd8ce5c67 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -32,6 +32,7 @@
#define TASK_ADDR_LIMIT 24
#define TASK_EXEC_DOMAIN 32
#define TASK_NEED_RESCHED 40
+#define TASK_PROCESSOR 100
/*
* task flags (must match include/linux/sched.h):
@@ -572,12 +573,15 @@ entSys:
.align 3
ret_from_sys_call:
cmovne $26,0,$19 /* $19 = 0 => non-restartable */
- /* check bottom half interrupts */
- ldq $3,bh_active
- ldq $4,bh_mask
- and $3,$4,$2
- bne $2,handle_bottom_half
-ret_from_handle_bh:
+ ldq $3,TASK_PROCESSOR($8)
+ lda $4,softirq_state
+ sll $3,5,$3
+ addq $3,$4,$4
+ ldq $4,0($4)
+ sll $4,32,$3
+ and $4,$3,$4
+ bne $4,handle_softirq
+ret_from_softirq:
ldq $0,SP_OFF($30)
and $0,8,$0
beq $0,restore_all
@@ -656,16 +660,16 @@ strace_error:
br ret_from_sys_call
.align 3
-handle_bottom_half:
+handle_softirq:
subq $30,16,$30
stq $19,0($30) /* save syscall nr */
stq $20,8($30) /* and error indication (a3) */
- jsr $26,do_bottom_half
+ jsr $26,do_softirq
ldq $19,0($30)
ldq $20,8($30)
addq $30,16,$30
- br ret_from_handle_bh
-
+ br ret_from_softirq
+
.align 3
syscall_error:
/*
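The rewritten ret_from_sys_call test is the generic 2.3.43 softirq check open-coded for Alpha. In C it amounts to the following sketch; the field names and the 32-byte per-cpu stride are assumptions read off the assembly, which loads both 32-bit fields with one ldq and computes their AND in the upper half of the register:

/* C sketch of the new assembly test above (assumed layout). */
struct softirq_state_sketch {
	unsigned int active;
	unsigned int mask;
} __attribute__((aligned(32)));		/* the "sll $3,5,$3" stride */

extern struct softirq_state_sketch softirq_state[];
extern void do_softirq(void);

static void ret_from_sys_call_sketch(int cpu)	/* cpu = TASK_PROCESSOR($8) */
{
	if (softirq_state[cpu].active & softirq_state[cpu].mask)
		do_softirq();			/* the handle_softirq branch */
}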
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index d6d6e0611..bae28a6a4 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -377,10 +377,6 @@ spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED;
/* Global IRQ locking depth. */
atomic_t global_irq_count = ATOMIC_INIT(0);
-/* This protects BH software state (masks, things like that). */
-atomic_t global_bh_lock = ATOMIC_INIT(0);
-atomic_t global_bh_count = ATOMIC_INIT(0);
-
static void *previous_irqholder = NULL;
#define MAXCOUNT 100000000
@@ -401,7 +397,7 @@ wait_on_irq(int cpu, void *where)
*/
if (!atomic_read(&global_irq_count)) {
if (local_bh_count(cpu)
- || !atomic_read(&global_bh_count))
+ || !spin_is_locked(&global_bh_lock))
break;
}
@@ -422,7 +418,7 @@ wait_on_irq(int cpu, void *where)
if (spin_is_locked(&global_irq_lock))
continue;
if (!local_bh_count(cpu)
- && atomic_read(&global_bh_count))
+ && spin_is_locked(&global_bh_lock))
continue;
if (spin_trylock(&global_irq_lock))
break;
@@ -552,7 +548,7 @@ show(char * str, void *where)
cpu_data[1].irq_count);
printk("bh: %d [%d %d]\n",
- atomic_read(&global_bh_count),
+ spin_is_locked(&global_bh_lock) ? 1 : 0,
cpu_data[0].bh_count,
cpu_data[1].bh_count);
#if 0
@@ -567,35 +563,6 @@ show(char * str, void *where)
#endif
}
-static inline void
-wait_on_bh(void)
-{
- int count = MAXCOUNT;
- do {
- if (!--count) {
- show("wait_on_bh", 0);
- count = ~0;
- }
- /* nothing .. wait for the other bh's to go away */
- barrier();
- } while (atomic_read(&global_bh_count) != 0);
-}
-
-/*
- * This is called when we want to synchronize with
- * bottom half handlers. We need to wait until
- * no other CPU is executing any bottom half handler.
- *
- * Don't wait if we're already running in an interrupt
- * context or are inside a bh handler.
- */
-void
-synchronize_bh(void)
-{
- if (atomic_read(&global_bh_count) && !in_interrupt())
- wait_on_bh();
-}
-
/*
* From its use, I infer that synchronize_irq() stalls a thread until
* the effects of a command to an external device are known to have
@@ -897,6 +864,7 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,
case 0x98: reason = "processor detected hard error"; break;
/* System specific (these are for Alcor, at least): */
+ case 0x202: reason = "system detected hard error"; break;
case 0x203: reason = "system detected uncorrectable ECC error"; break;
case 0x204: reason = "SIO SERR occurred on PCI bus"; break;
case 0x205: reason = "parity error detected by CIA"; break;
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h
index fc0d0cc0c..421591104 100644
--- a/arch/alpha/kernel/machvec_impl.h
+++ b/arch/alpha/kernel/machvec_impl.h
@@ -101,8 +101,7 @@
#define DO_TSUNAMI_IO IO(TSUNAMI,tsunami)
#define BUS(which) \
- mv_virt_to_bus: CAT(which,_virt_to_bus), \
- mv_bus_to_virt: CAT(which,_bus_to_virt)
+ mv_pci_tbi: CAT(which,_pci_tbi)
#define DO_APECS_BUS BUS(apecs)
#define DO_CIA_BUS BUS(cia)
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index aa0a8d968..b62179b8d 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -40,6 +40,7 @@ const char pci_hae0_name[] = "HAE0";
*/
struct pci_controler *hose_head, **hose_tail = &hose_head;
+struct pci_controler *pci_isa_hose;
/*
* Quirks.
diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h
index b46fb9980..f91978732 100644
--- a/arch/alpha/kernel/pci_impl.h
+++ b/arch/alpha/kernel/pci_impl.h
@@ -7,7 +7,7 @@
struct pci_dev;
struct pci_controler;
-
+struct pci_iommu_arena;
/*
* We can't just blindly use 64K for machines with EISA busses; they
@@ -125,12 +125,17 @@ static inline u8 bridge_swizzle(u8 pin, u8 slot)
/* The hose list. */
extern struct pci_controler *hose_head, **hose_tail;
+extern struct pci_controler *pci_isa_hose;
extern void common_init_pci(void);
extern u8 common_swizzle(struct pci_dev *, u8 *);
extern struct pci_controler *alloc_pci_controler(void);
extern struct resource *alloc_resource(void);
+extern struct pci_iommu_arena *iommu_arena_new(dma_addr_t, unsigned long,
+ unsigned long);
+extern long iommu_arena_alloc(struct pci_iommu_arena *arena, long n);
+
extern const char *const pci_io_names[];
extern const char *const pci_mem_names[];
extern const char pci_hae0_name[];
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
new file mode 100644
index 000000000..8faa66901
--- /dev/null
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -0,0 +1,531 @@
+/*
+ * linux/arch/alpha/kernel/pci_iommu.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+
+#include <asm/io.h>
+#include <asm/hwrpb.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+
+#define DEBUG_ALLOC 0
+
+#if DEBUG_ALLOC > 0
+# define DBGA(args...) printk(KERN_DEBUG ##args)
+#else
+# define DBGA(args...)
+#endif
+#if DEBUG_ALLOC > 1
+# define DBGA2(args...) printk(KERN_DEBUG ##args)
+#else
+# define DBGA2(args...)
+#endif
+
+
+static inline unsigned long
+mk_iommu_pte(unsigned long paddr)
+{
+ return (paddr >> (PAGE_SHIFT-1)) | 1;
+}
+
+static inline long
+calc_npages(long bytes)
+{
+ return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
+}
+
+struct pci_iommu_arena *
+iommu_arena_new(dma_addr_t base, unsigned long window_size,
+ unsigned long align)
+{
+ unsigned long entries, mem_size, mem_pages;
+ struct pci_iommu_arena *arena;
+
+ entries = window_size >> PAGE_SHIFT;
+ mem_size = entries * sizeof(unsigned long);
+ mem_pages = calc_npages(mem_size);
+
+ arena = alloc_bootmem(sizeof(*arena));
+ arena->ptes = __alloc_bootmem(mem_pages * PAGE_SIZE, align, 0);
+
+ spin_lock_init(&arena->lock);
+ arena->dma_base = base;
+ arena->size = window_size;
+ arena->alloc_hint = 0;
+
+ return arena;
+}
+
+long
+iommu_arena_alloc(struct pci_iommu_arena *arena, long n)
+{
+ unsigned long flags;
+ unsigned long *beg, *p, *end;
+ long i;
+
+ spin_lock_irqsave(&arena->lock, flags);
+
+ /* Search forward for the first sequence of N empty ptes. */
+ beg = arena->ptes;
+ end = beg + (arena->size >> PAGE_SHIFT);
+ p = beg + arena->alloc_hint;
+ i = 0;
+ while (i < n && p < end)
+ i = (*p++ == 0 ? i + 1 : 0);
+
+ if (p >= end) {
+ /* Failure. Assume the hint was wrong and go back to
+ search from the beginning. */
+ p = beg;
+ i = 0;
+ while (i < n && p < end)
+ i = (*p++ == 0 ? i + 1 : 0);
+
+ if (p >= end) {
+ spin_unlock_irqrestore(&arena->lock, flags);
+ return -1;
+ }
+ }
+
+ /* Success. Mark them all in use, ie not zero. Typically
+ bit zero is the valid bit, so write ~1 into everything.
+ The chip specific bits will fill this in with something
+ kosher when we return. */
+ for (p = p - n, i = 0; i < n; ++i)
+ p[i] = ~1UL;
+
+ arena->alloc_hint = p - beg + n;
+ spin_unlock_irqrestore(&arena->lock, flags);
+
+ return p - beg;
+}
+
+static void
+iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
+{
+ unsigned long *p;
+ long i;
+
+ p = arena->ptes + ofs;
+ for (i = 0; i < n; ++i)
+ p[i] = 0;
+ arena->alloc_hint = ofs;
+}
+
+/* Map a single buffer of the indicated size for PCI DMA in streaming
+ mode. The 32-bit PCI bus mastering address to use is returned.
+ Once the device is given the dma address, the device owns this memory
+ until either pci_unmap_single or pci_sync_single is performed. */
+
+dma_addr_t
+pci_map_single(struct pci_dev *pdev, void *cpu_addr, long size)
+{
+ struct pci_controler *hose = pdev ? pdev->sysdata : pci_isa_hose;
+ dma_addr_t max_dma = pdev ? pdev->dma_mask : 0x00ffffff;
+ struct pci_iommu_arena *arena;
+ long npages, dma_ofs, i;
+ unsigned long paddr;
+ dma_addr_t ret;
+
+ paddr = virt_to_phys(cpu_addr);
+
+ /* First check to see if we can use the direct map window. */
+ if (paddr + size + __direct_map_base - 1 <= max_dma
+ && paddr + size <= __direct_map_size) {
+ ret = paddr + __direct_map_base;
+
+ DBGA2("pci_map_single: [%p,%lx] -> direct %x from %p\n",
+ cpu_addr, size, ret, __builtin_return_address(0));
+
+ return ret;
+ }
+
+ /* If the machine doesn't define a pci_tbi routine, we have to
+ assume it doesn't support sg mapping. */
+ if (! alpha_mv.mv_pci_tbi) {
+ printk(KERN_INFO "pci_map_single failed: no hw sg\n");
+ return 0;
+ }
+
+ arena = hose->sg_pci;
+ if (!arena || arena->dma_base + arena->size > max_dma)
+ arena = hose->sg_isa;
+
+ npages = calc_npages((paddr & ~PAGE_MASK) + size);
+ dma_ofs = iommu_arena_alloc(arena, npages);
+ if (dma_ofs < 0) {
+ printk(KERN_INFO "pci_map_single failed: "
+ "could not allocate dma page tables\n");
+ return 0;
+ }
+
+ paddr &= PAGE_MASK;
+ for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
+ arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
+
+ ret = arena->dma_base + dma_ofs * PAGE_SIZE;
+ ret += (unsigned long)cpu_addr & ~PAGE_MASK;
+
+ /* ??? This shouldn't have been needed, since the entries
+ we've just modified were not in the iommu tlb. */
+ alpha_mv.mv_pci_tbi(hose, ret, ret + size - 1);
+
+ DBGA("pci_map_single: [%p,%lx] np %ld -> sg %x from %p\n",
+ cpu_addr, size, npages, ret, __builtin_return_address(0));
+
+ return ret;
+}
+
+
+/* Unmap a single streaming mode DMA translation. The DMA_ADDR and
+ SIZE must match what was provided for in a previous pci_map_single
+ call. All other usages are undefined. After this call, reads by
+ the cpu to the buffer are guaranteed to see whatever the device
+ wrote there. */
+
+void
+pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, long size)
+{
+ struct pci_controler *hose = pdev ? pdev->sysdata : pci_isa_hose;
+ struct pci_iommu_arena *arena;
+ long dma_ofs, npages;
+
+ if (dma_addr >= __direct_map_base
+ && dma_addr < __direct_map_base + __direct_map_size) {
+ /* Nothing to do. */
+
+ DBGA2("pci_unmap_single: direct [%x,%lx] from %p\n",
+ dma_addr, size, __builtin_return_address(0));
+
+ return;
+ }
+
+ arena = hose->sg_pci;
+ if (!arena || dma_addr < arena->dma_base)
+ arena = hose->sg_isa;
+
+ dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
+ if (dma_ofs * PAGE_SIZE >= arena->size) {
+ printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %x "
+ " base %x size %x\n", dma_addr, arena->dma_base,
+ arena->size);
+ return;
+ }
+
+ npages = calc_npages((dma_addr & ~PAGE_MASK) + size);
+ iommu_arena_free(arena, dma_ofs, npages);
+ alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);
+
+ DBGA2("pci_unmap_single: sg [%x,%lx] np %ld from %p\n",
+ dma_addr, size, npages, __builtin_return_address(0));
+}
+
+
+/* Allocate and map kernel buffer using consistent mode DMA for PCI
+ device. Returns non-NULL cpu-view pointer to the buffer if
+ successful and sets *DMA_ADDRP to the pci side dma address as well,
+ else DMA_ADDRP is undefined. */
+
+void *
+pci_alloc_consistent(struct pci_dev *pdev, long size, dma_addr_t *dma_addrp)
+{
+ void *cpu_addr;
+
+ cpu_addr = kmalloc(size, GFP_ATOMIC);
+ if (! cpu_addr) {
+ printk(KERN_INFO "dma_alloc_consistent: "
+ "kmalloc failed from %p\n",
+ __builtin_return_address(0));
+ /* ??? Really atomic allocation? Otherwise we could play
+ with vmalloc and sg if we can't find contiguous memory. */
+ return NULL;
+ }
+ memset(cpu_addr, 0, size);
+
+ *dma_addrp = pci_map_single(pdev, cpu_addr, size);
+ if (*dma_addrp == 0) {
+ kfree_s(cpu_addr, size);
+ return NULL;
+ }
+
+ DBGA2("dma_alloc_consistent: %lx -> [%p,%x] from %p\n",
+ size, cpu_addr, *dma_addrp, __builtin_return_address(0));
+
+ return cpu_addr;
+}
+
+
+/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must
+ be values that were returned from pci_alloc_consistent. SIZE must
+ be the same as what was passed into pci_alloc_consistent.
+ References to the memory and mappings associated with CPU_ADDR or
+ DMA_ADDR past this call are illegal. */
+
+void
+pci_free_consistent(struct pci_dev *pdev, long size, void *cpu_addr,
+ dma_addr_t dma_addr)
+{
+ pci_unmap_single(pdev, dma_addr, size);
+ kfree_s(cpu_addr, size);
+
+ DBGA2("dma_free_consistent: [%x,%lx] from %p\n",
+ dma_addr, size, __builtin_return_address(0));
+}
+
+
+/* Classify the elements of the scatterlist. Write dma_address
+ of each element with:
+ 0 : Not mergable.
+ 1 : Followers all physically adjacent.
+ [23]: Followers all virtually adjacent.
+ -1 : Not leader.
+ Write dma_length of each leader with the combined lengths of
+ the mergable followers. */
+
+static inline void
+sg_classify(struct scatterlist *sg, struct scatterlist *end)
+{
+ unsigned long next_vaddr;
+ struct scatterlist *leader;
+
+ leader = sg;
+ leader->dma_address = 0;
+ leader->dma_length = leader->length;
+ next_vaddr = (unsigned long)leader->address + leader->length;
+
+ for (++sg; sg < end; ++sg) {
+ unsigned long addr, len;
+ addr = (unsigned long) sg->address;
+ len = sg->length;
+
+ if (next_vaddr == addr) {
+ sg->dma_address = -1;
+ leader->dma_address |= 1;
+ leader->dma_length += len;
+ } else if (((next_vaddr | addr) & ~PAGE_MASK) == 0) {
+ sg->dma_address = -1;
+ leader->dma_address |= 2;
+ leader->dma_length += len;
+ } else {
+ leader = sg;
+ leader->dma_address = 0;
+ leader->dma_length = len;
+ }
+
+ next_vaddr = addr + len;
+ }
+}
+
+/* Given a scatterlist leader, choose an allocation method and fill
+ in the blanks. */
+
+static inline int
+sg_fill(struct scatterlist *leader, struct scatterlist *end,
+ struct scatterlist *out, struct pci_iommu_arena *arena,
+ dma_addr_t max_dma)
+{
+ unsigned long paddr = virt_to_phys(leader->address);
+ unsigned long size = leader->dma_length;
+ struct scatterlist *sg;
+ unsigned long *ptes;
+ long npages, dma_ofs, i;
+
+ /* If everything is physically contiguous, and the addresses
+ fall into the direct-map window, use it. */
+ if (leader->dma_address < 2
+ && paddr + size + __direct_map_base - 1 <= max_dma
+ && paddr + size <= __direct_map_size) {
+ out->dma_address = paddr + __direct_map_base;
+ out->dma_length = size;
+
+ DBGA2("sg_fill: [%p,%lx] -> direct %x\n",
+ leader->address, size, out->dma_address);
+
+ return 0;
+ }
+
+ /* Otherwise, we'll use the iommu to make the pages virtually
+ contiguous. */
+
+ paddr &= ~PAGE_MASK;
+ npages = calc_npages(paddr + size);
+ dma_ofs = iommu_arena_alloc(arena, npages);
+ if (dma_ofs < 0)
+ return -1;
+
+ out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
+ out->dma_length = size;
+
+ DBGA("sg_fill: [%p,%lx] -> sg %x\n",
+ leader->address, size, out->dma_address);
+
+ ptes = &arena->ptes[dma_ofs];
+ sg = leader;
+ do {
+ paddr = virt_to_phys(sg->address);
+ npages = calc_npages((paddr & ~PAGE_MASK) + sg->length);
+
+ DBGA(" (%ld) [%p,%x]\n",
+ sg - leader, sg->address, sg->length);
+
+ paddr &= PAGE_MASK;
+ for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
+ *ptes++ = mk_iommu_pte(paddr);
+
+ ++sg;
+ } while (sg < end && sg->dma_address == -1);
+
+ return 1;
+}
+
+/* TODO: Only use the iommu when it helps. Non-mergable scatterlist
+ entries might as well use direct mappings. */
+
+int
+pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents)
+{
+ struct scatterlist *start, *end, *out;
+ struct pci_controler *hose;
+ struct pci_iommu_arena *arena;
+ dma_addr_t max_dma, fstart, fend;
+
+ /* If pci_tbi is not available, we must not be able to control
+ an iommu. Direct map everything, no merging. */
+ if (! alpha_mv.mv_pci_tbi) {
+ for (end = sg + nents; sg < end; ++sg) {
+ sg->dma_address = virt_to_bus(sg->address);
+ sg->dma_length = sg->length;
+ }
+ return nents;
+ }
+
+ /* Fast path single entry scatterlists. */
+ if (nents == 1) {
+ sg->dma_length = sg->length;
+ sg->dma_address
+ = pci_map_single(pdev, sg->address, sg->length);
+ return sg->dma_address != 0;
+ }
+
+ hose = pdev ? pdev->sysdata : pci_isa_hose;
+ max_dma = pdev ? pdev->dma_mask : 0x00ffffff;
+ arena = hose->sg_pci;
+ if (!arena || arena->dma_base + arena->size > max_dma)
+ arena = hose->sg_isa;
+ start = sg;
+ end = sg + nents;
+ fstart = -1;
+ fend = 0;
+
+ /* First, prepare information about the entries. */
+ sg_classify(sg, end);
+
+ /* Second, iterate over the scatterlist leaders and allocate
+ dma space as needed. */
+ for (out = sg; sg < end; ++sg) {
+ int ret;
+
+ if (sg->dma_address == -1)
+ continue;
+
+ ret = sg_fill(sg, end, out, arena, max_dma);
+ if (ret < 0)
+ goto error;
+ else if (ret > 0) {
+ dma_addr_t ts, te;
+
+ ts = out->dma_address;
+ te = ts + out->dma_length - 1;
+ if (fstart > ts)
+ fstart = ts;
+ if (fend < te)
+ fend = te;
+ }
+ out++;
+ }
+
+ /* ??? This shouldn't have been needed, since the entries
+ we've just modified were not in the iommu tlb. */
+ if (fend)
+ alpha_mv.mv_pci_tbi(hose, fstart, fend);
+
+ if (out - start == 0)
+ printk(KERN_INFO "pci_map_sg failed: no entries?\n");
+
+ return out - start;
+
+error:
+ printk(KERN_INFO "pci_map_sg failed: "
+ "could not allocate dma page tables\n");
+
+ /* Some allocation failed while mapping the scatterlist
+ entries. Unmap them now. */
+ if (out > start)
+ pci_unmap_sg(pdev, start, out - start);
+ return 0;
+}
+
+
+/* Unmap a set of streaming mode DMA translations. Again, cpu read
+ rules concerning calls here are the same as for pci_unmap_single()
+ above. */
+
+void
+pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents)
+{
+ struct pci_controler *hose;
+ struct pci_iommu_arena *arena;
+ struct scatterlist *end;
+ dma_addr_t max_dma;
+ dma_addr_t fstart, fend;
+
+ if (! alpha_mv.mv_pci_tbi)
+ return;
+
+ hose = pdev ? pdev->sysdata : pci_isa_hose;
+ max_dma = pdev ? pdev->dma_mask : 0x00ffffff;
+ arena = hose->sg_pci;
+ if (!arena || arena->dma_base + arena->size > max_dma)
+ arena = hose->sg_isa;
+ fstart = -1;
+ fend = 0;
+
+ for (end = sg + nents; sg < end; ++sg) {
+ unsigned long addr, size;
+
+ addr = sg->dma_address;
+ size = sg->dma_length;
+
+ if (addr >= __direct_map_base
+ && addr < __direct_map_base + __direct_map_size) {
+ /* Nothing to do. */
+ DBGA2("pci_unmap_sg: direct [%lx,%lx]\n", addr, size);
+ } else {
+ long npages, ofs;
+ dma_addr_t tend;
+
+ npages = calc_npages((addr & ~PAGE_MASK) + size);
+ ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
+ iommu_arena_free(arena, ofs, npages);
+
+ tend = addr + size - 1;
+ if (fstart > addr)
+ fstart = addr;
+ if (fend < tend)
+ fend = tend;
+
+ DBGA2("pci_unmap_sg: sg [%lx,%lx]\n", addr, size);
+ }
+ }
+ if (fend)
+ alpha_mv.mv_pci_tbi(hose, fstart, fend);
+}
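From a driver's point of view the new API is self-contained: map, hand the returned bus address to the device, unmap. A minimal usage sketch (the driver fragment and buffer are hypothetical; the calls are the ones defined above):

/* Hypothetical driver fragment using the API added above. */
static void example_dma_to_device(struct pci_dev *pdev, void *buf, long len)
{
	dma_addr_t bus = pci_map_single(pdev, buf, len);
	if (!bus)
		return;	/* no sg hardware and buffer outside the direct window */

	/* ... program 'bus' into the device, start the transfer,
	   and wait for its completion interrupt ... */

	pci_unmap_single(pdev, bus, len);
	/* cpu reads of 'buf' now see what the device wrote */
}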
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index 3efbdda08..fa92b3bc3 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -9,55 +9,65 @@
struct pt_regs;
struct task_struct;
struct pci_dev;
+struct pci_controler;
/* core_apecs.c */
extern struct pci_ops apecs_pci_ops;
extern void apecs_init_arch(void);
extern void apecs_pci_clr_err(void);
extern void apecs_machine_check(u64, u64, struct pt_regs *);
+extern void apecs_pci_tbi(struct pci_controler *, dma_addr_t, dma_addr_t);
/* core_cia.c */
extern struct pci_ops cia_pci_ops;
extern void cia_init_arch(void);
extern void cia_machine_check(u64, u64, struct pt_regs *);
+extern void cia_pci_tbi(struct pci_controler *, dma_addr_t, dma_addr_t);
/* core_irongate.c */
extern struct pci_ops irongate_pci_ops;
extern int irongate_pci_clr_err(void);
extern void irongate_init_arch(void);
extern void irongate_machine_check(u64, u64, struct pt_regs *);
+#define irongate_pci_tbi ((void *)0)
/* core_lca.c */
extern struct pci_ops lca_pci_ops;
extern void lca_init_arch(void);
extern void lca_machine_check(u64, u64, struct pt_regs *);
+extern void lca_pci_tbi(struct pci_controler *, dma_addr_t, dma_addr_t);
/* core_mcpcia.c */
extern struct pci_ops mcpcia_pci_ops;
extern void mcpcia_init_arch(void);
extern void mcpcia_init_hoses(void);
extern void mcpcia_machine_check(u64, u64, struct pt_regs *);
+extern void mcpcia_pci_tbi(struct pci_controler *, dma_addr_t, dma_addr_t);
/* core_polaris.c */
extern struct pci_ops polaris_pci_ops;
extern void polaris_init_arch(void);
extern void polaris_machine_check(u64, u64, struct pt_regs *);
+#define polaris_pci_tbi ((void *)0)
/* core_pyxis.c */
extern struct pci_ops pyxis_pci_ops;
extern void pyxis_init_arch(void);
extern void pyxis_machine_check(u64, u64, struct pt_regs *);
+extern void pyxis_pci_tbi(struct pci_controler *, dma_addr_t, dma_addr_t);
/* core_t2.c */
extern struct pci_ops t2_pci_ops;
extern void t2_init_arch(void);
extern void t2_machine_check(u64, u64, struct pt_regs *);
+#define t2_pci_tbi ((void *)0)
/* core_tsunami.c */
extern struct pci_ops tsunami_pci_ops;
extern void tsunami_init_arch(void);
extern void tsunami_kill_arch(int);
extern void tsunami_machine_check(u64, u64, struct pt_regs *);
+extern void tsunami_pci_tbi(struct pci_controler *, dma_addr_t, dma_addr_t);
/* setup.c */
extern unsigned long srm_hae;
diff --git a/arch/alpha/kernel/semaphore.c b/arch/alpha/kernel/semaphore.c
index d62b355e1..d4793ecb4 100644
--- a/arch/alpha/kernel/semaphore.c
+++ b/arch/alpha/kernel/semaphore.c
@@ -36,7 +36,9 @@
* critical part is the inline stuff in <asm/semaphore.h>
* where we want to avoid any extra jumps and calls.
*/
-void __up(struct semaphore *sem)
+
+void
+__up(struct semaphore *sem)
{
wake_one_more(sem);
wake_up(&sem->wait);
@@ -63,7 +65,7 @@ void __up(struct semaphore *sem)
#define DOWN_VAR \
struct task_struct *tsk = current; \
wait_queue_t wait; \
- init_waitqueue_entry(&wait, tsk);
+ init_waitqueue_entry(&wait, tsk)
#define DOWN_HEAD(task_state) \
\
@@ -92,23 +94,27 @@ void __up(struct semaphore *sem)
tsk->state = (task_state); \
} \
tsk->state = TASK_RUNNING; \
- remove_wait_queue(&sem->wait, &wait);
+ remove_wait_queue(&sem->wait, &wait)
-void __down(struct semaphore * sem)
+void
+__down(struct semaphore * sem)
{
- DOWN_VAR
- DOWN_HEAD(TASK_UNINTERRUPTIBLE)
+ DOWN_VAR;
+ DOWN_HEAD(TASK_UNINTERRUPTIBLE);
+
if (waking_non_zero(sem))
break;
schedule();
- DOWN_TAIL(TASK_UNINTERRUPTIBLE)
+
+ DOWN_TAIL(TASK_UNINTERRUPTIBLE);
}
-int __down_interruptible(struct semaphore * sem)
+int
+__down_interruptible(struct semaphore * sem)
{
int ret = 0;
- DOWN_VAR
- DOWN_HEAD(TASK_INTERRUPTIBLE)
+ DOWN_VAR;
+ DOWN_HEAD(TASK_INTERRUPTIBLE);
ret = waking_non_zero_interruptible(sem, tsk);
if (ret)
@@ -119,11 +125,149 @@ int __down_interruptible(struct semaphore * sem)
break;
}
schedule();
- DOWN_TAIL(TASK_INTERRUPTIBLE)
+
+ DOWN_TAIL(TASK_INTERRUPTIBLE);
return ret;
}
-int __down_trylock(struct semaphore * sem)
+int
+__down_trylock(struct semaphore * sem)
{
return waking_non_zero_trylock(sem);
}
+
+
+/*
+ * RW Semaphores
+ */
+
+void
+__down_read(struct rw_semaphore *sem, int count)
+{
+ long tmp;
+ DOWN_VAR;
+
+ retry_down:
+ if (count < 0) {
+ /* Wait for the lock to become unbiased. Readers
+ are non-exclusive. */
+
+ /* This takes care of granting the lock. */
+ up_read(sem);
+
+ add_wait_queue(&sem->wait, &wait);
+ while (sem->count < 0) {
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ if (sem->count >= 0)
+ break;
+ schedule();
+ }
+
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+
+ __asm __volatile (
+ " mb\n"
+ "1: ldl_l %0,%1\n"
+ " subl %0,1,%2\n"
+ " subl %0,1,%0\n"
+ " stl_c %2,%1\n"
+ " bne %2,2f\n"
+ ".section .text2,\"ax\"\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=r"(count), "=m"(sem->count), "=r"(tmp)
+ : : "memory");
+ if (count <= 0)
+ goto retry_down;
+ } else {
+ add_wait_queue(&sem->wait, &wait);
+
+ while (1) {
+ if (test_and_clear_bit(0, &sem->granted))
+ break;
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ if ((sem->granted & 1) == 0)
+ schedule();
+ }
+
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+ }
+}
+
+void
+__down_write(struct rw_semaphore *sem, int count)
+{
+ long tmp;
+ DOWN_VAR;
+
+ retry_down:
+ if (count + RW_LOCK_BIAS < 0) {
+ up_write(sem);
+
+ add_wait_queue_exclusive(&sem->wait, &wait);
+
+ while (sem->count < 0) {
+ set_task_state(tsk, (TASK_UNINTERRUPTIBLE
+ | TASK_EXCLUSIVE));
+ if (sem->count >= RW_LOCK_BIAS)
+ break;
+ schedule();
+ }
+
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+
+ __asm __volatile (
+ " mb\n"
+ "1: ldl_l %0,%1\n"
+ " ldah %2,%3(%0)\n"
+ " ldah %0,%3(%0)\n"
+ " stl_c %2,%1\n"
+ " bne %2,2f\n"
+ ".section .text2,\"ax\"\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=r"(count), "=m"(sem->count), "=r"(tmp)
+ : "i"(-(RW_LOCK_BIAS >> 16))
+ : "memory");
+ if (count != 0)
+ goto retry_down;
+ } else {
+ /* Put ourselves at the end of the list. */
+ add_wait_queue_exclusive(&sem->write_bias_wait, &wait);
+
+ while (1) {
+ if (test_and_clear_bit(1, &sem->granted))
+ break;
+ set_task_state(tsk, (TASK_UNINTERRUPTIBLE
+ | TASK_EXCLUSIVE));
+ if ((sem->granted & 2) == 0)
+ schedule();
+ }
+
+ remove_wait_queue(&sem->write_bias_wait, &wait);
+ tsk->state = TASK_RUNNING;
+
+ /* If the lock is currently unbiased, awaken the sleepers.
+ FIXME: This wakes up the readers early in a bit of a
+ stampede -> bad! */
+ if (sem->count >= 0)
+ wake_up(&sem->wait);
+ }
+}
+
+void
+__do_rwsem_wake(struct rw_semaphore *sem, int readers)
+{
+ if (readers) {
+ if (test_and_set_bit(0, &sem->granted))
+ BUG();
+ wake_up(&sem->wait);
+ } else {
+ if (test_and_set_bit(1, &sem->granted))
+ BUG();
+ wake_up(&sem->write_bias_wait);
+ }
+}
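The read/write slow paths above rely on the usual biased-count encoding of sem->count. The bias value below is an assumption (taken from the asm/semaphore.h convention of this era), but the sketch matches the arithmetic in the inline assembly, whose ldah adds -(RW_LOCK_BIAS >> 16) shifted up 16 bits, i.e. subtracts the whole bias:

/* Assumed encoding behind __down_read/__down_write above: */
#define RW_LOCK_BIAS_SKETCH	0x01000000
/*
 *   count == RW_LOCK_BIAS       lock is free
 *   count == RW_LOCK_BIAS - n   n readers hold it (each subtracts 1)
 *   count == 0                  one writer holds it (subtracts the bias)
 *   count <  0                  writer present or waiting; contended
 */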
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 3a1fb94a3..c92168195 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -96,6 +96,13 @@ struct screen_info screen_info = {
orig_video_points: 16
};
+/*
+ * The direct map I/O window, if any. This should be the same
+ * for all busses, since it's used by virt_to_bus.
+ */
+
+unsigned long __direct_map_base;
+unsigned long __direct_map_size;
/*
* Declare all of the machine vectors.
@@ -225,15 +232,8 @@ setup_memory(void)
max_low_pfn = end;
}
- /* Enforce maximum of 2GB even if there is more. Blah. */
- if (max_low_pfn > PFN_MAX)
- max_low_pfn = PFN_MAX;
- printk("max_low_pfn %ld\n", max_low_pfn);
-
/* Find the end of the kernel memory. */
start_pfn = PFN_UP(virt_to_phys(_end));
- printk("_end %p, start_pfn %ld\n", _end, start_pfn);
-
bootmap_start = -1;
try_again:
@@ -243,7 +243,6 @@ setup_memory(void)
	/* We need to know how many physically contiguous pages
we'll need for the bootmap. */
bootmap_pages = bootmem_bootmap_pages(max_low_pfn);
- printk("bootmap size: %ld pages\n", bootmap_pages);
/* Now find a good region where to allocate the bootmap. */
for_each_mem_cluster(memdesc, cluster, i) {
@@ -261,8 +260,6 @@ setup_memory(void)
if (end > max_low_pfn)
end = max_low_pfn;
if (end - start >= bootmap_pages) {
- printk("allocating bootmap in area %ld:%ld\n",
- start, start+bootmap_pages);
bootmap_start = start;
break;
}
@@ -270,8 +267,6 @@ setup_memory(void)
if (bootmap_start == -1) {
max_low_pfn >>= 1;
- printk("bootmap area not found now trying with %ld pages\n",
- max_low_pfn);
goto try_again;
}
@@ -304,8 +299,6 @@ setup_memory(void)
/* Reserve the bootmap memory. */
reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size);
- printk("reserving bootmap %ld:%ld\n", bootmap_start,
- bootmap_start + PFN_UP(bootmap_size));
#ifdef CONFIG_BLK_DEV_INITRD
initrd_start = INITRD_START;
@@ -328,27 +321,26 @@ setup_memory(void)
#endif /* CONFIG_BLK_DEV_INITRD */
}
-int __init page_is_ram(unsigned long pfn)
+int __init
+page_is_ram(unsigned long pfn)
{
struct memclust_struct * cluster;
struct memdesc_struct * memdesc;
int i;
- memdesc = (struct memdesc_struct *) (hwrpb->mddt_offset + (unsigned long) hwrpb);
+ memdesc = (struct memdesc_struct *)
+ (hwrpb->mddt_offset + (unsigned long) hwrpb);
for_each_mem_cluster(memdesc, cluster, i)
{
if (pfn >= cluster->start_pfn &&
- pfn < cluster->start_pfn + cluster->numpages)
- {
- if (cluster->usage & 3)
- return 0;
- else
- return 1;
+ pfn < cluster->start_pfn + cluster->numpages) {
+ return (cluster->usage & 3) ? 0 : 1;
}
}
return 0;
}
+
#undef PFN_UP
#undef PFN_DOWN
#undef PFN_PHYS
@@ -369,8 +361,7 @@ setup_arch(char **cmdline_p)
/* Hack for Jensen... since we're restricted to 8 or 16 chars for
boot flags depending on the boot mode, we need some shorthand.
- This should do for installation. Later we'll add other
- abbreviations as well... */
+ This should do for installation. */
if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
strcpy(command_line, "root=/dev/fd0 load_ramdisk=1");
} else {
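
The __direct_map_base/__direct_map_size pair added to setup.c describes the
direct-mapped DMA window that virt_to_bus relies on. A hypothetical user-space
model of the translation follows; the offset arithmetic and the range check
are assumptions drawn from the comment above, not code in this patch.

    #include <stdio.h>

    /* Stand-ins for the new globals; these particular values are what
       jensen_init_arch installs later in this patch. */
    static unsigned long __direct_map_base = 0;
    static unsigned long __direct_map_size = 0xffffffff;

    /* Assumed model: a physical address inside the window maps to bus
       address phys + base; anything outside would need pci_iommu. */
    static long phys_to_bus_model(unsigned long phys)
    {
        if (phys >= __direct_map_size)
            return -1;                 /* would fall back to the IOMMU */
        return (long)(phys + __direct_map_base);
    }

    int main(void)
    {
        printf("bus address for phys 0x100000: %#lx\n",
               (unsigned long)phys_to_bus_model(0x100000));
        return 0;
    }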
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 3fbf11495..30ed75ead 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -51,9 +51,9 @@ static struct {
} ipi_data[NR_CPUS] __cacheline_aligned;
enum ipi_message_type {
- IPI_RESCHEDULE,
- IPI_CALL_FUNC,
- IPI_CPU_STOP,
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNC,
+ IPI_CPU_STOP,
};
spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
@@ -70,7 +70,7 @@ int smp_num_cpus = 1; /* Number that came online. */
int smp_threads_ready; /* True once the per process idle is forked. */
cycles_t cacheflush_time;
-int cpu_number_map[NR_CPUS];
+int __cpu_number_map[NR_CPUS];
int __cpu_logical_map[NR_CPUS];
extern void calibrate_delay(void);
@@ -426,13 +426,13 @@ smp_boot_one_cpu(int cpuid, int cpunum)
if (fork_by_hand() < 0)
panic("failed fork for CPU %d", cpuid);
- idle = init_task.prev_task;
- if (!idle)
- panic("No idle process for CPU %d", cpuid);
+ idle = init_task.prev_task;
+ if (!idle)
+ panic("No idle process for CPU %d", cpuid);
idle->processor = cpuid;
__cpu_logical_map[cpunum] = cpuid;
- cpu_number_map[cpuid] = cpunum;
+ __cpu_number_map[cpuid] = cpunum;
idle->has_cpu = 1; /* we schedule the first task manually */
del_from_runqueue(idle);
@@ -461,7 +461,7 @@ smp_boot_one_cpu(int cpuid, int cpunum)
/* we must invalidate our stuff as we failed to boot the CPU */
__cpu_logical_map[cpunum] = -1;
- cpu_number_map[cpuid] = -1;
+ __cpu_number_map[cpuid] = -1;
/* the idle task is local to us so free it as we don't use it */
free_task_struct(idle);
@@ -534,11 +534,11 @@ smp_boot_cpus(void)
unsigned long bogosum;
/* Take care of some initial bookkeeping. */
- memset(cpu_number_map, -1, sizeof(cpu_number_map));
+ memset(__cpu_number_map, -1, sizeof(__cpu_number_map));
memset(__cpu_logical_map, -1, sizeof(__cpu_logical_map));
memset(ipi_data, 0, sizeof(ipi_data));
- cpu_number_map[smp_boot_cpuid] = 0;
+ __cpu_number_map[smp_boot_cpuid] = 0;
__cpu_logical_map[0] = smp_boot_cpuid;
current->processor = smp_boot_cpuid;
@@ -554,7 +554,7 @@ smp_boot_cpus(void)
/* Nothing to do on a UP box, or when told not to. */
if (smp_num_probed == 1 || max_cpus == 0) {
- printk(KERN_INFO "SMP mode deactivated.\n");
+ printk(KERN_INFO "SMP mode deactivated.\n");
return;
}
@@ -565,7 +565,7 @@ smp_boot_cpus(void)
if (i == smp_boot_cpuid)
continue;
- if (((cpu_present_mask >> i) & 1) == 0)
+ if (((cpu_present_mask >> i) & 1) == 0)
continue;
if (smp_boot_one_cpu(i, cpu_count))
@@ -580,10 +580,10 @@ smp_boot_cpus(void)
}
bogosum = 0;
- for (i = 0; i < NR_CPUS; i++) {
+ for (i = 0; i < NR_CPUS; i++) {
if (cpu_present_mask & (1L << i))
bogosum += cpu_data[i].loops_per_sec;
- }
+ }
printk(KERN_INFO "SMP: Total of %d processors activated "
"(%lu.%02lu BogoMIPS).\n",
cpu_count, (bogosum + 2500) / 500000,
@@ -605,7 +605,7 @@ smp_commence(void)
extern void update_one_process(struct task_struct *p, unsigned long ticks,
- unsigned long user, unsigned long system,
+ unsigned long user, unsigned long system,
int cpu);
void
@@ -626,13 +626,13 @@ smp_percpu_timer_interrupt(struct pt_regs *regs)
irq_enter(cpu, TIMER_IRQ);
update_one_process(current, 1, user, !user, cpu);
- if (current->pid) {
- if (--current->counter <= 0) {
+ if (current->pid) {
+ if (--current->counter <= 0) {
current->counter = 0;
- current->need_resched = 1;
- }
+ current->need_resched = 1;
+ }
- if (user) {
+ if (user) {
if (current->priority < DEF_PRIORITY) {
kstat.cpu_nice++;
kstat.per_cpu_nice[cpu]++;
@@ -640,11 +640,11 @@ smp_percpu_timer_interrupt(struct pt_regs *regs)
kstat.cpu_user++;
kstat.per_cpu_user[cpu]++;
}
- } else {
+ } else {
kstat.cpu_system++;
kstat.per_cpu_system[cpu]++;
- }
- }
+ }
+ }
data->prof_counter = data->prof_multiplier;
irq_exit(cpu, TIMER_IRQ);
@@ -722,7 +722,7 @@ again:
return -EBUSY;
while (*(void **)lock)
- schedule();
+ barrier();
goto again;
}
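
The last smp.c hunk swaps schedule() for barrier() in the lock retry loop: a
waiter that merely needs to see the word change should spin and force a fresh
read, not reschedule. A minimal user-space illustration of the pattern; the
static lock word here is a stand-in for the kernel's.

    #include <stdio.h>

    #define barrier() __asm__ __volatile__("" ::: "memory")

    static void *lock;                 /* stand-in for the kernel lock word */

    int main(void)
    {
        /* The memory clobber in barrier() forces the compiler to reload
           lock on every iteration instead of hoisting the test. */
        while (lock)
            barrier();
        puts("lock observed free");
        return 0;
    }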
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index ad156c9f2..c2abe26f2 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -111,11 +111,20 @@ jensen_init_irq(void)
}
static void
+jensen_init_arch(void)
+{
+ __direct_map_base = 0;
+ __direct_map_size = 0xffffffff;
+}
+
+static void
jensen_machine_check (u64 vector, u64 la, struct pt_regs *regs)
{
printk(KERN_CRIT "Machine check\n");
}
+#define jensen_pci_tbi ((void*)0)
+
/*
* The System Vector
@@ -136,7 +145,7 @@ struct alpha_machine_vector jensen_mv __initmv = {
ack_irq: common_ack_irq,
device_interrupt: jensen_device_interrupt,
- init_arch: NULL,
+ init_arch: jensen_init_arch,
init_irq: jensen_init_irq,
init_pit: common_init_pit,
init_pci: NULL,
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index 78025bec9..2359755bf 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -55,52 +55,6 @@ sio_init_irq(void)
}
static inline void __init
-xl_init_arch(void)
-{
- struct pci_controler *hose;
-
- /*
- * Set up the PCI->physical memory translation windows. For
- * the XL we *must* use both windows, in order to maximize the
- * amount of physical memory that can be used to DMA from the
- * ISA bus, and still allow PCI bus devices access to all of
- * host memory.
- *
- * See <asm/apecs.h> for window bases and sizes.
- *
- * This restriction due to the true XL motherboards' 82379AB SIO
- * PCI<->ISA bridge chip which passes only 27 bits of address...
- */
-
- *(vuip)APECS_IOC_PB1R = 1<<19 | (APECS_XL_DMA_WIN1_BASE & 0xfff00000U);
- *(vuip)APECS_IOC_PM1R = (APECS_XL_DMA_WIN1_SIZE - 1) & 0xfff00000U;
- *(vuip)APECS_IOC_TB1R = 0;
-
- *(vuip)APECS_IOC_PB2R = 1<<19 | (APECS_XL_DMA_WIN2_BASE & 0xfff00000U);
- *(vuip)APECS_IOC_PM2R = (APECS_XL_DMA_WIN2_SIZE - 1) & 0xfff00000U;
- *(vuip)APECS_IOC_TB2R = 0;
-
- /*
- * Finally, clear the HAXR2 register, which gets used for PCI
- * Config Space accesses. That is the way we want to use it,
- * and we do not want to depend on what ARC or SRM might have
- * left behind...
- */
-
- *(vuip)APECS_IOC_HAXR2 = 0; mb();
-
- /*
- * Create our single hose.
- */
-
- hose = alloc_pci_controler();
- hose->io_space = &ioport_resource;
- hose->mem_space = &iomem_resource;
- hose->config_space = LCA_CONF;
- hose->index = 0;
-}
-
-static inline void __init
alphabook1_init_arch(void)
{
/* The AlphaBook1 has LCD video fixed at 800x600,
@@ -448,7 +402,7 @@ struct alpha_machine_vector xl_mv __initmv = {
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_APECS_IO,
- BUS(apecs_xl),
+ BUS(apecs),
machine_check: apecs_machine_check,
max_dma_address: ALPHA_XL_MAX_DMA_ADDRESS,
min_io_address: DEFAULT_IO_BASE,
@@ -460,7 +414,7 @@ struct alpha_machine_vector xl_mv __initmv = {
ack_irq: common_ack_irq,
device_interrupt: isa_device_interrupt,
- init_arch: xl_init_arch,
+ init_arch: lca_init_arch,
init_irq: sio_init_irq,
init_pit: common_init_pit,
init_pci: noname_init_pci,
diff --git a/arch/alpha/lib/semaphore.S b/arch/alpha/lib/semaphore.S
index 3dbeeec5f..517285ea4 100644
--- a/arch/alpha/lib/semaphore.S
+++ b/arch/alpha/lib/semaphore.S
@@ -1,7 +1,7 @@
/*
* linux/arch/alpha/lib/semaphore.S
*
- * Copyright (C) 1999 Richard Henderson
+ * Copyright (C) 1999, 2000 Richard Henderson
*/
/*
@@ -181,3 +181,168 @@ __up_wakeup:
lda $30, 20*8($30)
ret $31, ($28), 0
.end __up_wakeup
+
+/* __down_read_failed takes the semaphore in $24, count in $25;
+   clobbers $24, $25, and $28.  */
+
+ .globl __down_read_failed
+ .ent __down_read_failed
+__down_read_failed:
+ ldgp $29,0($27)
+ lda $30, -18*8($30)
+ stq $28, 0*8($30)
+ stq $0, 1*8($30)
+ stq $1, 2*8($30)
+ stq $2, 3*8($30)
+ stq $3, 4*8($30)
+ stq $4, 5*8($30)
+ stq $5, 6*8($30)
+ stq $6, 7*8($30)
+ stq $7, 8*8($30)
+ stq $16, 9*8($30)
+ stq $17, 10*8($30)
+ stq $18, 11*8($30)
+ stq $19, 12*8($30)
+ stq $20, 13*8($30)
+ stq $21, 14*8($30)
+ stq $22, 15*8($30)
+ stq $23, 16*8($30)
+ stq $26, 17*8($30)
+ .frame $30, 18*8, $28
+ .prologue 1
+
+ mov $24, $16
+ mov $25, $17
+ jsr __down_read
+
+ ldq $28, 0*8($30)
+ ldq $0, 1*8($30)
+ ldq $1, 2*8($30)
+ ldq $2, 3*8($30)
+ ldq $3, 4*8($30)
+ ldq $4, 5*8($30)
+ ldq $5, 6*8($30)
+ ldq $6, 7*8($30)
+ ldq $7, 8*8($30)
+ ldq $16, 9*8($30)
+ ldq $17, 10*8($30)
+ ldq $18, 11*8($30)
+ ldq $19, 12*8($30)
+ ldq $20, 13*8($30)
+ ldq $21, 14*8($30)
+ ldq $22, 15*8($30)
+ ldq $23, 16*8($30)
+ ldq $26, 17*8($30)
+ lda $30, 18*8($30)
+ ret $31, ($28), 0
+ .end __down_read_failed
+
+/* __down_write_failed takes the semaphore in $24, count in $25;
+   clobbers $24, $25, and $28.  */
+
+ .globl __down_write_failed
+ .ent __down_write_failed
+__down_write_failed:
+ ldgp $29,0($27)
+	lda	$30, -18*8($30)
+ stq $28, 0*8($30)
+ stq $0, 1*8($30)
+ stq $1, 2*8($30)
+ stq $2, 3*8($30)
+ stq $3, 4*8($30)
+ stq $4, 5*8($30)
+ stq $5, 6*8($30)
+ stq $6, 7*8($30)
+ stq $7, 8*8($30)
+ stq $16, 9*8($30)
+ stq $17, 10*8($30)
+ stq $18, 11*8($30)
+ stq $19, 12*8($30)
+ stq $20, 13*8($30)
+ stq $21, 14*8($30)
+ stq $22, 15*8($30)
+ stq $23, 16*8($30)
+ stq $26, 17*8($30)
+ .frame $30, 18*8, $28
+ .prologue 1
+
+ mov $24, $16
+ mov $25, $17
+ jsr __down_write
+
+ ldq $28, 0*8($30)
+ ldq $0, 1*8($30)
+ ldq $1, 2*8($30)
+ ldq $2, 3*8($30)
+ ldq $3, 4*8($30)
+ ldq $4, 5*8($30)
+ ldq $5, 6*8($30)
+ ldq $6, 7*8($30)
+ ldq $7, 8*8($30)
+ ldq $16, 9*8($30)
+ ldq $17, 10*8($30)
+ ldq $18, 11*8($30)
+ ldq $19, 12*8($30)
+ ldq $20, 13*8($30)
+ ldq $21, 14*8($30)
+ ldq $22, 15*8($30)
+ ldq $23, 16*8($30)
+ ldq $26, 17*8($30)
+ lda $30, 18*8($30)
+ ret $31, ($28), 0
+ .end __down_write_failed
+
+/* __rwsem_wake takes the semaphore in $24, readers in $25;
+ clobbers $24, $25, and $28. */
+
+ .globl __rwsem_wake
+ .ent __rwsem_wake
+__rwsem_wake:
+ ldgp $29,0($27)
+ lda $30, -18*8($30)
+ stq $28, 0*8($30)
+ stq $0, 1*8($30)
+ stq $1, 2*8($30)
+ stq $2, 3*8($30)
+ stq $3, 4*8($30)
+ stq $4, 5*8($30)
+ stq $5, 6*8($30)
+ stq $6, 7*8($30)
+ stq $7, 8*8($30)
+ stq $16, 9*8($30)
+ stq $17, 10*8($30)
+ stq $18, 11*8($30)
+ stq $19, 12*8($30)
+ stq $20, 13*8($30)
+ stq $21, 14*8($30)
+ stq $22, 15*8($30)
+ stq $23, 16*8($30)
+ stq $26, 17*8($30)
+ .frame $30, 18*8, $28
+ .prologue 1
+
+ mov $24, $16
+ mov $25, $17
+ jsr __do_rwsem_wake
+
+ ldq $28, 0*8($30)
+ ldq $0, 1*8($30)
+ ldq $1, 2*8($30)
+ ldq $2, 3*8($30)
+ ldq $3, 4*8($30)
+ ldq $4, 5*8($30)
+ ldq $5, 6*8($30)
+ ldq $6, 7*8($30)
+ ldq $7, 8*8($30)
+ ldq $16, 9*8($30)
+ ldq $17, 10*8($30)
+ ldq $18, 11*8($30)
+ ldq $19, 12*8($30)
+ ldq $20, 13*8($30)
+ ldq $21, 14*8($30)
+ ldq $22, 15*8($30)
+ ldq $23, 16*8($30)
+ ldq $26, 17*8($30)
+ lda $30, 18*8($30)
+ ret $31, ($28), 0
+ .end __rwsem_wake
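
The three stubs above exist so the inline fast paths can call into C on
contention without the compiler treating the call as clobbering the usual
argument and temporary registers; that is what the walls of stq/ldq around
each jsr buy. A rough user-space model of the fast/slow split follows;
slow_path stands in for the C-level __down_read, and the names are
illustrative, not taken from this patch.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int count = 1;          /* 1 = free; illustrative only */

    static void slow_path(int newcount)   /* stands in for __down_read */
    {
        printf("contended (count %d): would sleep\n", newcount);
    }

    /* Fast path: one atomic op.  Only on contention is a call taken, and
       in the real code that call goes through the register-preserving
       __down_read_failed stub, semaphore in $24 and count in $25. */
    static void down_read_model(void)
    {
        int newcount = atomic_fetch_sub(&count, 1) - 1;
        if (newcount < 0)
            slow_path(newcount);
    }

    int main(void)
    {
        down_read_model();                /* granted */
        down_read_model();                /* contended */
        return 0;
    }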
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 97bb6df9d..e2142b63c 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -34,7 +34,6 @@
static unsigned long totalram_pages;
extern void die_if_kernel(char *,struct pt_regs *,long);
-extern void show_net_buffers(void);
struct thread_struct original_pcb;
@@ -173,9 +172,6 @@ show_mem(void)
printk("%ld pages swap cached\n",cached);
printk("%ld pages in page table cache\n",pgtable_cache_size);
show_buffers();
-#ifdef CONFIG_NET
- show_net_buffers();
-#endif
}
static inline unsigned long
@@ -195,7 +191,7 @@ paging_init(void)
{
unsigned long newptbr;
unsigned long original_pcb_ptr;
- unsigned int zones_size[MAX_NR_ZONES] = {0, 0, 0};
+ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
unsigned long dma_pfn, high_pfn;
dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
diff --git a/arch/alpha/vmlinux.lds b/arch/alpha/vmlinux.lds
index 94270b390..4b49a5369 100644
--- a/arch/alpha/vmlinux.lds
+++ b/arch/alpha/vmlinux.lds
@@ -82,4 +82,6 @@ SECTIONS
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
+
+ /DISCARD/ : { *(.text.exit) *(.data.exit) }
}