Diffstat (limited to 'arch/alpha/kernel')
 arch/alpha/kernel/alpha_ksyms.c   |   6
 arch/alpha/kernel/core_tsunami.c  |  35
 arch/alpha/kernel/irq.c           |  68
 arch/alpha/kernel/pci_iommu.c     |  53
 arch/alpha/kernel/process.c       | 109
 arch/alpha/kernel/proto.h         |   3
 arch/alpha/kernel/semaphore.c     |   4
 arch/alpha/kernel/setup.c         |   4
 arch/alpha/kernel/smp.c           |  41
 arch/alpha/kernel/sys_cabriolet.c |  26
 arch/alpha/kernel/sys_dp264.c     | 124
 arch/alpha/kernel/sys_sio.c       |   2
 arch/alpha/kernel/time.c          |   1
 arch/alpha/kernel/traps.c         |   4
14 files changed, 308 insertions, 172 deletions
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index 725dd4f51..25d9583dd 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -98,6 +98,8 @@ EXPORT_SYMBOL(__memset);
 EXPORT_SYMBOL(__memsetw);
 EXPORT_SYMBOL(__constant_c_memset);
+EXPORT_SYMBOL(__direct_map_base);
+EXPORT_SYMBOL(__direct_map_size);
 EXPORT_SYMBOL(pci_alloc_consistent);
 EXPORT_SYMBOL(pci_free_consistent);
 EXPORT_SYMBOL(pci_map_single);
@@ -144,6 +146,10 @@ EXPORT_SYMBOL(alpha_fp_emul_imprecise);
 EXPORT_SYMBOL(alpha_fp_emul);
 #endif
 
+#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
+EXPORT_SYMBOL(__min_ipl);
+#endif
+
 /*
  * The following are specially called from the uaccess assembly stubs.
  */
diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c
index 5fa112173..1452b6336 100644
--- a/arch/alpha/kernel/core_tsunami.c
+++ b/arch/alpha/kernel/core_tsunami.c
@@ -24,7 +24,6 @@
 #include "proto.h"
 #include "pci_impl.h"
 
-int TSUNAMI_bootcpu;
 
 static struct
 {
@@ -210,17 +209,23 @@ void
 tsunami_pci_tbi(struct pci_controler *hose, dma_addr_t start, dma_addr_t end)
 {
        tsunami_pchip *pchip = hose->index ? TSUNAMI_pchip1 : TSUNAMI_pchip0;
-
-       wmb();
+       volatile unsigned long *csr;
+       unsigned long value;
 
        /* We can invalidate up to 8 tlb entries in a go.  The flush
           matches against <31:16> in the pci address.  */
+       csr = &pchip->tlbia.csr;
        if (((start ^ end) & 0xffff0000) == 0)
-               pchip->tlbiv.csr = (start & 0xffff0000) >> 12;
-       else
-               pchip->tlbia.csr = 0;
+               csr = &pchip->tlbiv.csr;
+
+       /* For TBIA, it doesn't matter what value we write.  For TBI,
+          it's the shifted tag bits.  */
+       value = (start & 0xffff0000) >> 12;
+
+       wmb();
+       *csr = value;
        mb();
+       *csr;
 }
 
 #ifdef NXM_MACHINE_CHECKS_ON_TSUNAMI
@@ -229,7 +234,7 @@ tsunami_probe_read(volatile unsigned long *vaddr)
 {
        long dont_care, probe_result;
        int cpu = smp_processor_id();
-       int s = swpipl(6);      /* Block everything but machine checks. */
+       int s = swpipl(IPL_MCHECK - 1);
 
        mcheck_taken(cpu) = 0;
        mcheck_expected(cpu) = 1;
@@ -338,9 +343,13 @@ tsunami_init_one_pchip(tsunami_pchip *pchip, int index)
         * because of an idiot-syncrasy of the CYPRESS chip.  It may
         * respond to a PCI bus address in the last 1MB of the 4GB
         * address range.
+        *
+        * Note that the TLB lookup logic uses bitwise concatenation,
+        * not addition, so the required arena alignment is based on
+        * the size of the window.
         */
-       hose->sg_isa = iommu_arena_new(0x00800000, 0x00800000, PAGE_SIZE);
-       hose->sg_pci = iommu_arena_new(0xc0000000, 0x08000000, PAGE_SIZE);
+       hose->sg_isa = iommu_arena_new(0x00800000, 0x00800000, 0x00800000>>10);
+       hose->sg_pci = iommu_arena_new(0xc0000000, 0x08000000, 0x08000000>>10);
 
        __direct_map_base = 0x40000000;
        __direct_map_size = 0x80000000;
@@ -399,8 +408,6 @@ tsunami_init_arch(void)
        printk("%s: CSR_STR 0x%lx\n", FN, TSUNAMI_dchip->str.csr);
        printk("%s: CSR_DREV 0x%lx\n", FN, TSUNAMI_dchip->drev.csr);
 #endif
-       TSUNAMI_bootcpu = __hard_smp_processor_id();
-
        /* With multiple PCI busses, we play with I/O as physical addrs.  */
        ioport_resource.end = ~0UL;
        iomem_resource.end = ~0UL;
@@ -444,12 +451,10 @@ tsunami_kill_arch(int mode)
 static inline void
 tsunami_pci_clr_err_1(tsunami_pchip *pchip)
 {
-       unsigned int jd;
-
-       jd = pchip->perror.csr;
+       pchip->perror.csr;
        pchip->perror.csr = 0x040;
        mb();
-       jd = pchip->perror.csr;
+       pchip->perror.csr;
 }
 
 static inline void
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 3d593acf3..613a633ba 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -48,6 +48,12 @@ unsigned long __irq_attempt[NR_IRQS];
 #define ACTUAL_NR_IRQS NR_IRQS
 #endif
 
+/* Hack minimum IPL during interupt processing for broken hardware.  */
+
+#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
+int __min_ipl;
+#endif
+
 /*
  * Performance counter hook.  A module can override this to
  * do something useful.
@@ -283,30 +289,32 @@ handle_IRQ_event(unsigned int irq, struct pt_regs *regs, struct irqaction *action)
 {
        int status, cpu = smp_processor_id();
-       unsigned long ipl;
+       int old_ipl, ipl;
 
        kstat.irqs[cpu][irq]++;
        irq_enter(cpu, irq);
 
        status = 1;     /* Force the "do bottom halves" bit */
-       ipl = rdps() & 7;
+       old_ipl = ipl = getipl();
 
        do {
-               unsigned long newipl = (action->flags & SA_INTERRUPT ? 7 : 0);
-               if (newipl != ipl) {
-                       swpipl(newipl);
-                       ipl = newipl;
+               int new_ipl = IPL_MIN;
+               if (action->flags & SA_INTERRUPT)
+                       new_ipl = IPL_MAX;
+               if (new_ipl != ipl) {
+                       setipl(new_ipl);
+                       ipl = new_ipl;
                }
                status |= action->flags;
                action->handler(irq, action->dev_id, regs);
                action = action->next;
        } while (action);
 
+       if (ipl != old_ipl)
+               setipl(old_ipl);
+
        if (status & SA_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
-
-       if (ipl == 0)
-               __cli();
-
        irq_exit(cpu, irq);
 
        return status;
@@ -325,7 +333,7 @@ disable_irq_nosync(unsigned int irq)
 
        spin_lock_irqsave(&irq_controller_lock, flags);
        if (!irq_desc[irq].depth++) {
-               irq_desc[irq].status |= IRQ_DISABLED;
+               irq_desc[irq].status |= IRQ_DISABLED | IRQ_MASKED;
                irq_desc[irq].handler->disable(irq);
        }
        spin_unlock_irqrestore(&irq_controller_lock, flags);
@@ -356,14 +364,15 @@ enable_irq(unsigned int irq)
        switch (irq_desc[irq].depth) {
        case 1: {
-               unsigned int status = irq_desc[irq].status & ~IRQ_DISABLED;
-               irq_desc[irq].status = status;
-               if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
-                       irq_desc[irq].status = status | IRQ_REPLAY;
+               unsigned int status = irq_desc[irq].status;
+               status &= ~(IRQ_DISABLED | IRQ_MASKED);
+               if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+                       status |= IRQ_REPLAY;
                        /* ??? We can't re-send on (most?) alpha hw.
                           hw_resend_irq(irq_desc[irq].handler,irq); */
                }
+               irq_desc[irq].status = status;
                irq_desc[irq].handler->enable(irq);
                /* fall-through */
        }
@@ -425,7 +434,7 @@ setup_irq(unsigned int irq, struct irqaction * new)
 
        if (!shared) {
                irq_desc[irq].depth = 0;
-               irq_desc[irq].status &= ~IRQ_DISABLED;
+               irq_desc[irq].status &= ~(IRQ_DISABLED | IRQ_MASKED);
                irq_desc[irq].handler->startup(irq);
        }
        spin_unlock_irqrestore(&irq_controller_lock,flags);
@@ -500,7 +509,7 @@ free_irq(unsigned int irq, void *dev_id)
                        /* Found - now remove it from the list of entries.  */
                        *pp = action->next;
                        if (!irq_desc[irq].action) {
-                               irq_desc[irq].status |= IRQ_DISABLED;
+                               irq_desc[irq].status |= IRQ_DISABLED|IRQ_MASKED;
                                irq_desc[irq].handler->shutdown(irq);
                        }
                        spin_unlock_irqrestore(&irq_controller_lock,flags);
@@ -669,7 +678,7 @@ __global_cli(void)
         * Maximize ipl.  If ipl was previously 0 and if this thread
         * is not in an irq, then take global_irq_lock.
         */
-       if (swpipl(7) == 0 && !local_irq_count(cpu))
+       if (swpipl(IPL_MAX) == IPL_MIN && !local_irq_count(cpu))
                get_irqlock(cpu, where);
 }
@@ -841,13 +850,25 @@ handle_irq(int irq, struct pt_regs * regs)
        desc = irq_desc + irq;
        spin_lock_irq(&irq_controller_lock); /* mask also the RTC */
        desc->handler->ack(irq);
+       status = desc->status;
+
+#ifndef CONFIG_SMP
+       /* Look for broken irq masking.  */
+       if (status & IRQ_MASKED) {
+               static unsigned long last_printed;
+               if (time_after(jiffies, last_printed+HZ)) {
+                       printk(KERN_CRIT "Mask didn't work for irq %d!\n", irq);
+                       last_printed = jiffies;
+               }
+       }
+#endif
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier.
         * WAITING is used by probe to mark irqs that are being tested.
         */
-       status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
-       status |= IRQ_PENDING; /* we _want_ to handle it */
+       status &= ~(IRQ_REPLAY | IRQ_WAITING);
+       status |= IRQ_PENDING | IRQ_MASKED; /* we _want_ to handle it */
 
        /*
         * If the IRQ is disabled for whatever reason, we cannot
@@ -890,9 +911,12 @@ handle_irq(int irq, struct pt_regs * regs)
                desc->status &= ~IRQ_PENDING;
                spin_unlock(&irq_controller_lock);
        }
-       desc->status &= ~IRQ_INPROGRESS;
-       if (!(desc->status & IRQ_DISABLED))
+       status = desc->status & ~IRQ_INPROGRESS;
+       if (!(status & IRQ_DISABLED)) {
+               status &= ~IRQ_MASKED;
                desc->handler->end(irq);
+       }
+       desc->status = status;
        spin_unlock(&irq_controller_lock);
 }
@@ -1056,7 +1080,7 @@ do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr,
 #ifdef CONFIG_SMP
                cpu_data[smp_processor_id()].smp_local_irq_count++;
                smp_percpu_timer_interrupt(&regs);
-               if (smp_processor_id() == smp_boot_cpuid)
+               if (smp_processor_id() == boot_cpuid)
 #endif
                        handle_irq(RTC_IRQ, &regs);
                return;
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 72ce8bcb6..f5a9bd990 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -133,6 +133,9 @@ pci_map_single(struct pci_dev *pdev, void *cpu_addr, long size, int direction)
        unsigned long paddr;
        dma_addr_t ret;
 
+       if (direction == PCI_DMA_NONE)
+               BUG();
+
        paddr = virt_to_phys(cpu_addr);
 
        /* First check to see if we can use the direct map window.  */
@@ -186,12 +189,15 @@
    wrote there.  */
 
 void
-pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, long size, int direction)
+pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, long size,
+                int direction)
 {
        struct pci_controler *hose = pdev ? pdev->sysdata : pci_isa_hose;
        struct pci_iommu_arena *arena;
        long dma_ofs, npages;
 
+       if (direction == PCI_DMA_NONE)
+               BUG();
+
        if (dma_addr >= __direct_map_base
            && dma_addr < __direct_map_base + __direct_map_size) {
@@ -247,7 +253,8 @@ pci_alloc_consistent(struct pci_dev *pdev, long size, dma_addr_t *dma_addrp)
        }
        memset(cpu_addr, 0, size);
 
-       *dma_addrp = pci_map_single(pdev, cpu_addr, size, PCI_DMA_BIDIRECTIONAL);
+       *dma_addrp = pci_map_single(pdev, cpu_addr, size,
+                                   PCI_DMA_BIDIRECTIONAL);
        if (*dma_addrp == 0) {
                free_pages((unsigned long)cpu_addr, order);
                return NULL;
@@ -424,13 +431,17 @@ sg_fill(struct scatterlist *leader, struct scatterlist *end,
 }
 
 int
-pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents, int direction)
+pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
+          int direction)
 {
        struct scatterlist *start, *end, *out;
        struct pci_controler *hose;
        struct pci_iommu_arena *arena;
        dma_addr_t max_dma;
 
+       if (direction == PCI_DMA_NONE)
+               BUG();
+
        /* Fast path single entry scatterlists.  */
        if (nents == 1) {
                sg->dma_length = sg->length;
@@ -499,7 +510,8 @@ error:
    above.  */
 
 void
-pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents, int direction)
+pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
+            int direction)
 {
        struct pci_controler *hose;
        struct pci_iommu_arena *arena;
@@ -507,6 +519,9 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents, int direct
        dma_addr_t max_dma;
        dma_addr_t fstart, fend;
 
+       if (direction == PCI_DMA_NONE)
+               BUG();
+
        if (! alpha_mv.mv_pci_tbi)
                return;
 
@@ -555,3 +570,33 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents, int direct
 
        DBGA("pci_unmap_sg: %d entries\n", nents - (end - sg));
 }
+
+/* Return whether the given PCI device DMA address mask can be
+   supported properly.  */
+
+int
+pci_dma_supported(struct pci_dev *pdev, dma_addr_t mask)
+{
+       struct pci_controler *hose;
+       struct pci_iommu_arena *arena;
+
+       /* If there exists a direct map, and the mask fits either
+          MAX_DMA_ADDRESS defined such that GFP_DMA does something
+          useful, or the total system memory as shifted by the
+          map base.  */
+       if (__direct_map_size != 0
+           && (__direct_map_base + MAX_DMA_ADDRESS-IDENT_ADDR-1 <= mask
+               || __direct_map_base + (max_low_pfn<<PAGE_SHIFT)-1 <= mask))
+               return 1;
+
+       /* Check that we have a scatter-gather arena that fits.  */
+       hose = pdev ? pdev->sysdata : pci_isa_hose;
+       arena = hose->sg_isa;
+       if (arena && arena->dma_base + arena->size <= mask)
+               return 1;
+       arena = hose->sg_pci;
+       if (arena && arena->dma_base + arena->size <= mask)
+               return 1;
+
+       return 0;
+}
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 31a818209..2e462550f 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -90,55 +90,82 @@ cpu_idle(void)
        }
 }
 
+
+struct halt_info {
+       int mode;
+       char *restart_cmd;
+};
+
 static void
-common_shutdown(int mode, char *restart_cmd)
+common_shutdown_1(void *generic_ptr)
 {
-       /* The following currently only has any effect on SRM.  We should
-          fix MILO to understand it.  Should be pretty easy.  Also we can
-          support RESTART2 via the ipc_buffer machinations pictured below,
-          which SRM ignores.  */
+       struct halt_info *how = (struct halt_info *)generic_ptr;
+       struct percpu_struct *cpup;
+       unsigned long *pflags, flags;
+       int cpuid = smp_processor_id();
 
-       if (alpha_using_srm) {
-               struct percpu_struct *cpup;
-               unsigned long flags;
-
-               cpup = (struct percpu_struct *)
-                       ((unsigned long)hwrpb + hwrpb->processor_offset);
-
-               flags = cpup->flags;
-
-               /* Clear reason to "default"; clear "bootstrap in progress". */
-               flags &= ~0x00ff0001UL;
-
-               if (mode == LINUX_REBOOT_CMD_RESTART) {
-                       if (!restart_cmd) {
-                               flags |= 0x00020000UL; /* "cold bootstrap" */
-                               cpup->ipc_buffer[0] = 0;
-                       } else {
-                               flags |= 0x00030000UL; /* "warm bootstrap" */
-                               strncpy((char *)cpup->ipc_buffer, restart_cmd,
-                                       sizeof(cpup->ipc_buffer));
-                       }
+       /* No point in taking interrupts anymore. */
+       __cli();
+
+       cpup = (struct percpu_struct *)
+               ((unsigned long)hwrpb + hwrpb->processor_offset
+                + hwrpb->processor_size * cpuid);
+       pflags = &cpup->flags;
+       flags = *pflags;
+
+       /* Clear reason to "default"; clear "bootstrap in progress". */
+       flags &= ~0x00ff0001UL;
+
+#ifdef __SMP__
+       /* Secondaries halt here. */
+       if (cpuid != boot_cpuid) {
+               flags |= 0x00040000UL; /* "remain halted" */
+               *pflags = flags;
+               clear_bit(cpuid, &cpu_present_mask);
+               halt();
+       }
+#endif
+
+       if (how->mode == LINUX_REBOOT_CMD_RESTART) {
+               if (!how->restart_cmd) {
+                       flags |= 0x00020000UL; /* "cold bootstrap" */
                } else {
-                       flags |= 0x00040000UL; /* "remain halted" */
+                       /* For SRM, we could probably set environment
+                          variables to get this to work.  We'd have to
+                          delay this until after srm_paging_stop unless
+                          we ever got srm_fixup working.
+
+                          At the moment, SRM will use the last boot device,
+                          but the file and flags will be the defaults, when
+                          doing a "warm" bootstrap.  */
+                       flags |= 0x00030000UL; /* "warm bootstrap" */
                }
-
-               cpup->flags = flags;
-               mb();
+       } else {
+               flags |= 0x00040000UL; /* "remain halted" */
+       }
+       *pflags = flags;
 
-               /* reset_for_srm(); */
-               set_hae(srm_hae);
+#ifdef __SMP__
+       /* Wait for the secondaries to halt. */
+       clear_bit(boot_cpuid, &cpu_present_mask);
+       while (cpu_present_mask)
+               barrier();
+#endif
+
+       /* If booted from SRM, reset some of the original environment. */
+       if (alpha_using_srm) {
 #ifdef CONFIG_DUMMY_CONSOLE
-               /* This has the effect of reseting the VGA video origin. */
+               /* This has the effect of resetting the VGA video origin.  */
                take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
 #endif
+               /* reset_for_srm(); */
+               set_hae(srm_hae);
        }
 
        if (alpha_mv.kill_arch)
-               alpha_mv.kill_arch(mode);
+               alpha_mv.kill_arch(how->mode);
 
-       if (!alpha_using_srm && mode != LINUX_REBOOT_CMD_RESTART) {
+       if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) {
                /* Unfortunately, since MILO doesn't currently understand the
                   hwrpb bits above, we can't reliably halt the processor and
                   keep it halted.  So just loop.  */
@@ -151,6 +178,18 @@ common_shutdown(int mode, char *restart_cmd)
        halt();
 }
 
+static void
+common_shutdown(int mode, char *restart_cmd)
+{
+       struct halt_info args;
+       args.mode = mode;
+       args.restart_cmd = restart_cmd;
+#ifdef __SMP__
+       smp_call_function(common_shutdown_1, &args, 1, 0);
+#endif
+       common_shutdown_1(&args);
+}
+
 void
 machine_restart(char *restart_cmd)
 {
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index dd63de4d2..a8859059b 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -74,13 +74,14 @@ extern void tsunami_pci_tbi(struct pci_controler *, dma_addr_t, dma_addr_t);
 
 /* setup.c */
 extern unsigned long srm_hae;
+extern int boot_cpuid;
 
 /* smp.c */
 extern void setup_smp(void);
 extern int smp_info(char *buffer);
 extern void handle_ipi(struct pt_regs *);
 extern void smp_percpu_timer_interrupt(struct pt_regs *);
-extern int smp_boot_cpuid;
+extern unsigned long cpu_present_mask;
 
 /* bios32.c */
 /* extern void reset_for_srm(void); */
diff --git a/arch/alpha/kernel/semaphore.c b/arch/alpha/kernel/semaphore.c
index d4793ecb4..dc5209531 100644
--- a/arch/alpha/kernel/semaphore.c
+++ b/arch/alpha/kernel/semaphore.c
@@ -173,7 +173,7 @@ __down_read(struct rw_semaphore *sem, int count)
        "       subl    %0,1,%0\n"
        "       stl_c   %2,%1\n"
        "       bne     %2,2f\n"
-       ".section .text2,\"ax\"\n"
+       ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=r"(count), "=m"(sem->count), "=r"(tmp)
@@ -226,7 +226,7 @@ __down_write(struct rw_semaphore *sem, int count)
        "       ldah    %0,%3(%0)\n"
        "       stl_c   %2,%1\n"
        "       bne     %2,2f\n"
-       ".section .text2,\"ax\"\n"
+       ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=r"(count), "=m"(sem->count), "=r"(tmp)
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 112976bcb..1311d939b 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -50,6 +50,9 @@ struct hwrpb_struct *hwrpb;
 unsigned long srm_hae;
 
+/* Which processor we booted from.  */
+int boot_cpuid;
+
 #ifdef CONFIG_ALPHA_GENERIC
 struct alpha_machine_vector alpha_mv;
 int alpha_using_srm;
@@ -351,6 +354,7 @@ setup_arch(char **cmdline_p)
        char *type_name, *var_name, *p;
 
        hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
+       boot_cpuid = hard_smp_processor_id();
 
        /*
         * Locate the command line.
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index e3ae30973..be1a6440e 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -62,11 +62,13 @@ spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
 /* Set to a secondary's cpuid when it comes online.  */
 static unsigned long smp_secondary_alive;
 
-unsigned long cpu_present_mask; /* Which cpus ids came online.  */
-static unsigned long __cpu_present_mask __initdata = 0; /* cpu reported in the hwrpb */
+/* Which cpus ids came online.  */
+unsigned long cpu_present_mask;
+
+/* cpus reported in the hwrpb */
+static unsigned long hwrpb_cpu_present_mask __initdata = 0;
 
 static int max_cpus = -1;      /* Command-line limitation.  */
-int smp_boot_cpuid;            /* Which processor we booted from.  */
 int smp_num_probed;            /* Internal processor count */
 int smp_num_cpus = 1;          /* Number that came online.  */
 int smp_threads_ready;         /* True once the per process idle is forked.  */
@@ -486,10 +488,9 @@ setup_smp(void)
        struct percpu_struct *cpubase, *cpu;
        int i;
 
-       smp_boot_cpuid = hard_smp_processor_id();
-       if (smp_boot_cpuid != 0) {
+       if (boot_cpuid != 0) {
                printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
-                      smp_boot_cpuid);
+                      boot_cpuid);
        }
 
        if (hwrpb->nr_processors > 1) {
@@ -508,7 +509,7 @@ setup_smp(void)
                        if ((cpu->flags & 0x1cc) == 0x1cc) {
                                smp_num_probed++;
                                /* Assume here that "whami" == index */
-                               __cpu_present_mask |= (1L << i);
+                               hwrpb_cpu_present_mask |= (1L << i);
                                cpu->pal_revision = boot_cpu_palrev;
                        }
 
@@ -519,12 +520,12 @@ setup_smp(void)
                }
        } else {
                smp_num_probed = 1;
-               __cpu_present_mask = (1L << smp_boot_cpuid);
+               hwrpb_cpu_present_mask = (1L << boot_cpuid);
        }
-       cpu_present_mask = 1L << smp_boot_cpuid;
+       cpu_present_mask = 1L << boot_cpuid;
 
        printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
-              smp_num_probed, __cpu_present_mask);
+              smp_num_probed, hwrpb_cpu_present_mask);
 }
@@ -541,13 +542,13 @@ smp_boot_cpus(void)
        memset(__cpu_logical_map, -1, sizeof(__cpu_logical_map));
        memset(ipi_data, 0, sizeof(ipi_data));
 
-       __cpu_number_map[smp_boot_cpuid] = 0;
-       __cpu_logical_map[0] = smp_boot_cpuid;
-       current->processor = smp_boot_cpuid;
+       __cpu_number_map[boot_cpuid] = 0;
+       __cpu_logical_map[0] = boot_cpuid;
+       current->processor = boot_cpuid;
 
-       smp_store_cpu_info(smp_boot_cpuid);
+       smp_store_cpu_info(boot_cpuid);
        smp_tune_scheduling();
-       smp_setup_percpu_timer(smp_boot_cpuid);
+       smp_setup_percpu_timer(boot_cpuid);
 
        init_idle();
@@ -565,10 +566,10 @@ smp_boot_cpus(void)
        cpu_count = 1;
        for (i = 0; i < NR_CPUS; i++) {
-               if (i == smp_boot_cpuid)
+               if (i == boot_cpuid)
                        continue;
 
-               if (((__cpu_present_mask >> i) & 1) == 0)
+               if (((hwrpb_cpu_present_mask >> i) & 1) == 0)
                        continue;
 
                if (smp_boot_one_cpu(i, cpu_count))
@@ -1023,7 +1024,7 @@ debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
        "       stl_c   %0,%1\n"
        "       beq     %0,3f\n"
        "4:     mb\n"
-       ".section .text2,\"ax\"\n"
+       ".subsection 2\n"
        "2:     ldl     %0,%1\n"
        "       subq    %2,1,%2\n"
        "3:     blt     %2,4b\n"
@@ -1097,7 +1098,7 @@ void write_lock(rwlock_t * lock)
        "       stl_c   %1,%0\n"
        "       beq     %1,6f\n"
        "4:     mb\n"
-       ".section .text2,\"ax\"\n"
+       ".subsection 2\n"
        "6:     blt     %3,4b   # debug\n"
        "       subl    %3,1,%3 # debug\n"
        "       ldl     %1,%0\n"
@@ -1140,7 +1141,7 @@ void read_lock(rwlock_t * lock)
        "       stl_c   %1,%0;"
        "       beq     %1,6f;"
        "4:     mb\n"
-       ".section .text2,\"ax\"\n"
+       ".subsection 2\n"
        "6:     ldl     %1,%0;"
        "       blt     %2,4b   # debug\n"
        "       subl    %2,1,%2 # debug\n"
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index 1432496d8..acea58d1e 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -126,6 +126,30 @@ cabriolet_init_irq(void)
        setup_irq(16+4, &isa_cascade_irqaction);
 }
 
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164)
+static void
+pc164_device_interrupt(unsigned long v, struct pt_regs *r)
+{
+       /* In theory, the PC164 has the same interrupt hardware as
+          the other Cabriolet based systems.  However, something
+          got screwed up late in the development cycle which broke
+          the interrupt masking hardware.  Repeat, it is not
+          possible to mask and ack interrupts.  At all.
+
+          In an attempt to work around this, while processing
+          interrupts, we do not allow the IPL to drop below what
+          it is currently.  This prevents the possibility of
+          recursion.
+
+          ??? Another option might be to force all PCI devices
+          to use edge triggered rather than level triggered
+          interrupts.  That might be too invasive though.  */
+
+       __min_ipl = getipl();
+       cabriolet_device_interrupt(v, r);
+       __min_ipl = 0;
+}
+#endif
 
 /*
  * The EB66+ is very similar to the EB66 except that it does not have
@@ -379,7 +403,7 @@ struct alpha_machine_vector pc164_mv __initmv = {
        min_mem_address:        CIA_DEFAULT_MEM_BASE,
 
        nr_irqs:                35,
-       device_interrupt:       cabriolet_device_interrupt,
+       device_interrupt:       pc164_device_interrupt,
        init_arch:              cia_init_arch,
        init_irq:               cabriolet_init_irq,
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index fbebdd5a5..7414b8cc2 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -33,94 +33,80 @@
 #include "machvec_impl.h"
 
+/* Note mask bit is true for ENABLED irqs.  */
 static unsigned long cached_irq_mask;
-
-#define TSUNAMI_SET_IRQ_MASK(cpu, value)       \
-do {                                           \
-       volatile unsigned long *csr;            \
-       csr = &TSUNAMI_cchip->dim##cpu##.csr;   \
-       *csr = (value);                         \
-       mb();                                   \
-       *csr;                                   \
-} while(0)
-
-static inline void
-do_flush_irq_mask(unsigned long value)
-{
-       switch (TSUNAMI_bootcpu) {
-       case 0:
-               TSUNAMI_SET_IRQ_MASK(0, value);
-               break;
-       case 1:
-               TSUNAMI_SET_IRQ_MASK(1, value);
-               break;
-       case 2:
-               TSUNAMI_SET_IRQ_MASK(2, value);
-               break;
-       case 3:
-               TSUNAMI_SET_IRQ_MASK(3, value);
-               break;
-       }
-}
-
-#ifdef CONFIG_SMP
-static inline void
-do_flush_smp_irq_mask(unsigned long value)
-{
-       extern unsigned long cpu_present_mask;
-       unsigned long other_cpus = cpu_present_mask & ~(1L << TSUNAMI_bootcpu);
-
-       if (other_cpus & 1)
-               TSUNAMI_SET_IRQ_MASK(0, value);
-       if (other_cpus & 2)
-               TSUNAMI_SET_IRQ_MASK(1, value);
-       if (other_cpus & 4)
-               TSUNAMI_SET_IRQ_MASK(2, value);
-       if (other_cpus & 8)
-               TSUNAMI_SET_IRQ_MASK(3, value);
-}
-#endif
-
 static void
-dp264_flush_irq_mask(unsigned long mask)
+tsunami_update_irq_hw(unsigned long mask, unsigned long isa_enable)
 {
-       unsigned long value;
+       register tsunami_cchip *cchip = TSUNAMI_cchip;
+       register int bcpu = boot_cpuid;
 
 #ifdef CONFIG_SMP
-       do_flush_smp_irq_mask(mask);
+       register unsigned long cpm = cpu_present_mask;
+       volatile unsigned long *dim0, *dim1, *dim2, *dim3;
+       unsigned long mask0, mask1, mask2, mask3, maskB, dummy;
+
+       mask0 = mask1 = mask2 = mask3 = mask;
+       maskB = mask | isa_enable;
+       if (bcpu == 0) mask0 = maskB;
+       if (bcpu == 1) mask1 = maskB;
+       if (bcpu == 2) mask2 = maskB;
+       if (bcpu == 3) mask3 = maskB;
+
+       dim0 = &cchip->dim0.csr;
+       dim1 = &cchip->dim1.csr;
+       dim2 = &cchip->dim2.csr;
+       dim3 = &cchip->dim3.csr;
+       if ((cpm & 1) == 0) dim0 = &dummy;
+       if ((cpm & 2) == 0) dim1 = &dummy;
+       if ((cpm & 4) == 0) dim2 = &dummy;
+       if ((cpm & 8) == 0) dim3 = &dummy;
+
+       *dim0 = mask0;
+       *dim1 = mask1;
+       *dim2 = mask2;
+       *dim3 = mask3;
+       mb();
+       *dim0;
+       *dim1;
+       *dim2;
+       *dim3;
+#else
+       volatile unsigned long *dimB = &cchip->dim1.csr;
+       if (bcpu == 0) dimB = &cchip->dim0.csr;
+       if (bcpu == 2) dimB = &cchip->dim2.csr;
+       if (bcpu == 3) dimB = &cchip->dim3.csr;
+
+       *dimB = mask | isa_enable;
+       mb();
+       *dimB;
 #endif
-
-       value = mask | (1UL << 55) | 0xffff; /* isa irqs always enabled */
-       do_flush_irq_mask(value);
 }
 
-static void
-clipper_flush_irq_mask(unsigned long mask)
+static inline void
+dp264_update_irq_hw(unsigned long mask)
 {
-       unsigned long value;
-
-       value = mask >> 16;
-#ifdef CONFIG_SMP
-       do_flush_smp_irq_mask(value);
-#endif
+       tsunami_update_irq_hw(mask, (1UL << 55) | 0xffff);
+}
 
-       value = value | (1UL << 55); /* master ISA enable */
-       do_flush_irq_mask(value);
+static inline void
+clipper_update_irq_hw(unsigned long mask)
+{
+       tsunami_update_irq_hw(mask, 1UL << 55);
 }
 
 static inline void
 dp264_enable_irq(unsigned int irq)
 {
        cached_irq_mask |= 1UL << irq;
-       dp264_flush_irq_mask(cached_irq_mask);
+       dp264_update_irq_hw(cached_irq_mask);
 }
 
 static void
 dp264_disable_irq(unsigned int irq)
 {
        cached_irq_mask &= ~(1UL << irq);
-       dp264_flush_irq_mask(cached_irq_mask);
+       dp264_update_irq_hw(cached_irq_mask);
 }
 
 static unsigned int
@@ -134,14 +120,14 @@ static inline void
 clipper_enable_irq(unsigned int irq)
 {
        cached_irq_mask |= 1UL << irq;
-       clipper_flush_irq_mask(cached_irq_mask);
+       clipper_update_irq_hw(cached_irq_mask);
 }
 
 static void
 clipper_disable_irq(unsigned int irq)
 {
        cached_irq_mask &= ~(1UL << irq);
-       clipper_flush_irq_mask(cached_irq_mask);
+       clipper_update_irq_hw(cached_irq_mask);
 }
 
 static unsigned int
@@ -271,7 +257,7 @@ dp264_init_irq(void)
        if (alpha_using_srm)
                alpha_mv.device_interrupt = dp264_srm_device_interrupt;
 
-       dp264_flush_irq_mask(0UL);
+       dp264_update_irq_hw(0UL);
 
        init_i8259a_irqs();
        init_rtc_irq();
@@ -289,7 +275,7 @@ clipper_init_irq(void)
        if (alpha_using_srm)
                alpha_mv.device_interrupt = clipper_srm_device_interrupt;
 
-       clipper_flush_irq_mask(0UL);
+       clipper_update_irq_hw(0UL);
 
        init_i8259a_irqs();
        init_rtc_irq();
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index ccdcf3bdb..0230ec6d9 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -391,7 +391,7 @@ struct alpha_machine_vector xl_mv __initmv = {
        nr_irqs:                16,
        device_interrupt:       isa_device_interrupt,
 
-       init_arch:              lca_init_arch,
+       init_arch:              apecs_init_arch,
        init_irq:               sio_init_irq,
        init_rtc:               common_init_rtc,
        init_pci:               noname_init_pci,
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 8211045e8..d7b5cee8c 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -22,7 +22,6 @@
  *    fixed algorithm in do_gettimeofday() for calculating the precise time
  *    from processor cycle counter (now taking lost_ticks into account)
  */
-#include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 36b0cc43a..828044b24 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -215,8 +215,10 @@ do_entIF(unsigned long type, unsigned long a1,
                /* EV4 does not implement anything except normal
                   rounding.  Everything else will come here as
                   an illegal instruction.  Emulate them.  */
-               if (alpha_fp_emul(regs.pc - 4))
+               if (alpha_fp_emul(regs.pc)) {
+                       regs.pc += 4;
                        return;
+               }
        }
        send_sig(SIGILL, current, 1);
        break;
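
A note on the new pci_dma_supported() added to pci_iommu.c above: it accepts a device's DMA mask when either the direct-map window or one of the per-hose scatter-gather arenas lies entirely below that mask. The standalone program below is not part of the patch; it restates that decision in plain C with hypothetical window values, and it simplifies the direct-map test (the patch additionally bounds it by MAX_DMA_ADDRESS and by the amount of low memory).

#include <stdio.h>

/* Illustrative stand-ins for the kernel's per-hose IOMMU arenas. */
struct arena { unsigned long dma_base, size; };

/* Simplified version of the pci_dma_supported() decision: succeed if the
   direct-map window or any scatter-gather arena fits below the mask. */
static int dma_mask_ok(unsigned long mask,
                       unsigned long direct_base, unsigned long direct_size,
                       const struct arena *sg_isa, const struct arena *sg_pci)
{
        if (direct_size != 0 && direct_base + direct_size - 1 <= mask)
                return 1;
        if (sg_isa && sg_isa->dma_base + sg_isa->size - 1 <= mask)
                return 1;
        if (sg_pci && sg_pci->dma_base + sg_pci->size - 1 <= mask)
                return 1;
        return 0;
}

int main(void)
{
        /* Hypothetical Tsunami-like layout: a 2GB direct window at 1GB,
           an 8MB SG window at 8MB and a 128MB SG window at 3GB. */
        struct arena isa = { 0x00800000UL, 0x00800000UL };
        struct arena pci = { 0xc0000000UL, 0x08000000UL };

        printf("32-bit mask supported: %d\n",
               dma_mask_ok(0xffffffffUL, 0x40000000UL, 0x80000000UL, &isa, &pci));
        printf("24-bit (ISA) mask supported: %d\n",
               dma_mask_ok(0x00ffffffUL, 0x40000000UL, 0x80000000UL, &isa, &pci));
        return 0;
}

With a 24-bit ISA-style mask only the low scatter-gather arena qualifies, which is consistent with the patch keeping one small window entirely below 16MB.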
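The PC164 comment in sys_cabriolet.c above explains the CONFIG_ALPHA_BROKEN_IRQ_MASK strategy: because the interrupt cannot be masked or acked, interrupt processing simply refuses to let the IPL drop below the level recorded in __min_ipl. The toy program below only sketches that clamping idea under invented names; the kernel applies the floor through its IPL_MIN/setipl definitions rather than a helper like this, and swpipl_hw() stands in for the PALcode swpipl call.

#include <stdio.h>

static int current_ipl;        /* pretend hardware IPL register */
static int __min_ipl;          /* floor imposed while handling a PC164 irq */

static void swpipl_hw(int ipl) { current_ipl = ipl; }  /* hypothetical stub */

static void set_ipl_clamped(int ipl)
{
        /* Never drop below the floor: the PC164 cannot mask the source,
           so lowering the IPL would let the same irq recurse. */
        if (ipl < __min_ipl)
                ipl = __min_ipl;
        swpipl_hw(ipl);
}

int main(void)
{
        __min_ipl = 3;                 /* as if an irq arrived at IPL 3 */
        set_ipl_clamped(0);            /* a driver asks for IPL 0 ...   */
        printf("IPL is %d\n", current_ipl);  /* ... but it stays at 3   */
        return 0;
}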