Diffstat (limited to 'arch/i386/kernel/irq.c')
-rw-r--r--	arch/i386/kernel/irq.c	640
1 file changed, 155 insertions(+), 485 deletions(-)
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index ea218fe45..3106f1966 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -1,3 +1,8 @@
+/* mostly architecture independent
+   some moved to i8259.c
+   the beautiful visws architecture code needs to be updated too.
+   and, finally, the BUILD_IRQ and SMP_BUILD macros in irq.h need fixed.
+ */
 /*
  *	linux/arch/i386/kernel/irq.c
  *
@@ -15,7 +20,6 @@
  * Naturally it's not a 1:1 relation, but there are similarities.
  */
-#include <linux/config.h>
 #include <linux/ptrace.h>
 #include <linux/errno.h>
 #include <linux/kernel_stat.h>
@@ -27,20 +31,19 @@
 #include <linux/malloc.h>
 #include <linux/random.h>
 #include <linux/smp.h>
-#include <linux/tasks.h>
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <asm/system.h>
 #include <asm/io.h>
-#include <asm/irq.h>
 #include <asm/bitops.h>
 #include <asm/smp.h>
 #include <asm/pgtable.h>
 #include <asm/delay.h>
 #include <asm/desc.h>
+#include <asm/irq.h>
+#include <linux/irq.h>
-#include "irq.h"
 unsigned int local_bh_count[NR_CPUS];
 unsigned int local_irq_count[NR_CPUS];
@@ -68,297 +71,11 @@ atomic_t nmi_counter;
  * system. We never hold this lock when we call the actual
  * IRQ handler.
  */
-spinlock_t irq_controller_lock;
-
-/*
- * Dummy controller type for unused interrupts
- */
-static void do_none(unsigned int irq, struct pt_regs * regs)
-{
-	/*
-	 * we are careful. While for ISA irqs it's common to happen
-	 * outside of any driver (think autodetection), this is not
-	 * at all nice for PCI interrupts. So we are stricter and
-	 * print a warning when such spurious interrupts happen.
-	 * Spurious interrupts can confuse other drivers if the PCI
-	 * IRQ line is shared.
-	 *
-	 * Such spurious interrupts are either driver bugs, or
-	 * sometimes hw (chipset) bugs.
-	 */
-	printk("unexpected IRQ vector %d on CPU#%d!\n",irq, smp_processor_id());
-
-#ifdef __SMP__
-	/*
-	 * [currently unexpected vectors happen only on SMP and APIC.
-	 * if we want to have non-APIC and non-8259A controllers
-	 * in the future with unexpected vectors, this ack should
-	 * probably be made controller-specific.]
-	 */
-	ack_APIC_irq();
-#endif
-}
-static void enable_none(unsigned int irq) { }
-static void disable_none(unsigned int irq) { }
-
-/* startup is the same as "enable", shutdown is same as "disable" */
-#define startup_none	enable_none
-#define shutdown_none	disable_none
-
-struct hw_interrupt_type no_irq_type = {
-	"none",
-	startup_none,
-	shutdown_none,
-	do_none,
-	enable_none,
-	disable_none
-};
-
-/*
- * This is the 'legacy' 8259A Programmable Interrupt Controller,
- * present in the majority of PC/AT boxes.
- */
-
-static void do_8259A_IRQ(unsigned int irq, struct pt_regs * regs);
-static void enable_8259A_irq(unsigned int irq);
-void disable_8259A_irq(unsigned int irq);
-/* startup is the same as "enable", shutdown is same as "disable" */
-#define startup_8259A_irq	enable_8259A_irq
-#define shutdown_8259A_irq	disable_8259A_irq
-static struct hw_interrupt_type i8259A_irq_type = {
-	"XT-PIC",
-	startup_8259A_irq,
-	shutdown_8259A_irq,
-	do_8259A_IRQ,
-	enable_8259A_irq,
-	disable_8259A_irq
-};
-
+spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;
 /*
  * Controller mappings for all interrupt sources:
  */
-irq_desc_t irq_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};
-
-
-/*
- * 8259A PIC functions to handle ISA devices:
- */
-
-/*
- * This contains the irq mask for both 8259A irq controllers,
- */
-static unsigned int cached_irq_mask = 0xffff;
-
-#define __byte(x,y)	(((unsigned char *)&(y))[x])
-#define cached_21	(__byte(0,cached_irq_mask))
-#define cached_A1	(__byte(1,cached_irq_mask))
-
-/*
- * Not all IRQs can be routed through the IO-APIC, eg. on certain (older)
- * boards the timer interrupt is not connected to any IO-APIC pin, it's
- * fed to the CPU IRQ line directly.
- *
- * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
- * this 'mixed mode' IRQ handling costs nothing because it's only used
- * at IRQ setup time.
- */
-unsigned long io_apic_irqs = 0;
-
-/*
- * These have to be protected by the irq controller spinlock
- * before being called.
- */
-void disable_8259A_irq(unsigned int irq)
-{
-	unsigned int mask = 1 << irq;
-	cached_irq_mask |= mask;
-	if (irq & 8) {
-		outb(cached_A1,0xA1);
-	} else {
-		outb(cached_21,0x21);
-	}
-}
-
-static void enable_8259A_irq(unsigned int irq)
-{
-	unsigned int mask = ~(1 << irq);
-	cached_irq_mask &= mask;
-	if (irq & 8) {
-		outb(cached_A1,0xA1);
-	} else {
-		outb(cached_21,0x21);
-	}
-}
-
-int i8259A_irq_pending(unsigned int irq)
-{
-	unsigned int mask = 1<<irq;
-
-	if (irq < 8)
-		return (inb(0x20) & mask);
-	return (inb(0xA0) & (mask >> 8));
-}
-
-void make_8259A_irq(unsigned int irq)
-{
-	disable_irq_nosync(irq);
-	io_apic_irqs &= ~(1<<irq);
-	irq_desc[irq].handler = &i8259A_irq_type;
-	enable_irq(irq);
-}
-
-/*
- * Careful! The 8259A is a fragile beast, it pretty
- * much _has_ to be done exactly like this (mask it
- * first, _then_ send the EOI, and the order of EOI
- * to the two 8259s is important!
- */
-static inline void mask_and_ack_8259A(unsigned int irq)
-{
-	cached_irq_mask |= 1 << irq;
-	if (irq & 8) {
-		inb(0xA1);	/* DUMMY */
-		outb(cached_A1,0xA1);
-		outb(0x62,0x20);	/* Specific EOI to cascade */
-		outb(0x20,0xA0);
-	} else {
-		inb(0x21);	/* DUMMY */
-		outb(cached_21,0x21);
-		outb(0x20,0x20);
-	}
-}
-
-static void do_8259A_IRQ(unsigned int irq, struct pt_regs * regs)
-{
-	struct irqaction * action;
-	irq_desc_t *desc = irq_desc + irq;
-
-	spin_lock(&irq_controller_lock);
-	{
-		unsigned int status;
-		mask_and_ack_8259A(irq);
-		status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
-		action = NULL;
-		if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-			action = desc->action;
-			status |= IRQ_INPROGRESS;
-		}
-		desc->status = status;
-	}
-	spin_unlock(&irq_controller_lock);
-
-	/* Exit early if we had no action or it was disabled */
-	if (!action)
-		return;
-
-	handle_IRQ_event(irq, regs, action);
-
-	spin_lock(&irq_controller_lock);
-	{
-		unsigned int status = desc->status & ~IRQ_INPROGRESS;
-		desc->status = status;
-		if (!(status & IRQ_DISABLED))
-			enable_8259A_irq(irq);
-	}
-	spin_unlock(&irq_controller_lock);
-}
-
-/*
- * This builds up the IRQ handler stubs using some ugly macros in irq.h
- *
- * These macros create the low-level assembly IRQ routines that save
- * register context and call do_IRQ(). do_IRQ() then does all the
- * operations that are needed to keep the AT (or SMP IOAPIC)
- * interrupt-controller happy.
- */
-
-
-BUILD_COMMON_IRQ()
-
-#define BI(x,y) \
-	BUILD_IRQ(##x##y)
-
-#define BUILD_16_IRQS(x) \
-	BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
-	BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
-	BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
-	BI(x,c) BI(x,d) BI(x,e) BI(x,f)
-
-/*
- * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
- * (these are usually mapped to vectors 0x20-0x30)
- */
-BUILD_16_IRQS(0x0)
-
-#ifdef CONFIG_X86_IO_APIC
-/*
- * The IO-APIC gives us many more interrupt sources. Most of these
- * are unused but an SMP system is supposed to have enough memory ...
- * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
- * across the spectrum, so we really want to be prepared to get all
- * of these. Plus, more powerful systems might have more than 64
- * IO-APIC registers.
- *
- * (these are usually mapped into the 0x30-0xff vector range)
- */
-		   BUILD_16_IRQS(0x1) BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3)
-BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7)
-BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
-BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
-#endif
-
-#undef BUILD_16_IRQS
-#undef BI
-
-
-#ifdef __SMP__
-/*
- * The following vectors are part of the Linux architecture, there
- * is no hardware IRQ pin equivalent for them, they are triggered
- * through the ICC by us (IPIs)
- */
-BUILD_SMP_INTERRUPT(reschedule_interrupt)
-BUILD_SMP_INTERRUPT(invalidate_interrupt)
-BUILD_SMP_INTERRUPT(stop_cpu_interrupt)
-BUILD_SMP_INTERRUPT(call_function_interrupt)
-BUILD_SMP_INTERRUPT(spurious_interrupt)
-
-/*
- * every pentium local APIC has two 'local interrupts', with a
- * soft-definable vector attached to both interrupts, one of
- * which is a timer interrupt, the other one is error counter
- * overflow. Linux uses the local APIC timer interrupt to get
- * a much simpler SMP time architecture:
- */
-BUILD_SMP_TIMER_INTERRUPT(apic_timer_interrupt)
-
-#endif
-
-#define IRQ(x,y) \
-	IRQ##x##y##_interrupt
-
-#define IRQLIST_16(x) \
-	IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
-	IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
-	IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
-	IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
-
-static void (*interrupt[NR_IRQS])(void) = {
-	IRQLIST_16(0x0),
-
-#ifdef CONFIG_X86_IO_APIC
-	IRQLIST_16(0x1), IRQLIST_16(0x2), IRQLIST_16(0x3),
-	IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
-	IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
-	IRQLIST_16(0xc), IRQLIST_16(0xd)
-#endif
-};
-
-#undef IRQ
-#undef IRQLIST_16
-
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = { [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};
 /*
  * Special irq handlers.
  */
@@ -366,36 +83,6 @@ static void (*interrupt[NR_IRQS])(void) = {
 void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
-#ifndef CONFIG_VISWS
-/*
- * Note that on a 486, we don't want to do a SIGFPE on an irq13
- * as the irq is unreliable, and exception 16 works correctly
- * (ie as explained in the intel literature). On a 386, you
- * can't use exception 16 due to bad IBM design, so we have to
- * rely on the less exact irq13.
- *
- * Careful.. Not only is IRQ13 unreliable, but it is also
- * leads to races. IBM designers who came up with it should
- * be shot.
- */
-
-static void math_error_irq(int cpl, void *dev_id, struct pt_regs *regs)
-{
-	outb(0,0xF0);
-	if (ignore_irq13 || !boot_cpu_data.hard_math)
-		return;
-	math_error();
-}
-
-static struct irqaction irq13 = { math_error_irq, 0, 0, "fpu", NULL, NULL };
-
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-
-static struct irqaction irq2 = { no_action, 0, 0, "cascade", NULL, NULL};
-#endif
-
 /*
  * Generic, controller-independent functions:
  */
@@ -438,10 +125,13 @@ int get_irq_list(char *buf)
 	return p - buf;
 }
+
 /*
  * Global interrupt locks for SMP. Allow interrupts to come in on any
  * CPU, yet make cli/sti act globally to protect critical regions..
  */
+spinlock_t i386_bh_lock = SPIN_LOCK_UNLOCKED;
+
 #ifdef __SMP__
 unsigned char global_irq_holder = NO_PROC_ID;
 unsigned volatile int global_irq_lock;
@@ -461,7 +151,10 @@ atomic_t global_bh_lock;
 static inline void check_smp_invalidate(int cpu)
 {
 	if (test_bit(cpu, &smp_invalidate_needed)) {
+		struct mm_struct *mm = current->mm;
 		clear_bit(cpu, &smp_invalidate_needed);
+		if (mm)
+			atomic_set_mask(1 << cpu, &mm->cpu_vm_mask);
 		local_flush_tlb();
 	}
 }
@@ -471,7 +164,6 @@ static void show(char * str)
 	int i;
 	unsigned long *stack;
 	int cpu = smp_processor_id();
-	extern char *get_options(char *str, int *ints);
 
 	printk("\n%s, CPU %d:\n", str, cpu);
 	printk("irq: %d [%d %d]\n",
@@ -481,7 +173,7 @@ static void show(char * str)
 	stack = (unsigned long *) &stack;
 	for (i = 40; i ; i--) {
 		unsigned long x = *++stack;
-		if (x > (unsigned long) &get_options && x < (unsigned long) &vsprintf) {
+		if (x > (unsigned long) &get_option && x < (unsigned long) &vsprintf) {
			printk("<[%08lx]> ", x);
 		}
 	}
@@ -782,10 +474,16 @@ void enable_irq(unsigned int irq)
 	spin_lock_irqsave(&irq_controller_lock, flags);
 	switch (irq_desc[irq].depth) {
-	case 1:
-		irq_desc[irq].status &= ~IRQ_DISABLED;
+	case 1: {
+		unsigned int status = irq_desc[irq].status & ~IRQ_DISABLED;
+		irq_desc[irq].status = status;
+		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+			irq_desc[irq].status = status | IRQ_REPLAY;
+			hw_resend_irq(irq_desc[irq].handler,irq);
+		}
 		irq_desc[irq].handler->enable(irq);
-		/* fall throught */
+		/* fall-through */
+	}
 	default:
 		irq_desc[irq].depth--;
 		break;
@@ -801,7 +499,7 @@ void enable_irq(unsigned int irq)
  * SMP cross-CPU interrupts have their own specific
  * handlers).
  */
-asmlinkage void do_IRQ(struct pt_regs regs)
+asmlinkage unsigned int do_IRQ(struct pt_regs regs)
 {
 	/*
 	 * We ack quickly, we don't want the irq controller
@@ -813,76 +511,81 @@ asmlinkage void do_IRQ(struct pt_regs regs)
 	 * 0 return value means that this irq is already being
 	 * handled by some other CPU. (or is disabled)
 	 */
-	int irq = regs.orig_eax & 0xff;	/* subtle, see irq.h */
+	int irq = regs.orig_eax & 0xff;	/* high bits used in ret_from_ code */
 	int cpu = smp_processor_id();
+	irq_desc_t *desc;
+	struct irqaction * action;
+	unsigned int status;
 	kstat.irqs[cpu][irq]++;
-	irq_desc[irq].handler->handle(irq, &regs);
+	desc = irq_desc + irq;
+	spin_lock(&irq_controller_lock);
+	irq_desc[irq].handler->ack(irq);
+	/*
	   REPLAY is when Linux resends an IRQ that was dropped earlier
	   WAITING is used by probe to mark irqs that are being tested
	   */
+	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
+	status |= IRQ_PENDING; /* we _want_ to handle it */
 	/*
-	 * This should be conditional: we should really get
-	 * a return code from the irq handler to tell us
-	 * whether the handler wants us to do software bottom
-	 * half handling or not..
+	 * If the IRQ is disabled for whatever reason, we cannot
+	 * use the action we have.
 	 */
-	if (1) {
-		if (bh_active & bh_mask)
-			do_bottom_half();
+	action = NULL;
+	if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
+		action = desc->action;
+		status &= ~IRQ_PENDING; /* we commit to handling */
+		status |= IRQ_INPROGRESS; /* we are handling it */
 	}
-}
-
-int setup_x86_irq(unsigned int irq, struct irqaction * new)
-{
-	int shared = 0;
-	struct irqaction *old, **p;
-	unsigned long flags;
+	desc->status = status;
+	spin_unlock(&irq_controller_lock);
 	/*
-	 * Some drivers like serial.c use request_irq() heavily,
-	 * so we have to be careful not to interfere with a
-	 * running system.
+	 * If there is no IRQ handler or it was disabled, exit early.
+	   Since we set PENDING, if another processor is handling
+	   a different instance of this same irq, the other processor
+	   will take care of it.
 	 */
-	if (new->flags & SA_SAMPLE_RANDOM) {
-		/*
-		 * This function might sleep, we want to call it first,
-		 * outside of the atomic block.
-		 * Yes, this might clear the entropy pool if the wrong
-		 * driver is attempted to be loaded, without actually
-		 * installing a new handler, but is this really a problem,
-		 * only the sysadmin is able to do this.
-		 */
-		rand_initialize_irq(irq);
-	}
+	if (!action)
+		return 1;
 	/*
-	 * The following block of code has to be executed atomically
+	 * Edge triggered interrupts need to remember
+	 * pending events.
+	 * This applies to any hw interrupts that allow a second
+	 * instance of the same irq to arrive while we are in do_IRQ
+	 * or in the handler. But the code here only handles the _second_
+	 * instance of the irq, not the third or fourth. So it is mostly
+	 * useful for irq hardware that does not mask cleanly in an
+	 * SMP environment.
 	 */
-	spin_lock_irqsave(&irq_controller_lock,flags);
-	p = &irq_desc[irq].action;
-	if ((old = *p) != NULL) {
-		/* Can't share interrupts unless both agree to */
-		if (!(old->flags & new->flags & SA_SHIRQ)) {
-			spin_unlock_irqrestore(&irq_controller_lock,flags);
-			return -EBUSY;
-		}
-
-		/* add new interrupt at end of irq queue */
-		do {
-			p = &old->next;
-			old = *p;
-		} while (old);
-		shared = 1;
+	for (;;) {
+		handle_IRQ_event(irq, &regs, action);
+		spin_lock(&irq_controller_lock);
+
+		if (!(desc->status & IRQ_PENDING))
+			break;
+		desc->status &= ~IRQ_PENDING;
+		spin_unlock(&irq_controller_lock);
 	}
+	desc->status &= ~IRQ_INPROGRESS;
+	if (!(desc->status & IRQ_DISABLED)){
+		irq_desc[irq].handler->end(irq);
+	}
+	spin_unlock(&irq_controller_lock);
-	*p = new;
-
-	if (!shared) {
-		irq_desc[irq].depth = 0;
-		irq_desc[irq].status &= ~IRQ_DISABLED;
-		irq_desc[irq].handler->startup(irq);
+	/*
+	 * This should be conditional: we should really get
+	 * a return code from the irq handler to tell us
+	 * whether the handler wants us to do software bottom
+	 * half handling or not..
+	 */
+	if (1) {
+		if (bh_active & bh_mask)
+			do_bottom_half();
 	}
-	spin_unlock_irqrestore(&irq_controller_lock,flags);
-	return 0;
+	return 1;
 }
 
 int request_irq(unsigned int irq,
@@ -911,8 +614,7 @@ int request_irq(unsigned int irq,
 	action->next = NULL;
 	action->dev_id = dev_id;
-	retval = setup_x86_irq(irq, action);
-
+	retval = setup_irq(irq, action);
 	if (retval)
 		kfree(action);
 	return retval;
@@ -920,29 +622,40 @@ int request_irq(unsigned int irq,
 void free_irq(unsigned int irq, void *dev_id)
 {
-	struct irqaction * action, **p;
+	struct irqaction **p;
 	unsigned long flags;
 	if (irq >= NR_IRQS)
 		return;
 	spin_lock_irqsave(&irq_controller_lock,flags);
-	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
-		if (action->dev_id != dev_id)
-			continue;
+	p = &irq_desc[irq].action;
+	for (;;) {
+		struct irqaction * action = *p;
+		if (action) {
+			struct irqaction **pp = p;
+			p = &action->next;
+			if (action->dev_id != dev_id)
+				continue;
-		/* Found it - now free it */
-		*p = action->next;
-		kfree(action);
-		if (!irq_desc[irq].action) {
-			irq_desc[irq].status |= IRQ_DISABLED;
-			irq_desc[irq].handler->shutdown(irq);
+			/* Found it - now remove it from the list of entries */
+			*pp = action->next;
+			if (!irq_desc[irq].action) {
+				irq_desc[irq].status |= IRQ_DISABLED;
+				irq_desc[irq].handler->shutdown(irq);
+			}
+			spin_unlock_irqrestore(&irq_controller_lock,flags);
+
+			/* Wait to make sure it's not being used on another CPU */
+			while (irq_desc[irq].status & IRQ_INPROGRESS)
+				barrier();
+			kfree(action);
+			return;
 		}
-		goto out;
+		printk("Trying to free free IRQ%d\n",irq);
+		spin_unlock_irqrestore(&irq_controller_lock,flags);
+		return;
 	}
-	printk("Trying to free free IRQ%d\n",irq);
-out:
-	spin_unlock_irqrestore(&irq_controller_lock,flags);
 }
 
 /*
@@ -965,7 +678,8 @@ unsigned long probe_irq_on(void)
 	for (i = NR_IRQS-1; i > 0; i--) {
 		if (!irq_desc[i].action) {
 			irq_desc[i].status |= IRQ_AUTODETECT | IRQ_WAITING;
-			irq_desc[i].handler->startup(i);
+			if(irq_desc[i].handler->startup(i))
+				irq_desc[i].status |= IRQ_PENDING;
 		}
 	}
 	spin_unlock_irq(&irq_controller_lock);
@@ -1028,102 +742,58 @@ int probe_irq_off(unsigned long unused)
 	return irq_found;
 }
-void init_ISA_irqs (void)
+/* this was setup_x86_irq but it seems pretty generic */
+int setup_irq(unsigned int irq, struct irqaction * new)
 {
-	int i;
-
-	for (i = 0; i < NR_IRQS; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 0;
-
-		if (i < 16) {
-			/*
-			 * 16 old-style INTA-cycle interrupts:
-			 */
-			irq_desc[i].handler = &i8259A_irq_type;
-		} else {
-			/*
-			 * 'high' PCI IRQs filled in on demand
-			 */
-			irq_desc[i].handler = &no_irq_type;
-		}
-	}
-}
-
-__initfunc(void init_IRQ(void))
-{
-	int i;
+	int shared = 0;
+	struct irqaction *old, **p;
+	unsigned long flags;
 
-#ifndef CONFIG_X86_VISWS_APIC
-	init_ISA_irqs();
-#else
-	init_VISWS_APIC_irqs();
-#endif
 	/*
-	 * Cover the whole vector space, no vector can escape
-	 * us. (some of these will be overridden and become
-	 * 'special' SMP interrupts)
+	 * Some drivers like serial.c use request_irq() heavily,
+	 * so we have to be careful not to interfere with a
+	 * running system.
 	 */
-	for (i = 0; i < NR_IRQS; i++) {
-		int vector = FIRST_EXTERNAL_VECTOR + i;
-		if (vector != SYSCALL_VECTOR)
-			set_intr_gate(vector, interrupt[i]);
+	if (new->flags & SA_SAMPLE_RANDOM) {
+		/*
+		 * This function might sleep, we want to call it first,
+		 * outside of the atomic block.
+		 * Yes, this might clear the entropy pool if the wrong
+		 * driver is attempted to be loaded, without actually
+		 * installing a new handler, but is this really a problem,
+		 * only the sysadmin is able to do this.
+		 */
+		rand_initialize_irq(irq);
 	}
-#ifdef __SMP__
-	/*
-	  IRQ0 must be given a fixed assignment and initialized
-	  before init_IRQ_SMP.
-	*/
-	set_intr_gate(IRQ0_TRAP_VECTOR, interrupt[0]);
-
-	/*
-	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
-	 * IPI, driven by wakeup.
+	/*
+	 * The following block of code has to be executed atomically
 	 */
-	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
-
-	/* IPI for invalidation */
-	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
-
-	/* IPI for CPU halt */
-	set_intr_gate(STOP_CPU_VECTOR, stop_cpu_interrupt);
-
-	/* self generated IPI for local APIC timer */
-	set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
-
-	/* IPI for generic function call */
-	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
-
-	/* IPI vector for APIC spurious interrupts */
-	set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
-#endif
-	request_region(0x20,0x20,"pic1");
-	request_region(0xa0,0x20,"pic2");
+	spin_lock_irqsave(&irq_controller_lock,flags);
+	p = &irq_desc[irq].action;
+	if ((old = *p) != NULL) {
+		/* Can't share interrupts unless both agree to */
+		if (!(old->flags & new->flags & SA_SHIRQ)) {
+			spin_unlock_irqrestore(&irq_controller_lock,flags);
+			return -EBUSY;
+		}
-	/*
-	 * Set the clock to 100 Hz, we already have a valid
-	 * vector now:
-	 */
-	outb_p(0x34,0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
-	outb_p(LATCH & 0xff , 0x40);	/* LSB */
-	outb(LATCH >> 8 , 0x40);	/* MSB */
+		/* add new interrupt at end of irq queue */
+		do {
+			p = &old->next;
+			old = *p;
+		} while (old);
+		shared = 1;
+	}
-#ifndef CONFIG_VISWS
-	setup_x86_irq(2, &irq2);
-	setup_x86_irq(13, &irq13);
-#endif
-}
+	*p = new;
-#ifdef CONFIG_X86_IO_APIC
-__initfunc(void init_IRQ_SMP(void))
-{
-	int i;
-	for (i = 0; i < NR_IRQS ; i++)
-		if (IO_APIC_VECTOR(i) > 0)
-			set_intr_gate(IO_APIC_VECTOR(i), interrupt[i]);
+	if (!shared) {
+		irq_desc[irq].depth = 0;
+		irq_desc[irq].status &= ~IRQ_DISABLED;
+		irq_desc[irq].handler->startup(irq);
+	}
+	spin_unlock_irqrestore(&irq_controller_lock,flags);
+	return 0;
 }
-#endif
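A note on the structure this diff dispatches through: the deleted no_irq_type and i8259A_irq_type tables, together with the ->ack()/->end() calls in the new do_IRQ(), show the generic layer talking to every controller through a small table of function pointers hung off irq_desc[irq].handler. The table itself (hw_interrupt_type) now lives in <linux/irq.h> and is not shown in this diff, so the stand-alone C sketch below only models the operations the new code actually invokes; all names here (fake_irq_ops, dummy_controller, fake_irq_desc) are invented for illustration and the real field layout may differ.

#include <stdio.h>

/*
 * Illustrative stand-in for the controller-ops table the generic IRQ
 * layer dispatches through.  Only the operations used by the new irq.c
 * are modelled; the names are made up for this sketch.
 */
struct fake_irq_ops {
	const char *typename;
	unsigned int (*startup)(unsigned int irq);
	void (*shutdown)(unsigned int irq);
	void (*ack)(unsigned int irq);		/* called on entry to do_IRQ() */
	void (*end)(unsigned int irq);		/* called on exit from do_IRQ() */
	void (*enable)(unsigned int irq);
	void (*disable)(unsigned int irq);
};

/* A "none" controller in the spirit of the deleted no_irq_type. */
static unsigned int dummy_startup(unsigned int irq) { (void)irq; return 0; }
static void dummy_noop(unsigned int irq) { (void)irq; }
static void dummy_ack(unsigned int irq) { printf("unexpected IRQ %u\n", irq); }

static struct fake_irq_ops dummy_controller = {
	"none",
	dummy_startup,
	dummy_noop,	/* shutdown */
	dummy_ack,
	dummy_noop,	/* end */
	dummy_noop,	/* enable */
	dummy_noop,	/* disable */
};

/* Per-IRQ descriptor: the generic code never touches hardware directly. */
struct fake_desc {
	unsigned int status;
	struct fake_irq_ops *handler;
};

#define NR_FAKE_IRQS 16
static struct fake_desc fake_irq_desc[NR_FAKE_IRQS];

int main(void)
{
	int i;

	for (i = 0; i < NR_FAKE_IRQS; i++)
		fake_irq_desc[i].handler = &dummy_controller;

	/* generic dispatch: ack on entry, end on exit */
	fake_irq_desc[3].handler->ack(3);
	fake_irq_desc[3].handler->end(3);
	return 0;
}

Swapping a different controller into fake_irq_desc[i].handler changes the hardware behaviour without any change to the dispatch code, which is the point of the indirection this patch introduces.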
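The heart of the new do_IRQ() is the IRQ_PENDING/IRQ_INPROGRESS handshake: PENDING records an event that still has to be serviced, INPROGRESS marks an IRQ whose handler is already running on some CPU, and the handler loop re-runs while PENDING keeps being set, so a second edge that arrives mid-handler is replayed instead of lost. The sketch below is a minimal user-space rendering of that control flow, under the assumption that one stub lock stands in for irq_controller_lock; it is not the kernel code and all names are made up.

#include <stdio.h>

#define IRQ_INPROGRESS	1	/* handler is running on some CPU        */
#define IRQ_PENDING	2	/* another event arrived while handling  */
#define IRQ_DISABLED	4	/* disable_irq() was called              */

struct fake_desc {
	unsigned int status;
};

static struct fake_desc desc;

/* stand-ins for spin_lock()/spin_unlock() on irq_controller_lock */
static void lock(void)   { }
static void unlock(void) { }

static void fake_handle_event(void)
{
	printf("servicing one event, status=%u\n", desc.status);
}

static int fake_do_IRQ(void)
{
	lock();
	/* the controller's ->ack() would run here */
	desc.status |= IRQ_PENDING;		/* we _want_ to handle it */
	if (desc.status & (IRQ_DISABLED | IRQ_INPROGRESS)) {
		/* leave PENDING set: the CPU already in the handler, or a
		   later enable_irq(), will pick the event up */
		unlock();
		return 1;
	}
	desc.status &= ~IRQ_PENDING;		/* we commit to handling it */
	desc.status |= IRQ_INPROGRESS;
	unlock();

	for (;;) {
		fake_handle_event();
		lock();
		if (!(desc.status & IRQ_PENDING))
			break;			/* nothing new arrived     */
		desc.status &= ~IRQ_PENDING;	/* replay the missed event */
		unlock();
	}
	desc.status &= ~IRQ_INPROGRESS;
	/* the controller's ->end() would run here unless IRQ_DISABLED */
	unlock();
	return 1;
}

int main(void)
{
	desc.status = 0;
	fake_do_IRQ();			/* normal case: one event, one pass */

	desc.status = IRQ_INPROGRESS;	/* pretend another CPU is in the handler */
	fake_do_IRQ();			/* this call only marks IRQ_PENDING and exits */
	printf("status after early exit: %u\n", desc.status);
	return 0;
}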