author    Ralf Baechle <ralf@linux-mips.org>  2000-03-07 16:02:18 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-03-07 16:02:18 +0000
commit    478add0ea718dee38702c066907a510525964b48 (patch)
tree      db719e8441d2c0bf223a6f86b36a5a25772957bd /arch/alpha
parent    9f9f3e6e8548a596697778337110a423c384b6f3 (diff)
Rest of the merge.
Diffstat (limited to 'arch/alpha')
-rw-r--r--  arch/alpha/kernel/irq_alpha.c | 248
-rw-r--r--  arch/alpha/kernel/irq_i8259.c | 187
-rw-r--r--  arch/alpha/kernel/irq_smp.c   | 250
-rw-r--r--  arch/alpha/kernel/irq_srm.c   |  82
4 files changed, 767 insertions(+), 0 deletions(-)
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
new file mode 100644
index 000000000..62ba2362f
--- /dev/null
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -0,0 +1,248 @@
+/*
+ * Alpha specific irq code.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+
+#include <asm/machvec.h>
+#include <asm/dma.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+
+/* Only the uniprocessor kernel needs these IRQ/BH locking depth
+   counters; on SMP they live in the per-cpu structure for cache
+   reasons. */
+#ifndef CONFIG_SMP
+int __local_irq_count;
+int __local_bh_count;
+unsigned long __irq_attempt[NR_IRQS];
+#endif
+
+/* Hack minimum IPL during interrupt processing for broken hardware. */
+#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
+int __min_ipl;
+#endif
+
+/*
+ * Performance counter hook. A module can override this to
+ * do something useful.
+ */
+static void
+dummy_perf(unsigned long vector, struct pt_regs *regs)
+{
+ irq_err_count++;
+ printk(KERN_CRIT "Performance counter interrupt!\n");
+}
+
+void (*perf_irq)(unsigned long, struct pt_regs *) = dummy_perf;
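+
+/*
+ * Illustrative sketch (the handler name is hypothetical): a profiling
+ * module hooks the counter overflow interrupt by saving and replacing
+ * this pointer, and restores it on unload:
+ *
+ *   static void (*old_perf_irq)(unsigned long, struct pt_regs *);
+ *
+ *   old_perf_irq = perf_irq;
+ *   perf_irq = my_counter_handler;
+ *   ...
+ *   perf_irq = old_perf_irq;
+ */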
+
+/*
+ * The main interrupt entry point.
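+ *
+ * The PALcode dispatches here with "type" identifying the interrupt
+ * class, handled by the switch below: 0 = interprocessor, 1 = clock
+ * tick, 2 = machine check, 3 = I/O device, 4 = performance counter.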
+ */
+
+asmlinkage void
+do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr,
+ unsigned long a3, unsigned long a4, unsigned long a5,
+ struct pt_regs regs)
+{
+ switch (type) {
+ case 0:
+#ifdef CONFIG_SMP
+ handle_ipi(&regs);
+ return;
+#else
+ irq_err_count++;
+ printk(KERN_CRIT "Interprocessor interrupt? "
+ "You must be kidding!\n");
+#endif
+ break;
+ case 1:
+#ifdef CONFIG_SMP
+ cpu_data[smp_processor_id()].smp_local_irq_count++;
+ smp_percpu_timer_interrupt(&regs);
+ if (smp_processor_id() == boot_cpuid)
+#endif
+ handle_irq(RTC_IRQ, &regs);
+ return;
+ case 2:
+ irq_err_count++;
+ alpha_mv.machine_check(vector, la_ptr, &regs);
+ return;
+ case 3:
+ alpha_mv.device_interrupt(vector, &regs);
+ return;
+ case 4:
+ perf_irq(vector, &regs);
+ return;
+ default:
+ printk(KERN_CRIT "Hardware intr %ld %lx? Huh?\n",
+ type, vector);
+ }
+ printk("PC = %016lx PS=%04lx\n", regs.pc, regs.ps);
+}
+
+void __init
+common_init_isa_dma(void)
+{
+ /* Master-clear both 8237 DMA controllers, then clear their mask
+    registers so that all channels are usable again.  */
+ outb(0, DMA1_RESET_REG);
+ outb(0, DMA2_RESET_REG);
+ outb(0, DMA1_CLR_MASK_REG);
+ outb(0, DMA2_CLR_MASK_REG);
+}
+
+void __init
+init_IRQ(void)
+{
+ alpha_mv.init_irq();
+ wrent(entInt, 0);
+}
+
+/*
+ * machine error checks
+ */
+#define MCHK_K_TPERR 0x0080
+#define MCHK_K_TCPERR 0x0082
+#define MCHK_K_HERR 0x0084
+#define MCHK_K_ECC_C 0x0086
+#define MCHK_K_ECC_NC 0x0088
+#define MCHK_K_OS_BUGCHECK 0x008A
+#define MCHK_K_PAL_BUGCHECK 0x0090
+
+#ifndef CONFIG_SMP
+struct mcheck_info __mcheck_info;
+#endif
+
+void
+process_mcheck_info(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs *regs, const char *machine,
+ int expected)
+{
+ struct el_common *mchk_header;
+ const char *reason;
+
+ /*
+ * See if the machine check is due to a badaddr() and if so,
+ * ignore it.
+ */
+
+#if DEBUG_MCHECK > 0
+ printk(KERN_CRIT "%s machine check %s\n", machine,
+ expected ? "expected." : "NOT expected!!!");
+#endif
+
+ if (expected) {
+ int cpu = smp_processor_id();
+ mcheck_expected(cpu) = 0;
+ mcheck_taken(cpu) = 1;
+ return;
+ }
+
+ mchk_header = (struct el_common *)la_ptr;
+
+ printk(KERN_CRIT "%s machine check: vector=0x%lx pc=0x%lx code=0x%lx\n",
+ machine, vector, regs->pc, mchk_header->code);
+
+ switch ((unsigned int) mchk_header->code) {
+ /* Machine check reasons. Defined according to PALcode sources. */
+ case 0x80: reason = "tag parity error"; break;
+ case 0x82: reason = "tag control parity error"; break;
+ case 0x84: reason = "generic hard error"; break;
+ case 0x86: reason = "correctable ECC error"; break;
+ case 0x88: reason = "uncorrectable ECC error"; break;
+ case 0x8A: reason = "OS-specific PAL bugcheck"; break;
+ case 0x90: reason = "callsys in kernel mode"; break;
+ case 0x96: reason = "i-cache read retryable error"; break;
+ case 0x98: reason = "processor detected hard error"; break;
+
+ /* System specific (these are for Alcor, at least): */
+ case 0x202: reason = "system detected hard error"; break;
+ case 0x203: reason = "system detected uncorrectable ECC error"; break;
+ case 0x204: reason = "SIO SERR occurred on PCI bus"; break;
+ case 0x205: reason = "parity error detected by CIA"; break;
+ case 0x206: reason = "SIO IOCHK occurred on ISA bus"; break;
+ case 0x207: reason = "non-existent memory error"; break;
+ case 0x208: reason = "MCHK_K_DCSR"; break;
+ case 0x209: reason = "PCI SERR detected"; break;
+ case 0x20b: reason = "PCI data parity error detected"; break;
+ case 0x20d: reason = "PCI address parity error detected"; break;
+ case 0x20f: reason = "PCI master abort error"; break;
+ case 0x211: reason = "PCI target abort error"; break;
+ case 0x213: reason = "scatter/gather PTE invalid error"; break;
+ case 0x215: reason = "flash ROM write error"; break;
+ case 0x217: reason = "IOA timeout detected"; break;
+ case 0x219: reason = "IOCHK#, EISA add-in board parity or other catastrophic error"; break;
+ case 0x21b: reason = "EISA fail-safe timer timeout"; break;
+ case 0x21d: reason = "EISA bus time-out"; break;
+ case 0x21f: reason = "EISA software generated NMI"; break;
+ case 0x221: reason = "unexpected ev5 IRQ[3] interrupt"; break;
+ default: reason = "unknown"; break;
+ }
+
+ printk(KERN_CRIT "machine check type: %s%s\n",
+ reason, mchk_header->retry ? " (retryable)" : "");
+
+ dik_show_regs(regs, NULL);
+
+#if DEBUG_MCHECK > 1
+ {
+ /* Dump the logout area to give all info. */
+ unsigned long *ptr = (unsigned long *)la_ptr;
+ long i;
+ for (i = 0; i < mchk_header->size / sizeof(long); i += 2) {
+ printk(KERN_CRIT " +%8lx %016lx %016lx\n",
+ i*sizeof(long), ptr[i], ptr[i+1]);
+ }
+ }
+#endif
+}
+
+/*
+ * The special RTC interrupt type. The interrupt itself was
+ * processed by PALcode, and comes in via entInt vector 1.
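+ * Since PALcode has already dismissed the tick, there is no device
+ * state left to touch, which is why every hook in rtc_irq_type
+ * below is a no-op.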
+ */
+
+static void rtc_enable_disable(unsigned int irq) { }
+static unsigned int rtc_startup(unsigned int irq) { return 0; }
+
+struct irqaction timer_irqaction = {
+ handler: timer_interrupt,
+ flags: SA_INTERRUPT,
+ name: "timer",
+};
+
+static struct hw_interrupt_type rtc_irq_type = {
+ typename: "RTC",
+ startup: rtc_startup,
+ shutdown: rtc_enable_disable,
+ enable: rtc_enable_disable,
+ disable: rtc_enable_disable,
+ ack: rtc_enable_disable,
+ end: rtc_enable_disable,
+};
+
+void __init
+init_rtc_irq(void)
+{
+ irq_desc[RTC_IRQ].status = IRQ_DISABLED;
+ irq_desc[RTC_IRQ].handler = &rtc_irq_type;
+ setup_irq(RTC_IRQ, &timer_irqaction);
+}
+
+/* Dummy irqactions. */
+struct irqaction isa_cascade_irqaction = {
+ handler: no_action,
+ name: "isa-cascade"
+};
+
+struct irqaction timer_cascade_irqaction = {
+ handler: no_action,
+ name: "timer-cascade"
+};
+
+struct irqaction halt_switch_irqaction = {
+ handler: no_action,
+ name: "halt-switch"
+};
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
new file mode 100644
index 000000000..a47178050
--- /dev/null
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -0,0 +1,187 @@
+/*
+ * linux/arch/alpha/kernel/irq_i8259.c
+ *
+ * This is the 'legacy' 8259A Programmable Interrupt Controller,
+ * present in the majority of PC/AT boxes.
+ *
+ * Started hacking from linux-2.3.30pre6/arch/i386/kernel/i8259.c.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+
+
+/* Note mask bit is true for DISABLED irqs. */
+static unsigned int cached_irq_mask = 0xffff;
+spinlock_t i8259_irq_lock = SPIN_LOCK_UNLOCKED;
+
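+/*
+ * All mask updates funnel through cached_irq_mask, so the OCW1 mask
+ * registers (port 0x21 for irqs 0-7 on the master, port 0xA1 for
+ * irqs 8-15 on the slave) always match the kernel's cached view.
+ */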
+static inline void
+i8259_update_irq_hw(unsigned int irq, unsigned long mask)
+{
+ int port = 0x21;
+ if (irq & 8) {
+  mask >>= 8;
+  port = 0xA1;
+ }
+ outb(mask, port);
+}
+
+inline void
+i8259a_enable_irq(unsigned int irq)
+{
+ spin_lock(&i8259_irq_lock);
+ i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+ spin_unlock(&i8259_irq_lock);
+}
+
+static inline void
+__i8259a_disable_irq(unsigned int irq)
+{
+ i8259_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
+}
+
+void
+i8259a_disable_irq(unsigned int irq)
+{
+ spin_lock(&i8259_irq_lock);
+ __i8259a_disable_irq(irq);
+ spin_unlock(&i8259_irq_lock);
+}
+
+void
+i8259a_mask_and_ack_irq(unsigned int irq)
+{
+ spin_lock(&i8259_irq_lock);
+ __i8259a_disable_irq(irq);
+
+ /* Ack the interrupt, making it the lowest priority: 0xE0 | level
+    is the 8259 "rotate on specific EOI" command.  An irq on the
+    slave must also be acked on the master via the cascade (irq 2). */
+ if (irq >= 8) {
+ outb(0xE0 | (irq - 8), 0xa0); /* ack the slave */
+ irq = 2;
+ }
+ outb(0xE0 | irq, 0x20); /* ack the master */
+ spin_unlock(&i8259_irq_lock);
+}
+
+unsigned int
+i8259a_startup_irq(unsigned int irq)
+{
+ i8259a_enable_irq(irq);
+ return 0; /* never anything pending */
+}
+
+void
+i8259a_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ i8259a_enable_irq(irq);
+}
+
+struct hw_interrupt_type i8259a_irq_type = {
+ typename: "XT-PIC",
+ startup: i8259a_startup_irq,
+ shutdown: i8259a_disable_irq,
+ enable: i8259a_enable_irq,
+ disable: i8259a_disable_irq,
+ ack: i8259a_mask_and_ack_irq,
+ end: i8259a_end_irq,
+};
+
+void __init
+init_i8259a_irqs(void)
+{
+ static struct irqaction cascade = {
+ handler: no_action,
+ name: "cascade",
+ };
+
+ long i;
+
+ outb(0xff, 0x21); /* mask all of 8259A-1 */
+ outb(0xff, 0xA1); /* mask all of 8259A-2 */
+
+ for (i = 0; i < 16; i++) {
+ irq_desc[i].status = IRQ_DISABLED;
+ irq_desc[i].handler = &i8259a_irq_type;
+ }
+
+ setup_irq(2, &cascade);
+}
+
+
+#if defined(CONFIG_ALPHA_GENERIC)
+# define IACK_SC alpha_mv.iack_sc
+#elif defined(CONFIG_ALPHA_APECS)
+# define IACK_SC APECS_IACK_SC
+#elif defined(CONFIG_ALPHA_LCA)
+# define IACK_SC LCA_IACK_SC
+#elif defined(CONFIG_ALPHA_CIA)
+# define IACK_SC CIA_IACK_SC
+#elif defined(CONFIG_ALPHA_PYXIS)
+# define IACK_SC PYXIS_IACK_SC
+#elif defined(CONFIG_ALPHA_TSUNAMI)
+# define IACK_SC TSUNAMI_IACK_SC
+#elif defined(CONFIG_ALPHA_POLARIS)
+# define IACK_SC POLARIS_IACK_SC
+#elif defined(CONFIG_ALPHA_IRONGATE)
+# define IACK_SC IRONGATE_IACK_SC
+#endif
+
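+/*
+ * Where the chipset provides a PCI interrupt-acknowledge address,
+ * IACK_SC is a compile-time constant; a generic kernel instead picks
+ * it up from the machine vector (alpha_mv.iack_sc) at run time.
+ */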
+#if defined(IACK_SC)
+void
+isa_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+ /*
+ * Generate a PCI interrupt acknowledge cycle. The PIC will
+ * respond with the interrupt vector of the highest priority
+ * interrupt that is pending.  The PALcode sets up the
+ * interrupt vectors such that irq level L generates vector L.
+ */
+ int j = *(vuip) IACK_SC;
+ j &= 0xff;
+ if (j == 7) {
+ if (!(inb(0x20) & 0x80)) {
+ /* It's only a passive release: a spurious irq 7
+    (the in-service bit for irq 7 is not set). */
+ return;
+ }
+ }
+ handle_irq(j, regs);
+}
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || !defined(IACK_SC)
+void
+isa_no_iack_sc_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+ unsigned long pic;
+
+ /*
+ * It seems to me that the probability of two or more *device*
+ * interrupts occurring at almost exactly the same time is
+ * pretty low. So why pay the price of checking for
+ * additional interrupts here if the common case can be
+ * handled so much more easily?
+ */
+ /*
+ * The first read gives you *all* interrupting lines.
+ * Therefore, read the mask register and AND out those lines
+ * not enabled.  Note that some documentation lists ports 0x21
+ * and 0xA1 as write only.  This is not true.
+ */
+ pic = inb(0x20) | (inb(0xA0) << 8); /* read isr */
+ pic &= 0xFFFB; /* mask out cascade & hibits */
+
+ while (pic) {
+ int j = ffz(~pic); /* lowest set bit = lowest pending line */
+ pic &= pic - 1;  /* clear that line's bit */
+ handle_irq(j, regs);
+ }
+}
+#endif
diff --git a/arch/alpha/kernel/irq_smp.c b/arch/alpha/kernel/irq_smp.c
new file mode 100644
index 000000000..90a3c30b2
--- /dev/null
+++ b/arch/alpha/kernel/irq_smp.c
@@ -0,0 +1,250 @@
+/*
+ * linux/arch/alpha/kernel/irq_smp.c
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+
+
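+/*
+ * Global interrupt-lock protocol, as implemented below: __global_cli()
+ * raises the IPL and, when the caller is not itself in interrupt
+ * context, acquires global_irq_lock once no CPU is still running an
+ * interrupt handler; global_irq_holder records the owning CPU.
+ */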
+/* Who has global_irq_lock. */
+int global_irq_holder = NO_PROC_ID;
+
+/* This protects IRQs. */
+spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED;
+
+/* Debugging aid: records where the global irq lock was last taken. */
+static void *previous_irqholder = NULL;
+
+#define MAXCOUNT 100000000
+
+
+static void
+show(char * str, void *where)
+{
+#if 0
+ int i;
+ unsigned long *stack;
+#endif
+ int cpu = smp_processor_id();
+
+ printk("\n%s, CPU %d: %p\n", str, cpu, where);
+ printk("irq: %d [%d %d]\n",
+ irqs_running(),
+ cpu_data[0].irq_count,
+ cpu_data[1].irq_count);
+
+ printk("bh: %d [%d %d]\n",
+ spin_is_locked(&global_bh_lock) ? 1 : 0,
+ cpu_data[0].bh_count,
+ cpu_data[1].bh_count);
+#if 0
+ stack = (unsigned long *) &str;
+ for (i = 40; i ; i--) {
+ unsigned long x = *++stack;
+ if (x > (unsigned long) &init_task_union &&
+ x < (unsigned long) &vsprintf) {
+ printk("<[%08lx]> ", x);
+ }
+ }
+#endif
+}
+
+static inline void
+wait_on_irq(int cpu, void *where)
+{
+ int count = MAXCOUNT;
+
+ for (;;) {
+
+ /*
+ * Wait until all interrupts are gone. Wait
+ * for bottom half handlers unless we're
+ * already executing in one.
+ */
+ if (!irqs_running()) {
+ if (local_bh_count(cpu)
+ || !spin_is_locked(&global_bh_lock))
+ break;
+ }
+
+ /* Duh, we have to loop.  Release the lock to avoid deadlocks. */
+ spin_unlock(&global_irq_lock);
+
+ for (;;) {
+ if (!--count) {
+ show("wait_on_irq", where);
+ count = MAXCOUNT;
+ }
+ __sti();
+ udelay(1); /* make sure to run pending irqs */
+ __cli();
+
+ if (irqs_running())
+ continue;
+ if (spin_is_locked(&global_irq_lock))
+ continue;
+ if (!local_bh_count(cpu)
+ && spin_is_locked(&global_bh_lock))
+ continue;
+ if (spin_trylock(&global_irq_lock))
+ break;
+ }
+ }
+}
+
+static inline void
+get_irqlock(int cpu, void* where)
+{
+ if (!spin_trylock(&global_irq_lock)) {
+ /* Do we already hold the lock? */
+ if (cpu == global_irq_holder)
+ return;
+ /* Uhhuh.. Somebody else got it. Wait. */
+ spin_lock(&global_irq_lock);
+ }
+
+ /*
+ * Ok, we got the lock bit.
+ * But that's actually just the easy part.  Now
+ * we need to make sure that nobody else is running
+ * in an interrupt context.
+ */
+ wait_on_irq(cpu, where);
+
+ /*
+ * Finally.
+ */
+#if DEBUG_SPINLOCK
+ global_irq_lock.task = current;
+ global_irq_lock.previous = where;
+#endif
+ global_irq_holder = cpu;
+ previous_irqholder = where;
+}
+
+void
+__global_cli(void)
+{
+ int cpu = smp_processor_id();
+ void *where = __builtin_return_address(0);
+
+ /*
+ * Maximize ipl. If ipl was previously 0 and if this thread
+ * is not in an irq, then take global_irq_lock.
+ */
+ if (swpipl(IPL_MAX) == IPL_MIN && !local_irq_count(cpu))
+ get_irqlock(cpu, where);
+}
+
+void
+__global_sti(void)
+{
+ int cpu = smp_processor_id();
+
+ if (!local_irq_count(cpu))
+ release_irqlock(cpu);
+ __sti();
+}
+
+/*
+ * SMP flags value to restore to:
+ * 0 - global cli
+ * 1 - global sti
+ * 2 - local cli
+ * 3 - local sti
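+ *
+ * Typical caller pattern (illustrative; on SMP kernels of this era
+ * save_flags(), cli() and restore_flags() map onto these __global_*
+ * routines):
+ *
+ *   unsigned long flags;
+ *   save_flags(flags);     receives one of the four codes above
+ *   cli();                 may acquire global_irq_lock
+ *   ... critical section ...
+ *   restore_flags(flags);  restores exactly the state saved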
+ */
+unsigned long
+__global_save_flags(void)
+{
+ int retval;
+ int local_enabled;
+ unsigned long flags;
+ int cpu = smp_processor_id();
+
+ __save_flags(flags);
+ /* On Alpha the saved flags are the current IPL; 0 means enabled. */
+ local_enabled = (!(flags & 7));
+ /* default to local */
+ retval = 2 + local_enabled;
+
+ /* Check for global flags if we're not in an interrupt. */
+ if (!local_irq_count(cpu)) {
+ if (local_enabled)
+ retval = 1;
+ if (global_irq_holder == cpu)
+ retval = 0;
+ }
+ return retval;
+}
+
+void
+__global_restore_flags(unsigned long flags)
+{
+ switch (flags) {
+ case 0:
+ __global_cli();
+ break;
+ case 1:
+ __global_sti();
+ break;
+ case 2:
+ __cli();
+ break;
+ case 3:
+ __sti();
+ break;
+ default:
+ printk(KERN_ERR "global_restore_flags: %08lx (%p)\n",
+ flags, __builtin_return_address(0));
+ }
+}
+
+/*
+ * From its use, I infer that synchronize_irq() stalls a thread until
+ * the effects of a command to an external device are known to have
+ * taken hold. Typically, the command is to stop sending interrupts.
+ * The strategy here is wait until there is at most one processor
+ * (this one) in an irq. The memory barrier serializes the write to
+ * the device and the subsequent accesses of global_irq_count.
+ * --jmartin
+ */
+#define DEBUG_SYNCHRONIZE_IRQ 0
+
+void
+synchronize_irq(void)
+{
+#if 0
+ /* Joe's version. */
+ int cpu = smp_processor_id();
+ int local_count;
+ int global_count;
+ int countdown = 1<<24;
+ void *where = __builtin_return_address(0);
+
+ mb();
+ do {
+ local_count = local_irq_count(cpu);
+ global_count = atomic_read(&global_irq_count);
+ if (DEBUG_SYNCHRONIZE_IRQ && (--countdown == 0)) {
+ printk("%d:%d/%d\n", cpu, local_count, global_count);
+ show("synchronize_irq", where);
+ break;
+ }
+ } while (global_count != local_count);
+#else
+ /* Jay's version.  On SMP, cli() maps to __global_cli(), which
+    waits (via wait_on_irq) until no CPU is still running an
+    interrupt handler; sti() then releases the lock. */
+ if (irqs_running()) {
+ cli();
+ sti();
+ }
+#endif
+}
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
new file mode 100644
index 000000000..8b14a59fe
--- /dev/null
+++ b/arch/alpha/kernel/irq_srm.c
@@ -0,0 +1,82 @@
+/*
+ * Handle interrupts from the SRM, assuming no additional weirdness.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+
+#include <asm/machvec.h>
+#include <asm/dma.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+
+
+/*
+ * Is the PALcode SMP safe?  In other words: can we call cserve_ena/dis
+ * at the same time on multiple CPUs?  To be safe I added a spinlock,
+ * but it can be removed trivially if the PALcode is robust against SMP.
+ */
+spinlock_t srm_irq_lock = SPIN_LOCK_UNLOCKED;
+
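+/*
+ * cserve_ena/cserve_dis wrap the SRM console-service (cserve) PALcode
+ * call, asking the console itself to unmask/mask the line.
+ */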
+static inline void
+srm_enable_irq(unsigned int irq)
+{
+ spin_lock(&srm_irq_lock);
+ cserve_ena(irq - 16); /* console irqs are numbered from 0 at kernel irq 16 */
+ spin_unlock(&srm_irq_lock);
+}
+
+static void
+srm_disable_irq(unsigned int irq)
+{
+ spin_lock(&srm_irq_lock);
+ cserve_dis(irq - 16);
+ spin_unlock(&srm_irq_lock);
+}
+
+static unsigned int
+srm_startup_irq(unsigned int irq)
+{
+ srm_enable_irq(irq);
+ return 0;
+}
+
+static void
+srm_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ srm_enable_irq(irq);
+}
+
+/* Handle interrupts from the SRM, assuming no additional weirdness. */
+static struct hw_interrupt_type srm_irq_type = {
+ typename: "SRM",
+ startup: srm_startup_irq,
+ shutdown: srm_disable_irq,
+ enable: srm_enable_irq,
+ disable: srm_disable_irq,
+ ack: srm_disable_irq,
+ end: srm_end_irq,
+};
+
+void __init
+init_srm_irqs(long max, unsigned long ignore_mask)
+{
+ long i;
+
+ for (i = 16; i < max; ++i) {
+ if (i < 64 && ((ignore_mask >> i) & 1))
+ continue;
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = &srm_irq_type;
+ }
+}
+
+void
+srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ /* SRM delivers device interrupts at vectors 0x800 + 0x10*irq,
+    so the mapping back to an irq number is direct. */
+ int irq = (vector - 0x800) >> 4;
+ handle_irq(irq, regs);
+}