path: root/arch/arm/kernel/irq.c
author    Ralf Baechle <ralf@linux-mips.org>  1999-06-22 23:05:57 +0000
committer Ralf Baechle <ralf@linux-mips.org>  1999-06-22 23:05:57 +0000
commit    51d3b7814cdccef9188240fe0cbd8d97ff2c7470 (patch)
tree      5cbb01d0323d4f63ade66bdf48ba4a91aaa6df16 /arch/arm/kernel/irq.c
parent    52273a23c9a84336b93a35e4847fc88fac7eb0e4 (diff)
Merge with Linux 2.3.7.
WARNING: 2.3.7 is known to eat filesystems for breakfast and little children for lunch, so if you try this on your machine make backups first ...
Diffstat (limited to 'arch/arm/kernel/irq.c')
-rw-r--r--  arch/arm/kernel/irq.c | 144
1 file changed, 106 insertions(+), 38 deletions(-)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 332e8940d..ee6e07c6c 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -23,7 +23,6 @@
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
-#include <linux/timex.h>
#include <linux/malloc.h>
#include <linux/random.h>
#include <linux/smp.h>
@@ -32,7 +31,6 @@
#include <asm/hardware.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/system.h>
#ifndef SMP
@@ -46,10 +44,22 @@
#define cliIF()
#endif
+/*
+ * Maximum IRQ count. Currently, this is arbitrary.
+ * However, it should not be set too low to prevent
+ * false triggering. Conversely, if it is set too
+ * high, then you could miss a stuck IRQ.
+ *
+ * Maybe we ought to set a timer and re-enable the
+ * IRQ at a later time?
+ */
+#define MAX_IRQ_CNT 100000
+
unsigned int local_bh_count[NR_CPUS];
unsigned int local_irq_count[NR_CPUS];
spinlock_t irq_controller_lock;
+int setup_arm_irq(int, struct irqaction *);
extern int get_fiq_list(char *);
extern void init_FIQ(void);
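The MAX_IRQ_CNT comment above wonders about setting a timer and re-enabling a stuck IRQ later instead of leaving it disabled. A minimal sketch of that idea against the timer API of this era might look like the following; the helper names, the single shared timer and the one-second delay are illustrative assumptions, not part of this patch.

/*
 * Hypothetical sketch only: re-enable a locked-out IRQ after roughly
 * one second.  A single static timer is a simplification; a real
 * implementation would want one timer per IRQ.
 */
static struct timer_list irq_reenable_timer;

static void irq_reenable_timeout(unsigned long data)
{
	unsigned int irq = (unsigned int)data;

	printk(KERN_INFO "IRQ LOCK: re-enabling IRQ%d\n", irq);
	enable_irq(irq);
}

static void irq_schedule_reenable(unsigned int irq)
{
	init_timer(&irq_reenable_timer);
	irq_reenable_timer.function = irq_reenable_timeout;
	irq_reenable_timer.data     = (unsigned long)irq;
	irq_reenable_timer.expires  = jiffies + HZ;	/* about one second */
	add_timer(&irq_reenable_timer);
}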
@@ -60,17 +70,29 @@ struct irqdesc {
unsigned int probing : 1; /* IRQ in use for a probe */
unsigned int probe_ok : 1; /* IRQ can be used for probe */
unsigned int valid : 1; /* IRQ claimable */
- unsigned int unused :26;
+ unsigned int noautoenable : 1; /* don't automatically enable IRQ */
+ unsigned int unused :25;
void (*mask_ack)(unsigned int irq); /* Mask and acknowledge IRQ */
void (*mask)(unsigned int irq); /* Mask IRQ */
void (*unmask)(unsigned int irq); /* Unmask IRQ */
struct irqaction *action;
- unsigned int unused2[3];
+ /*
+ * IRQ lock detection
+ */
+ unsigned int lck_cnt;
+ unsigned int lck_pc;
+ unsigned int lck_jif;
};
static struct irqdesc irq_desc[NR_IRQS];
/*
+ * Get architecture specific interrupt handlers
+ * and interrupt initialisation.
+ */
+#include <asm/arch/irq.h>
+
+/*
* Dummy mask/unmask handler
*/
static void dummy_mask_unmask_irq(unsigned int irq)
@@ -94,10 +116,12 @@ void enable_irq(unsigned int irq)
spin_lock_irqsave(&irq_controller_lock, flags);
cliIF();
- irq_desc[irq].enabled = 1;
irq_desc[irq].probing = 0;
irq_desc[irq].triggered = 0;
- irq_desc[irq].unmask(irq);
+ if (!irq_desc[irq].noautoenable) {
+ irq_desc[irq].enabled = 1;
+ irq_desc[irq].unmask(irq);
+ }
spin_unlock_irqrestore(&irq_controller_lock, flags);
}
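The new noautoenable bit lets machine-specific code declare an IRQ that should not be unmasked automatically: with it set, enable_irq() and setup_arm_irq() leave the source masked. A hedged sketch of how the per-architecture <asm/arch/irq.h> code (which is included into this file) might mark such an IRQ is shown below; the function name and the IRQ number are purely illustrative.

/*
 * Hypothetical machine-specific initialisation: IRQ 11 is claimable
 * but is left masked until machine code unmasks it itself.  The IRQ
 * number and the handlers used here are placeholders.
 */
static void machine_irq_init(void)
{
	irq_desc[11].valid        = 1;
	irq_desc[11].noautoenable = 1;
	irq_desc[11].mask_ack     = dummy_mask_unmask_irq;
	irq_desc[11].mask         = dummy_mask_unmask_irq;
	irq_desc[11].unmask       = dummy_mask_unmask_irq;
}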
@@ -119,21 +143,52 @@ int get_irq_list(char *buf)
*p++ = '\n';
}
-#ifdef CONFIG_ACORN
+#ifdef CONFIG_ARCH_ACORN
p += get_fiq_list(p);
#endif
return p - buf;
}
/*
+ * IRQ lock detection.
+ *
+ * Hopefully, this should get us out of a few locked situations.
+ * However, it may take a while for this to happen, since we need
+ * a large number of IRQs to appear in the same jiffie with the
+ * same instruction pointer (or within 2 instructions).
+ */
+static void check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
+{
+ unsigned long instr_ptr = instruction_pointer(regs);
+
+ if (desc->lck_jif == jiffies &&
+ desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
+ desc->lck_cnt += 1;
+
+ if (desc->lck_cnt > MAX_IRQ_CNT) {
+ printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
+ disable_irq(irq);
+ }
+ } else {
+ desc->lck_cnt = 0;
+ desc->lck_pc = instruction_pointer(regs);
+ desc->lck_jif = jiffies;
+ }
+}
+
+/*
* do_IRQ handles all normal device IRQ's
*/
asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
{
- struct irqdesc * desc = irq_desc + irq;
+ struct irqdesc * desc;
struct irqaction * action;
int status, cpu;
+ irq = fixup_irq(irq);
+
+ desc = irq_desc + irq;
+
spin_lock(&irq_controller_lock);
desc->mask_ack(irq);
spin_unlock(&irq_controller_lock);
@@ -174,6 +229,12 @@ asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
}
}
+ /*
+ * Debug measure - hopefully we can continue if an
+ * IRQ lockup problem occurs...
+ */
+ check_irq_lock(desc, irq, regs);
+
irq_exit(cpu, irq);
/*
@@ -181,15 +242,10 @@ asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
* a return code from the irq handler to tell us
* whether the handler wants us to do software bottom
* half handling or not..
- *
- * ** IMPORTANT NOTE: do_bottom_half() ENABLES IRQS!!! **
- * ** WE MUST DISABLE THEM AGAIN, ELSE IDE DISKS GO **
- * ** AWOL **
*/
if (1) {
if (bh_active & bh_mask)
do_bottom_half();
- __cli();
}
}
@@ -227,11 +283,27 @@ int setup_arm_irq(int irq, struct irqaction * new)
struct irqaction *old, **p;
unsigned long flags;
- if (new->flags & SA_SAMPLE_RANDOM)
+ /*
+ * Some drivers like serial.c use request_irq() heavily,
+ * so we have to be careful not to interfere with a
+ * running system.
+ */
+ if (new->flags & SA_SAMPLE_RANDOM) {
+ /*
+ * This function might sleep, so we want to call it first,
+ * outside of the atomic block.
+ * Yes, this might clear the entropy pool if the wrong
+ * driver is loaded by mistake, without actually
+ * installing a new handler, but is that really a problem?
+ * Only the sysadmin is able to do this.
+ */
rand_initialize_irq(irq);
+ }
+ /*
+ * The following block of code has to be executed atomically
+ */
spin_lock_irqsave(&irq_controller_lock, flags);
-
p = &irq_desc[irq].action;
if ((old = *p) != NULL) {
/* Can't share interrupts unless both agree to */
@@ -252,28 +324,24 @@ int setup_arm_irq(int irq, struct irqaction * new)
if (!shared) {
irq_desc[irq].nomask = (new->flags & SA_IRQNOMASK) ? 1 : 0;
- irq_desc[irq].enabled = 1;
irq_desc[irq].probing = 0;
- irq_desc[irq].unmask(irq);
+ if (!irq_desc[irq].noautoenable) {
+ irq_desc[irq].enabled = 1;
+ irq_desc[irq].unmask(irq);
+ }
}
spin_unlock_irqrestore(&irq_controller_lock, flags);
return 0;
}
-/*
- * Using "struct sigaction" is slightly silly, but there
- * are historical reasons and it works well, so..
- */
int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
unsigned long irq_flags, const char * devname, void *dev_id)
{
unsigned long retval;
struct irqaction *action;
- if (!irq_desc[irq].valid)
- return -EINVAL;
- if (!handler)
+ if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler)
return -EINVAL;
action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
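request_irq() now rejects out-of-range IRQ numbers as well as invalid ones and missing handlers, and the SA_SAMPLE_RANDOM work is done before the spinlock is taken because it may sleep. A short, hypothetical driver-side usage of this interface is sketched below, assuming the usual driver includes (linux/sched.h, linux/interrupt.h); the IRQ number, names and flags are illustrative only.

/*
 * Hypothetical driver usage: claim a shared IRQ and let it feed the
 * entropy pool.  dev_id must be unique and non-NULL for shared IRQs.
 */
static int example_dev;

static void example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	/* acknowledge and service the (imaginary) device here */
}

static int example_attach(void)
{
	int ret;

	ret = request_irq(14, example_interrupt,
			  SA_SHIRQ | SA_SAMPLE_RANDOM,
			  "example", &example_dev);
	if (ret)
		printk(KERN_ERR "example: could not get IRQ 14\n");
	return ret;
}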
@@ -299,28 +367,30 @@ void free_irq(unsigned int irq, void *dev_id)
struct irqaction * action, **p;
unsigned long flags;
- if (!irq_desc[irq].valid) {
+ if (irq >= NR_IRQS || !irq_desc[irq].valid) {
printk(KERN_ERR "Trying to free IRQ%d\n",irq);
#ifdef CONFIG_DEBUG_ERRORS
__backtrace();
#endif
return;
}
+
+ spin_lock_irqsave(&irq_controller_lock, flags);
for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
if (action->dev_id != dev_id)
continue;
/* Found it - now free it */
- save_flags_cli (flags);
*p = action->next;
- restore_flags (flags);
kfree(action);
- return;
+ goto out;
}
printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
#ifdef CONFIG_DEBUG_ERRORS
__backtrace();
#endif
+out:
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
}
/* Start the interrupt probing. Unlike other architectures,
@@ -346,7 +416,6 @@ unsigned long probe_irq_on(void)
continue;
irq_desc[i].probing = 1;
- irq_desc[i].enabled = 1;
irq_desc[i].triggered = 0;
irq_desc[i].unmask(i);
irqs += 1;
@@ -364,7 +433,8 @@ unsigned long probe_irq_on(void)
*/
spin_lock_irq(&irq_controller_lock);
for (i = 0; i < NR_IRQS; i++) {
- if (irq_desc[i].probing && irq_desc[i].triggered) {
+ if (irq_desc[i].probing &&
+ irq_desc[i].triggered) {
irq_desc[i].probing = 0;
irqs -= 1;
}
@@ -383,7 +453,7 @@ unsigned long probe_irq_on(void)
int probe_irq_off(unsigned long irqs)
{
unsigned int i;
- int irq_found = -1;
+ int irq_found = NO_IRQ;
/*
* look at the interrupts, and find exactly one
@@ -393,7 +463,7 @@ int probe_irq_off(unsigned long irqs)
for (i = 0; i < NR_IRQS; i++) {
if (irq_desc[i].probing &&
irq_desc[i].triggered) {
- if (irq_found != -1) {
+ if (irq_found != NO_IRQ) {
irq_found = NO_IRQ;
goto out;
}
@@ -405,21 +475,19 @@ int probe_irq_off(unsigned long irqs)
irq_found = NO_IRQ;
out:
spin_unlock_irq(&irq_controller_lock);
+
return irq_found;
}
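probe_irq_off() now uses NO_IRQ rather than -1 as its "nothing (or nothing unique) found" marker. The usual autoprobe sequence built on these two calls is sketched below as a hypothetical example; the function name and the device poke are placeholders.

/*
 * Hypothetical autoprobe: a real driver would make its hardware raise
 * an interrupt between probe_irq_on() and probe_irq_off().
 */
static int example_find_irq(void)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();
	/* ... tell the device to generate an interrupt here ... */
	irq = probe_irq_off(mask);

	if (irq == NO_IRQ)
		printk(KERN_WARNING "example: no unique IRQ detected\n");
	return irq;
}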
-/*
- * Get architecture specific interrupt handlers
- * and interrupt initialisation.
- */
-#include <asm/arch/irq.h>
-
__initfunc(void init_IRQ(void))
{
extern void init_dma(void);
int irq;
for (irq = 0; irq < NR_IRQS; irq++) {
+ irq_desc[irq].probe_ok = 0;
+ irq_desc[irq].valid = 0;
+ irq_desc[irq].noautoenable = 0;
irq_desc[irq].mask_ack = dummy_mask_unmask_irq;
irq_desc[irq].mask = dummy_mask_unmask_irq;
irq_desc[irq].unmask = dummy_mask_unmask_irq;