author		Ralf Baechle <ralf@linux-mips.org>	2000-02-23 00:40:54 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	2000-02-23 00:40:54 +0000
commit		529c593ece216e4aaffd36bd940cb94f1fa63129 (patch)
tree		78f1c0b805f5656aa7b0417a043c5346f700a2cf /include/linux/interrupt.h
parent		0bd079751d25808d1972baee5c4eaa1db2227257 (diff)
Merge with 2.3.43. I ignored all modifications to the qlogicisp.c
driver due to the Origin A64 hacks.
Diffstat (limited to 'include/linux/interrupt.h')
-rw-r--r--	include/linux/interrupt.h	235
1 file changed, 224 insertions, 11 deletions
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 8bc1f9ee6..29a42aef8 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -3,6 +3,7 @@
 #define _LINUX_INTERRUPT_H
 
 #include <linux/kernel.h>
+#include <linux/smp.h>
 #include <asm/bitops.h>
 #include <asm/atomic.h>
 
@@ -15,17 +16,9 @@ struct irqaction {
 	struct irqaction *next;
 };
 
-extern volatile unsigned char bh_running;
-
-extern atomic_t bh_mask_count[32];
-extern unsigned long bh_active;
-extern unsigned long bh_mask;
-extern void (*bh_base[32])(void);
-
-asmlinkage void do_bottom_half(void);
-
 /* Who gets which entry in bh_base.  Things which will occur most often
-   should come first - in which case NET should be up the top with SERIAL/TQUEUE! */
+   should come first */
 
 enum {
 	TIMER_BH = 0,
@@ -37,10 +30,8 @@ enum {
 	SPECIALIX_BH,
 	AURORA_BH,
 	ESP_BH,
-	NET_BH,
 	SCSI_BH,
 	IMMEDIATE_BH,
-	KEYBOARD_BH,
 	CYCLADES_BH,
 	CM206_BH,
 	JS_BH,
@@ -51,6 +42,228 @@ enum {
 #include <asm/hardirq.h>
 #include <asm/softirq.h>
 
+
+/* PLEASE avoid allocating new softirqs unless you really need very
+   high-frequency threaded job scheduling.  For almost all purposes
+   tasklets are more than enough.  E.g. KEYBOARD_BH, CONSOLE_BH and all
+   the serial device BHs are converted to tasklets, not to softirqs.
+ */
+
+enum
+{
+	HI_SOFTIRQ=0,
+	NET_TX_SOFTIRQ,
+	NET_RX_SOFTIRQ,
+	TASKLET_SOFTIRQ
+};
+
+#if SMP_CACHE_BYTES <= 32
+/* This trick makes the assembly easier. */
+#define SOFTIRQ_STATE_PAD	32
+#else
+#define SOFTIRQ_STATE_PAD	SMP_CACHE_BYTES
+#endif
+
+struct softirq_state
+{
+	__u32	active;
+	__u32	mask;
+} __attribute__ ((__aligned__(SOFTIRQ_STATE_PAD)));
+
+extern struct softirq_state softirq_state[NR_CPUS];
+
+struct softirq_action
+{
+	void	(*action)(struct softirq_action *);
+	void	*data;
+};
+
+asmlinkage void do_softirq(void);
+extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
+
+extern __inline__ void __cpu_raise_softirq(int cpu, int nr)
+{
+	softirq_state[cpu].active |= (1<<nr);
+}
+
+
+/* I do not want to use atomic variables yet, hence the cli/sti. */
+extern __inline__ void raise_softirq(int nr)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__cpu_raise_softirq(smp_processor_id(), nr);
+	local_irq_restore(flags);
+}
+
+extern void softirq_init(void);
+
+
+
+/* Tasklets --- the multithreaded analogue of BHs.
+
+   Main feature distinguishing them from generic softirqs: a tasklet
+   runs on only one CPU at a time.
+
+   Main feature distinguishing them from BHs: different tasklets
+   may run simultaneously on different CPUs.
+
+   Properties:
+   * If tasklet_schedule() is called, the tasklet is guaranteed
+     to be executed on some cpu at least once afterwards.
+   * If the tasklet is already scheduled but its execution has not yet
+     started, it will be executed only once.
+   * If the tasklet is already running on another CPU (or if schedule
+     is called from the tasklet itself), it is rescheduled for later.
+   * A tasklet is strictly serialized with respect to itself, but not
+     with respect to other tasklets.  If a client needs inter-tasklet
+     synchronization, it should use spinlocks.
+ */
+
+struct tasklet_struct
+{
+	struct tasklet_struct *next;
+	unsigned long state;
+	atomic_t count;
+	void (*func)(unsigned long);
+	unsigned long data;
+};
+
+#define DECLARE_TASKLET(name, func, data) \
+struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+
+#define DECLARE_TASKLET_DISABLED(name, func, data) \
+struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+
+
+enum
+{
+	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
+	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
+};
+
+struct tasklet_head
+{
+	struct tasklet_struct *list;
+} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+
+extern struct tasklet_head tasklet_vec[NR_CPUS];
+extern struct tasklet_head tasklet_hi_vec[NR_CPUS];
+
+#ifdef __SMP__
+#define tasklet_trylock(t) (!test_and_set_bit(TASKLET_STATE_RUN, &(t)->state))
+#define tasklet_unlock_wait(t) while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { /* NOTHING */ }
+#define tasklet_unlock(t) clear_bit(TASKLET_STATE_RUN, &(t)->state)
+#else
+#define tasklet_trylock(t) 1
+#define tasklet_unlock_wait(t) do { } while (0)
+#define tasklet_unlock(t) do { } while (0)
+#endif
+
+extern __inline__ void tasklet_schedule(struct tasklet_struct *t)
+{
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+		int cpu = smp_processor_id();
+		unsigned long flags;
+
+		local_irq_save(flags);
+		t->next = tasklet_vec[cpu].list;
+		tasklet_vec[cpu].list = t;
+		__cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+		local_irq_restore(flags);
+	}
+}
+
+extern __inline__ void tasklet_hi_schedule(struct tasklet_struct *t)
+{
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+		int cpu = smp_processor_id();
+		unsigned long flags;
+
+		local_irq_save(flags);
+		t->next = tasklet_hi_vec[cpu].list;
+		tasklet_hi_vec[cpu].list = t;
+		__cpu_raise_softirq(cpu, HI_SOFTIRQ);
+		local_irq_restore(flags);
+	}
+}
+
+
+extern __inline__ void tasklet_disable_nosync(struct tasklet_struct *t)
+{
+	atomic_inc(&t->count);
+}
+
+extern __inline__ void tasklet_disable(struct tasklet_struct *t)
+{
+	tasklet_disable_nosync(t);
+	tasklet_unlock_wait(t);
+}
+
+extern __inline__ void tasklet_enable(struct tasklet_struct *t)
+{
+	atomic_dec(&t->count);
+}
+
+extern void tasklet_kill(struct tasklet_struct *t);
+extern void tasklet_init(struct tasklet_struct *t,
+			 void (*func)(unsigned long), unsigned long data);
+
+#ifdef __SMP__
+
+#define SMP_TIMER_NAME(name) name##__thr
+
+#define SMP_TIMER_DEFINE(name, task) \
+DECLARE_TASKLET(task, name##__thr, 0); \
+static void name (unsigned long dummy) \
+{ \
+	tasklet_schedule(&(task)); \
+}
+
+#else /* __SMP__ */
+
+#define SMP_TIMER_NAME(name) name
+#define SMP_TIMER_DEFINE(name, task)
+
+#endif /* __SMP__ */
+
+
+/* Old BH definitions */
+
+extern struct tasklet_struct bh_task_vec[];
+
+/* It is exported _ONLY_ for wait_on_irq(). */
+extern spinlock_t global_bh_lock;
+
+extern __inline__ void mark_bh(int nr)
+{
+	tasklet_hi_schedule(bh_task_vec+nr);
+}
+
+extern __inline__ void disable_bh_nosync(int nr)
+{
+	tasklet_disable_nosync(bh_task_vec+nr);
+}
+
+extern __inline__ void disable_bh(int nr)
+{
+	tasklet_disable_nosync(bh_task_vec+nr);
+	if (!in_interrupt())
+		tasklet_unlock_wait(bh_task_vec+nr);
+}
+
+extern __inline__ void enable_bh(int nr)
+{
+	tasklet_enable(bh_task_vec+nr);
+}
+
+
+extern void init_bh(int nr, void (*routine)(void));
+extern void remove_bh(int nr);
+
+
 /*
  * Autoprobing for irqs:
  *
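
For context, a minimal sketch of how a driver would use the tasklet API this merge introduces. All driver names below (my_dev_tasklet, my_dev_do_work, my_dev_interrupt) are illustrative, not part of the patch; only DECLARE_TASKLET() and tasklet_schedule() come from the header above, and the interrupt handler uses the 2.3-era signature.

/* Hypothetical driver fragment, assuming the header above. */
#include <linux/interrupt.h>

static void my_dev_do_work(unsigned long data);

/* Statically declare an enabled tasklet bound to my_dev_do_work().
   DECLARE_TASKLET expands to a struct tasklet_struct definition, so
   it can take a storage-class qualifier. */
static DECLARE_TASKLET(my_dev_tasklet, my_dev_do_work, 0);

/* Deferred half: runs later in softirq context, serialized against
   itself but possibly concurrent with other tasklets on other CPUs. */
static void my_dev_do_work(unsigned long data)
{
	/* ... heavy processing moved out of the hard interrupt ... */
}

/* Hard interrupt handler: acknowledge the device, defer the rest. */
static void my_dev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	/* ... acknowledge the hardware ... */
	tasklet_schedule(&my_dev_tasklet);	/* queued at most once until it runs */
}

Unlike the mark_bh() scheme this replaces, where all bottom halves were serialized against each other, two different tasklets may run at the same time on different CPUs; only the same tasklet is serialized against itself.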