/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * do_bottom_half() runs at normal kernel priority: all interrupts
 * enabled. do_bottom_half() is atomic with respect to itself: a
 * bottom_half handler need not be re-entrant.
 */

#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/atomic.h>

/* intr_count died a painless death... -DaveM */

int bh_mask_count[32];
unsigned long bh_active = 0;
unsigned long bh_mask = 0;
void (*bh_base[32])(void);
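
/*
 * Illustrative sketch (not compiled): roughly how the helpers in
 * <linux/interrupt.h> of this era tie a driver's handler into the
 * arrays above. init_bh() installs the routine and enables its bit
 * in bh_mask; mark_bh() sets the bit in bh_active so the handler
 * gets run on the next do_bottom_half(). The exact definitions live
 * in the header; this only approximates their shape.
 */
#if 0
static __inline__ void init_bh(int nr, void (*routine)(void))
{
        bh_base[nr] = routine;          /* install the handler */
        bh_mask_count[nr] = 0;          /* not disabled */
        bh_mask |= 1 << nr;             /* enable this bottom half */
}

static __inline__ void mark_bh(int nr)
{
        set_bit(nr, &bh_active);        /* request it to be run */
}
#endif
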
/*
 * This needs to make sure that only one bottom half handler
 * is ever active at a time. We do this without locking by
 * having softirq_trylock() do an atomic increment on a counter
 * (what used to be intr_count) and check it (nonatomically)
 * against 1. Only if it's 1 do we run the bottom halves.
 *
 * Note that the non-atomicity of the test (as opposed to the
 * actual update) means that the test may fail, and _nobody_
 * runs the handlers if there is a race that makes multiple
 * CPU's get here at the same time. That's ok, we'll run them
 * next time around.
 */
static inline void run_bottom_halves(void)
{
        unsigned long active;
        void (**bh)(void);

        active = get_active_bhs();
        clear_active_bhs(active);
        bh = bh_base;
        do {
                if (active & 1)
                        (*bh)();
                bh++;
                active >>= 1;
        } while (active);
}
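
/*
 * Illustrative sketch (not compiled): get_active_bhs() and
 * clear_active_bhs() come from the per-architecture headers. The
 * idea is to grab the set of pending-and-enabled bottom halves in
 * one go and clear the pending bits atomically, so a mark_bh()
 * racing with us is either picked up now or left for the next run.
 * Something along these lines, with details varying per architecture:
 */
#if 0
#define get_active_bhs()        (bh_mask & bh_active)
#define clear_active_bhs(x)     atomic_clear_mask((x), &bh_active)
#endif
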
asmlinkage void do_bottom_half(void)
{
        if (softirq_trylock()) {
                int cpu = smp_processor_id();

                if (hardirq_trylock(cpu)) {
                        __sti();
                        run_bottom_halves();
                        hardirq_endlock(cpu);
                }
                softirq_endlock();
        }
}
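
/*
 * Illustrative sketch (not compiled): the softirq_trylock() /
 * softirq_endlock() and hardirq_trylock() / hardirq_endlock() pairs
 * are architecture macros. On a uniprocessor build their effect is
 * essentially an "is a bottom half already running?" flag, roughly
 * like the hypothetical version below; the SMP versions use atomic
 * counters instead.
 */
#if 0
static int bh_running;                  /* hypothetical UP-only flag */

#define softirq_trylock()       (bh_running ? 0 : (bh_running = 1))
#define softirq_endlock()       (bh_running = 0)
#endif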