#ifndef __ASM_MIPS_SOFTIRQ_H
#define __ASM_MIPS_SOFTIRQ_H
/* The locking mechanism for base handlers, to prevent re-entrancy,
 * is entirely private to an implementation; it should not be
 * referenced at all outside of this file.
 */
extern atomic_t __mips_bh_counter;
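
/*
 * bh_base, bh_active, bh_mask and bh_mask_count themselves are declared
 * in <linux/interrupt.h>, which is what normally includes this header.
 */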
#define get_active_bhs() (bh_mask & bh_active)
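
/*
 * Clear the bits given in x from bh_active.  MIPS has no atomic
 * "and to memory" instruction, so this is a load-linked/store-conditional
 * retry loop: ll reads bh_active, the bits are masked off with ~x, and sc
 * only succeeds (leaving %0 non-zero) if nothing modified the word in
 * between; on failure the loop starts over.
 */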
static inline void clear_active_bhs(unsigned long x)
{
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0,%1\n\t"
		"and\t%0,%2\n\t"
		"sc\t%0,%1\n\t"
		"beqz\t%0,1b"
		:"=&r" (temp),
		 "=m" (bh_active)
		:"Ir" (~x),
		 "m" (bh_active));
}
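
/*
 * init_bh() installs a handler in slot nr and enables it, remove_bh()
 * takes it out again, and mark_bh() atomically marks a bottom half as
 * pending so it is run the next time bottom halves are processed
 * (normally on the return-from-interrupt path).
 */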
extern inline void init_bh(int nr, void (*routine)(void))
{
	bh_base[nr] = routine;
	bh_mask_count[nr] = 0;
	bh_mask |= 1 << nr;
}

extern inline void remove_bh(int nr)
{
	bh_base[nr] = NULL;
	bh_mask &= ~(1 << nr);
}

extern inline void mark_bh(int nr)
{
	set_bit(nr, &bh_active);
}

/*
* These use a mask count to correctly handle
* nested disable/enable calls
*/
extern inline void disable_bh(int nr)
{
	bh_mask &= ~(1 << nr);
	bh_mask_count[nr]++;
}

extern inline void enable_bh(int nr)
{
	if (!--bh_mask_count[nr])
		bh_mask |= 1 << nr;
}
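
/*
 * For example, with the mask count nested disables balance out and the
 * bottom half only becomes runnable again after the outermost enable:
 *
 *	disable_bh(TIMER_BH);		count = 1, mask bit cleared
 *	disable_bh(TIMER_BH);		count = 2
 *	enable_bh(TIMER_BH);		count = 1, still disabled
 *	enable_bh(TIMER_BH);		count = 0, mask bit set again
 */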
/*
* start_bh_atomic/end_bh_atomic also nest
* naturally by using a counter
*/
extern inline void start_bh_atomic(void)
{
#ifdef __SMP__
	atomic_inc(&__mips_bh_counter);
	synchronize_irq();
#else
	atomic_inc(&__mips_bh_counter);
#endif
}

extern inline void end_bh_atomic(void)
{
	atomic_dec(&__mips_bh_counter);
}
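
/*
 * For example, code that must not race with bottom halves can bracket a
 * critical section like this; nested sections just bump the counter
 * further, and bottom halves stay blocked until it drops back to zero:
 *
 *	start_bh_atomic();
 *	... touch data shared with a bottom half ...
 *	end_bh_atomic();
 */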
#ifndef __SMP__
/* These are used by the irq code to test the lock */
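/*
 * softirq_trylock() is the interrupt-side test of the same counter that
 * start_bh_atomic() uses: it takes the "lock" (and returns 1) only when
 * the counter is zero, i.e. when nobody is already running or blocking
 * bottom halves; softirq_endlock() releases it again.  The SMP variant
 * had not been written yet, hence the #error below.
 */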
#define softirq_trylock()	(atomic_read(&__mips_bh_counter) ? \
					0 : \
					((atomic_set(&__mips_bh_counter,1)),1))
#define softirq_endlock()	(atomic_set(&__mips_bh_counter, 0))
#else
#error FIXME
#endif /* __SMP__ */
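
/*
 * Rough sketch of how a uniprocessor bottom-half dispatcher ties these
 * pieces together (simplified; the real loop lives in the generic kernel
 * code, not in this header):
 *
 *	if (softirq_trylock()) {
 *		unsigned long active = get_active_bhs();
 *
 *		clear_active_bhs(active);
 *		... call bh_base[nr]() for every bit nr set in active ...
 *		softirq_endlock();
 *	}
 */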
#endif /* __ASM_MIPS_SOFTIRQ_H */