/* softirq.h: 64-bit Sparc soft IRQ support.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef __SPARC64_SOFTIRQ_H
#define __SPARC64_SOFTIRQ_H

#include <asm/atomic.h>
#include <asm/hardirq.h>

/* The locking mechanism for base handlers, to prevent re-entrancy,
 * is entirely private to an implementation; it should not be
 * referenced at all outside of this file.
 */
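
/* Illustrative only (not part of the original header): a driver
 * typically installs a handler in a bh_base[] slot at init time and
 * marks it pending from its hard interrupt handler.  MYDEV_BH and
 * mydev_bh() below are hypothetical names, shown purely as a sketch
 * of how the macros defined in this file are meant to be used:
 *
 *	void mydev_bh(void)
 *	{
 *		... deferred half of the interrupt work ...
 *	}
 *
 *	init_bh(MYDEV_BH, mydev_bh);	at driver initialization
 *	mark_bh(MYDEV_BH);		from the hard IRQ handler
 *	remove_bh(MYDEV_BH);		at driver teardown
 *
 * get_active_bhs() below evaluates to the set of bottom halves which
 * are both pending (bh_active) and not masked off (bh_mask).
 */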
#define get_active_bhs()	(bh_mask & bh_active)

#ifndef __SMP__

extern int __sparc64_bh_counter;

#define softirq_trylock()	(__sparc64_bh_counter ? 0 : (__sparc64_bh_counter = 1))
#define softirq_endlock()	(__sparc64_bh_counter = 0)
#define clear_active_bhs(x)	(bh_active &= ~(x))

#define init_bh(nr, routine) \
do {	int ent = nr; \
	bh_base[ent] = routine; \
	bh_mask_count[ent] = 0; \
	bh_mask |= 1 << ent; \
} while(0)

#define remove_bh(nr) \
do {	int ent = nr; \
	bh_base[ent] = NULL; \
	bh_mask &= ~(1 << ent); \
} while(0)

#define mark_bh(nr)		(bh_active |= (1 << (nr)))

#define disable_bh(nr) \
do {	int ent = nr; \
	bh_mask &= ~(1 << ent); \
	bh_mask_count[ent]++; \
} while(0)

#define enable_bh(nr) \
do {	int ent = nr; \
	if (!--bh_mask_count[ent]) \
		bh_mask |= 1 << ent; \
} while(0)

#define start_bh_atomic()	do { __sparc64_bh_counter++; barrier(); } while(0)
#define end_bh_atomic()		do { barrier(); __sparc64_bh_counter--; } while(0)
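
/* Illustrative only: a hypothetical caller which must not race with
 * bottom halves brackets its critical section like this:
 *
 *	start_bh_atomic();
 *	... touch data shared with a bottom half handler ...
 *	end_bh_atomic();
 *
 * While the counter is nonzero softirq_trylock() fails, so no bottom
 * half can run; the SMP variant below additionally calls
 * synchronize_irq() to wait out handlers already in flight on other
 * cpus.
 */
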
#else /* (__SMP__) */

#include <asm/spinlock.h>

extern atomic_t __sparc64_bh_counter;

#define start_bh_atomic() \
	do { atomic_inc(&__sparc64_bh_counter); synchronize_irq(); } while(0)
#define end_bh_atomic()		atomic_dec(&__sparc64_bh_counter)

#define init_bh(nr, routine) \
do {	unsigned long flags; \
	int ent = nr; \
	spin_lock_irqsave(&global_bh_lock, flags); \
	bh_base[ent] = routine; \
	bh_mask_count[ent] = 0; \
	bh_mask |= 1 << ent; \
	spin_unlock_irqrestore(&global_bh_lock, flags); \
} while(0)

#define remove_bh(nr) \
do {	unsigned long flags; \
	int ent = nr; \
	spin_lock_irqsave(&global_bh_lock, flags); \
	bh_base[ent] = NULL; \
	bh_mask &= ~(1 << ent); \
	spin_unlock_irqrestore(&global_bh_lock, flags); \
} while(0)

#define mark_bh(nr) \
do {	unsigned long flags; \
	spin_lock_irqsave(&global_bh_lock, flags); \
	bh_active |= (1 << (nr)); \
	spin_unlock_irqrestore(&global_bh_lock, flags); \
} while(0)

#define disable_bh(nr) \
do {	unsigned long flags; \
	int ent = nr; \
	spin_lock_irqsave(&global_bh_lock, flags); \
	bh_mask &= ~(1 << ent); \
	bh_mask_count[ent]++; \
	spin_unlock_irqrestore(&global_bh_lock, flags); \
} while(0)

#define enable_bh(nr) \
do {	unsigned long flags; \
	int ent = nr; \
	spin_lock_irqsave(&global_bh_lock, flags); \
	if (!--bh_mask_count[ent]) \
		bh_mask |= 1 << ent; \
	spin_unlock_irqrestore(&global_bh_lock, flags); \
} while(0)

#define softirq_trylock() \
({	int ret = 1; \
	if (atomic_add_return(1, &__sparc64_bh_counter) != 1) { \
		atomic_dec(&__sparc64_bh_counter); \
		ret = 0; \
	} \
	ret; \
})
#define softirq_endlock()	atomic_dec(&__sparc64_bh_counter)

#define clear_active_bhs(mask) \
do {	unsigned long flags; \
	spin_lock_irqsave(&global_bh_lock, flags); \
	bh_active &= ~(mask); \
	spin_unlock_irqrestore(&global_bh_lock, flags); \
} while(0)

#endif /* (__SMP__) */
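
/* Illustrative sketch only (the real dispatcher lives in the generic
 * kernel code, not in this header): the core bottom half loop pairs
 * these primitives roughly as:
 *
 *	if (softirq_trylock()) {
 *		if (get_active_bhs()) {
 *			... clear_active_bhs() the bits taken, then
 *			    call each installed bh_base[] handler ...
 *		}
 *		softirq_endlock();
 *	}
 */
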
#endif /* !(__SPARC64_SOFTIRQ_H) */