path: root/include/asm-i386/softirq.h
#ifndef __ASM_SOFTIRQ_H
#define __ASM_SOFTIRQ_H

#include <asm/atomic.h>
#include <asm/hardirq.h>

extern unsigned int local_bh_count[NR_CPUS];

#define cpu_bh_disable(cpu)	do { local_bh_count[(cpu)]++; barrier(); } while (0)
#define cpu_bh_enable(cpu)	do { barrier(); local_bh_count[(cpu)]--; } while (0)

/*
 * cpu_bh_trylock() takes the per-CPU count from 0 to 1, succeeding
 * only when no bottom half is currently running or disabled on this
 * CPU; cpu_bh_endlock() unconditionally drops the count back to 0.
 */
#define cpu_bh_trylock(cpu)	(local_bh_count[(cpu)] ? 0 : (local_bh_count[(cpu)] = 1))
#define cpu_bh_endlock(cpu)	(local_bh_count[(cpu)] = 0)

#define local_bh_disable()	cpu_bh_disable(smp_processor_id())
#define local_bh_enable()	cpu_bh_enable(smp_processor_id())

#define get_active_bhs()	(bh_mask & bh_active)
#define clear_active_bhs(x)	atomic_clear_mask((x),&bh_active)
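
/*
 * get_active_bhs() yields the set of bottom halves that are both
 * marked active and enabled.  A dispatcher built on these two macros
 * might look roughly like the sketch below (illustrative only, not a
 * verbatim copy of kernel/softirq.c):
 *
 *	unsigned long active = get_active_bhs();
 *	void (**bh)(void) = bh_base;
 *
 *	clear_active_bhs(active);
 *	do {
 *		if (active & 1)
 *			(*bh)();
 *		bh++;
 *		active >>= 1;
 *	} while (active);
 */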

extern inline void init_bh(int nr, void (*routine)(void))
{
	bh_base[nr] = routine;
	atomic_set(&bh_mask_count[nr], 0);
	bh_mask |= 1 << nr;
}
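
/*
 * A subsystem registers its handler once at startup.  Sketch (the
 * timer bottom half is used purely as an illustration):
 *
 *	init_bh(TIMER_BH, timer_bh);
 */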

extern inline void remove_bh(int nr)
{
	bh_mask &= ~(1 << nr);
	mb();	/* make sure the bh is masked before its handler goes away */
	bh_base[nr] = NULL;
}

extern inline void mark_bh(int nr)
{
	set_bit(nr, &bh_active);
}
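
/*
 * mark_bh() is what interrupt handlers call to ask for a bottom half
 * to be run on the way out of interrupt context.  Sketch, with a
 * made-up handler name and bh number:
 *
 *	static void my_intr(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		... acknowledge and service the hardware ...
 *		mark_bh(MY_BH);
 *	}
 */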

#ifdef __SMP__

/*
 * The locking mechanism for base handlers, to prevent re-entrancy,
 * is entirely private to the implementation; it should not be
 * referenced at all outside of this file.
 */
extern atomic_t global_bh_lock;
extern atomic_t global_bh_count;

extern void synchronize_bh(void);

static inline void start_bh_atomic(void)
{
	atomic_inc(&global_bh_lock);
	synchronize_bh();
}

static inline void end_bh_atomic(void)
{
	atomic_dec(&global_bh_lock);
}
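
/*
 * Taken together: start_bh_atomic() raises global_bh_lock so that no
 * new bottom half can start, and synchronize_bh() then waits for any
 * bottom half already running on another CPU to finish.  A sketch of
 * a caller protecting data it shares with a bottom half:
 *
 *	start_bh_atomic();
 *	... touch the shared data ...
 *	end_bh_atomic();
 */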

/* These are for the IRQs testing the lock */
static inline int softirq_trylock(int cpu)
{
	if (cpu_bh_trylock(cpu)) {
		/* nobody else is running a bottom half right now... */
		if (!test_and_set_bit(0,&global_bh_count)) {
			/* ...and nobody holds the global bh lock */
			if (atomic_read(&global_bh_lock) == 0)
				return 1;
			clear_bit(0,&global_bh_count);
		}
		cpu_bh_endlock(cpu);
	}
	return 0;
}

static inline void softirq_endlock(int cpu)
{
	cpu_bh_enable(cpu);
	clear_bit(0,&global_bh_count);
}
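
/*
 * The interrupt-exit path is the intended user of the two functions
 * above: try the lock, run the pending handlers, release.  Roughly
 * (a sketch of a do_bottom_half()-style caller, not verbatim kernel
 * code):
 *
 *	int cpu = smp_processor_id();
 *
 *	if (softirq_trylock(cpu)) {
 *		if (get_active_bhs())
 *			... run and clear the active bottom halves ...
 *		softirq_endlock(cpu);
 *	}
 */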

#else

extern inline void start_bh_atomic(void)
{
	local_bh_disable();
	barrier();
}

extern inline void end_bh_atomic(void)
{
	barrier();
	local_bh_enable();
}

/* These are for the IRQs testing the lock */
#define softirq_trylock(cpu)	(cpu_bh_trylock(cpu))
#define softirq_endlock(cpu)	(cpu_bh_endlock(cpu))
#define synchronize_bh()	barrier()

#endif	/* __SMP__ */

/*
 * These use a mask count to handle nested
 * disable/enable calls correctly.
 */
extern inline void disable_bh(int nr)
{
	bh_mask &= ~(1 << nr);
	atomic_inc(&bh_mask_count[nr]);
	synchronize_bh();
}

extern inline void enable_bh(int nr)
{
	if (atomic_dec_and_test(&bh_mask_count[nr]))
		bh_mask |= 1 << nr;
}
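
/*
 * Because of the mask count, disable/enable pairs nest correctly.
 * Sketch:
 *
 *	disable_bh(TIMER_BH);
 *	disable_bh(TIMER_BH);	(count is now 2, the bh stays masked)
 *	enable_bh(TIMER_BH);
 *	enable_bh(TIMER_BH);	(count reaches 0, the bh is unmasked)
 */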

#endif	/* __ASM_SOFTIRQ_H */