#ifndef _ALPHA_HARDIRQ_H
#define _ALPHA_HARDIRQ_H

#include <linux/tasks.h>

extern unsigned int local_irq_count[NR_CPUS];

#ifndef __SMP__

#define hardirq_trylock(cpu)	(local_irq_count[cpu] == 0)
#define hardirq_endlock(cpu)	do { } while (0)

#define hardirq_enter(cpu)	(local_irq_count[cpu]++)
#define hardirq_exit(cpu)	(local_irq_count[cpu]--)

#define synchronize_irq()	do { } while (0)

#else

/* initially just a straight copy of the i386 code */

#include <asm/atomic.h>
#include <asm/spinlock.h>
#include <asm/system.h>
#include <asm/smp.h>

extern unsigned char global_irq_holder;
extern spinlock_t global_irq_lock;
extern atomic_t global_irq_count;
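
/*
 * global_irq_lock serializes hard interrupt handling across CPUs:
 * global_irq_holder records which CPU currently owns the lock, and
 * global_irq_count counts interrupts in progress system-wide.
 */
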
static inline void release_irqlock(int cpu)
{
	/* if we didn't own the irq lock, just ignore.. */
	if (global_irq_holder == (unsigned char) cpu) {
		global_irq_holder = NO_PROC_ID;
		spin_unlock(&global_irq_lock);
	}
}

/* Ordering of the counter bumps is _deadly_ important. */
static inline void hardirq_enter(int cpu)
{
	++local_irq_count[cpu];
	atomic_inc(&global_irq_count);
}

static inline void hardirq_exit(int cpu)
{
	atomic_dec(&global_irq_count);
	--local_irq_count[cpu];
}
static inline int hardirq_trylock(int cpu)
{
	unsigned long flags;
	int ret = 1;

	__save_and_cli(flags);
	if ((atomic_add_return(1, &global_irq_count) != 1) ||
	    (global_irq_lock.lock != 0)) {
		atomic_dec(&global_irq_count);
		__restore_flags(flags);
		ret = 0;
	} else {
		++local_irq_count[cpu];
		__sti();
	}
	return ret;
}
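
/*
 * hardirq_endlock() undoes a successful hardirq_trylock(): it drops
 * back out of irq context, with interrupts briefly disabled around
 * the counter updates.
 */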
#define hardirq_endlock(cpu) \
	do { \
		__cli(); \
		hardirq_exit(cpu); \
		__sti(); \
	} while (0)
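
/*
 * Defined elsewhere in the arch IRQ code: waits until no interrupt
 * handlers are in progress on any CPU before returning.
 */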
extern void synchronize_irq(void);
#endif /* __SMP__ */
#endif /* _ALPHA_HARDIRQ_H */