#ifndef _ASM_IA64_HARDIRQ_H
#define _ASM_IA64_HARDIRQ_H

/*
 * Copyright (C) 1998, 1999 Hewlett-Packard Co
 * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/config.h>
#include <linux/threads.h>

extern unsigned int local_irq_count[NR_CPUS];
extern unsigned long hardirq_no[NR_CPUS];
/*
 * Are we in an interrupt context, i.e., doing bottom-half or
 * hardware-interrupt processing?
 */
#define in_interrupt()                                                  \
({                                                                      \
        int __cpu = smp_processor_id();                                 \
        (local_irq_count[__cpu] + local_bh_count[__cpu]) != 0;          \
})
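
/*
 * Usage sketch (illustrative only, not part of the original header):
 * in_interrupt() lets code that must be able to sleep detect misuse
 * from irq or bottom-half context.  my_assert_process_context() is a
 * hypothetical helper; the declarations below normally come from
 * <linux/kernel.h> and <asm/softirq.h> respectively.
 */
#include <linux/kernel.h>                       /* printk(), KERN_ERR */
extern unsigned int local_bh_count[NR_CPUS];    /* from <asm/softirq.h> */

static inline void my_assert_process_context(const char *who)
{
        if (in_interrupt())
                printk(KERN_ERR "%s called from interrupt context!\n", who);
}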
#ifndef CONFIG_SMP

/* On a uniprocessor kernel, the per-CPU counter is all the state we need. */
# define hardirq_trylock(cpu)           (local_irq_count[cpu] == 0)
# define hardirq_endlock(cpu)           ((void) 0)

# define hardirq_enter(cpu, irq)        (local_irq_count[cpu]++)
# define hardirq_exit(cpu, irq)         (local_irq_count[cpu]--)

# define synchronize_irq()              barrier()

#else

#include <linux/spinlock.h>

#include <asm/atomic.h>
#include <asm/smp.h>

extern int global_irq_holder;
extern spinlock_t global_irq_lock;
extern atomic_t global_irq_count;
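
/*
 * The global irq lock serializes cli()-style critical sections across
 * CPUs: global_irq_holder records which CPU (if any) currently owns
 * global_irq_lock, and global_irq_count counts hard interrupts in
 * progress system-wide.
 */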
static inline void release_irqlock(int cpu)
{
        /* if we didn't own the irq lock, just ignore.. */
        if (global_irq_holder == cpu) {
                global_irq_holder = NO_PROC_ID;
                spin_unlock(&global_irq_lock);
        }
}
static inline void hardirq_enter(int cpu, int irq)
{
        /* entering hard-interrupt context: bump per-CPU and global counts */
        ++local_irq_count[cpu];
        atomic_inc(&global_irq_count);
}

static inline void hardirq_exit(int cpu, int irq)
{
        /* leaving hard-interrupt context: drop the counts in reverse order */
        atomic_dec(&global_irq_count);
        --local_irq_count[cpu];
}
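
/*
 * Sketch (illustrative only; my_do_IRQ() and my_dispatch() are
 * hypothetical names, not part of this header): an interrupt entry
 * path brackets handler execution with hardirq_enter()/hardirq_exit()
 * so that in_interrupt() and the global accounting stay accurate.
 */
extern void my_dispatch(int irq);       /* hypothetical device dispatch */

static inline void my_do_IRQ(int irq)
{
        int cpu = smp_processor_id();

        hardirq_enter(cpu, irq);        /* mark this CPU as in hard-irq */
        my_dispatch(irq);               /* run the registered handler(s) */
        hardirq_exit(cpu, irq);         /* leave hard-interrupt context */
}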
static inline int hardirq_trylock(int cpu)
{
        /* true only if this CPU is irq-free and nobody holds the global lock */
        return !local_irq_count[cpu] && !test_bit(0, &global_irq_lock);
}
#define hardirq_endlock(cpu) ((void)0)
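
/*
 * Sketch (illustrative only; my_run_bottom_halves() is a hypothetical
 * name): hardirq_trylock()/hardirq_endlock() guard work that must not
 * run while this CPU is handling a hard interrupt or while another
 * CPU holds the global irq lock.
 */
extern void my_run_bottom_halves(void); /* hypothetical */

static inline void my_maybe_run_bh(int cpu)
{
        if (hardirq_trylock(cpu)) {
                my_run_bottom_halves();
                hardirq_endlock(cpu);   /* a no-op here, kept for symmetry */
        }
}

/* wait until no hard interrupts are in progress on any CPU */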
extern void synchronize_irq(void);
#endif /* CONFIG_SMP */
#endif /* _ASM_IA64_HARDIRQ_H */