#ifndef __ARCH_I386_ATOMIC__
#define __ARCH_I386_ATOMIC__
#include <linux/config.h>
/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 */
#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif
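
/*
 * The "lock" prefix makes the read-modify-write instructions below
 * atomic with respect to the other CPUs: the bus is locked for the
 * duration of the access.  A single instruction is already atomic
 * against interrupts on a uniprocessor, so there the prefix and its
 * cost are omitted.
 */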
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
#define __atomic_fool_gcc(x) (*(volatile struct { int a[100]; } *)x)
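
/*
 * The dummy 400-byte struct above makes gcc assume the asm may touch
 * the whole object at that address, not just the int it actually
 * modifies, so the counter is never cached in a register across an
 * atomic operation.
 */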
#ifdef CONFIG_SMP
typedef struct { volatile int counter; } atomic_t;
#else
typedef struct { int counter; } atomic_t;
#endif
#define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
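
/*
 * atomic_read() and atomic_set() are plain accesses: an aligned
 * 32-bit load or store is already atomic on the i386, so no lock
 * prefix is needed.  On SMP the counter is declared volatile so
 * that atomic_read() always performs a real load.
 */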

static __inline__ void atomic_add(int i, volatile atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "addl %1,%0"
		:"=m" (__atomic_fool_gcc(v))
		:"ir" (i), "m" (__atomic_fool_gcc(v)));
}
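
/*
 * Naming the counter as both the "=m" output and an "m" input tells
 * gcc that the asm reads the old value and rewrites it in place;
 * "ir" lets the operand be an immediate or a register.  Every
 * routine below uses the same idiom.
 */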

static __inline__ void atomic_sub(int i, volatile atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "subl %1,%0"
		:"=m" (__atomic_fool_gcc(v))
		:"ir" (i), "m" (__atomic_fool_gcc(v)));
}

static __inline__ int atomic_sub_and_test(int i, volatile atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "subl %2,%0; sete %1"
		:"=m" (__atomic_fool_gcc(v)), "=qm" (c)
		:"ir" (i), "m" (__atomic_fool_gcc(v)));
	return c;
}
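
/*
 * "sete" stores 1 in the byte operand when the zero flag is set,
 * i.e. when the new counter value is 0; "=qm" lets gcc place that
 * byte in any byte-addressable register or in memory.
 */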

static __inline__ void atomic_inc(volatile atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "incl %0"
		:"=m" (__atomic_fool_gcc(v))
		:"m" (__atomic_fool_gcc(v)));
}

static __inline__ void atomic_dec(volatile atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "decl %0"
		:"=m" (__atomic_fool_gcc(v))
		:"m" (__atomic_fool_gcc(v)));
}

static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "decl %0; sete %1"
		:"=m" (__atomic_fool_gcc(v)), "=qm" (c)
		:"m" (__atomic_fool_gcc(v)));
	return c != 0;
}

static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "incl %0; sete %1"
		:"=m" (__atomic_fool_gcc(v)), "=qm" (c)
		:"m" (__atomic_fool_gcc(v)));
	return c != 0;
}

static __inline__ int atomic_add_negative(int i, volatile atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK "addl %2,%0; sets %1"
		:"=m" (__atomic_fool_gcc(v)), "=qm" (c)
		:"ir" (i), "m" (__atomic_fool_gcc(v)));
	return c;
}
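
/*
 * "sets" is the sign-flag analogue of "sete": c becomes 1 when the
 * addition produced a negative result.
 */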

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
	__asm__ __volatile__(LOCK "andl %0,%1" \
	: : "r" (~(mask)), "m" (__atomic_fool_gcc(addr)) : "memory")

#define atomic_set_mask(mask, addr) \
	__asm__ __volatile__(LOCK "orl %0,%1" \
	: : "r" (mask), "m" (__atomic_fool_gcc(addr)) : "memory")
#endif /* __ARCH_I386_ATOMIC__ */