/* $Id$
*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 1997, 1999 by Ralf Baechle
*/

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/config.h>
#include <asm/sgidefs.h>
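
/*
 * The counter only needs to be volatile on SMP, where other CPUs may
 * change it at any time and atomic_read() must always reload it from
 * memory.
 */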
#ifdef CONFIG_SMP
typedef struct { volatile int counter; } atomic_t;
#else
typedef struct { int counter; } atomic_t;
#endif

#ifdef __KERNEL__

#define ATOMIC_INIT(i)	{ (i) }
#define atomic_read(v)	((v)->counter)
#define atomic_set(v,i)	((v)->counter = (i))
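
/*
 * Plain loads and stores suffice for atomic_read() and atomic_set():
 * aligned, word-sized accesses are already atomic on MIPS.  Only
 * read-modify-write cycles need the ll/sc loops below.
 */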

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.  Casting to a
 * dummy 100-int struct makes the "m" constraints below cover the
 * whole object, so gcc cannot cache its value in a register across
 * the ll/sc sequence.
 */
#define __atomic_fool_gcc(x) (*(volatile struct { int a[100]; } *)x)

extern __inline__ void atomic_add(int i, volatile atomic_t * v)
{
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0,%1\n\t"	/* temp = load-linked counter */
		"addu\t%0,%2\n\t"	/* temp += i */
		"sc\t%0,%1\n\t"		/* store-conditional; temp = success flag */
		"beqz\t%0,1b"		/* lost the ll/sc race: retry */
		: "=&r" (temp),
		  "=m" (__atomic_fool_gcc(v))
		: "Ir" (i),
		  "m" (__atomic_fool_gcc(v)));
}

extern __inline__ void atomic_sub(int i, volatile atomic_t * v)
{
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0,%1\n\t"	/* temp = load-linked counter */
		"subu\t%0,%2\n\t"	/* temp -= i */
		"sc\t%0,%1\n\t"		/* store-conditional; temp = success flag */
		"beqz\t%0,1b"		/* lost the ll/sc race: retry */
		: "=&r" (temp),
		  "=m" (__atomic_fool_gcc(v))
		: "Ir" (i),
		  "m" (__atomic_fool_gcc(v)));
}

/*
 * Same as above, but return the result of the operation.
 */
extern __inline__ int atomic_add_return(int i, atomic_t * v)
{
	unsigned long temp, result;

	__asm__ __volatile__(
		".set\tnoreorder\n"	/* fill the delay slot by hand */
		"1:\tll\t%1,%2\n\t"	/* temp = load-linked counter */
		"addu\t%0,%1,%3\n\t"	/* result = temp + i */
		"sc\t%0,%2\n\t"		/* store-conditional; result = success flag */
		"beqz\t%0,1b\n\t"	/* retry if the store failed ... */
		"addu\t%0,%1,%3\n\t"	/* ... recomputing result in the delay slot */
		".set\treorder"
		: "=&r" (result),
		  "=&r" (temp),
		  "=m" (__atomic_fool_gcc(v))
		: "Ir" (i),
		  "m" (__atomic_fool_gcc(v)));

	return result;
}

extern __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	unsigned long temp, result;

	__asm__ __volatile__(
		".set\tnoreorder\n"	/* fill the delay slot by hand */
		"1:\tll\t%1,%2\n\t"	/* temp = load-linked counter */
		"subu\t%0,%1,%3\n\t"	/* result = temp - i */
		"sc\t%0,%2\n\t"		/* store-conditional; result = success flag */
		"beqz\t%0,1b\n\t"	/* retry if the store failed ... */
		"subu\t%0,%1,%3\n\t"	/* ... recomputing result in the delay slot */
		".set\treorder"
		: "=&r" (result),
		  "=&r" (temp),
		  "=m" (__atomic_fool_gcc(v))
		: "Ir" (i),
		  "m" (__atomic_fool_gcc(v)));

	return result;
}
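
/*
 * What the _return variants buy you, as a minimal sketch: handing out
 * unique sequence numbers without a lock.  (Illustrative only; next_id
 * is a hypothetical caller-side variable, not part of this header.)
 *
 *	static atomic_t next_id = ATOMIC_INIT(0);
 *
 *	int id = atomic_add_return(1, &next_id);
 */
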
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))
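
/*
 * Typical resource-counting use, as the comment at the top of this file
 * suggests.  A minimal sketch; nr_users, get_resource(), put_resource()
 * and free_the_resource() are all hypothetical caller-side code:
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	void get_resource(void)
 *	{
 *		atomic_inc(&nr_users);
 *	}
 *
 *	void put_resource(void)
 *	{
 *		if (atomic_dec_and_test(&nr_users))
 *			free_the_resource();
 *	}
 */
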
#endif /* defined(__KERNEL__) */
#endif /* _ASM_ATOMIC_H */