/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 by Ralf Baechle
 *
 * $Id: atomic.h,v 1.2 1997/06/20 03:05:12 ralf Exp $
 */
#ifndef __ASM_MIPS_ATOMIC_H
#define __ASM_MIPS_ATOMIC_H

#include <asm/sgidefs.h>

#ifdef __SMP__
typedef struct { volatile int counter; } atomic_t;
#else
typedef struct { int counter; } atomic_t;
#endif
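
/*
 * Note: the counter is volatile under __SMP__ so that atomic_read()
 * below always does a real load from memory instead of reusing a
 * value the compiler may have cached in a register.
 */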

#ifdef __KERNEL__
#define ATOMIC_INIT(i)    { (i) }

#define atomic_read(v)	((v)->counter)
#define atomic_set(v,i)	((v)->counter = (i))
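
/*
 * Illustrative usage (a sketch only; my_count is a made-up example):
 *
 *	static atomic_t my_count = ATOMIC_INIT(0);
 *
 *	atomic_set(&my_count, 42);
 *	if (atomic_read(&my_count) == 42)
 *		...
 */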

#if (_MIPS_ISA == _MIPS_ISA_MIPS1)

#include <asm/system.h>

/*
 * The MIPS I implementation is only atomic with respect to
 * interrupts.  R3000-based multiprocessor machines are rare anyway ...
 */
extern __inline__ void atomic_add(int i, volatile atomic_t * v)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	v->counter += i;
	restore_flags(flags);
}

extern __inline__ void atomic_sub(int i, volatile atomic_t * v)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	v->counter -= i;
	restore_flags(flags);
}

extern __inline__ int atomic_add_return(int i, atomic_t * v)
{
	unsigned long flags;
	int temp;

	save_flags(flags);
	cli();
	temp = v->counter;
	temp += i;
	v->counter = temp;
	restore_flags(flags);

	return temp;
}

extern __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	unsigned long flags;
	int temp;

	save_flags(flags);
	cli();
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	restore_flags(flags);

	return temp;
}
#endif /* _MIPS_ISA == _MIPS_ISA_MIPS1 */

#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4) || (_MIPS_ISA == _MIPS_ISA_MIPS5)
/*
 * ... while for MIPS II and better we can use the ll/sc instructions.
 * This implementation is SMP safe ...
 */
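
/*
 * Conceptually, each ll/sc loop below behaves like this C sketch,
 * where store_conditional() is a made-up stand-in for the sc
 * instruction: sc fails (stores nothing, yields 0) if any other
 * CPU wrote the word since our load-linked (ll) read of it.
 *
 *	do {
 *		temp = v->counter;	(ll)
 *		temp += i;
 *	} while (!store_conditional(&v->counter, temp));	(sc)
 */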

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
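
/*
 * The oversized dummy struct makes the "m" constraints below cover
 * the whole object at exactly the address passed in, so gcc assumes
 * the asm may read and write *v itself and won't substitute a
 * cached or aliased copy.
 */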

extern __inline__ void atomic_add(int i, volatile atomic_t * v)
{
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0,%1\n\t"
		"addu\t%0,%2\n\t"
		"sc\t%0,%1\n\t"
		"beqz\t%0,1b"
		:"=&r" (temp),
		 "=m" (__atomic_fool_gcc(v))
		:"Ir" (i),
		 "m" (__atomic_fool_gcc(v)));
}

extern __inline__ void atomic_sub(int i, volatile atomic_t * v)
{
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0,%1\n\t"
		"subu\t%0,%2\n\t"
		"sc\t%0,%1\n\t"
		"beqz\t%0,1b"
		:"=&r" (temp),
		 "=m" (__atomic_fool_gcc(v))
		:"Ir" (i),
		 "m" (__atomic_fool_gcc(v)));
}

/*
 * Same as above, but return the result value
 */
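/*
 * Note on the .set noreorder blocks: sc overwrites %0 with its
 * success flag, so the final addu/subu sits in the branch delay
 * slot of beqz and recomputes the return value after a successful
 * store conditional.
 */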
extern __inline__ int atomic_add_return(int i, atomic_t * v)
{
	unsigned long temp, result;

	__asm__ __volatile__(
		".set\tnoreorder\n"
		"1:\tll\t%1,%2\n\t"
		"addu\t%0,%1,%3\n\t"
		"sc\t%0,%2\n\t"
		"beqz\t%0,1b\n\t"
		"addu\t%0,%1,%3\n\t"
		".set\treorder"
		:"=&r" (result),
		 "=&r" (temp),
		 "=m" (__atomic_fool_gcc(v))
		:"Ir" (i),
		 "m" (__atomic_fool_gcc(v)));

	return result;
}

extern __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	unsigned long temp, result;

	__asm__ __volatile__(
		".set\tnoreorder\n"
		"1:\tll\t%1,%2\n\t"
		"subu\t%0,%1,%3\n\t"
		"sc\t%0,%2\n\t"
		"beqz\t%0,1b\n\t"
		"subu\t%0,%1,%3\n\t"
		".set\treorder"
		:"=&r" (result),
		 "=&r" (temp),
		 "=m" (__atomic_fool_gcc(v))
		:"Ir" (i),
		 "m" (__atomic_fool_gcc(v)));

	return result;
}
#endif /* MIPS II or better */

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))
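
/*
 * Typical resource-counting pattern these helpers are intended for
 * (an illustrative sketch; obj and free_obj() are made-up names):
 *
 *	atomic_inc(&obj->count);		take a reference
 *	...
 *	if (atomic_dec_and_test(&obj->count))	true when count hits 0
 *		free_obj(obj);			last user, release it
 */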
#endif /* defined(__KERNEL__) */

#endif /* __ASM_MIPS_ATOMIC_H */