path: root/include/asm-sparc/atomic.h
blob: dc29a13cdf9c193880c98bd184884d70c7e292ed
/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

#include <linux/config.h>

#ifdef CONFIG_SMP
/* This is a temporary measure. -DaveM */
typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i)	{ ((i) << 8) }
#else
typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i)  { (i) }
#endif
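
/* On SMP the initializer pre-shifts the value into bits 31:8; bits 7:0 hold
 * the spin-lock byte and start out clear.  See the layout comment below.
 */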

#ifdef __KERNEL__
#include <asm/system.h>
#include <asm/psr.h>

#ifndef CONFIG_SMP

#define atomic_read(v)          ((v)->counter)
#define atomic_set(v, i)        (((v)->counter) = (i))

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler; see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is to embed the spin-lock byte within the
 * word itself: we use the low byte so that signedness is easily
 * retained via a quick arithmetic shift.  It looks like this:
 *
 *	----------------------------------------
 *	| signed 24-bit counter value |  lock  |  atomic_t
 *	----------------------------------------
 *	 31                          8 7      0
 */
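
/* A worked example of the encoding above (illustrative): ATOMIC_INIT(-1)
 * stores 0xffffff00; atomic_read() spins until the lock byte in bits 7:0
 * reads as zero, then arithmetically shifts right by 8 and recovers -1.
 */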

static __inline__ int atomic_read(atomic_t *v)
{
	int ret = v->counter;

	while(ret & 0xff)
		ret = v->counter;

	return ret >> 8;
}

#define atomic_set(v, i)	(((v)->counter) = ((i) << 8))
#endif

/* Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
#define __atomic_fool_gcc(x) ((struct { int a[100]; } *)x)
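
/* How the inline wrappers below hand off to the out-of-line helpers in
 * arch/sparc/lib/atomic.S (a reading of the code here, not something this
 * header spells out): the operand travels in %g2 and the atomic_t pointer
 * in %g1.  Each wrapper stashes its own return address (%o7) in %g4 and
 * biases %o7 by 8 in the call's delay slot, letting the helper jump
 * straight back past this three-instruction sequence and restore %o7
 * from %g4 as it does so.
 */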

static __inline__ void atomic_add(int i, atomic_t *v)
{
	register atomic_t *ptr asm("g1");
	register int increment asm("g2");
	ptr = (atomic_t *) __atomic_fool_gcc(v);
	increment = i;

	__asm__ __volatile__("
	mov	%%o7, %%g4
	call	___atomic_add
	 add	%%o7, 8, %%o7
"	: "=&r" (increment)
	: "0" (increment), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
	register atomic_t *ptr asm("g1");
	register int increment asm("g2");

	ptr = (atomic_t *) __atomic_fool_gcc(v);
	increment = i;

	__asm__ __volatile__("
	mov	%%o7, %%g4
	call	___atomic_sub
	 add	%%o7, 8, %%o7
"	: "=&r" (increment)
	: "0" (increment), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");
}
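
/* The *_return variants rely on the out-of-line helper leaving the updated
 * counter value back in %g2 (surfaced here via the "=&r" (increment) output
 * operand), so they can simply return it.
 */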

static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	register atomic_t *ptr asm("g1");
	register int increment asm("g2");

	ptr = (atomic_t *) __atomic_fool_gcc(v);
	increment = i;

	__asm__ __volatile__("
	mov	%%o7, %%g4
	call	___atomic_add
	 add	%%o7, 8, %%o7
"	: "=&r" (increment)
	: "0" (increment), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return increment;
}

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	register atomic_t *ptr asm("g1");
	register int increment asm("g2");

	ptr = (atomic_t *) __atomic_fool_gcc(v);
	increment = i;

	__asm__ __volatile__("
	mov	%%o7, %%g4
	call	___atomic_sub
	 add	%%o7, 8, %%o7
"	: "=&r" (increment)
	: "0" (increment), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return increment;
}

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))
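
/* Typical use of the test variant (illustrative only; obj, refcnt and
 * release_obj() are made-up names, not part of this header):
 *
 *	atomic_inc(&obj->refcnt);
 *	...
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		release_obj(obj);
 */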

#endif /* !(__KERNEL__) */

#endif /* !(__ARCH_SPARC_ATOMIC__) */