summaryrefslogtreecommitdiffstats
path: root/include/asm-sparc/atomic.h
blob: 3e46c262e1917ecb09fb2452bede7e0e7c759f44 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
/* atomic.h: These really suck for now.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

/* Plain int, not a struct wrapper -- callers get no type-safety
 * barrier between atomic_t and ordinary ints in this old ABI. */
typedef int atomic_t;

#ifdef __KERNEL__
#include <asm/system.h>
#include <asm/psr.h>	/* PSR_PIL: processor interrupt level field of %psr */

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 *
 * The cast to a pointer-to-huge-struct makes gcc treat the "r"
 * operand as covering a large object, discouraging it from
 * keeping *v cached in a register across the asm.
 */
#define __atomic_fool_gcc(x) ((struct { int a[100]; } *)x)

/*
 * atomic_add - add @i to *@v with local interrupts masked.
 *
 * How the asm works:
 *   - %g2 saves the current %psr; the first andcc tests its PIL
 *     field (PSR_PIL).
 *   - If PIL is zero (interrupts deliverable) the annulled delay
 *     slot "wr %g2, %2, %psr" executes; WRPSR writes rs1 XOR imm,
 *     so XOR-ing PSR_PIL in raises PIL and masks interrupts.
 *   - load / add / store perform the update; the second andcc
 *     re-tests the *saved* PSR so the original PSR is restored
 *     ("wr %g2, 0x0, %psr") only if we raised PIL ourselves.
 *   - The trailing nops cover the WRPSR delay slots.
 *
 * NOTE(review): this masks only local interrupts -- no SMP
 * atomicity; presumably targets uniprocessor sparc only.
 * NOTE(review): the asm stores to *v but has no "memory" clobber;
 * correctness appears to rely on __atomic_fool_gcc -- confirm.
 * The multi-line string literal is pre-gcc-3 asm syntax.
 */
static __inline__ void atomic_add(atomic_t i, atomic_t *v)
{
	__asm__ __volatile__("
	rd	%%psr, %%g2
	andcc	%%g2, %2, %%g0
	be,a	1f
	 wr	%%g2, %2, %%psr
1:	ld	[%0], %%g3
	add	%%g3, %1, %%g3
	andcc	%%g2, %2, %%g0
	st	%%g3, [%0]
	be,a	1f
	 wr	%%g2, 0x0, %%psr
1:	nop; nop;
        "
        : : "r" (__atomic_fool_gcc(v)), "r" (i), "i" (PSR_PIL)
        : "g2", "g3");
}

/*
 * atomic_sub - subtract @i from *@v with local interrupts masked.
 *
 * Identical structure to atomic_add: save %psr in %g2, raise PIL
 * via the annulled "wr %g2, %2, %psr" (WRPSR XORs PSR_PIL in)
 * only if interrupts were enabled, do the ld/sub/st, then restore
 * the saved PSR only if we changed it.  Trailing nops cover the
 * WRPSR delay slots.
 *
 * NOTE(review): local-interrupt masking only; no SMP atomicity.
 */
static __inline__ void atomic_sub(atomic_t i, atomic_t *v)
{
	__asm__ __volatile__("
	rd	%%psr, %%g2
	andcc	%%g2, %2, %%g0
	be,a	1f
	 wr	%%g2, %2, %%psr
1:	ld	[%0], %%g3
	sub	%%g3, %1, %%g3
	andcc	%%g2, %2, %%g0
	st	%%g3, [%0]
	be,a	1f
	 wr	%%g2, 0x0, %%psr
1:       nop; nop;
        "
        : : "r" (__atomic_fool_gcc(v)), "r" (i), "i" (PSR_PIL)
        : "g2", "g3");
}

/*
 * atomic_add_return - add @i to *@v and return the new value.
 *
 * Same PSR/PIL masking dance as atomic_add, but the sum lands in
 * operand %0, which is the in/out variable @i ("=&r" output tied
 * to input "0"), so the function can return it.  The early-clobber
 * "&" keeps %0 from sharing a register with the pointer operand.
 *
 * NOTE(review): local-interrupt masking only; no SMP atomicity.
 */
static __inline__ int atomic_add_return(atomic_t i, atomic_t *v)
{
	__asm__ __volatile__("
	rd	%%psr, %%g2
	andcc	%%g2, %3, %%g0
	be,a	1f
	 wr	%%g2, %3, %%psr
1:	ld	[%1], %%g3
	add	%%g3, %2, %0
	andcc	%%g2, %3, %%g0
	st	%0, [%1]
	be,a	1f
	 wr	%%g2, 0x0, %%psr
1:	nop; nop;
        "
        : "=&r" (i)
        : "r" (__atomic_fool_gcc(v)), "0" (i), "i" (PSR_PIL)
        : "g2", "g3");

	return i;
}

/*
 * atomic_sub_return - subtract @i from *@v and return the new value.
 *
 * Mirror image of atomic_add_return: mask local interrupts by
 * raising PIL (annulled WRPSR, which XORs PSR_PIL into the saved
 * %psr) if they were enabled, ld/sub/st with the difference left
 * in the in/out operand %0 (@i), restore %psr if we changed it,
 * and return the result.
 *
 * NOTE(review): local-interrupt masking only; no SMP atomicity.
 */
static __inline__ int atomic_sub_return(atomic_t i, atomic_t *v)
{
	__asm__ __volatile__("
	rd	%%psr, %%g2
	andcc	%%g2, %3, %%g0
	be,a	1f
	 wr	%%g2, %3, %%psr
1:	ld	[%1], %%g3
	sub	%%g3, %2, %0
	andcc	%%g2, %3, %%g0
	st	%0, [%1]
	be,a	1f
	 wr	%%g2, 0x0, %%psr
1:	nop; nop;
        "
        : "=&r" (i)
        : "r" (__atomic_fool_gcc(v)), "0" (i), "i" (PSR_PIL)
        : "g2", "g3");

	return i;
}

/* Convenience wrappers built on the four primitives above. */

/* Decrement/increment and hand back the new value. */
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

/* True when the counter hits exactly zero after the operation. */
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

/* Increment/decrement, result discarded. */
#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))

#endif /* !(__KERNEL__) */

#endif /* !(__ARCH_SPARC_ATOMIC__) */