/* atomic.S: Move this stuff here for better ICACHE hit rates.
*
* Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
*/
#include <asm/cprefix.h>
#include <asm/ptrace.h>
#include <asm/psr.h>

	.text
	.align	4

#ifndef __SMP__
	/* Atomically exchange %g2 with the word at [%g1].  sun4c CPUs
	 * are SPARC V7 and lack the swap instruction, so fake it by
	 * masking interrupts around a load/store pair.  This is only
	 * safe on uniprocessor, hence the !__SMP__ guard.
	 */
	.globl	___xchg32_sun4c
___xchg32_sun4c:
	rd	%psr, %g3		! Sample current %psr
	andcc	%g3, PSR_PIL, %g0	! Interrupts already masked?
	bne	1f
	 nop
	wr	%g3, PSR_PIL, %psr	! PIL was zero, wr XORs it to 15
	nop; nop; nop			! Let the bits set
1:
	andcc	%g3, PSR_PIL, %g0	! Was PIL nonzero on entry?
	ld	[%g1], %g7		! Fetch old value
	bne	1f
	 st	%g2, [%g1]		! Store new value (delay slot)
	wr	%g3, 0x0, %psr		! Restore original %psr
	nop; nop; nop			! Let the bits set
1:
	mov	%g7, %g2		! Return old value in %g2
	jmpl	%o7 + 8, %g0
	 mov	%g4, %o7		! Restore caller's %o7

	/* sun4m/sun4d CPUs are SPARC V8 and can just use swap. */
	.globl	___xchg32_sun4md
___xchg32_sun4md:
	swap	[%g1], %g2		! Atomic exchange in hardware
	jmpl	%o7 + 8, %g0
	 mov	%g4, %o7		! Restore caller's %o7
#endif
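
/* Both flavors use an out-of-line calling convention instead of the
 * normal SPARC ABI: the word address goes in %g1, the new value in
 * %g2, the caller's own %o7 is stashed in %g4, and the old value
 * comes back in %g2.  A minimal sketch of a caller, assuming it sits
 * in inline asm in the headers (the exact header code may differ):
 *
 *	mov	%o7, %g4		! save return address, no register window
 *	call	___xchg32_sun4c		! %g1 = pointer, %g2 = new value
 *	 nop				! old value is returned in %g2
 */
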
/* Read asm-sparc/atomic.h carefully to understand how this works for SMP.
* Really, some things here for SMP are overly clever, go read the header.
*/
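
/* Layout the arithmetic below assumes: the counter sits in the upper
 * 24 bits of the word and the low byte (byte offset 3 on big-endian
 * SPARC, hence ldstub [%g1 + 3]) doubles as a spinlock byte on SMP.
 * Roughly, in C:
 *
 *	value = ((signed int) word) >> 8;	! sign-extend 24 bits
 *	word  = value << 8;			! low byte 0 == unlocked
 *
 * Storing the shifted result writes a zero lock byte, which is why a
 * plain st both updates the counter and releases the lock.
 */
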
	.globl	___atomic_add
___atomic_add:
	rd	%psr, %g3		! Keep the code small, old way was stupid
	or	%g3, PSR_PIL, %g7	! Disable interrupts
	wr	%g7, 0x0, %psr		! Set %psr
	nop; nop; nop;			! Let the bits set
#ifdef __SMP__
1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
	orcc	%g7, 0x0, %g0		! Did we get it?
	bne	1b			! Nope...
#endif
	ld	[%g1], %g7		! Load locked atomic_t
	sra	%g7, 8, %g7		! Get signed 24-bit integer
	add	%g7, %g2, %g2		! Add in argument
	sll	%g2, 8, %g7		! Transpose back to atomic_t
	st	%g7, [%g1]		! Clever: This releases the lock as well.
	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
	nop; nop; nop;			! Let the bits set
	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
	 mov	%g4, %o7		! Restore %o7
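
/* The jmpl above returns straight to %o7 (no + 8) because callers in
 * atomic.h are expected to pre-bias the return address in the call's
 * delay slot.  A hedged sketch of such a caller (the real header may
 * differ in detail):
 *
 *	mov	%o7, %g4		! save the real return address
 *	call	___atomic_add		! %g1 = ptr, %g2 = increment
 *	 add	%o7, 8, %o7		! bias so jmpl %o7 returns after the call
 */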

	.globl	___atomic_sub
___atomic_sub:
	rd	%psr, %g3		! Keep the code small, old way was stupid
	or	%g3, PSR_PIL, %g7	! Disable interrupts
	wr	%g7, 0x0, %psr		! Set %psr
	nop; nop; nop;			! Let the bits set
#ifdef __SMP__
1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
	orcc	%g7, 0x0, %g0		! Did we get it?
	bne	1b			! Nope...
#endif
	ld	[%g1], %g7		! Load locked atomic_t
	sra	%g7, 8, %g7		! Get signed 24-bit integer
	sub	%g7, %g2, %g2		! Subtract argument
	sll	%g2, 8, %g7		! Transpose back to atomic_t
	st	%g7, [%g1]		! Clever: This releases the lock as well
	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
	nop; nop; nop;			! Let the bits set
	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
	 mov	%g4, %o7		! Restore %o7