#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H
/*
 * Copyright (C) 1998-2000 Hewlett-Packard Co
 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/kernel.h>

#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/atomic.h>
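
/*
 * A spinlock is a single word: 0 means unlocked, 1 means held.
 * SPIN_LOCK_UNLOCKED and spin_lock_init() both start it out at 0.
 */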
typedef struct {
	volatile unsigned int lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }
#define spin_lock_init(x)	((x)->lock = 0)

/*
 * Streamlined test_and_set_bit(0, (x)).  We use test-and-test-and-set
 * rather than a simple xchg to avoid writing the cache-line when
 * there is contention.
 */
#if 1 /* Bad code generation? */
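/*
 * ar.ccv holds the expected value (0 == unlocked) and r29 the new value
 * (1 == held).  The loop at 1: re-reads the lock word until it is zero
 * (the cheap "test" part, which does not dirty the cache line), then
 * attempts the atomic cmpxchg4.acq.  If another CPU got there first
 * (old value non-zero), we go back to spinning on the plain load.
 */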
#define spin_lock(x) __asm__ __volatile__ (				\
	"mov ar.ccv = r0\n"						\
	"mov r29 = 1\n"							\
	";;\n"								\
	"1:\n"								\
	"ld4 r2 = %0\n"							\
	";;\n"								\
	"cmp4.eq p0,p7 = r0,r2\n"					\
	"(p7) br.cond.spnt.few 1b \n"					\
	"cmpxchg4.acq r2 = %0, r29, ar.ccv\n"				\
	";;\n"								\
	"cmp4.eq p0,p7 = r0, r2\n"					\
	"(p7) br.cond.spnt.few 1b\n"					\
	";;\n"								\
	:: "m" __atomic_fool_gcc((x)) : "r2", "r29", "memory")
#else
#define spin_lock(x)							\
{									\
	spinlock_t *__x = (x);						\
									\
	do {								\
		while (__x->lock);					\
	} while (cmpxchg_acq(&__x->lock, 0, 1));			\
}
#endif
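
/*
 * Illustrative usage (my_lock is just an example name):
 *
 *	spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */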
#define spin_is_locked(x)	((x)->lock != 0)

#define spin_unlock(x)		({((spinlock_t *) x)->lock = 0; barrier();})

/* Streamlined !test_and_set_bit(0, (x)) */
#define spin_trylock(x)		(cmpxchg_acq(&(x)->lock, 0, 1) == 0)

#define spin_unlock_wait(x)	({ do { barrier(); } while ((x)->lock); })
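
/*
 * Readers and the writer share one 32-bit word: bits 0-30 count the
 * readers currently holding the lock, bit 31 is the write lock.
 * Readers bump the whole word with fetchadd; a writer owns the lock
 * once bit 31 is set and the reader count has drained to zero.
 */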
typedef struct {
	volatile int read_counter:31;
	volatile int write_lock:1;
} rwlock_t;

#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0 }
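
/*
 * read_lock() fast path: fetchadd4.acq atomically bumps the reader
 * count and returns the old value.  If the write-lock bit (31) was set,
 * we branch to the out-of-line slow path in .text.lock, which undoes
 * the increment, spins on acquiring loads until the writer is gone,
 * and then retries the fetchadd.
 */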
#define read_lock(rw)							\
do {									\
	int tmp = 0;							\
	__asm__ __volatile__ ("1:\tfetchadd4.acq %0 = %1, 1\n"		\
		";;\n"							\
		"tbit.nz p6,p0 = %0, 31\n"				\
		"(p6) br.cond.sptk.few 2f\n"				\
		".section .text.lock,\"ax\"\n"				\
		"2:\tfetchadd4.rel %0 = %1, -1\n"			\
		";;\n"							\
		"3:\tld4.acq %0 = %1\n"					\
		";;\n"							\
		"tbit.nz p6,p0 = %0, 31\n"				\
		"(p6) br.cond.sptk.few 3b\n"				\
		"br.cond.sptk.few 1b\n"					\
		";;\n"							\
		".previous\n"						\
		: "=r" (tmp), "=m" (__atomic_fool_gcc(rw))		\
		:: "memory");						\
} while (0)
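
/*
 * read_unlock(): drop our reader reference with release semantics
 * (fetchadd4.rel of -1).
 */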
#define read_unlock(rw)							\
do {									\
	int tmp = 0;							\
	__asm__ __volatile__ ("fetchadd4.rel %0 = %1, -1\n"		\
		: "=r" (tmp)						\
		: "m" (__atomic_fool_gcc(rw))				\
		: "memory");						\
} while (0)
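
/*
 * write_lock(): test-and-test-and-set on the write-lock bit (31), then
 * wait for the reader count to drain to zero before entering the
 * critical section.
 */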
#define write_lock(rw)							\
do {									\
	do {								\
		while ((rw)->write_lock);				\
	} while (test_and_set_bit(31, (rw)));				\
	while ((rw)->read_counter);					\
	barrier();							\
} while (0)

/*
 * clear_bit() has "acq" semantics; we really need "rel" semantics,
 * but for simplicity, we simply do a fence for now...
 */
#define write_unlock(x)	({clear_bit(31, (x)); mb();})
#endif /* _ASM_IA64_SPINLOCK_H */