#ifndef _ALPHA_SEMAPHORE_HELPER_H
#define _ALPHA_SEMAPHORE_HELPER_H

/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Richard Henderson
 */

/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * which we have. Let the rest of the losers suck eggs.
 */

static inline void
wake_one_more(struct semaphore * sem)
{
	atomic_inc(&sem->waking);
}

static inline int
waking_non_zero(struct semaphore *sem)
{
	long ret, tmp;

	/* An atomic conditional decrement: consume one pending wakeup
	   if there is one.  */
	__asm__ __volatile__(
	"1:	ldl_l	%1,%2\n"	/* tmp = load-locked sem->waking */
	"	blt	%1,2f\n"	/* waking < 0: nothing pending, ret stays 0 */
	"	subl	%1,1,%0\n"	/* ret = tmp - 1 */
	"	stl_c	%0,%2\n"	/* store; ret = 1 on success, 0 on failure */
	"	beq	%0,3f\n"	/* retry if the store-conditional failed */
	"2:\n"
	".section .text2,\"ax\"\n"
	"3:	br	1b\n"
	".previous"
	: "=r"(ret), "=r"(tmp), "=m"(__atomic_fool_gcc(&sem->waking))
	: "0"(0));

	return ret > 0;
}
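
/*
 * For reference, a sketch (not in the original file) of the C that
 * the asm above implements, written with ldl_l/stl_c pseudo-functions
 * in the style of the "equivalent C" comment below, and assuming the
 * same "waking >= 0 means a wakeup is pending" convention:
 *
 *	do {
 *		tmp = ldl_l(&sem->waking);
 *		ret = 0;
 *		if (tmp < 0)
 *			break;				// nothing pending
 *		ret = stl_c(&sem->waking, tmp - 1);	// 0 if it lost the lock
 *	} while (ret == 0);
 *	return ret > 0;
 */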

/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count down_interruptible decrement
 * simultaneously and atomically with the sem->waking adjustment,
 * otherwise we can race with wake_one_more.
 *
 * This is accomplished by doing a 64-bit ll/sc on the 2 32-bit words:
 * on little-endian Alpha, sem->count is the low word of the quad and
 * sem->waking the high word, so a single ldq_l/stq_c covers both.
 */

static inline int
waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
{
	long ret, tmp, tmp2, tmp3;

	/* "Equivalent" C.  Note that we have to do this all without
	   (taken) branches in order to be a valid ll/sc sequence.

	   do {
		tmp = ldq_l;
		ret = 0;
		if (tmp >= 0) {			// waking >= 0
			tmp += 0xffffffff00000000;	// waking -= 1
			ret = 1;
		}
		else if (pending) {
			// Since -1 + 1 carries into the high word, we have
			// to be more careful adding 1 here.
			tmp = (tmp & 0xffffffff00000000)
			      | ((tmp + 1) & 0x00000000ffffffff);
			ret = -EINTR;
		}
		else {
			break;	// ideally.  we don't actually break
				// since this is a predicate we don't
				// have, and is more trouble to build
				// than to elide the noop stq_c.
		}
		tmp = stq_c(tmp);	// 1 if the store stuck, 0 if not
	   } while (tmp == 0);
	*/
	__asm__ __volatile__(
	"1:	ldq_l	%1,%4\n"	/* tmp = {waking : count} */
	"	lda	%0,0\n"		/* ret = 0 (go to sleep) */
	"	cmovne	%5,%6,%0\n"	/* signal pending: ret = -EINTR */
	"	addq	%1,1,%2\n"	/* tmp2 = tmp + 1 (count += 1, may carry) */
	"	and	%1,%7,%3\n"	/* tmp3 = old waking word */
	"	andnot	%2,%7,%2\n"	/* tmp2 = incremented count word only */
	"	cmovge	%1,1,%0\n"	/* waking >= 0: ret = 1 */
	"	or	%3,%2,%2\n"	/* tmp2 = old waking | new count */
	"	addq	%1,%7,%3\n"	/* tmp3 = tmp with waking -= 1 */
	"	cmovne	%5,%2,%1\n"	/* signal pending: store the count undo */
	"	cmovge	%2,%3,%1\n"	/* waking >= 0: store the decrement instead */
	"	stq_c	%1,%4\n"	/* tmp = 1 on success, 0 on failure */
	"	beq	%1,3f\n"	/* retry if the store-conditional failed */
	"2:\n"
	".section .text2,\"ax\"\n"
	"3:	br	1b\n"
	".previous"
	: "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(tmp3), "=m"(*sem)
	: "r"(signal_pending(tsk)), "r"(-EINTR),
	  "r"(0xffffffff00000000));

	return ret;
}

/*
 * waking_non_zero_trylock is unused.  We do everything in
 * down_trylock and let non-ll/sc hosts bounce around.
 */

static inline int
waking_non_zero_trylock(struct semaphore *sem)
{
	return 0;
}

#endif