path: root/include/asm-sparc64/semaphore.h
#ifndef _SPARC64_SEMAPHORE_H
#define _SPARC64_SEMAPHORE_H

/* These are actually reasonable on the V9. */
#ifdef __KERNEL__

#include <asm/atomic.h>
#include <asm/system.h>

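/*
 * count  - the semaphore value; it goes negative while tasks are
 *          sleeping on the semaphore.
 * waking - wakeups granted by up() that a sleeper has not yet
 *          consumed (see waking_non_zero below).
 * wait   - the queue of sleeping tasks.
 */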
struct semaphore {
	atomic_t count;
	atomic_t waking;
	struct wait_queue * wait;
};

#define MUTEX ((struct semaphore) { ATOMIC_INIT(1), ATOMIC_INIT(0), NULL })
#define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), ATOMIC_INIT(0), NULL })

extern void __down(struct semaphore * sem);
extern int  __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);

#define sema_init(sem, val)	atomic_set(&((sem)->count), val)

#define wake_one_more(sem)      atomic_inc(&(sem)->waking)

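/*
 * Try to consume one pending wakeup: if sem->waking is positive,
 * atomically decrement it with a cas loop and return 1; otherwise
 * return 0.
 */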
static __inline__ int waking_non_zero(struct semaphore *sem)
{
	int ret;

	__asm__ __volatile__(
"1:	ldsw		[%1], %%g5\n"
"	brlez,pt	%%g5, 2f\n"
"	 mov		0, %0\n"
"	sub		%%g5, 1, %%g7\n"
"	cas		[%1], %%g5, %%g7\n"
"	cmp		%%g5, %%g7\n"
"	bne,pn		%%icc, 1b\n"
"	 mov		1, %0\n"
"2:"
	: "=r" (ret)
	: "r" (&sem->waking)
	: "g5", "g7", "cc", "memory");
	return ret;
}

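/*
 * Acquire: decrement the count; a negative result means the semaphore
 * was already held, so fall into the __down*() slow path and sleep.
 * The membar keeps the critical section's loads and stores from being
 * reordered before the decrement.
 */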
extern __inline__ void down(struct semaphore * sem)
{
	int result;

	result = atomic_dec_return(&sem->count);
	membar("#StoreLoad | #StoreStore");
	if (result < 0)
		__down(sem);
}

extern __inline__ int down_interruptible(struct semaphore *sem)
{
	int result, ret = 0;

	result = atomic_dec_return(&sem->count);
	membar("#StoreLoad | #StoreStore");
	if (result < 0)
		ret = __down_interruptible(sem);
	return ret;
}

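/*
 * Release: the membar makes the critical section's accesses visible
 * before the count is raised; a non-positive result means somebody is
 * sleeping and must be woken via __up().
 */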
extern __inline__ void up(struct semaphore * sem)
{
	membar("#StoreStore | #LoadStore");
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}

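/*
 * Minimal usage sketch (hypothetical caller, not part of this header);
 * a MUTEX-initialized semaphore acts as a sleeping mutex:
 *
 *	static struct semaphore my_lock = MUTEX;
 *
 *	int my_op(void)
 *	{
 *		if (down_interruptible(&my_lock))
 *			return -EINTR;
 *		... critical section ...
 *		up(&my_lock);
 *		return 0;
 *	}
 */
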
#endif /* __KERNEL__ */

#endif /* !(_SPARC64_SEMAPHORE_H) */