Diffstat (limited to 'arch/mips/kernel/semaphore.c'):
 arch/mips/kernel/semaphore.c | 111 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 111 insertions(+), 0 deletions(-)
diff --git a/arch/mips/kernel/semaphore.c b/arch/mips/kernel/semaphore.c
index d62b355e1..1f47bd929 100644
--- a/arch/mips/kernel/semaphore.c
+++ b/arch/mips/kernel/semaphore.c
@@ -127,3 +127,114 @@ int __down_trylock(struct semaphore * sem)
 {
 	return waking_non_zero_trylock(sem);
 }
+
+/*
+ * RW Semaphores
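+ *
+ * The count is biased by RW_LOCK_BIAS: a writer subtracts the whole
+ * bias, a reader a single unit.  Bits 0 and 1 of sem->granted hand a
+ * contended lock to a sleeping reader or writer respectively (set in
+ * __rwsem_wake below).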
+ */
+void
+__down_read(struct rw_semaphore *sem, int count)
+{
+	DOWN_VAR;
+
+ retry_down:
+	if (count < 0) {
+		/* Wait for the lock to become unbiased.  Readers
+		   are non-exclusive. */
+
+		/* This takes care of granting the lock. */
+		up_read(sem);
+
+		add_wait_queue(&sem->wait, &wait);
+		while (atomic_read(&sem->count) < 0) {
+			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+			if (atomic_read(&sem->count) >= 0)
+				break;
+			schedule();
+		}
+
+		remove_wait_queue(&sem->wait, &wait);
+		tsk->state = TASK_RUNNING;
+
+		mb();
+		count = atomic_dec_return(&sem->count);
+		if (count <= 0)
+			goto retry_down;
+	} else {
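+		/* The lock is being handed to us: sleep until
+		   __rwsem_wake() sets bit 0 of sem->granted. */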
+		add_wait_queue(&sem->wait, &wait);
+
+		while (1) {
+			if (test_and_clear_bit(0, &sem->granted))
+				break;
+			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
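+			/* Recheck the bit after changing the task state
+			   so a wakeup in between cannot be lost. */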
+			if ((sem->granted & 1) == 0)
+				schedule();
+		}
+
+		remove_wait_queue(&sem->wait, &wait);
+		tsk->state = TASK_RUNNING;
+	}
+}
+
+void
+__down_write(struct rw_semaphore *sem, int count)
+{
+	DOWN_VAR;
+
+ retry_down:
+	if (count + RW_LOCK_BIAS < 0) {
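+		/* Another writer already owns the bias.  up_write()
+		   backs our subtraction out again; then sleep until
+		   the lock is released. */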
+		up_write(sem);
+
+		add_wait_queue_exclusive(&sem->wait, &wait);
+
+		while (atomic_read(&sem->count) < 0) {
+			set_task_state(tsk, (TASK_UNINTERRUPTIBLE
+					     | TASK_EXCLUSIVE));
+			if (atomic_read(&sem->count) >= RW_LOCK_BIAS)
+				break;
+			schedule();
+		}
+
+		remove_wait_queue(&sem->wait, &wait);
+		tsk->state = TASK_RUNNING;
+
+		mb();
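+		/* Try for the whole bias again.  Only a clean
+		   RW_LOCK_BIAS-to-0 transition means we now own the
+		   lock exclusively. */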
+		count = atomic_sub_return(RW_LOCK_BIAS, &sem->count);
+		if (count != 0)
+			goto retry_down;
+	} else {
+		/* Put ourselves at the end of the list. */
+		add_wait_queue_exclusive(&sem->write_bias_wait, &wait);
+
+		while (1) {
+			if (test_and_clear_bit(1, &sem->granted))
+				break;
+			set_task_state(tsk, (TASK_UNINTERRUPTIBLE
+					     | TASK_EXCLUSIVE));
+			if ((sem->granted & 2) == 0)
+				schedule();
+		}
+
+		remove_wait_queue(&sem->write_bias_wait, &wait);
+		tsk->state = TASK_RUNNING;
+
+		/* If the lock is currently unbiased, awaken the sleepers.
+		   FIXME: This wakes up the readers early in a bit of a
+		   stampede -> bad! */
+		if (atomic_read(&sem->count) >= 0)
+			wake_up(&sem->wait);
+	}
+}
+
+void
+__rwsem_wake(struct rw_semaphore *sem, unsigned long readers)
+{
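+	/* Hand the lock over: bit 0 of sem->granted releases the
+	   readers, bit 1 the one exclusive writer.  A grant still
+	   pending from last time is a bug. */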
+	if (readers) {
+		if (test_and_set_bit(0, &sem->granted))
+			BUG();
+		wake_up(&sem->wait);
+	} else {
+		if (test_and_set_bit(1, &sem->granted))
+			BUG();
+		wake_up(&sem->write_bias_wait);
+	}
+}
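
For context, a bias-based fast path would normally do the atomic
operation inline and fall into these helpers only on contention.  A
minimal sketch of such wrappers (not part of this patch; the names are
hypothetical, and the real entry points live in the MIPS semaphore
headers):

	/* Hypothetical fast-path wrappers, assuming the unlocked count
	   is RW_LOCK_BIAS as the slow paths above imply. */
	static inline void sketch_down_read(struct rw_semaphore *sem)
	{
		int count = atomic_dec_return(&sem->count);
		if (count < 0)		/* a writer holds or wants the bias */
			__down_read(sem, count);
	}

	static inline void sketch_down_write(struct rw_semaphore *sem)
	{
		int count = atomic_sub_return(RW_LOCK_BIAS, &sem->count);
		if (count != 0)		/* readers present, or another writer */
			__down_write(sem, count);
	}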