author     Ralf Baechle <ralf@linux-mips.org>   2000-02-24 00:12:35 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   2000-02-24 00:12:35 +0000
commit     482368b1a8e45430672c58c9a42e7d2004367126 (patch)
tree       ce2a1a567d4d62dee7c2e71a46a99cf72cf1d606 /arch/arm/kernel
parent     e4d0251c6f56ab2e191afb70f80f382793e23f74 (diff)
Merge with 2.3.47. Guys, this is buggy as shit. You've been warned.
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/armksyms.c        7
-rw-r--r--  arch/arm/kernel/entry-common.S   51
-rw-r--r--  arch/arm/kernel/irq.c            12
-rw-r--r--  arch/arm/kernel/semaphore.c     171
4 files changed, 195 insertions, 46 deletions
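
Editor's note: the entry-common.S and irq.c hunks below replace the old global bottom-half test (bh_active & bh_mask) with the per-CPU softirq state used from 2.3.43 onwards. As a reading aid, here is a stand-alone C sketch of the new test. The two-field layout is inferred from the expressions this diff itself uses (softirq_state[cpu].active, softirq_state[cpu].mask); everything else in the sketch is illustrative scaffolding, not kernel code.

```c
#include <stdio.h>

#define NR_CPUS 1

/* Layout assumed from the softirq_state[cpu].active/.mask expressions
 * in the irq.c hunk below; the real definition lives elsewhere in the
 * 2.3.47 tree. */
struct softirq_state {
	unsigned int active;	/* softirqs raised and pending */
	unsigned int mask;	/* softirqs currently enabled  */
};

static struct softirq_state softirq_state[NR_CPUS];

/* Stub standing in for the kernel's do_softirq(). */
static void do_softirq(int cpu)
{
	unsigned int pending = softirq_state[cpu].active & softirq_state[cpu].mask;

	softirq_state[cpu].active &= ~pending;	/* consume what we run */
	printf("cpu%d: running softirqs 0x%x\n", cpu, pending);
}

int main(void)
{
	int cpu = 0;

	softirq_state[cpu].mask = ~0u;
	softirq_state[cpu].active = 1u << 0;	/* pretend one softirq was raised */

	/* The per-CPU test each return path now performs, replacing the
	 * single global bh_active & bh_mask check. */
	if (softirq_state[cpu].active & softirq_state[cpu].mask)
		do_softirq(cpu);

	return 0;
}
```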
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 0caf4bf26..63d4631a4 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -188,6 +188,10 @@ EXPORT_SYMBOL(uaccess_kernel);
 EXPORT_SYMBOL(uaccess_user);
 #endif
 
+EXPORT_SYMBOL(consistent_alloc);
+EXPORT_SYMBOL(consistent_free);
+EXPORT_SYMBOL(consistent_sync);
+
 /* gcc lib functions */
 EXPORT_SYMBOL_NOVERS(__gcc_bcmp);
 EXPORT_SYMBOL_NOVERS(__ashldi3);
@@ -234,5 +238,8 @@ EXPORT_SYMBOL_NOVERS(__down_failed);
 EXPORT_SYMBOL_NOVERS(__down_interruptible_failed);
 EXPORT_SYMBOL_NOVERS(__down_trylock_failed);
 EXPORT_SYMBOL_NOVERS(__up_wakeup);
+EXPORT_SYMBOL_NOVERS(__down_read_failed);
+EXPORT_SYMBOL_NOVERS(__down_write_failed);
+EXPORT_SYMBOL_NOVERS(__rwsem_wake);
 
 EXPORT_SYMBOL(get_wchan);
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 2be1a6012..5dc61c6d7 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -15,26 +15,28 @@
 #define HARVARD_CACHE
 #endif
 
+	.macro	get_softirq, rd
+#ifdef __SMP__
+#error SMP not supported
+#else
+	ldr	\rd, __softirq_state
+#endif
+	.endm
+
 	.globl	ret_from_sys_call
 
 	.align	5
 fast_syscall_return:
-	str	r0, [sp, #S_R0 + S_OFF]	@ returned r0
+	str	r0, [sp, #S_R0 + S_OFF]		@ returned r0
 slow_syscall_return:
 	add	sp, sp, #S_OFF
-ret_from_sys_call:
-#ifdef HARVARD_CACHE
-	ldr	r0, bh_data
-	ldr	r4, bh_data+4
-#else
-	adr	r0, bh_data
-	ldmia	r0, {r0, r4}
-#endif
-	ldr	r0, [r0]
-	ldr	r1, [r4]
+ret_from_sys_call:				@ external entry
+	get_softirq r0
+	ldmia	r0, {r0, r1}
+	mov	r4, #1				@ flag this as being syscall return
 	tst	r0, r1
-	blne	SYMBOL_NAME(do_bottom_half)
-ret_with_reschedule:
+	blne	SYMBOL_NAME(do_softirq)
+ret_with_reschedule:				@ external entry (__irq_usr)
 	get_current_task r5
 	ldr	r0, [r5, #TSK_NEED_RESCHED]
 	ldr	r1, [r5, #TSK_SIGPENDING]
@@ -43,30 +45,23 @@ ret_with_reschedule:
 	teq	r1, #0				@ check for signals
 	bne	ret_signal
 
-ret_from_all:	restore_user_regs
+ret_from_all:	restore_user_regs		@ internal
 
-ret_signal:	mov	r1, sp
+ret_signal:	mov	r1, sp			@ internal
 	adrsvc	al, lr, ret_from_all
 	mov	r2, r4
 	b	SYMBOL_NAME(do_signal)
 
-ret_reschedule:	adrsvc	al, lr, ret_with_reschedule
+ret_reschedule:	adrsvc	al, lr, ret_with_reschedule @ internal
 	b	SYMBOL_NAME(schedule)
 
 	.globl	ret_from_exception
-ret_from_exception:
-#ifdef HARVARD_CACHE
-	ldr	r0, bh_data
-	ldr	r1, bh_data + 4
-#else
-	adr	r0, bh_data
+ret_from_exception:				@ external entry
+	get_softirq r0
 	ldmia	r0, {r0, r1}
-#endif
-	ldr	r0, [r0]
-	ldr	r1, [r1]
 	mov	r4, #0
 	tst	r0, r1
-	blne	SYMBOL_NAME(do_bottom_half)
+	blne	SYMBOL_NAME(do_softirq)
 	ldr	r0, [sp, #S_PSR]
 	tst	r0, #3				@ returning to user mode?
 	beq	ret_with_reschedule
@@ -147,8 +142,8 @@ vector_swi:	save_user_regs
 
 	.align	5
-bh_data:	.word	SYMBOL_NAME(bh_mask)
-		.word	SYMBOL_NAME(bh_active)
+__softirq_state:
+		.word	SYMBOL_NAME(softirq_state)
 
 ENTRY(sys_call_table)
 #include "calls.S"
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index f6c310905..6e3c863d5 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -237,16 +237,8 @@ asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
 
 	irq_exit(cpu, irq);
 
-	/*
-	 * This should be conditional: we should really get
-	 * a return code from the irq handler to tell us
-	 * whether the handler wants us to do software bottom
-	 * half handling or not..
-	 */
-	if (1) {
-		if (bh_active & bh_mask)
-			do_bottom_half();
-	}
+	if (softirq_state[cpu].active & softirq_state[cpu].mask)
+		do_softirq();
 }
 
 #if defined(CONFIG_ARCH_ACORN)
diff --git a/arch/arm/kernel/semaphore.c b/arch/arm/kernel/semaphore.c
index 71bf85e09..a2ec71526 100644
--- a/arch/arm/kernel/semaphore.c
+++ b/arch/arm/kernel/semaphore.c
@@ -164,6 +164,126 @@ int __down_trylock(struct semaphore * sem)
 	return 1;
 }
 
+struct rw_semaphore *down_read_failed_biased(struct rw_semaphore *sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	add_wait_queue(&sem->wait, &wait);	/* put ourselves at the head of the list */
+
+	for (;;) {
+		if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0))
+			break;
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		if (!sem->read_bias_granted)
+			schedule();
+	}
+
+	remove_wait_queue(&sem->wait, &wait);
+	tsk->state = TASK_RUNNING;
+
+	return sem;
+}
+
+struct rw_semaphore *down_write_failed_biased(struct rw_semaphore *sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */
+
+	for (;;) {
+		if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0))
+			break;
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
+		if (!sem->write_bias_granted)
+			schedule();
+	}
+
+	remove_wait_queue(&sem->write_bias_wait, &wait);
+	tsk->state = TASK_RUNNING;
+
+	/* if the lock is currently unbiased, awaken the sleepers
+	 * FIXME: this wakes up the readers early in a bit of a
+	 * stampede -> bad!
+	 */
+	if (atomic_read(&sem->count) >= 0)
+		wake_up(&sem->wait);
+
+	return sem;
+}
+
+/* Wait for the lock to become unbiased.  Readers
+ * are non-exclusive. =)
+ */
+struct rw_semaphore *down_read_failed(struct rw_semaphore *sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	/* this takes care of granting the lock */
+	__up_op_read(sem, __rwsem_wake);
+
+	add_wait_queue(&sem->wait, &wait);
+
+	while (atomic_read(&sem->count) < 0) {
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		if (atomic_read(&sem->count) >= 0)
+			break;
+		schedule();
+	}
+
+	remove_wait_queue(&sem->wait, &wait);
+	tsk->state = TASK_RUNNING;
+
+	return sem;
+}
+
+/* Wait for the lock to become unbiased. Since we're
+ * a writer, we'll make ourselves exclusive.
+ */
+struct rw_semaphore *down_write_failed(struct rw_semaphore *sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	/* this takes care of granting the lock */
+	__up_op_write(sem, __rwsem_wake);
+
+	add_wait_queue_exclusive(&sem->wait, &wait);
+
+	while (atomic_read(&sem->count) < 0) {
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
+		if (atomic_read(&sem->count) >= 0)
+			break;	/* we must attempt to aquire or bias the lock */
+		schedule();
+	}
+
+	remove_wait_queue(&sem->wait, &wait);
+	tsk->state = TASK_RUNNING;
+
+	return sem;
+}
+
+/* Called when someone has done an up that transitioned from
+ * negative to non-negative, meaning that the lock has been
+ * granted to whomever owned the bias.
+ */
+struct rw_semaphore *rwsem_wake_readers(struct rw_semaphore *sem)
+{
+	if (xchg(&sem->read_bias_granted, 1))
+		BUG();
+	wake_up(&sem->wait);
+	return sem;
+}
+
+struct rw_semaphore *rwsem_wake_writer(struct rw_semaphore *sem)
+{
+	if (xchg(&sem->write_bias_granted, 1))
+		BUG();
+	wake_up(&sem->write_bias_wait);
+	return sem;
+}
+
 /*
  * The semaphore operations have a special calling sequence that
  * allow us to do a simpler in-line version of them. These routines
@@ -174,30 +294,65 @@ int __down_trylock(struct semaphore * sem)
  * registers (r0 to r3, ip and lr) except r0 in the cases where it
  * is used as a return value..
  */
-asm(".align 5
+asm("	.section .text.lock, \"ax\"
+	.align	5
 	.globl	__down_failed
 __down_failed:
 	stmfd	sp!, {r0 - r3, ip, lr}
 	bl	__down
-	ldmfd	sp!, {r0 - r3, ip, pc}");
+	ldmfd	sp!, {r0 - r3, ip, pc}
 
-asm(".align 5
+	.align	5
 	.globl	__down_interruptible_failed
 __down_interruptible_failed:
 	stmfd	sp!, {r1 - r3, ip, lr}
 	bl	__down_interruptible
-	ldmfd	sp!, {r1 - r3, ip, pc}");
+	ldmfd	sp!, {r1 - r3, ip, pc}
 
-asm(".align 5
+	.align	5
 	.globl	__down_trylock_failed
 __down_trylock_failed:
 	stmfd	sp!, {r1 - r3, ip, lr}
 	bl	__down_trylock
-	ldmfd	sp!, {r1 - r3, ip, pc}");
+	ldmfd	sp!, {r1 - r3, ip, pc}
 
-asm(".align 5
+	.align	5
 	.globl	__up_wakeup
 __up_wakeup:
 	stmfd	sp!, {r0 - r3, ip, lr}
 	bl	__up
-	ldmfd	sp!, {r0 - r3, ip, pc}");
+	ldmfd	sp!, {r0 - r3, ip, pc}
+
+	.align	5
+	.globl	__down_read_failed
+__down_read_failed:
+	stmfd	sp!, {r0 - r3, ip, lr}
+	bcc	1f
+	bl	down_read_failed_biased
+	ldmfd	sp!, {r0 - r3, ip, pc}
+1:	bl	down_read_failed
+	/***/
+
+	.align	5
+	.globl	__down_write_failed
+__down_write_failed:
+	stmfd	sp!, {r0 - r3, ip, lr}
+	bcc	1f
+	bl	down_write_failed_biased
+	ldmfd	sp!, {r0 - r3, ip, pc}
+1:	bl	down_write_failed
+	/***/
+
+	.align	5
+	.globl	__rwsem_wake
+__rwsem_wake:
+	stmfd	sp!, {r0 - r3, ip, lr}
+	beq	1f
+	bl	rwsem_wake_readers
+	ldmfd	sp!, {r0 - r3, ip, pc}
1:	bl	rwsem_wake_writer
+	ldmfd	sp!, {r0 - r3, ip, pc}
+
+	.previous
+	");
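
Editor's note on the semaphore.c additions: every slow path above follows the same sleep/wake idiom, namely queue the task, mark it sleeping *before* re-testing the wake condition, and only then call schedule(). That ordering is what closes the race with a wake_up() arriving between the test and the sleep. Below is a stand-alone C reduction of that idiom; the helpers are stand-ins for the kernel primitives (nothing here is kernel API), and only the state-before-retest ordering is the point.

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int count = -1;		/* < 0: contended, >= 0: free   */
static atomic_int task_state;		/* 0: running, 1: about to sleep */

/* Stub: a real schedule() would block until wake_up() makes us
 * runnable again.  Here we just simulate the up() freeing the lock. */
static void schedule(void)
{
	printf("schedule(): sleeping until woken\n");
	atomic_store(&count, 0);
}

static void wait_for_lock(void)
{
	while (atomic_load(&count) < 0) {
		/* 1. declare intent to sleep (set_task_state above)... */
		atomic_store(&task_state, 1);
		/* 2. ...then re-test: a wake-up landing after step 1 sees a
		 * sleeping task and makes it runnable, so the wake-up can
		 * never be missed and the task never sleeps forever. */
		if (atomic_load(&count) >= 0)
			break;
		schedule();
	}
	atomic_store(&task_state, 0);	/* back to TASK_RUNNING */
}

int main(void)
{
	wait_for_lock();
	printf("lock acquired, count = %d\n", atomic_load(&count));
	return 0;
}
```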