author     Ralf Baechle <ralf@linux-mips.org>   2001-01-31 22:22:27 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   2001-01-31 22:22:27 +0000
commit     825423e4c4f18289df2393951cfd2a7a31fc0464 (patch)
tree       4ad80e981c3d9effa910d2247d118d254f9a5d09 /arch/sh
parent     c4693dc4856ab907a5c02187a8d398861bebfc7e (diff)
Merge with Linux 2.4.1.
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/boot/compressed/head.S     2
-rw-r--r--  arch/sh/kernel/entry.S           738
-rw-r--r--  arch/sh/kernel/fpu.c             212
-rw-r--r--  arch/sh/kernel/head.S             44
-rw-r--r--  arch/sh/kernel/irq.c               2
-rw-r--r--  arch/sh/kernel/irq_imask.c         8
-rw-r--r--  arch/sh/kernel/process.c          32
-rw-r--r--  arch/sh/kernel/setup.c             4
-rw-r--r--  arch/sh/kernel/sh_bios.c          18
-rw-r--r--  arch/sh/kernel/sh_ksyms.c          5
-rw-r--r--  arch/sh/kernel/signal.c            6
-rw-r--r--  arch/sh/kernel/traps.c           446
-rw-r--r--  arch/sh/lib/checksum.S           157
-rw-r--r--  arch/sh/lib/delay.c                6
-rw-r--r--  arch/sh/mm/cache.c                16
15 files changed, 1090 insertions, 606 deletions
diff --git a/arch/sh/boot/compressed/head.S b/arch/sh/boot/compressed/head.S
index 75d8b4ef9..0edf49085 100644
--- a/arch/sh/boot/compressed/head.S
+++ b/arch/sh/boot/compressed/head.S
@@ -43,7 +43,7 @@ bss_start_addr:
end_addr:
.long _end
init_sr:
- .long 0x40000000 /* Privileged mode, Bank=0, Block=0, I3-I0=0 */
+ .long 0x400000F0 /* Privileged mode, Bank=0, Block=0, IMASK=0xF */
init_stack_addr:
.long stack_start
decompress_kernel_addr:
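
The only change to the decompressor head is the initial SR value. On SH-3/SH-4 the status
register keeps the privilege bit (MD) in bit 30 and the 4-bit interrupt mask (IMASK) in bits
7-4, so the new constant still enters privileged mode but also raises IMASK to 0xF, keeping
all interrupt levels masked until the kernel proper reprograms SR. A minimal sketch of how the
constant decomposes (the macro names are illustrative, not taken from the source):

    /* Illustrative only: bit layout of the SR value loaded above. */
    #define SR_MD    (1UL << 30)       /* privileged mode            */
    #define SR_IMASK (0xfUL << 4)      /* interrupt mask = level 15  */

    unsigned long init_sr = SR_MD | SR_IMASK;    /* == 0x400000F0 */
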
diff --git a/arch/sh/kernel/entry.S b/arch/sh/kernel/entry.S
index 37d29a6c4..f2756f72c 100644
--- a/arch/sh/kernel/entry.S
+++ b/arch/sh/kernel/entry.S
@@ -36,23 +36,23 @@
* NOTE: This code uses a convention that instructions in the delay slot
* of a transfer-control instruction are indented by an extra space, thus:
*
- * jmp @$k0 ! control-transfer instruction
- * ldc $k1, $ssr ! delay slot
+ * jmp @k0 ! control-transfer instruction
+ * ldc k1, ssr ! delay slot
*
* Stack layout in 'ret_from_syscall':
* ptrace needs to have all regs on the stack.
* if the order here is changed, it needs to be
* updated in ptrace.c and ptrace.h
*
- * $r0
+ * r0
* ...
- * $r15 = stack pointer
- * $spc
- * $pr
- * $ssr
- * $gbr
- * $mach
- * $macl
+ * r15 = stack pointer
+ * spc
+ * pr
+ * ssr
+ * gbr
+ * mach
+ * macl
* syscall #
*
*/
@@ -88,16 +88,16 @@ MMU_TEA = 0xff00000c ! TLB Exception Address Register
#endif
/* Offsets to the stack */
-R0 = 0 /* Return value. New ABI also arg4 */
-R1 = 4 /* New ABI: arg5 */
-R2 = 8 /* New ABI: arg6 */
-R3 = 12 /* New ABI: syscall_nr */
-R4 = 16 /* New ABI: arg0 */
-R5 = 20 /* New ABI: arg1 */
-R6 = 24 /* New ABI: arg2 */
-R7 = 28 /* New ABI: arg3 */
-SP = (15*4)
-SR = (16*4+8)
+OFF_R0 = 0 /* Return value. New ABI also arg4 */
+OFF_R1 = 4 /* New ABI: arg5 */
+OFF_R2 = 8 /* New ABI: arg6 */
+OFF_R3 = 12 /* New ABI: syscall_nr */
+OFF_R4 = 16 /* New ABI: arg0 */
+OFF_R5 = 20 /* New ABI: arg1 */
+OFF_R6 = 24 /* New ABI: arg2 */
+OFF_R7 = 28 /* New ABI: arg3 */
+OFF_SP = (15*4)
+OFF_SR = (16*4+8)
SYSCALL_NR = (16*4+6*4)
@@ -140,117 +140,139 @@ SYSCALL_NR = (16*4+6*4)
!
#define STI() \
- mov.l __INV_IMASK, $r11; \
- stc $sr, $r10; \
- and $r11, $r10; \
- stc $k_g_imask, $r11; \
- or $r11, $r10; \
- ldc $r10, $sr
+ mov.l __INV_IMASK, r11; \
+ stc sr, r10; \
+ and r11, r10; \
+ stc k_g_imask, r11; \
+ or r11, r10; \
+ ldc r10, sr
.align 2
tlb_miss_load:
bra call_dpf
- mov #0, $r5
+ mov #0, r5
.align 2
tlb_miss_store:
bra call_dpf
- mov #1, $r5
+ mov #1, r5
.align 2
initial_page_write:
bra call_dpf
- mov #1, $r5
+ mov #1, r5
.align 2
tlb_protection_violation_load:
bra call_dpf
- mov #0, $r5
+ mov #0, r5
.align 2
tlb_protection_violation_store:
bra call_dpf
- mov #1, $r5
+ mov #1, r5
call_dpf:
- mov.l 1f, $r0
- mov $r5, $r8
- mov.l @$r0, $r6
- mov $r6, $r9
- mov.l 2f, $r0
- sts $pr, $r10
- jsr @$r0
- mov $r15, $r4
+ mov.l 1f, r0
+ mov r5, r8
+ mov.l @r0, r6
+ mov r6, r9
+ mov.l 2f, r0
+ sts pr, r10
+ jsr @r0
+ mov r15, r4
!
- tst #0xff, $r0
+ tst #0xff, r0
bf/s 0f
- lds $r10, $pr
+ lds r10, pr
rts
nop
0: STI()
- mov.l 3f, $r0
- mov $r9, $r6
- mov $r8, $r5
- jmp @$r0
- mov $r15, $r4
+ mov.l 3f, r0
+ mov r9, r6
+ mov r8, r5
+ jmp @r0
+ mov r15, r4
.align 2
1: .long MMU_TEA
2: .long SYMBOL_NAME(__do_page_fault)
3: .long SYMBOL_NAME(do_page_fault)
+ .align 2
+address_error_load:
+ bra call_dae
+ mov #0,r5 ! writeaccess = 0
+
+ .align 2
+address_error_store:
+ bra call_dae
+ mov #1,r5 ! writeaccess = 1
+
+call_dae:
+ mov.l 1f, r0
+ mov.l @r0, r6 ! address
+ mov.l 2f, r0
+ jmp @r0
+ mov r15, r4 ! regs
+
+ .align 2
+1: .long MMU_TEA
+2: .long SYMBOL_NAME(do_address_error)
+
#if defined(CONFIG_DEBUG_KERNEL_WITH_GDB_STUB) || defined(CONFIG_SH_STANDARD_BIOS)
.align 2
/* Unwind the stack and jmp to the debug entry */
debug_kernel:
- mov.l @$r15+, $r0
- mov.l @$r15+, $r1
- mov.l @$r15+, $r2
- mov.l @$r15+, $r3
- mov.l @$r15+, $r4
- mov.l @$r15+, $r5
- mov.l @$r15+, $r6
- mov.l @$r15+, $r7
- stc $sr, $r8
- mov.l 1f, $r9 ! BL =1, RB=1, IMASK=0x0F
- or $r9, $r8
- ldc $r8, $sr ! here, change the register bank
- mov.l @$r15+, $r8
- mov.l @$r15+, $r9
- mov.l @$r15+, $r10
- mov.l @$r15+, $r11
- mov.l @$r15+, $r12
- mov.l @$r15+, $r13
- mov.l @$r15+, $r14
- mov.l @$r15+, $k0
- ldc.l @$r15+, $spc
- lds.l @$r15+, $pr
- mov.l @$r15+, $k1
- ldc.l @$r15+, $gbr
- lds.l @$r15+, $mach
- lds.l @$r15+, $macl
- mov $k0, $r15
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ mov.l @r15+, r7
+ stc sr, r8
+ mov.l 1f, r9 ! BL =1, RB=1, IMASK=0x0F
+ or r9, r8
+ ldc r8, sr ! here, change the register bank
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ mov.l @r15+, r10
+ mov.l @r15+, r11
+ mov.l @r15+, r12
+ mov.l @r15+, r13
+ mov.l @r15+, r14
+ mov.l @r15+, k0
+ ldc.l @r15+, spc
+ lds.l @r15+, pr
+ mov.l @r15+, k1
+ ldc.l @r15+, gbr
+ lds.l @r15+, mach
+ lds.l @r15+, macl
+ mov k0, r15
!
- mov.l 2f, $k0
- jmp @$k0
- ldc $k1, $ssr
+ mov.l 2f, k0
+ mov.l @k0, k0
+ jmp @k0
+ ldc k1, ssr
.align 2
1: .long 0x300000f0
-2: .long CONFIG_GDB_STUB_VBR + 0x100
+2: .long SYMBOL_NAME(gdb_vbr_vector)
#endif
.align 2
debug_trap:
#if defined(CONFIG_DEBUG_KERNEL_WITH_GDB_STUB) || defined(CONFIG_SH_STANDARD_BIOS)
- mov #SR, $r0
- mov.l @($r0,$r15), $r0 ! get status register
- shll $r0
- shll $r0 ! kernel space?
+ mov #OFF_SR, r0
+ mov.l @(r0,r15), r0 ! get status register
+ shll r0
+ shll r0 ! kernel space?
bt/s debug_kernel
#endif
- mov.l @$r15, $r0
- mov.l 1f, $r8
- jmp @$r8
+ mov.l @r15, r0
+ mov.l 1f, r8
+ jmp @r8
nop
.align 2
@@ -260,8 +282,8 @@ debug_trap:
error:
!
STI()
- mov.l 1f, $r0
- jmp @$r0
+ mov.l 1f, r0
+ jmp @r0
nop
.align 2
1: .long SYMBOL_NAME(do_exception_error)
@@ -272,7 +294,7 @@ error:
!
ENTRY(ret_from_fork)
bra SYMBOL_NAME(ret_from_syscall)
- add #4, $r15 ! pop down bogus r0 (see switch_to MACRO)
+ add #4, r15 ! pop down bogus r0 (see switch_to MACRO)
/*
* Old syscall interface:
@@ -305,90 +327,90 @@ ENTRY(ret_from_fork)
*/
system_call:
- mov.l __TRA, $r9
- mov.l @$r9, $r8
+ mov.l __TRA, r9
+ mov.l @r9, r8
!
! Is the trap argument >= 0x20? (TRA will be >= 0x80)
- mov #0x20, $r9
- extu.b $r9, $r9
- shll2 $r9
- cmp/hs $r9, $r8
+ mov #0x20, r9
+ extu.b r9, r9
+ shll2 r9
+ cmp/hs r9, r8
bt debug_trap
!
- mov #SYSCALL_NR, $r14
- add $r15, $r14
+ mov #SYSCALL_NR, r14
+ add r15, r14
!
#ifdef COMPAT_OLD_SYSCALL_ABI
- mov #0x40, $r9
- cmp/hs $r9, $r8
+ mov #0x40, r9
+ cmp/hs r9, r8
bf/s old_abi_system_call
nop
#endif
! New Syscall ABI
- add #-0x40, $r8
- shlr2 $r8
- shll8 $r8
- shll8 $r8 ! $r8 = num_args<<16
- mov $r3, $r10
- or $r8, $r10 ! Encode syscall # and # of arguments
- mov.l $r10, @$r14 ! set syscall_nr
+ add #-0x40, r8
+ shlr2 r8
+ shll8 r8
+ shll8 r8 ! r8 = num_args<<16
+ mov r3, r10
+ or r8, r10 ! Encode syscall # and # of arguments
+ mov.l r10, @r14 ! set syscall_nr
STI()
!
- stc $k_current, $r11
- mov.l @(tsk_ptrace,$r11), $r10 ! Is current PTRACE_SYSCALL'd?
- mov #PT_TRACESYS, $r11
- tst $r11, $r10
+ stc k_current, r11
+ mov.l @(tsk_ptrace,r11), r10 ! Is current PTRACE_SYSCALL'd?
+ mov #PT_TRACESYS, r11
+ tst r11, r10
bt 5f
! Yes it is traced.
- mov.l __syscall_trace, $r11 ! Call syscall_trace() which notifies
- jsr @$r11 ! superior (will chomp $R[0-7])
+ mov.l __syscall_trace, r11 ! Call syscall_trace() which notifies
+ jsr @r11 ! superior (will chomp R[0-7])
nop
- ! Reload $R0-$R4 from kernel stack, where the
+ ! Reload R0-R4 from kernel stack, where the
! parent may have modified them using
- ! ptrace(POKEUSR). (Note that $R0-$R2 are
+ ! ptrace(POKEUSR). (Note that R0-R2 are
! used by the system call handler directly
! from the kernel stack anyway, so don't need
! to be reloaded here.) This allows the parent
! to rewrite system calls and args on the fly.
- mov.l @(R4,$r15), $r4 ! arg0
- mov.l @(R5,$r15), $r5
- mov.l @(R6,$r15), $r6
- mov.l @(R7,$r15), $r7 ! arg3
- mov.l @(R3,$r15), $r3 ! syscall_nr
+ mov.l @(OFF_R4,r15), r4 ! arg0
+ mov.l @(OFF_R5,r15), r5
+ mov.l @(OFF_R6,r15), r6
+ mov.l @(OFF_R7,r15), r7 ! arg3
+ mov.l @(OFF_R3,r15), r3 ! syscall_nr
! Arrange for syscall_trace() to be called
! again as the system call returns.
- mov.l __syscall_ret_trace, $r10
+ mov.l __syscall_ret_trace, r10
bra 6f
- lds $r10, $pr
+ lds r10, pr
! No it isn't traced.
! Arrange for normal system call return.
-5: mov.l __syscall_ret, $r10
- lds $r10, $pr
+5: mov.l __syscall_ret, r10
+ lds r10, pr
! Call the system call handler through the table.
! (both normal and ptrace'd)
! First check for bad syscall number
-6: mov $r3, $r9
- mov.l __n_sys, $r10
- cmp/hs $r10, $r9
+6: mov r3, r9
+ mov.l __n_sys, r10
+ cmp/hs r10, r9
bf 2f
! Bad syscall number
rts ! go to syscall_ret or syscall_ret_trace
- mov #-ENOSYS, $r0
+ mov #-ENOSYS, r0
! Good syscall number
-2: shll2 $r9 ! x4
- mov.l __sct, $r11
- add $r11, $r9
- mov.l @$r9, $r11
- jmp @$r11 ! jump to specific syscall handler
+2: shll2 r9 ! x4
+ mov.l __sct, r11
+ add r11, r9
+ mov.l @r9, r11
+ jmp @r11 ! jump to specific syscall handler
nop
! In case of trace
syscall_ret_trace:
- mov.l $r0, @(R0,$r15) ! save the return value
- mov.l __syscall_trace, $r1
- mova SYMBOL_NAME(ret_from_syscall), $r0
- jmp @$r1 ! Call syscall_trace() which notifies superior
- lds $r0, $pr ! Then return to ret_from_syscall()
+ mov.l r0, @(OFF_R0,r15) ! save the return value
+ mov.l __syscall_trace, r1
+ mova SYMBOL_NAME(ret_from_syscall), r0
+ jmp @r1 ! Call syscall_trace() which notifies superior
+ lds r0, pr ! Then return to ret_from_syscall()
@@ -396,41 +418,41 @@ syscall_ret_trace:
! Handle old ABI system call.
! Note that ptrace(SYSCALL) is not supported for the old ABI.
! At this point:
-! $r0, $r4-7 as per ABI
-! $r8 = value of TRA register (= num_args<<2)
-! $r14 = points to SYSCALL_NR in stack frame
+! r0, r4-7 as per ABI
+! r8 = value of TRA register (= num_args<<2)
+! r14 = points to SYSCALL_NR in stack frame
old_abi_system_call:
- mov $r0, $r9 ! Save system call number in $r9
+ mov r0, r9 ! Save system call number in r9
! ! arrange for return which pops stack
- mov.l __old_abi_syscall_ret, $r10
- lds $r10, $pr
+ mov.l __old_abi_syscall_ret, r10
+ lds r10, pr
! Build the stack frame if TRA > 0
- mov $r8, $r10
- cmp/pl $r10
+ mov r8, r10
+ cmp/pl r10
bf 0f
- mov.l @(SP,$r15), $r0 ! get original user stack
-7: add #-4, $r10
-4: mov.l @($r0,$r10), $r1 ! May cause address error exception..
- mov.l $r1, @-$r15
- cmp/pl $r10
+ mov.l @(OFF_SP,r15), r0 ! get original user stack
+7: add #-4, r10
+4: mov.l @(r0,r10), r1 ! May cause address error exception..
+ mov.l r1, @-r15
+ cmp/pl r10
bt 7b
0:
- mov.l $r9, @$r14 ! set syscall_nr
+ mov.l r9, @r14 ! set syscall_nr
STI()
! Call the system call handler through the table.
! First check for bad syscall number
- mov.l __n_sys, $r10
- cmp/hs $r10, $r9
+ mov.l __n_sys, r10
+ cmp/hs r10, r9
bf 2f
! Bad syscall number
rts ! return to old_abi_syscall_ret
- mov #-ENOSYS, $r0
+ mov #-ENOSYS, r0
! Good syscall number
-2: shll2 $r9 ! x4
- mov.l __sct, $r11
- add $r11, $r9
- mov.l @$r9, $r11
- jmp @$r11 ! call specific syscall handler,
+2: shll2 r9 ! x4
+ mov.l __sct, r11
+ add r11, r9
+ mov.l @r9, r11
+ jmp @r11 ! call specific syscall handler,
nop
.align 2
@@ -440,16 +462,16 @@ __old_abi_syscall_ret:
! This code gets called on address error exception when copying
! syscall arguments from user stack to kernel stack. It is
! supposed to return -EINVAL through old_abi_syscall_ret, but it
- ! appears to have been broken for a long time in that the $r0
- ! return value will be saved into the kernel stack relative to $r15
- ! but the value of $r15 is not correct partway through the loop.
- ! So the user prog is returned its old $r0 value, not -EINVAL.
+ ! appears to have been broken for a long time in that the r0
+ ! return value will be saved into the kernel stack relative to r15
+ ! but the value of r15 is not correct partway through the loop.
+ ! So the user prog is returned its old r0 value, not -EINVAL.
! Greg Banks 28 Aug 2000.
.section .fixup,"ax"
fixup_syscall_argerr:
- ! First get $r15 back to
+ ! First get r15 back to
rts
- mov #-EINVAL, $r0
+ mov #-EINVAL, r0
.previous
.section __ex_table, "a"
@@ -473,18 +495,18 @@ __syscall_ret:
.align 2
reschedule:
- mova SYMBOL_NAME(ret_from_syscall), $r0
- mov.l 1f, $r1
- jmp @$r1
- lds $r0, $pr
+ mova SYMBOL_NAME(ret_from_syscall), r0
+ mov.l 1f, r1
+ jmp @r1
+ lds r0, pr
.align 2
1: .long SYMBOL_NAME(schedule)
ENTRY(ret_from_irq)
- mov #SR, $r0
- mov.l @($r0,$r15), $r0 ! get status register
- shll $r0
- shll $r0 ! kernel space?
+ mov #OFF_SR, r0
+ mov.l @(r0,r15), r0 ! get status register
+ shll r0
+ shll r0 ! kernel space?
bt restore_all ! Yes, it's from kernel, go back soon
!
STI()
@@ -492,10 +514,10 @@ ENTRY(ret_from_irq)
nop
ENTRY(ret_from_exception)
- mov #SR, $r0
- mov.l @($r0,$r15), $r0 ! get status register
- shll $r0
- shll $r0 ! kernel space?
+ mov #OFF_SR, r0
+ mov.l @(r0,r15), r0 ! get status register
+ shll r0
+ shll r0 ! kernel space?
bt restore_all ! Yes, it's from kernel, go back soon
!
STI()
@@ -508,38 +530,38 @@ __INV_IMASK:
.align 2
#ifdef COMPAT_OLD_SYSCALL_ABI
old_abi_syscall_ret:
- add $r8, $r15 ! pop off the arguments
+ add r8, r15 ! pop off the arguments
/* fall through */
#endif
syscall_ret:
- mov.l $r0, @(R0,$r15) ! save the return value
+ mov.l r0, @(OFF_R0,r15) ! save the return value
/* fall through */
ENTRY(ret_from_syscall)
- mov.l __irq_stat, $r0 ! softirq_active
- mov.l @$r0, $r1
- mov.l @(4,$r0), $r2 ! softirq_mask
- tst $r2, $r1
+ mov.l __irq_stat, r0 ! softirq_active
+ mov.l @r0, r1
+ mov.l @(4,r0), r2 ! softirq_mask
+ tst r2, r1
bt ret_with_reschedule
handle_softirq:
- mov.l __do_softirq, $r0
- jsr @$r0
+ mov.l __do_softirq, r0
+ jsr @r0
nop
ret_with_reschedule:
- stc $k_current, $r1
- mov.l @(need_resched,$r1), $r0
- tst #0xff, $r0
+ stc k_current, r1
+ mov.l @(need_resched,r1), r0
+ tst #0xff, r0
bf reschedule
- mov.l @(sigpending,$r1), $r0
- tst #0xff, $r0
+ mov.l @(sigpending,r1), r0
+ tst #0xff, r0
bt restore_all
signal_return:
- mov $r15, $r4
- mov #0, $r5
- mov.l __do_signal, $r1
- mova restore_all, $r0
- jmp @$r1
- lds $r0, $pr
+ mov r15, r4
+ mov #0, r5
+ mov.l __do_signal, r1
+ mova restore_all, r0
+ jmp @r1
+ lds r0, pr
.align 2
__do_signal:
.long SYMBOL_NAME(do_signal)
@@ -551,108 +573,108 @@ __do_softirq:
.align 2
restore_all:
#if defined(__SH4__)
- mov.l __fpu_prepare_fd, $r0
- jsr @$r0
- stc $sr, $r4
+ mov.l __fpu_prepare_fd, r0
+ jsr @r0
+ stc sr, r4
#endif
!
- mov.l @$r15+, $r0
- mov.l @$r15+, $r1
- mov.l @$r15+, $r2
- mov.l @$r15+, $r3
- mov.l @$r15+, $r4
- mov.l @$r15+, $r5
- mov.l @$r15+, $r6
- mov.l @$r15+, $r7
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ mov.l @r15+, r7
!
- stc $sr, $r8
- mov.l __blrb_flags, $r9 ! BL =1, RB=1
- or $r9, $r8
- ldc $r8, $sr ! here, change the register bank
+ stc sr, r8
+ mov.l __blrb_flags, r9 ! BL =1, RB=1
+ or r9, r8
+ ldc r8, sr ! here, change the register bank
!
- mov.l @$r15+, $r8
- mov.l @$r15+, $r9
- mov.l @$r15+, $r10
- mov.l @$r15+, $r11
- mov.l @$r15+, $r12
- mov.l @$r15+, $r13
- mov.l @$r15+, $r14
- mov.l @$r15+, $k4 ! original stack pointer
- ldc.l @$r15+, $spc
- lds.l @$r15+, $pr
- mov.l @$r15+, $k3 ! original SR
- ldc.l @$r15+, $gbr
- lds.l @$r15+, $mach
- lds.l @$r15+, $macl
- add #4, $r15 ! Skip syscall number
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ mov.l @r15+, r10
+ mov.l @r15+, r11
+ mov.l @r15+, r12
+ mov.l @r15+, r13
+ mov.l @r15+, r14
+ mov.l @r15+, k4 ! original stack pointer
+ ldc.l @r15+, spc
+ lds.l @r15+, pr
+ mov.l @r15+, k3 ! original SR
+ ldc.l @r15+, gbr
+ lds.l @r15+, mach
+ lds.l @r15+, macl
+ add #4, r15 ! Skip syscall number
!
! Calculate new SR value
- mov $k3, $k2 ! original SR value
- mov.l 1f, $k1
- stc $sr, $k0
- and $k1, $k0 ! Get current FD-bit
- mov.l 2f, $k1
- and $k1, $k2 ! Mask orignal SR value
- or $k0, $k2 ! Inherit current FD-bit
+ mov k3, k2 ! original SR value
+ mov.l 1f, k1
+ stc sr, k0
+ and k1, k0 ! Get current FD-bit
+ mov.l 2f, k1
+ and k1, k2 ! Mask orignal SR value
+ or k0, k2 ! Inherit current FD-bit
!
- mov $k3, $k0 ! Calculate IMASK-bits
- shlr2 $k0
- and #0x3c, $k0
- cmp/eq #0x3c, $k0
+ mov k3, k0 ! Calculate IMASK-bits
+ shlr2 k0
+ and #0x3c, k0
+ cmp/eq #0x3c, k0
bt/s 7f
- shll2 $k0
- mov $g_imask, $k0
+ shll2 k0
+ mov g_imask, k0
!
-7: or $k0, $k2 ! Set the IMASK-bits
- ldc $k2, $ssr
+7: or k0, k2 ! Set the IMASK-bits
+ ldc k2, ssr
!
#if defined(__SH4__)
- shll $k2
- shll $k2
+ shll k2
+ shll k2
bf 9f ! user mode
/* Kernel to kernel transition */
- mov.l 1f, $k1
- tst $k1, $k3
+ mov.l 1f, k1
+ tst k1, k3
bf 9f ! it hadn't FPU
! Kernel to kernel and FPU was used
! There's the case we don't get FPU now
- stc $sr, $k2
- tst $k1, $k2
+ stc sr, k2
+ tst k1, k2
bt 8f
! We need to grab FPU here
- xor $k1, $k2
- ldc $k2, $sr ! Grab FPU
- mov.l __init_task_flags, $k1
- mov.l @$k1, $k2
- mov.l __PF_USEDFPU, $k0
- or $k0, $k2
- mov.l $k2, @$k1 ! Set init_task.flags |= PF_USEDFPU
+ xor k1, k2
+ ldc k2, sr ! Grab FPU
+ mov.l __init_task_flags, k1
+ mov.l @k1, k2
+ mov.l __PF_USEDFPU, k0
+ or k0, k2
+ mov.l k2, @k1 ! Set init_task.flags |= PF_USEDFPU
!
! Restoring FPU...
!
-8: mov.l 3f, $k1
- lds $k1, $fpscr
- fmov.s @$r15+, $fr0
- fmov.s @$r15+, $fr1
- fmov.s @$r15+, $fr2
- fmov.s @$r15+, $fr3
- fmov.s @$r15+, $fr4
- fmov.s @$r15+, $fr5
- fmov.s @$r15+, $fr6
- fmov.s @$r15+, $fr7
- fmov.s @$r15+, $fr8
- fmov.s @$r15+, $fr9
- fmov.s @$r15+, $fr10
- fmov.s @$r15+, $fr11
- fmov.s @$r15+, $fr12
- fmov.s @$r15+, $fr13
- fmov.s @$r15+, $fr14
- fmov.s @$r15+, $fr15
- lds.l @$r15+, $fpscr
- lds.l @$r15+, $fpul
+8: mov.l 3f, k1
+ lds k1, fpscr
+ fmov.s @r15+, fr0
+ fmov.s @r15+, fr1
+ fmov.s @r15+, fr2
+ fmov.s @r15+, fr3
+ fmov.s @r15+, fr4
+ fmov.s @r15+, fr5
+ fmov.s @r15+, fr6
+ fmov.s @r15+, fr7
+ fmov.s @r15+, fr8
+ fmov.s @r15+, fr9
+ fmov.s @r15+, fr10
+ fmov.s @r15+, fr11
+ fmov.s @r15+, fr12
+ fmov.s @r15+, fr13
+ fmov.s @r15+, fr14
+ fmov.s @r15+, fr15
+ lds.l @r15+, fpscr
+ lds.l @r15+, fpul
9:
#endif
- mov $k4, $r15
+ mov k4, r15
rte
nop
@@ -680,10 +702,10 @@ ENTRY(vbr_base)
!
.balign 256,0,256
general_exception:
- mov.l 1f, $k2
- mov.l 2f, $k3
+ mov.l 1f, k2
+ mov.l 2f, k3
bra handle_exception
- mov.l @$k2, $k2
+ mov.l @k2, k2
.align 2
2: .long SYMBOL_NAME(ret_from_exception)
1: .long EXPEVT
@@ -691,17 +713,17 @@ general_exception:
!
.balign 1024,0,1024
tlb_miss:
- mov.l 1f, $k2
- mov.l 4f, $k3
+ mov.l 1f, k2
+ mov.l 4f, k3
bra handle_exception
- mov.l @$k2, $k2
+ mov.l @k2, k2
!
.balign 512,0,512
interrupt:
- mov.l 2f, $k2
- mov.l 3f, $k3
+ mov.l 2f, k2
+ mov.l 3f, k3
bra handle_exception
- mov.l @$k2, $k2
+ mov.l @k2, k2
.align 2
1: .long EXPEVT
@@ -715,102 +737,102 @@ handle_exception:
! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
! save all registers onto stack.
!
- stc $ssr, $k0 ! from kernel space?
- shll $k0 ! Check MD bit (bit30) by shifting it into the T bit
- shll $k0
+ stc ssr, k0 ! from kernel space?
+ shll k0 ! Check MD bit (bit30) by shifting it into the T bit
+ shll k0
#if defined(__SH4__)
bf/s 8f ! it's from user to kernel transition
- mov $r15, $k0 ! save original stack to k0
+ mov r15, k0 ! save original stack to k0
/* It's a kernel to kernel transition. */
/* Is the FPU disabled? */
- mov.l 2f, $k1
- stc $ssr, $k0
- tst $k1, $k0
- mov.l 4f, $k1
+ mov.l 2f, k1
+ stc ssr, k0
+ tst k1, k0
+ mov.l 4f, k1
bf/s 9f ! FPU is not enabled, no need to save it
- mov $r15, $k0 ! save original stack to k0
+ mov r15, k0 ! save original stack to k0
! FPU is enabled, save it
! /* XXX: Need to save another bank of FPU if all FPU feature is used */
! /* Currently it's not the case for GCC (only udivsi3_i4, divsi3_i4) */
- sts.l $fpul, @-$r15
- sts.l $fpscr, @-$r15
- mov.l 6f, $k1
- lds $k1, $fpscr
- mov.l 3f, $k1
- fmov.s $fr15, @-$r15
- fmov.s $fr14, @-$r15
- fmov.s $fr13, @-$r15
- fmov.s $fr12, @-$r15
- fmov.s $fr11, @-$r15
- fmov.s $fr10, @-$r15
- fmov.s $fr9, @-$r15
- fmov.s $fr8, @-$r15
- fmov.s $fr7, @-$r15
- fmov.s $fr6, @-$r15
- fmov.s $fr5, @-$r15
- fmov.s $fr4, @-$r15
- fmov.s $fr3, @-$r15
- fmov.s $fr2, @-$r15
- fmov.s $fr1, @-$r15
+ sts.l fpul, @-r15
+ sts.l fpscr, @-r15
+ mov.l 6f, k1
+ lds k1, fpscr
+ mov.l 3f, k1
+ fmov.s fr15, @-r15
+ fmov.s fr14, @-r15
+ fmov.s fr13, @-r15
+ fmov.s fr12, @-r15
+ fmov.s fr11, @-r15
+ fmov.s fr10, @-r15
+ fmov.s fr9, @-r15
+ fmov.s fr8, @-r15
+ fmov.s fr7, @-r15
+ fmov.s fr6, @-r15
+ fmov.s fr5, @-r15
+ fmov.s fr4, @-r15
+ fmov.s fr3, @-r15
+ fmov.s fr2, @-r15
+ fmov.s fr1, @-r15
bra 9f
- fmov.s $fr0, @-$r15
+ fmov.s fr0, @-r15
#else
- mov.l 3f, $k1
+ mov.l 3f, k1
bt/s 9f ! it's a kernel to kernel transition, and skip the FPU save.
- mov $r15, $k0 ! save original stack to k0 anyway
+ mov r15, k0 ! save original stack to k0 anyway
#endif
8: /* User space to kernel */
- mov #0x20, $k1
- shll8 $k1 ! $k1 <= 8192 == THREAD_SIZE
- add $current, $k1
- mov $k1, $r15 ! change to kernel stack
+ mov #0x20, k1
+ shll8 k1 ! k1 <= 8192 == THREAD_SIZE
+ add current, k1
+ mov k1, r15 ! change to kernel stack
!
- mov.l 4f, $k1 ! let kernel release FPU
+ mov.l 4f, k1 ! let kernel release FPU
9: ! Save the user registers on the stack.
! At this point, k1 should have been set to the new SR value
- mov #-1, $k4
- mov.l $k4, @-$r15 ! syscall_nr (default: -1)
+ mov #-1, k4
+ mov.l k4, @-r15 ! syscall_nr (default: -1)
!
- sts.l $macl, @-$r15
- sts.l $mach, @-$r15
- stc.l $gbr, @-$r15
- stc.l $ssr, @-$r15
- sts.l $pr, @-$r15
- stc.l $spc, @-$r15
+ sts.l macl, @-r15
+ sts.l mach, @-r15
+ stc.l gbr, @-r15
+ stc.l ssr, @-r15
+ sts.l pr, @-r15
+ stc.l spc, @-r15
!
- lds $k3, $pr ! Set the return address to pr
+ lds k3, pr ! Set the return address to pr
!
- mov.l $k0, @-$r15 ! save orignal stack
- mov.l $r14, @-$r15
- mov.l $r13, @-$r15
- mov.l $r12, @-$r15
- mov.l $r11, @-$r15
- mov.l $r10, @-$r15
- mov.l $r9, @-$r15
- mov.l $r8, @-$r15
+ mov.l k0, @-r15 ! save orignal stack
+ mov.l r14, @-r15
+ mov.l r13, @-r15
+ mov.l r12, @-r15
+ mov.l r11, @-r15
+ mov.l r10, @-r15
+ mov.l r9, @-r15
+ mov.l r8, @-r15
!
- stc $sr, $r8 ! Back to normal register bank, and
- or $k1, $r8 ! Block all interrupts, may release FPU
- mov.l 5f, $k1
- and $k1, $r8 ! ...
- ldc $r8, $sr ! ...changed here.
+ stc sr, r8 ! Back to normal register bank, and
+ or k1, r8 ! Block all interrupts, may release FPU
+ mov.l 5f, k1
+ and k1, r8 ! ...
+ ldc r8, sr ! ...changed here.
!
- mov.l $r7, @-$r15
- mov.l $r6, @-$r15
- mov.l $r5, @-$r15
- mov.l $r4, @-$r15
- mov.l $r3, @-$r15
- mov.l $r2, @-$r15
- mov.l $r1, @-$r15
- mov.l $r0, @-$r15
+ mov.l r7, @-r15
+ mov.l r6, @-r15
+ mov.l r5, @-r15
+ mov.l r4, @-r15
+ mov.l r3, @-r15
+ mov.l r2, @-r15
+ mov.l r1, @-r15
+ mov.l r0, @-r15
! Then, dispatch to the handler, according to the exception code.
- stc $k_ex_code, $r8
- shlr2 $r8
- shlr $r8
- mov.l 1f, $r9
- add $r8, $r9
- mov.l @$r9, $r9
- jmp @$r9
+ stc k_ex_code, r8
+ shlr2 r8
+ shlr r8
+ mov.l 1f, r9
+ add r8, r9
+ mov.l @r9, r9
+ jmp @r9
nop
.align 2
1: .long SYMBOL_NAME(exception_handling_table)
@@ -833,8 +855,8 @@ ENTRY(exception_handling_table)
.long initial_page_write
.long tlb_protection_violation_load
.long tlb_protection_violation_store
- .long error ! address_error_load (filled by trap_init)
- .long error ! address_error_store (filled by trap_init)
+ .long address_error_load
+ .long address_error_store
#if defined(__SH4__)
.long SYMBOL_NAME(do_fpu_error)
#else
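
Besides dropping the $-prefixed register syntax, entry.S gains address_error_load/store stubs
that branch to do_address_error and wires them directly into exception_handling_table in place
of the generic error stub. handle_exception dispatches by shifting the EXPEVT code held in
k_ex_code right by three bits (the shlr2/shlr pair above): codes are multiples of 0x20, so >>5
gives the table index and the remaining *4 gives the byte offset. A rough worked example,
assuming the standard SH-3/SH-4 EXPEVT codes:

    /* Sketch only: how handle_exception reaches the new table entries. */
    unsigned long expevt = 0x0e0;          /* data address error (read), per the SH manuals */
    unsigned long index  = expevt >> 5;    /* 7  -> address_error_load                      */
    unsigned long offset = expevt >> 3;    /* 28 -> index * sizeof(long)                    */
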
diff --git a/arch/sh/kernel/fpu.c b/arch/sh/kernel/fpu.c
index cbeb60d31..9036b20c0 100644
--- a/arch/sh/kernel/fpu.c
+++ b/arch/sh/kernel/fpu.c
@@ -21,43 +21,43 @@
void
save_fpu(struct task_struct *tsk)
{
- asm volatile("sts.l $fpul, @-%0\n\t"
- "sts.l $fpscr, @-%0\n\t"
- "lds %1, $fpscr\n\t"
+ asm volatile("sts.l fpul, @-%0\n\t"
+ "sts.l fpscr, @-%0\n\t"
+ "lds %1, fpscr\n\t"
"frchg\n\t"
- "fmov.s $fr15, @-%0\n\t"
- "fmov.s $fr14, @-%0\n\t"
- "fmov.s $fr13, @-%0\n\t"
- "fmov.s $fr12, @-%0\n\t"
- "fmov.s $fr11, @-%0\n\t"
- "fmov.s $fr10, @-%0\n\t"
- "fmov.s $fr9, @-%0\n\t"
- "fmov.s $fr8, @-%0\n\t"
- "fmov.s $fr7, @-%0\n\t"
- "fmov.s $fr6, @-%0\n\t"
- "fmov.s $fr5, @-%0\n\t"
- "fmov.s $fr4, @-%0\n\t"
- "fmov.s $fr3, @-%0\n\t"
- "fmov.s $fr2, @-%0\n\t"
- "fmov.s $fr1, @-%0\n\t"
- "fmov.s $fr0, @-%0\n\t"
+ "fmov.s fr15, @-%0\n\t"
+ "fmov.s fr14, @-%0\n\t"
+ "fmov.s fr13, @-%0\n\t"
+ "fmov.s fr12, @-%0\n\t"
+ "fmov.s fr11, @-%0\n\t"
+ "fmov.s fr10, @-%0\n\t"
+ "fmov.s fr9, @-%0\n\t"
+ "fmov.s fr8, @-%0\n\t"
+ "fmov.s fr7, @-%0\n\t"
+ "fmov.s fr6, @-%0\n\t"
+ "fmov.s fr5, @-%0\n\t"
+ "fmov.s fr4, @-%0\n\t"
+ "fmov.s fr3, @-%0\n\t"
+ "fmov.s fr2, @-%0\n\t"
+ "fmov.s fr1, @-%0\n\t"
+ "fmov.s fr0, @-%0\n\t"
"frchg\n\t"
- "fmov.s $fr15, @-%0\n\t"
- "fmov.s $fr14, @-%0\n\t"
- "fmov.s $fr13, @-%0\n\t"
- "fmov.s $fr12, @-%0\n\t"
- "fmov.s $fr11, @-%0\n\t"
- "fmov.s $fr10, @-%0\n\t"
- "fmov.s $fr9, @-%0\n\t"
- "fmov.s $fr8, @-%0\n\t"
- "fmov.s $fr7, @-%0\n\t"
- "fmov.s $fr6, @-%0\n\t"
- "fmov.s $fr5, @-%0\n\t"
- "fmov.s $fr4, @-%0\n\t"
- "fmov.s $fr3, @-%0\n\t"
- "fmov.s $fr2, @-%0\n\t"
- "fmov.s $fr1, @-%0\n\t"
- "fmov.s $fr0, @-%0"
+ "fmov.s fr15, @-%0\n\t"
+ "fmov.s fr14, @-%0\n\t"
+ "fmov.s fr13, @-%0\n\t"
+ "fmov.s fr12, @-%0\n\t"
+ "fmov.s fr11, @-%0\n\t"
+ "fmov.s fr10, @-%0\n\t"
+ "fmov.s fr9, @-%0\n\t"
+ "fmov.s fr8, @-%0\n\t"
+ "fmov.s fr7, @-%0\n\t"
+ "fmov.s fr6, @-%0\n\t"
+ "fmov.s fr5, @-%0\n\t"
+ "fmov.s fr4, @-%0\n\t"
+ "fmov.s fr3, @-%0\n\t"
+ "fmov.s fr2, @-%0\n\t"
+ "fmov.s fr1, @-%0\n\t"
+ "fmov.s fr0, @-%0"
: /* no output */
: "r" ((char *)(&tsk->thread.fpu.hard.status)),
"r" (FPSCR_INIT)
@@ -70,43 +70,43 @@ save_fpu(struct task_struct *tsk)
static void
restore_fpu(struct task_struct *tsk)
{
- asm volatile("lds %1, $fpscr\n\t"
- "fmov.s @%0+, $fr0\n\t"
- "fmov.s @%0+, $fr1\n\t"
- "fmov.s @%0+, $fr2\n\t"
- "fmov.s @%0+, $fr3\n\t"
- "fmov.s @%0+, $fr4\n\t"
- "fmov.s @%0+, $fr5\n\t"
- "fmov.s @%0+, $fr6\n\t"
- "fmov.s @%0+, $fr7\n\t"
- "fmov.s @%0+, $fr8\n\t"
- "fmov.s @%0+, $fr9\n\t"
- "fmov.s @%0+, $fr10\n\t"
- "fmov.s @%0+, $fr11\n\t"
- "fmov.s @%0+, $fr12\n\t"
- "fmov.s @%0+, $fr13\n\t"
- "fmov.s @%0+, $fr14\n\t"
- "fmov.s @%0+, $fr15\n\t"
+ asm volatile("lds %1, fpscr\n\t"
+ "fmov.s @%0+, fr0\n\t"
+ "fmov.s @%0+, fr1\n\t"
+ "fmov.s @%0+, fr2\n\t"
+ "fmov.s @%0+, fr3\n\t"
+ "fmov.s @%0+, fr4\n\t"
+ "fmov.s @%0+, fr5\n\t"
+ "fmov.s @%0+, fr6\n\t"
+ "fmov.s @%0+, fr7\n\t"
+ "fmov.s @%0+, fr8\n\t"
+ "fmov.s @%0+, fr9\n\t"
+ "fmov.s @%0+, fr10\n\t"
+ "fmov.s @%0+, fr11\n\t"
+ "fmov.s @%0+, fr12\n\t"
+ "fmov.s @%0+, fr13\n\t"
+ "fmov.s @%0+, fr14\n\t"
+ "fmov.s @%0+, fr15\n\t"
"frchg\n\t"
- "fmov.s @%0+, $fr0\n\t"
- "fmov.s @%0+, $fr1\n\t"
- "fmov.s @%0+, $fr2\n\t"
- "fmov.s @%0+, $fr3\n\t"
- "fmov.s @%0+, $fr4\n\t"
- "fmov.s @%0+, $fr5\n\t"
- "fmov.s @%0+, $fr6\n\t"
- "fmov.s @%0+, $fr7\n\t"
- "fmov.s @%0+, $fr8\n\t"
- "fmov.s @%0+, $fr9\n\t"
- "fmov.s @%0+, $fr10\n\t"
- "fmov.s @%0+, $fr11\n\t"
- "fmov.s @%0+, $fr12\n\t"
- "fmov.s @%0+, $fr13\n\t"
- "fmov.s @%0+, $fr14\n\t"
- "fmov.s @%0+, $fr15\n\t"
+ "fmov.s @%0+, fr0\n\t"
+ "fmov.s @%0+, fr1\n\t"
+ "fmov.s @%0+, fr2\n\t"
+ "fmov.s @%0+, fr3\n\t"
+ "fmov.s @%0+, fr4\n\t"
+ "fmov.s @%0+, fr5\n\t"
+ "fmov.s @%0+, fr6\n\t"
+ "fmov.s @%0+, fr7\n\t"
+ "fmov.s @%0+, fr8\n\t"
+ "fmov.s @%0+, fr9\n\t"
+ "fmov.s @%0+, fr10\n\t"
+ "fmov.s @%0+, fr11\n\t"
+ "fmov.s @%0+, fr12\n\t"
+ "fmov.s @%0+, fr13\n\t"
+ "fmov.s @%0+, fr14\n\t"
+ "fmov.s @%0+, fr15\n\t"
"frchg\n\t"
- "lds.l @%0+, $fpscr\n\t"
- "lds.l @%0+, $fpul\n\t"
+ "lds.l @%0+, fpscr\n\t"
+ "lds.l @%0+, fpul\n\t"
: /* no output */
: "r" (&tsk->thread.fpu), "r" (FPSCR_INIT)
: "memory");
@@ -120,41 +120,41 @@ restore_fpu(struct task_struct *tsk)
void fpu_init(void)
{
- asm volatile("lds %0, $fpul\n\t"
- "lds %1, $fpscr\n\t"
- "fsts $fpul, $fr0\n\t"
- "fsts $fpul, $fr1\n\t"
- "fsts $fpul, $fr2\n\t"
- "fsts $fpul, $fr3\n\t"
- "fsts $fpul, $fr4\n\t"
- "fsts $fpul, $fr5\n\t"
- "fsts $fpul, $fr6\n\t"
- "fsts $fpul, $fr7\n\t"
- "fsts $fpul, $fr8\n\t"
- "fsts $fpul, $fr9\n\t"
- "fsts $fpul, $fr10\n\t"
- "fsts $fpul, $fr11\n\t"
- "fsts $fpul, $fr12\n\t"
- "fsts $fpul, $fr13\n\t"
- "fsts $fpul, $fr14\n\t"
- "fsts $fpul, $fr15\n\t"
+ asm volatile("lds %0, fpul\n\t"
+ "lds %1, fpscr\n\t"
+ "fsts fpul, fr0\n\t"
+ "fsts fpul, fr1\n\t"
+ "fsts fpul, fr2\n\t"
+ "fsts fpul, fr3\n\t"
+ "fsts fpul, fr4\n\t"
+ "fsts fpul, fr5\n\t"
+ "fsts fpul, fr6\n\t"
+ "fsts fpul, fr7\n\t"
+ "fsts fpul, fr8\n\t"
+ "fsts fpul, fr9\n\t"
+ "fsts fpul, fr10\n\t"
+ "fsts fpul, fr11\n\t"
+ "fsts fpul, fr12\n\t"
+ "fsts fpul, fr13\n\t"
+ "fsts fpul, fr14\n\t"
+ "fsts fpul, fr15\n\t"
"frchg\n\t"
- "fsts $fpul, $fr0\n\t"
- "fsts $fpul, $fr1\n\t"
- "fsts $fpul, $fr2\n\t"
- "fsts $fpul, $fr3\n\t"
- "fsts $fpul, $fr4\n\t"
- "fsts $fpul, $fr5\n\t"
- "fsts $fpul, $fr6\n\t"
- "fsts $fpul, $fr7\n\t"
- "fsts $fpul, $fr8\n\t"
- "fsts $fpul, $fr9\n\t"
- "fsts $fpul, $fr10\n\t"
- "fsts $fpul, $fr11\n\t"
- "fsts $fpul, $fr12\n\t"
- "fsts $fpul, $fr13\n\t"
- "fsts $fpul, $fr14\n\t"
- "fsts $fpul, $fr15\n\t"
+ "fsts fpul, fr0\n\t"
+ "fsts fpul, fr1\n\t"
+ "fsts fpul, fr2\n\t"
+ "fsts fpul, fr3\n\t"
+ "fsts fpul, fr4\n\t"
+ "fsts fpul, fr5\n\t"
+ "fsts fpul, fr6\n\t"
+ "fsts fpul, fr7\n\t"
+ "fsts fpul, fr8\n\t"
+ "fsts fpul, fr9\n\t"
+ "fsts fpul, fr10\n\t"
+ "fsts fpul, fr11\n\t"
+ "fsts fpul, fr12\n\t"
+ "fsts fpul, fr13\n\t"
+ "fsts fpul, fr14\n\t"
+ "fsts fpul, fr15\n\t"
"frchg"
: /* no output */
: "r" (0), "r" (FPSCR_INIT));
@@ -192,9 +192,9 @@ do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
*
* There's race condition in __cli:
*
- * (1) $SR --> register
+ * (1) SR --> register
* (2) Set IMASK of register
- * (3) $SR <-- register
+ * (3) SR <-- register
*
* Between (1) and (2), or (2) and (3) getting
* interrupt, and interrupt handler (or
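
The hunk above is cut off in the middle of the comment, but the point it makes is why the
interrupt mask is also tracked in r6_bank: __cli is a non-atomic read-modify-write of SR, so an
interrupt taken between the read and the write can have its own SR update silently lost. A
schematic sketch of that lost update (read_sr/write_sr are placeholders, not real kernel
helpers):

    /* Schematic only: the lost-update race the comment describes. */
    unsigned long sr;

    sr = read_sr();       /* (1) SR --> register                            */
                          /*     <-- interrupt taken here; the handler      */
                          /*         changes the live interrupt state       */
    sr |= 0x000000f0;     /* (2) set IMASK in the now-stale copy            */
    write_sr(sr);         /* (3) SR <-- register: the handler's change      */
                          /*         is overwritten by the stale value      */
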
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
index f1ac6fd17..73c13dc7d 100644
--- a/arch/sh/kernel/head.S
+++ b/arch/sh/kernel/head.S
@@ -37,40 +37,40 @@ ENTRY(empty_zero_page)
*/
ENTRY(_stext)
! Initialize Status Register
- mov.l 1f, $r0 ! MD=1, RB=0, BL=0, IMASK=0xF
- ldc $r0, $sr
+ mov.l 1f, r0 ! MD=1, RB=0, BL=0, IMASK=0xF
+ ldc r0, sr
! Initialize global interrupt mask
- mov #0, $r0
- ldc $r0, $r6_bank
+ mov #0, r0
+ ldc r0, r6_bank
!
- mov.l 2f, $r0
- mov $r0, $r15 ! Set initial r15 (stack pointer)
- mov #0x20, $r1 !
- shll8 $r1 ! $r1 = 8192
- sub $r1, $r0 !
- ldc $r0, $r7_bank ! ... and init_task
+ mov.l 2f, r0
+ mov r0, r15 ! Set initial r15 (stack pointer)
+ mov #0x20, r1 !
+ shll8 r1 ! r1 = 8192
+ sub r1, r0 !
+ ldc r0, r7_bank ! ... and init_task
!
#if defined(__SH4__)
! Initialize fpu
- mov.l 7f, $r0
- jsr @$r0
+ mov.l 7f, r0
+ jsr @r0
nop
#endif
! Enable cache
- mov.l 6f, $r0
- jsr @$r0
+ mov.l 6f, r0
+ jsr @r0
nop
! Clear BSS area
- mov.l 3f, $r1
- add #4, $r1
- mov.l 4f, $r2
- mov #0, $r0
-9: cmp/hs $r2, $r1
+ mov.l 3f, r1
+ add #4, r1
+ mov.l 4f, r2
+ mov #0, r0
+9: cmp/hs r2, r1
bf/s 9b ! while (r1 < r2)
- mov.l $r0,@-$r2
+ mov.l r0,@-r2
! Start kernel
- mov.l 5f, $r0
- jmp @$r0
+ mov.l 5f, r0
+ jmp @r0
nop
.balign 4
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 6451c4c9e..200148320 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -235,7 +235,7 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
unsigned int status;
/* Get IRQ number */
- asm volatile("stc $r2_bank, %0\n\t"
+ asm volatile("stc r2_bank, %0\n\t"
"shlr2 %0\n\t"
"shlr2 %0\n\t"
"shlr %0\n\t"
diff --git a/arch/sh/kernel/irq_imask.c b/arch/sh/kernel/irq_imask.c
index 27d91b372..d0b3aea9f 100644
--- a/arch/sh/kernel/irq_imask.c
+++ b/arch/sh/kernel/irq_imask.c
@@ -59,16 +59,16 @@ void static inline set_interrupt_registers(int ip)
{
unsigned long __dummy;
- asm volatile("ldc %2, $r6_bank\n\t"
- "stc $sr, %0\n\t"
+ asm volatile("ldc %2, r6_bank\n\t"
+ "stc sr, %0\n\t"
"and #0xf0, %0\n\t"
"shlr2 %0\n\t"
"cmp/eq #0x3c, %0\n\t"
"bt/s 1f ! CLI-ed\n\t"
- " stc $sr, %0\n\t"
+ " stc sr, %0\n\t"
"and %1, %0\n\t"
"or %2, %0\n\t"
- "ldc %0, $sr\n"
+ "ldc %0, sr\n"
"1:"
: "=&z" (__dummy)
: "r" (~0xf0), "r" (ip << 4)
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index c7511093b..1ce22f0fd 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -14,6 +14,8 @@
#define __KERNEL_SYSCALLS__
#include <stdarg.h>
+#include <linux/config.h>
+
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -31,6 +33,7 @@
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -39,8 +42,9 @@
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/elf.h>
-
-#include <linux/irq.h>
+#ifdef CONFIG_SH_STANDARD_BIOS
+#include <asm/sh_bios.h>
+#endif
static int hlt_counter=0;
@@ -79,11 +83,17 @@ void cpu_idle(void *unused)
}
void machine_restart(char * __unused)
-{ /* Need to set MMU_TTB?? */
+{
+#ifdef CONFIG_SH_STANDARD_BIOS
+ sh_bios_shutdown(1);
+#endif
}
void machine_halt(void)
{
+#ifdef CONFIG_SH_STANDARD_BIOS
+ sh_bios_shutdown(0);
+#endif
}
void machine_power_off(void)
@@ -93,7 +103,7 @@ void machine_power_off(void)
void show_regs(struct pt_regs * regs)
{
printk("\n");
- printk("PC : %08lx SP : %08lx SR : %08lx TEA : %08lx\n",
+ printk("PC : %08lx SP : %08lx SR : %08lx TEA : %08x\n",
regs->pc, regs->regs[15], regs->sr, ctrl_inl(MMU_TEA));
printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
regs->regs[0],regs->regs[1],
@@ -144,12 +154,12 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
register unsigned long __sc9 __asm__ ("r9") = (long) fn;
__asm__("trapa #0x12\n\t" /* Linux/SH system call */
- "tst #0xff, $r0\n\t" /* child or parent? */
+ "tst #0xff, r0\n\t" /* child or parent? */
"bf 1f\n\t" /* parent - jump */
- "jsr @$r9\n\t" /* call fn */
- " mov $r8, $r4\n\t" /* push argument */
- "mov $r0, $r4\n\t" /* return value to arg of exit */
- "mov %1, $r3\n\t" /* exit */
+ "jsr @r9\n\t" /* call fn */
+ " mov r8, r4\n\t" /* push argument */
+ "mov r0, r4\n\t" /* return value to arg of exit */
+ "mov %1, r3\n\t" /* exit */
"trapa #0x11\n"
"1:"
: "=z" (__sc0)
@@ -285,7 +295,7 @@ void __switch_to(struct task_struct *prev, struct task_struct *next)
* Restore the kernel mode register
* k7 (r7_bank1)
*/
- asm volatile("ldc %0, $r7_bank"
+ asm volatile("ldc %0, r7_bank"
: /* no output */
:"r" (next));
}
@@ -376,7 +386,7 @@ unsigned long get_wchan(struct task_struct *p)
asmlinkage void print_syscall(int x)
{
unsigned long flags, sr;
- asm("stc $sr, %0": "=r" (sr));
+ asm("stc sr, %0": "=r" (sr));
save_and_cli(flags);
printk("%c: %c %c, %c: SYSCALL\n", (x&63)+32,
(current->flags&PF_USEDFPU)?'C':' ',
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 8dad245df..b6fb1e9a8 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -503,8 +503,8 @@ int get_cpuinfo(char *buffer)
"cache size\t: 8K-byte/16K-byte\n");
#endif
p += sprintf(p, "bogomips\t: %lu.%02lu\n\n",
- (loops_per_jiffy+2500)/(500000/HZ),
- ((loops_per_jiffy+2500)/(5000/HZ)) % 100);
+ loops_per_jiffy/(500000/HZ),
+ (loops_per_jiffy/(5000/HZ)) % 100);
p += sprintf(p, "Machine: %s\n", sh_mv.mv_name);
#define PRINT_CLOCK(name, value) \
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
index 81a56b960..f72f71b98 100644
--- a/arch/sh/kernel/sh_bios.c
+++ b/arch/sh/kernel/sh_bios.c
@@ -1,4 +1,4 @@
-/* $Id: sh_bios.c,v 1.3 2000/09/30 03:43:30 gniibe Exp $
+/* $Id: sh_bios.c,v 1.5 2001/01/08 08:42:32 gniibe Exp $
*
* linux/arch/sh/kernel/sh_bios.c
* C interface for trapping into the standard LinuxSH BIOS.
@@ -7,13 +7,12 @@
*
*/
-#include <linux/config.h>
#include <asm/sh_bios.h>
-#ifdef CONFIG_SH_STANDARD_BIOS
-
#define BIOS_CALL_CONSOLE_WRITE 0
-#define BIOS_CALL_READ_BLOCK 1 /* not implemented */
+#define BIOS_CALL_READ_BLOCK 1
+#define BIOS_CALL_ETH_NODE_ADDR 10
+#define BIOS_CALL_SHUTDOWN 11
#define BIOS_CALL_CHAR_OUT 0x1f /* TODO: hack */
#define BIOS_CALL_GDB_GET_MODE_PTR 0xfe
#define BIOS_CALL_GDB_DETACH 0xff
@@ -66,5 +65,12 @@ void sh_bios_gdb_detach(void)
sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0);
}
-#endif
+void sh_bios_get_node_addr (unsigned char *node_addr)
+{
+ sh_bios_call(BIOS_CALL_ETH_NODE_ADDR, 0, (long)node_addr, 0, 0);
+}
+void sh_bios_shutdown(unsigned int how)
+{
+ sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0);
+}
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index 2b0298699..2bdcd75ea 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -64,10 +64,11 @@ EXPORT_SYMBOL(get_vm_area);
#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL_NOVERS(name)
/* These symbols are generated by the compiler itself */
+DECLARE_EXPORT(__udivsi3);
+DECLARE_EXPORT(__sdivsi3);
+
#ifdef __SH4__
-DECLARE_EXPORT(__udivsi3_i4);
-DECLARE_EXPORT(__sdivsi3_i4);
DECLARE_EXPORT(__movstr_i4_even);
DECLARE_EXPORT(__movstr_i4_odd);
DECLARE_EXPORT(__ashrdi3);
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index fe2f1b319..9095c5c79 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -29,8 +29,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
- int options, unsigned long *ru);
asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
@@ -433,7 +431,7 @@ static void setup_frame(int sig, struct k_sigaction *ka,
current->comm, current->pid, frame, regs->pc, regs->pr);
#endif
- flush_icache_range(regs->pr, regs->pr+4);
+ flush_cache_sigtramp(regs->pr);
return;
give_sigsegv:
@@ -507,7 +505,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
current->comm, current->pid, frame, regs->pc, regs->pr);
#endif
- flush_icache_range(regs->pr, regs->pr+4);
+ flush_cache_sigtramp(regs->pr);
return;
give_sigsegv:
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 4aeaf8efa..f929fba2a 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -2,13 +2,16 @@
*
* linux/arch/sh/traps.c
*
- * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2000 Philipp Rumpf
+ * Copyright (C) 2000 David Howells
*/
/*
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'entry.S'.
*/
+#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -35,7 +38,7 @@ asmlinkage void do_##name(unsigned long r4, unsigned long r5, \
{ \
unsigned long error_code; \
\
- asm volatile("stc $r2_bank, %0": "=r" (error_code)); \
+ asm volatile("stc r2_bank, %0": "=r" (error_code)); \
sti(); \
tsk->thread.error_code = error_code; \
tsk->thread.trap_no = trapnr; \
@@ -69,7 +72,16 @@ static inline void die_if_kernel(const char * str, struct pt_regs * regs, long e
die(str, regs, err);
}
-static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
+static int handle_unaligned_notify_count = 10;
+
+/*
+ * try and fix up kernelspace address errors
+ * - userspace errors just cause EFAULT to be returned, resulting in SEGV
+ * - kernel/userspace interfaces cause a jump to an appropriate handler
+ * - other kernel errors are bad
+ * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
+ */
+static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
if (!user_mode(regs))
{
@@ -77,14 +89,407 @@ static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
fixup = search_exception_table(regs->pc);
if (fixup) {
regs->pc = fixup;
- return;
+ return 0;
}
die(str, regs, err);
}
+ return -EFAULT;
+}
+
+/*
+ * handle an instruction that does an unaligned memory access by emulating the
+ * desired behaviour
+ * - note that PC _may not_ point to the faulting instruction
+ * (if that instruction is in a branch delay slot)
+ * - return 0 if emulation okay, -EFAULT on existential error
+ */
+static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
+{
+ int ret, index, count;
+ unsigned long *rm, *rn;
+ unsigned char *src, *dst;
+
+ index = (instruction>>8)&15; /* 0x0F00 */
+ rn = &regs->regs[index];
+
+ index = (instruction>>4)&15; /* 0x00F0 */
+ rm = &regs->regs[index];
+
+ count = 1<<(instruction&3);
+
+ ret = -EFAULT;
+ switch (instruction>>12) {
+ case 0: /* mov.[bwl] to/from memory via r0+rn */
+ if (instruction & 8) {
+ /* from memory */
+ src = (unsigned char*) *rm;
+ src += regs->regs[0];
+ dst = (unsigned char*) rn;
+ *(unsigned long*)dst = 0;
+
+#ifdef __LITTLE_ENDIAN__
+ if (copy_from_user(dst, src, count))
+ goto fetch_fault;
+
+ if ((count == 2) && dst[1] & 0x80) {
+ dst[2] = 0xff;
+ dst[3] = 0xff;
+ }
+#else
+ dst += 4-count;
+
+ if (__copy_user(dst, src, count))
+ goto fetch_fault;
+
+ if ((count == 2) && dst[2] & 0x80) {
+ dst[0] = 0xff;
+ dst[1] = 0xff;
+ }
+#endif
+ } else {
+ /* to memory */
+ src = (unsigned char*) rm;
+#if !defined(__LITTLE_ENDIAN__)
+ src += 4-count;
+#endif
+ dst = (unsigned char*) *rn;
+ dst += regs->regs[0];
+
+ if (copy_to_user(dst, src, count))
+ goto fetch_fault;
+ }
+ ret = 0;
+ break;
+
+ case 1: /* mov.l Rm,@(disp,Rn) */
+ src = (unsigned char*) rm;
+ dst = (unsigned char*) *rn;
+ dst += (instruction&0x000F)<<2;
+
+ if (copy_to_user(dst,src,4))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
+ if (instruction & 4)
+ *rn -= count;
+ src = (unsigned char*) rm;
+ dst = (unsigned char*) *rn;
+#if !defined(__LITTLE_ENDIAN__)
+ src += 4-count;
+#endif
+ if (copy_to_user(dst, src, count))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 5: /* mov.l @(disp,Rm),Rn */
+ src = (unsigned char*) *rm;
+ src += (instruction&0x000F)<<2;
+ dst = (unsigned char*) rn;
+ *(unsigned long*)dst = 0;
+
+ if (copy_from_user(dst,src,4))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 6: /* mov.[bwl] from memory, possibly with post-increment */
+ src = (unsigned char*) *rm;
+ if (instruction & 4)
+ *rm += count;
+ dst = (unsigned char*) rn;
+ *(unsigned long*)dst = 0;
+
+#ifdef __LITTLE_ENDIAN__
+ if (copy_from_user(dst, src, count))
+ goto fetch_fault;
+
+ if ((count == 2) && dst[1] & 0x80) {
+ dst[2] = 0xff;
+ dst[3] = 0xff;
+ }
+#else
+ dst += 4-count;
+
+ if (copy_from_user(dst, src, count))
+ goto fetch_fault;
+
+ if ((count == 2) && dst[2] & 0x80) {
+ dst[0] = 0xff;
+ dst[1] = 0xff;
+ }
+#endif
+ ret = 0;
+ break;
+
+ case 8:
+ switch ((instruction&0xFF00)>>8) {
+ case 0x81: /* mov.w R0,@(disp,Rn) */
+ src = (unsigned char*) &regs->regs[0];
+#if !defined(__LITTLE_ENDIAN__)
+ src += 2;
+#endif
+ dst = (unsigned char*) *rm; /* called Rn in the spec */
+ dst += (instruction&0x000F)<<1;
+
+ if (copy_to_user(dst, src, 2))
+ goto fetch_fault;
+ ret = 0;
+ break;
+
+ case 0x85: /* mov.w @(disp,Rm),R0 */
+ src = (unsigned char*) *rm;
+ src += (instruction&0x000F)<<1;
+ dst = (unsigned char*) &regs->regs[0];
+ *(unsigned long*)dst = 0;
+
+#if !defined(__LITTLE_ENDIAN__)
+ dst += 2;
+#endif
+
+ if (copy_from_user(dst, src, 2))
+ goto fetch_fault;
+
+#ifdef __LITTLE_ENDIAN__
+ if (dst[1] & 0x80) {
+ dst[2] = 0xff;
+ dst[3] = 0xff;
+ }
+#else
+ if (dst[2] & 0x80) {
+ dst[0] = 0xff;
+ dst[1] = 0xff;
+ }
+#endif
+ ret = 0;
+ break;
+ }
+ break;
+ }
+ return ret;
+
+ fetch_fault:
+ /* Argh. Address not only misaligned but also non-existent.
+ * Raise an EFAULT and see if it's trapped
+ */
+ return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
+}
+
+/*
+ * emulate the instruction in the delay slot
+ * - fetches the instruction from PC+2
+ */
+static inline int handle_unaligned_delayslot(struct pt_regs *regs)
+{
+ u16 instruction;
+
+ if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
+ /* the instruction-fetch faulted */
+ if (user_mode(regs))
+ return -EFAULT;
+
+ /* kernel */
+ die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0);
+ }
+
+ return handle_unaligned_ins(instruction,regs);
+}
+
+/*
+ * handle an instruction that does an unaligned memory access
+ * - have to be careful of branch delay-slot instructions that fault
+ * - if the branch would be taken PC points to the branch
+ * - if the branch would not be taken, PC points to delay-slot
+ * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
+ */
+static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
+{
+ u_int rm;
+ int ret, index;
+
+ index = (instruction>>8)&15; /* 0x0F00 */
+ rm = regs->regs[index];
+
+ /* shout about the first ten userspace fixups */
+ if (user_mode(regs) && handle_unaligned_notify_count>0) {
+ handle_unaligned_notify_count--;
+
+ printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ current->comm,current->pid,(u16*)regs->pc,instruction);
+ }
+
+ ret = -EFAULT;
+ switch (instruction&0xF000) {
+ case 0x0000:
+ if (instruction==0x000B) {
+ /* rts */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc = regs->pr;
+ }
+ else if ((instruction&0x00FF)==0x0023) {
+ /* braf @Rm */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc += rm + 4;
+ }
+ else if ((instruction&0x00FF)==0x0003) {
+ /* bsrf @Rm */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0) {
+ regs->pr = regs->pc + 4;
+ regs->pc += rm + 4;
+ }
+ }
+ else {
+ /* mov.[bwl] to/from memory via r0+rn */
+ goto simple;
+ }
+ break;
+
+ case 0x1000: /* mov.l Rm,@(disp,Rn) */
+ goto simple;
+
+ case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
+ goto simple;
+
+ case 0x4000:
+ if ((instruction&0x00FF)==0x002B) {
+ /* jmp @Rm */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc = rm;
+ }
+ else if ((instruction&0x00FF)==0x000B) {
+ /* jsr @Rm */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0) {
+ regs->pr = regs->pc + 4;
+ regs->pc = rm;
+ }
+ }
+ else {
+ /* mov.[bwl] to/from memory via r0+rn */
+ goto simple;
+ }
+ break;
+
+ case 0x5000: /* mov.l @(disp,Rm),Rn */
+ goto simple;
+
+ case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
+ goto simple;
+
+ case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
+ switch (instruction&0x0F00) {
+ case 0x0100: /* mov.w R0,@(disp,Rm) */
+ goto simple;
+ case 0x0500: /* mov.w @(disp,Rm),R0 */
+ goto simple;
+ case 0x0B00: /* bf lab - no delayslot*/
+ break;
+ case 0x0F00: /* bf/s lab */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc += (instruction&0x00FF)*2 + 4;
+ break;
+ case 0x0900: /* bt lab - no delayslot */
+ break;
+ case 0x0D00: /* bt/s lab */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc += (instruction&0x00FF)*2 + 4;
+ break;
+ }
+ break;
+
+ case 0xA000: /* bra label */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0)
+ regs->pc += (instruction&0x0FFF)*2 + 4;
+ break;
+
+ case 0xB000: /* bsr label */
+ ret = handle_unaligned_delayslot(regs);
+ if (ret==0) {
+ regs->pr = regs->pc + 4;
+ regs->pc += (instruction&0x0FFF)*2 + 4;
+ }
+ break;
+ }
+ return ret;
+
+ /* handle non-delay-slot instruction */
+ simple:
+ ret = handle_unaligned_ins(instruction,regs);
+ if (ret==0)
+ regs->pc += 2;
+ return ret;
+}
+
+/*
+ * Handle various address error exceptions
+ */
+asmlinkage void do_address_error(struct pt_regs *regs,
+ unsigned long writeaccess,
+ unsigned long address)
+{
+ unsigned long error_code;
+ mm_segment_t oldfs;
+ u16 instruction;
+ int tmp;
+
+ asm volatile("stc r2_bank,%0": "=r" (error_code));
+
+ oldfs = get_fs();
+
+ if (user_mode(regs)) {
+ sti();
+ current->thread.error_code = error_code;
+ current->thread.trap_no = (writeaccess) ? 8 : 7;
+
+ /* bad PC is not something we can fix */
+ if (regs->pc & 1)
+ goto uspace_segv;
+
+ set_fs(USER_DS);
+ if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
+ /* Argh. Fault on the instruction itself.
+ This should never happen non-SMP
+ */
+ set_fs(oldfs);
+ goto uspace_segv;
+ }
+
+ tmp = handle_unaligned_access(instruction, regs);
+ set_fs(oldfs);
+
+ if (tmp==0)
+ return; /* sorted */
+
+ uspace_segv:
+ printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
+ force_sig(SIGSEGV, current);
+ } else {
+ if (regs->pc & 1)
+ die("unaligned program counter", regs, error_code);
+
+ set_fs(KERNEL_DS);
+ if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
+ /* Argh. Fault on the instruction itself.
+ This should never happen non-SMP
+ */
+ set_fs(oldfs);
+ die("insn faulting in do_address_error", regs, 0);
+ }
+
+ handle_unaligned_access(instruction, regs);
+ set_fs(oldfs);
+ }
}
-DO_ERROR( 7, SIGSEGV, "address error (load)", address_error_load, current)
-DO_ERROR( 8, SIGSEGV, "address error (store)", address_error_store, current)
DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
@@ -93,25 +498,42 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
struct pt_regs regs)
{
long ex;
- asm volatile("stc $r2_bank, %0" : "=r" (ex));
+ asm volatile("stc r2_bank, %0" : "=r" (ex));
die_if_kernel("exception", &regs, ex);
}
+#if defined(CONFIG_DEBUG_KERNEL_WITH_GDB_STUB) || defined(CONFIG_SH_STANDARD_BIOS)
+void *gdb_vbr_vector;
+#endif
+
void __init trap_init(void)
{
extern void *vbr_base;
extern void *exception_handling_table[14];
- exception_handling_table[7] = (void *)do_address_error_load;
- exception_handling_table[8] = (void *)do_address_error_store;
exception_handling_table[12] = (void *)do_reserved_inst;
exception_handling_table[13] = (void *)do_illegal_slot_inst;
+#if defined(CONFIG_DEBUG_KERNEL_WITH_GDB_STUB) || defined(CONFIG_SH_STANDARD_BIOS)
+ /*
+ * Read the old value of the VBR register to initialise
+ * the vector through which debug and BIOS traps are
+ * delegated by the Linux trap handler.
+ */
+ {
+ register unsigned long vbr;
+ asm volatile("stc vbr, %0" : "=r" (vbr));
+ gdb_vbr_vector = (void *)(vbr + 0x100);
+ printk("Setting GDB trap vector to 0x%08lx\n",
+ (unsigned long)gdb_vbr_vector);
+ }
+#endif
+
/* NOTE: The VBR value should be at P1
(or P2, virtural "fixed" address space).
It's definitely should not in physical address. */
- asm volatile("ldc %0, $vbr"
+ asm volatile("ldc %0, vbr"
: /* no output */
: "r" (&vbr_base)
: "memory");
@@ -123,8 +545,8 @@ void dump_stack(void)
unsigned long *end;
unsigned long *p;
- asm("mov $r15, %0" : "=r" (start));
- asm("stc $r7_bank, %0" : "=r" (end));
+ asm("mov r15, %0" : "=r" (start));
+ asm("stc r7_bank, %0" : "=r" (end));
end += 8192/4;
printk("%08lx:%08lx\n", (unsigned long)start, (unsigned long)end);
diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S
index c7e96a394..9d8e0f476 100644
--- a/arch/sh/lib/checksum.S
+++ b/arch/sh/lib/checksum.S
@@ -49,99 +49,99 @@ ENTRY(csum_partial)
* Fortunately, it is easy to convert 2-byte alignment to 4-byte
* alignment for the unrolled loop.
*/
- mov $r5, $r1
- mov $r4, $r0
- tst #2, $r0 ! Check alignment.
+ mov r5, r1
+ mov r4, r0
+ tst #2, r0 ! Check alignment.
bt 2f ! Jump if alignment is ok.
!
- add #-2, $r5 ! Alignment uses up two bytes.
- cmp/pz $r5 !
+ add #-2, r5 ! Alignment uses up two bytes.
+ cmp/pz r5 !
bt/s 1f ! Jump if we had at least two bytes.
clrt
bra 6f
- add #2, $r5 ! $r5 was < 2. Deal with it.
+ add #2, r5 ! r5 was < 2. Deal with it.
1:
- mov.w @$r4+, $r0
- extu.w $r0, $r0
- addc $r0, $r6
+ mov.w @r4+, r0
+ extu.w r0, r0
+ addc r0, r6
bf 2f
- add #1, $r6
+ add #1, r6
2:
- mov #-5, $r0
- shld $r0, $r5
- tst $r5, $r5
+ mov #-5, r0
+ shld r0, r5
+ tst r5, r5
bt/s 4f ! if it's =0, go to 4f
clrt
.align 2
3:
- mov.l @$r4+, $r0
- mov.l @$r4+, $r2
- mov.l @$r4+, $r3
- addc $r0, $r6
- mov.l @$r4+, $r0
- addc $r2, $r6
- mov.l @$r4+, $r2
- addc $r3, $r6
- mov.l @$r4+, $r3
- addc $r0, $r6
- mov.l @$r4+, $r0
- addc $r2, $r6
- mov.l @$r4+, $r2
- addc $r3, $r6
- addc $r0, $r6
- addc $r2, $r6
- movt $r0
- dt $r5
+ mov.l @r4+, r0
+ mov.l @r4+, r2
+ mov.l @r4+, r3
+ addc r0, r6
+ mov.l @r4+, r0
+ addc r2, r6
+ mov.l @r4+, r2
+ addc r3, r6
+ mov.l @r4+, r3
+ addc r0, r6
+ mov.l @r4+, r0
+ addc r2, r6
+ mov.l @r4+, r2
+ addc r3, r6
+ addc r0, r6
+ addc r2, r6
+ movt r0
+ dt r5
bf/s 3b
- cmp/eq #1, $r0
- ! here, we know $r5==0
- addc $r5, $r6 ! add carry to $r6
+ cmp/eq #1, r0
+ ! here, we know r5==0
+ addc r5, r6 ! add carry to r6
4:
- mov $r1, $r0
- and #0x1c, $r0
- tst $r0, $r0
+ mov r1, r0
+ and #0x1c, r0
+ tst r0, r0
bt/s 6f
- mov $r0, $r5
- shlr2 $r5
- mov #0, $r2
+ mov r0, r5
+ shlr2 r5
+ mov #0, r2
5:
- addc $r2, $r6
- mov.l @$r4+, $r2
- movt $r0
- dt $r5
+ addc r2, r6
+ mov.l @r4+, r2
+ movt r0
+ dt r5
bf/s 5b
- cmp/eq #1, $r0
- addc $r2, $r6
- addc $r5, $r6 ! $r5==0 here, so it means add carry-bit
+ cmp/eq #1, r0
+ addc r2, r6
+ addc r5, r6 ! r5==0 here, so it means add carry-bit
6:
- mov $r1, $r5
- mov #3, $r0
- and $r0, $r5
- tst $r5, $r5
+ mov r1, r5
+ mov #3, r0
+ and r0, r5
+ tst r5, r5
bt 9f ! if it's =0 go to 9f
- mov #2, $r1
- cmp/hs $r1, $r5
+ mov #2, r1
+ cmp/hs r1, r5
bf 7f
- mov.w @r4+, $r0
- extu.w $r0, $r0
- cmp/eq $r1, $r5
+ mov.w @r4+, r0
+ extu.w r0, r0
+ cmp/eq r1, r5
bt/s 8f
clrt
- shll16 $r0
- addc $r0, $r6
+ shll16 r0
+ addc r0, r6
7:
- mov.b @$r4+, $r0
- extu.b $r0, $r0
+ mov.b @r4+, r0
+ extu.b r0, r0
#ifndef __LITTLE_ENDIAN__
- shll8 $r0
+ shll8 r0
#endif
8:
- addc $r0, $r6
- mov #0, $r0
- addc $r0, $r6
+ addc r0, r6
+ mov #0, r0
+ addc r0, r6
9:
rts
- mov $r6, $r0
+ mov r6, r0
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
@@ -159,14 +159,14 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
* them all but there's no guarantee.
*/
-#define SRC(x,y) \
- 9999: x,y; \
+#define SRC(...) \
+ 9999: __VA_ARGS__ ; \
.section __ex_table, "a"; \
.long 9999b, 6001f ; \
.previous
-#define DST(x,y) \
- 9999: x,y; \
+#define DST(...) \
+ 9999: __VA_ARGS__ ; \
.section __ex_table, "a"; \
.long 9999b, 6002f ; \
.previous
@@ -202,7 +202,7 @@ ENTRY(csum_partial_copy_generic)
bt/s 1f
clrt
bra 4f
- add #2,r6 ! $r6 was < 2. Deal with it.
+ add #2,r6 ! r6 was < 2. Deal with it.
3: ! Handle different src and dest alinments.
! This is not common, so simple byte by byte copy will do.
@@ -211,7 +211,8 @@ ENTRY(csum_partial_copy_generic)
tst r6, r6
bt 4f
clrt
-SRC(5: mov.b @r4+,r0 )
+5:
+SRC( mov.b @r4+,r0 )
DST( mov.b r0,@r5 )
add #1, r5
SRC( mov.b @r4+,r1 )
@@ -244,7 +245,8 @@ DST( mov.b r1,@r5 )
! src and dest equally aligned, but to a two byte boundary.
! Handle first two bytes as a special case
.align 5
-SRC(1: mov.w @r4+,r0 )
+1:
+SRC( mov.w @r4+,r0 )
DST( mov.w r0,@r5 )
add #2,r5
extu.w r0,r0
@@ -258,7 +260,8 @@ DST( mov.w r0,@r5 )
tst r6,r6
bt/s 2f
clrt
-SRC(1: mov.l @r4+,r0 )
+1:
+SRC( mov.l @r4+,r0 )
SRC( mov.l @r4+,r1 )
addc r0,r7
DST( mov.l r0,@r5 )
@@ -276,7 +279,7 @@ DST( mov.l r0,@r5 )
DST( mov.l r1,@r5 )
add #4,r5
-SRC( mov.l @r4+,r0 )
+SRC( mov.l @r4+,r0 )
SRC( mov.l @r4+,r1 )
addc r0,r7
DST( mov.l r0,@r5 )
@@ -307,7 +310,8 @@ DST( mov.l r1,@r5 )
bf/s 4f
clrt
shlr2 r6
-SRC(3: mov.l @r4+,r0 )
+3:
+SRC( mov.l @r4+,r0 )
addc r0,r7
DST( mov.l r0,@r5 )
add #4,r5
@@ -334,7 +338,8 @@ DST( mov.w r0,@r5 )
clrt
shll16 r0
addc r0,r7
-SRC(5: mov.b @r4+,r0 )
+5:
+SRC( mov.b @r4+,r0 )
DST( mov.b r0,@r5 )
extu.b r0,r0
#ifndef __LITTLE_ENDIAN__
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index 790fd1808..e1fcc970f 100644
--- a/arch/sh/lib/delay.c
+++ b/arch/sh/lib/delay.c
@@ -21,7 +21,11 @@ void __delay(unsigned long loops)
inline void __const_udelay(unsigned long xloops)
{
- xloops *= current_cpu_data.loops_per_jiffy;
+ __asm__("dmulu.l %0, %2\n\t"
+ "sts mach, %0"
+ : "=r" (xloops)
+ : "0" (xloops), "r" (current_cpu_data.loops_per_jiffy)
+ : "macl", "mach");
__delay(xloops * HZ);
}
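
The rewritten __const_udelay replaces the plain 32-bit multiply, which overflows for realistic
xloops values, with dmulu.l plus "sts mach", i.e. it keeps only the upper 32 bits of the 64-bit
product of xloops and loops_per_jiffy. A rough C equivalent (the function name is illustrative):

    /* Rough C equivalent of the inline asm above; assumes 64-bit arithmetic. */
    static inline unsigned long scale_xloops(unsigned long xloops,
                                             unsigned long loops_per_jiffy)
    {
            return (unsigned long)
                    (((unsigned long long)xloops * loops_per_jiffy) >> 32);
    }
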
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index ab63535e4..2ea37e723 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -244,6 +244,22 @@ void flush_icache_range(unsigned long start, unsigned long end)
}
/*
+ * Write back the D-cache and purge the I-cache for signal trampoline.
+ */
+void flush_cache_sigtramp(unsigned long addr)
+{
+ unsigned long v, index;
+
+ v = addr & ~(L1_CACHE_BYTES-1);
+ asm volatile("ocbwb %0"
+ : /* no output */
+ : "m" (__m(v)));
+
+ index = CACHE_IC_ADDRESS_ARRAY| (v&CACHE_IC_ENTRY_MASK);
+ ctrl_outl(0, index); /* Clear out Valid-bit */
+}
+
+/*
* Invalidate the I-cache of the page (don't need to write back D-cache).
*
* Called from kernel/ptrace.c, mm/memory.c after flush_page_to_ram is called.