author     Ralf Baechle <ralf@linux-mips.org>  2000-03-12 23:15:27 +0000
committer  Ralf Baechle <ralf@linux-mips.org>  2000-03-12 23:15:27 +0000
commit     ae38fd1e4c98588314a42097c5a5e77dcef23561 (patch)
tree       f9f10c203bb9e5fbad4810d1f8774c08dfad20ff /arch/sh/kernel
parent     466a823d79f41d0713b272e48fd73e494b0588e0 (diff)
Merge with Linux 2.3.50.
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/Makefile      |  11
-rw-r--r--  arch/sh/kernel/cf-enabler.c  |  30
-rw-r--r--  arch/sh/kernel/entry.S       | 351
-rw-r--r--  arch/sh/kernel/fpu.c         | 266
-rw-r--r--  arch/sh/kernel/head.S        |  48
-rw-r--r--  arch/sh/kernel/irq.c         |  50
-rw-r--r--  arch/sh/kernel/irq_imask.c   | 106
-rw-r--r--  arch/sh/kernel/irq_onchip.c  |  38
-rw-r--r--  arch/sh/kernel/pci-sh.c      |  12
-rw-r--r--  arch/sh/kernel/process.c     | 157
-rw-r--r--  arch/sh/kernel/semaphore.c   | 161
-rw-r--r--  arch/sh/kernel/setup.c       |  85
-rw-r--r--  arch/sh/kernel/signal.c      |  28
-rw-r--r--  arch/sh/kernel/sys_sh.c      |  52
-rw-r--r--  arch/sh/kernel/time.c        | 138
-rw-r--r--  arch/sh/kernel/traps.c       |  23
16 files changed, 1227 insertions, 329 deletions
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 6cf0b319e..efa2fb109 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -11,10 +11,19 @@
O_TARGET := kernel.o
O_OBJS := process.o signal.o entry.o traps.o irq.o irq_onchip.o \
- ptrace.o setup.o time.o sys_sh.o semaphore.o
+ ptrace.o setup.o time.o sys_sh.o semaphore.o pci-sh.o \
+ irq_imask.o
OX_OBJS := sh_ksyms.o
MX_OBJS :=
+ifdef CONFIG_CF_ENABLER
+O_OBJS += cf-enabler.o
+endif
+
+ifdef CONFIG_CPU_SH4
+O_OBJS += fpu.o
+endif
+
all: kernel.o head.o init_task.o
entry.o: entry.S
diff --git a/arch/sh/kernel/cf-enabler.c b/arch/sh/kernel/cf-enabler.c
new file mode 100644
index 000000000..80dc511b3
--- /dev/null
+++ b/arch/sh/kernel/cf-enabler.c
@@ -0,0 +1,30 @@
+/* $Id: cf-enabler.c,v 1.2 1999/12/20 10:14:40 gniibe Exp $
+ *
+ * linux/drivers/block/cf-enabler.c
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * Enable the CF configuration.
+ */
+
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#define CF_CIS_BASE 0xb8000000
+/*
+ * 0xB8000000 : Attribute
+ * 0xB8001000 : Common Memory
+ * 0xBA000000 : I/O
+ */
+
+int __init cf_init(void)
+{
+ outw(0x0042, CF_CIS_BASE+0x0200);
+ make_imask_irq(14);
+ disable_irq(14);
+ return 0;
+}
+
+__initcall (cf_init);
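
The new cf-enabler.c hooks itself into boot via __initcall(), which places cf_init() in a table of init functions that the kernel walks exactly once during startup. A minimal userspace sketch of that registration pattern, with the table built by hand rather than gathered by the linker as the kernel does:

    #include <stdio.h>

    typedef int (*initcall_t)(void);

    static int cf_init_demo(void)
    {
        printf("cf-enabler: CF configuration written, IRQ 14 registered\n");
        return 0;
    }

    /* In the kernel, __initcall() makes the linker collect these into
     * an init section; here the table is assembled manually. */
    static initcall_t initcalls[] = { cf_init_demo };

    int main(void)
    {
        unsigned int i;

        /* Boot-time pass: run every registered init function once. */
        for (i = 0; i < sizeof(initcalls) / sizeof(initcalls[0]); i++)
            initcalls[i]();
        return 0;
    }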
diff --git a/arch/sh/kernel/entry.S b/arch/sh/kernel/entry.S
index a3e5a918c..77bb7938b 100644
--- a/arch/sh/kernel/entry.S
+++ b/arch/sh/kernel/entry.S
@@ -1,8 +1,8 @@
-/* $Id: entry.S,v 1.19 1999/10/31 13:19:35 gniibe Exp gniibe $
+/* $Id: entry.S,v 1.55 2000/03/05 01:48:58 gniibe Exp $
*
* linux/arch/sh/entry.S
*
- * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 1999, 2000 Niibe Yutaka
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -54,7 +54,8 @@ sigpending = 8
addr_limit = 12
need_resched = 20
-PF_TRACESYS = 0x20
+PF_TRACESYS = 0x00000020
+PF_USEDFPU = 0x00100000
ENOSYS = 38
@@ -207,37 +208,10 @@ error:
1: .long SYMBOL_NAME(do_exception_error)
2: .long 0xefffffff ! BL=0
-reschedule:
- mova SYMBOL_NAME(ret_from_syscall),r0
- mov.l 1f,r1
- jmp @r1
- lds r0,pr
- .balign 4
-1: .long SYMBOL_NAME(schedule)
-
badsys: mov #-ENOSYS,r0
rts ! go to ret_from_syscall..
mov.l r0,@(R0,r15)
-signal_return:
- ! We can reach here from an interrupt handler,
- ! so, we need to unblock interrupt.
- /* STI */
- mov.l 1f,r1
- stc sr,r0
- and r1,r0
- ldc r0,sr
- !
- mov r15,r4
- mov #0,r5
- mov.l 2f,r1
- mova restore_all,r0
- jmp @r1
- lds r0,pr
- .balign 4
-1: .long 0xefffffff ! BL=0
-2: .long SYMBOL_NAME(do_signal)
-
!
!
!
@@ -274,7 +248,7 @@ system_call:
ldc r2,sr
!
mov.l __n_sys,r1
- cmp/ge r1,r0
+ cmp/hs r1,r0
bt/s badsys
mov r0,r2
!
@@ -329,6 +303,9 @@ system_call:
3: .long SYMBOL_NAME(syscall_trace)
2: .long 0xefffffff ! BL=0
1: .long TRA
+__n_sys: .long NR_syscalls
+__sct: .long SYMBOL_NAME(sys_call_table)
+__tsk_flags: .long flags-8192 ! offset from stackbase to tsk->flags
led: .long 0xa8000000 ! For my board -- gN
.section .fixup,"ax"
@@ -343,30 +320,57 @@ fixup_syscall_argerr:
.long 8b,fixup_syscall_argerr
.previous
+reschedule:
+ mova SYMBOL_NAME(ret_from_syscall),r0
+ mov.l 1f,r1
+ jmp @r1
+ lds r0,pr
+ .balign 4
+1: .long SYMBOL_NAME(schedule)
ENTRY(ret_from_irq)
- mov.l @(SR,r15),r0 ! get original stack
+ mov.l @(SR,r15),r0 ! get status register
shll r0
shll r0 ! kernel space?
bt restore_all ! Yes, it's from kernel, go back soon
- ! XXX: Is it better to run through bottom half?
- ! In such a case, we should go "ret_from_syscall" instead
+ ! STI
+ mov.l 1f, $r1
+ stc $sr, $r2
+ and $r1, $r2
+ ldc $r2, $sr
+ !
bra ret_with_reschedule
nop
+ENTRY(ret_from_exception)
+ mov.l @(SR,r15),r0 ! get status register
+ shll r0
+ shll r0 ! kernel space?
+ bt restore_all ! Yes, it's from kernel, go back soon
+ ! STI
+ mov.l 1f, $r1
+ stc $sr, $r2
+ and $r1, $r2
+ ldc $r2, $sr
+ !
+ bra ret_from_syscall
+ nop
+ .balign 4
+1: .long 0xefffffff ! BL=0
+
+ .balign 4
ret: add r8,r15 ! pop off the arguments
mov.l r0,@(R0,r15) ! save the return value
/* fall through */
ENTRY(ret_from_syscall)
- mov.l __bh_mask,r0
+ mov.l __softirq_state,r0
mov.l @r0,r1
- mov.l __bh_active,r0
- mov.l @r0,r2
+ mov.l @(4,r0),r2
tst r2,r1
bt ret_with_reschedule
-handle_bottom_half:
- mov.l __dbh,r0
+handle_softirq:
+ mov.l __do_softirq,r0
jsr @r0
nop
ret_with_reschedule:
@@ -378,11 +382,44 @@ ret_with_reschedule:
bf reschedule
mov.l @(sigpending,r1),r0
tst #0xff,r0
- bf signal_return
- !
+ bt restore_all
+signal_return:
+ mov r15,r4
+ mov #0,r5
+ mov.l __do_signal,r1
+ mova restore_all,r0
+ jmp @r1
+ lds r0,pr
+ .balign 4
+__do_signal:
+ .long SYMBOL_NAME(do_signal)
+__softirq_state:
+ .long SYMBOL_NAME(softirq_state)
+__do_softirq:
+ .long SYMBOL_NAME(do_softirq)
+__minus8192:
+ .long -8192 ! offset from stackbase to tsk
+
+ .balign 4
restore_all:
- add #4,r15 ! skip syscall number
- mov.l @r15+,r11 ! SSR
+#if defined(__SH4__)
+ mov.l __fpu_prepare_fd, $r1
+ jsr @$r1
+ stc $sr, $r4
+#endif
+ add #4,r15 ! Skip syscall number
+ mov.l @r15+,r11 ! Got SSR into R11
+#if defined(__SH4__)
+ mov $r11, $r12
+#endif
+ !
+ mov.l 1f,r1
+ stc sr,r0
+ and r1,r0 ! Get IMASK+FD
+ mov.l 2f,r1
+ and r1,r11
+ or r0,r11 ! Inherit the IMASK+FD value of SR
+ !
mov.l @r15+,r10 ! original stack
mov.l @r15+,r0
mov.l @r15+,r1
@@ -398,6 +435,9 @@ restore_all:
ldc r14,sr ! here, change the register bank
mov r10,k0
mov r11,k1
+#if defined(__SH4__)
+ mov $r12, $k2
+#endif
mov.l @r15+,r8
mov.l @r15+,r9
mov.l @r15+,r10
@@ -410,21 +450,69 @@ restore_all:
lds.l @r15+,macl
lds.l @r15+,pr
ldc.l @r15+,spc
- mov k0,r15
ldc k1,ssr
+#if defined(__SH4__)
+ shll $k1
+ shll $k1
+ bf 9f ! user mode
+ /* Kernel to kernel transition */
+ mov.l 3f, $k1
+ tst $k1, $k2
+ bf 9f ! FPU was not in use
+ ! Kernel-to-kernel transition, and the FPU was in use
+ ! We may not hold the FPU at this point
+ stc $sr, $k2
+ tst $k1, $k2
+ bt 7f
+ ! We need to grab FPU here
+ xor $k1, $k2
+ ldc $k2, $sr ! Grab FPU
+ mov.l __init_task_flags, $k1
+ mov.l @$k1, $k2
+ mov.l __PF_USEDFPU, $k1
+ or $k1, $k2
+ mov.l __init_task_flags, $k1
+ mov.l $k2, @$k1 ! Set init_task.flags |= PF_USEDFPU
+ !
+ ! Restoring FPU...
+ !
+7: fmov.s @$r15+, $fr0
+ fmov.s @$r15+, $fr1
+ fmov.s @$r15+, $fr2
+ fmov.s @$r15+, $fr3
+ fmov.s @$r15+, $fr4
+ fmov.s @$r15+, $fr5
+ fmov.s @$r15+, $fr6
+ fmov.s @$r15+, $fr7
+ fmov.s @$r15+, $fr8
+ fmov.s @$r15+, $fr9
+ fmov.s @$r15+, $fr10
+ fmov.s @$r15+, $fr11
+ fmov.s @$r15+, $fr12
+ fmov.s @$r15+, $fr13
+ fmov.s @$r15+, $fr14
+ fmov.s @$r15+, $fr15
+ lds.l @$r15+, $fpscr
+ lds.l @$r15+, $fpul
+9:
+#endif
+ mov k0,r15
rte
nop
.balign 4
-__n_sys: .long NR_syscalls
-__sct: .long SYMBOL_NAME(sys_call_table)
-__bh_mask: .long SYMBOL_NAME(bh_mask)
-__bh_active: .long SYMBOL_NAME(bh_active)
-__dbh: .long SYMBOL_NAME(do_bottom_half)
__blrb_flags: .long 0x30000000
-__minus8192: .long -8192 ! offset from stackbase to tsk
-__tsk_flags: .long flags-8192 ! offset from stackbase to tsk->flags
-
+#if defined(__SH4__)
+__fpu_prepare_fd:
+ .long SYMBOL_NAME(fpu_prepare_fd)
+__init_task_flags:
+ .long SYMBOL_NAME(init_task_union)+4
+__PF_USEDFPU:
+ .long PF_USEDFPU
+#endif
+1: .long 0x000080f0 ! IMASK+FD
+2: .long 0xffff7f0f ! ~(IMASK+FD)
+3: .long 0x00008000 ! FD=1
! Exception Vector Base
!
@@ -441,43 +529,81 @@ general_exception:
bra handle_exception
mov.l @k2,k2
.balign 4
-2: .long SYMBOL_NAME(ret_from_syscall)
+2: .long SYMBOL_NAME(ret_from_exception)
1: .long EXPEVT
!
!
.balign 1024,0,1024
tlb_miss:
mov.l 1f,k2
- mov.l 3f,k3
+ mov.l 4f,k3
bra handle_exception
mov.l @k2,k2
!
.balign 512,0,512
interrupt:
mov.l 2f,k2
- mov.l 4f,k3
+ mov.l 3f,k3
bra handle_exception
mov.l @k2,k2
.balign 4
1: .long EXPEVT
2: .long INTEVT
-3: .long SYMBOL_NAME(ret_from_syscall)
-4: .long SYMBOL_NAME(ret_from_irq)
+3: .long SYMBOL_NAME(ret_from_irq)
+4: .long SYMBOL_NAME(ret_from_exception)
!
!
handle_exception:
- ! Using k0, k1 for scratch registers (r0_bank1, and r1_bank1),
+ ! Using k0, k1 for scratch registers (r0_bank1, r1_bank1),
! save all registers onto stack.
!
stc ssr,k0 ! from kernel space?
shll k0 ! Check MD bit (bit30)
shll k0
- bt/s 1f ! it's from kernel to kernel transition
+#if defined(__SH4__)
+ bf/s 8f ! it's from user to kernel transition
+ mov $r15, $k0 ! save original stack to k0
+ /* Kernel to kernel transition */
+ mov.l 2f, $k1
+ stc $ssr, $k0
+ tst $k1, $k0
+ bf/s 9f ! FPU is not used
+ mov $r15, $k0 ! save original stack to k0
+ ! FPU is used, save FPU
+ ! /* XXX: Need to save the other FPU register bank if all FPU features are used */
+ ! /* Currently it's not the case for GCC (only udivsi3_i4, divsi3_i4) */
+ sts.l $fpul, @-$r15
+ sts.l $fpscr, @-$r15
+ fmov.s $fr15, @-$r15
+ fmov.s $fr14, @-$r15
+ fmov.s $fr13, @-$r15
+ fmov.s $fr12, @-$r15
+ fmov.s $fr11, @-$r15
+ fmov.s $fr10, @-$r15
+ fmov.s $fr9, @-$r15
+ fmov.s $fr8, @-$r15
+ fmov.s $fr7, @-$r15
+ fmov.s $fr6, @-$r15
+ fmov.s $fr5, @-$r15
+ fmov.s $fr4, @-$r15
+ fmov.s $fr3, @-$r15
+ fmov.s $fr2, @-$r15
+ fmov.s $fr1, @-$r15
+ fmov.s $fr0, @-$r15
+ bra 9f
+ mov #0, $k1
+#else
+ bt/s 9f ! it's from kernel to kernel transition
mov r15,k0 ! save original stack to k0 anyway
- mov kernel_sp,r15 ! change to kernel stack
-1: stc.l spc,@-r15
+#endif
+8: /* User space to kernel */
+ mov kernel_sp, $r15 ! change to kernel stack
+#if defined(__SH4__)
+ mov.l 2f, $k1 ! let kernel release FPU
+#endif
+9: stc.l spc,@-r15
sts.l pr,@-r15
!
lds k3,pr ! Set the return address to pr
@@ -487,9 +613,12 @@ handle_exception:
stc.l gbr,@-r15
mov.l r14,@-r15
!
- mov.l 2f,k1
- stc sr,r14 ! back to normal register bank, and
- and k1,r14 ! ..
+ stc sr,r14 ! Back to normal register bank, and
+#if defined(__SH4__)
+ or $k1, $r14 ! may release FPU
+#endif
+ mov.l 3f,k1
+ and k1,r14 ! ...
ldc r14,sr ! ...changed here.
!
mov.l r13,@-r15
@@ -520,7 +649,8 @@ handle_exception:
mov.l @r15,r0 ! recovering r0..
.balign 4
1: .long SYMBOL_NAME(exception_handling_table)
-2: .long 0xdfffffff ! RB=0, BL=1
+2: .long 0x00008000 ! FD=1
+3: .long 0xdfffffff ! RB=0, leave BL=1
none:
rts
@@ -537,7 +667,11 @@ ENTRY(exception_handling_table)
.long tlb_protection_violation_store
.long error ! address_error_load (filled by trap_init)
.long error ! address_error_store (filled by trap_init)
+#if defined(__SH4__)
+ .long SYMBOL_NAME(do_fpu_error)
+#else
.long error ! fpu_exception
+#endif
.long error
.long system_call ! Unconditional Trap
.long error ! reserved_instruction (filled by trap_init)
@@ -628,8 +762,8 @@ ENTRY(interrupt_table)
.long error
.long error
.long error
- .long error ! fpu
- .long error ! fpu
+ .long SYMBOL_NAME(do_fpu_state_restore)
+ .long SYMBOL_NAME(do_fpu_state_restore)
#endif
ENTRY(sys_call_table)
@@ -649,15 +783,15 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_time)
.long SYMBOL_NAME(sys_mknod)
.long SYMBOL_NAME(sys_chmod) /* 15 */
- .long SYMBOL_NAME(sys_lchown)
+ .long SYMBOL_NAME(sys_lchown16)
.long SYMBOL_NAME(sys_ni_syscall) /* old break syscall holder */
.long SYMBOL_NAME(sys_stat)
.long SYMBOL_NAME(sys_lseek)
.long SYMBOL_NAME(sys_getpid) /* 20 */
.long SYMBOL_NAME(sys_mount)
.long SYMBOL_NAME(sys_oldumount)
- .long SYMBOL_NAME(sys_setuid)
- .long SYMBOL_NAME(sys_getuid)
+ .long SYMBOL_NAME(sys_setuid16)
+ .long SYMBOL_NAME(sys_getuid16)
.long SYMBOL_NAME(sys_stime) /* 25 */
.long SYMBOL_NAME(sys_ptrace)
.long SYMBOL_NAME(sys_alarm)
@@ -679,13 +813,13 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_times)
.long SYMBOL_NAME(sys_ni_syscall) /* old prof syscall holder */
.long SYMBOL_NAME(sys_brk) /* 45 */
- .long SYMBOL_NAME(sys_setgid)
- .long SYMBOL_NAME(sys_getgid)
+ .long SYMBOL_NAME(sys_setgid16)
+ .long SYMBOL_NAME(sys_getgid16)
.long SYMBOL_NAME(sys_signal)
- .long SYMBOL_NAME(sys_geteuid)
- .long SYMBOL_NAME(sys_getegid) /* 50 */
+ .long SYMBOL_NAME(sys_geteuid16)
+ .long SYMBOL_NAME(sys_getegid16) /* 50 */
.long SYMBOL_NAME(sys_acct)
- .long SYMBOL_NAME(sys_umount) /* recycled never used phys() */
+ .long SYMBOL_NAME(sys_umount) /* recycled never used phys() */
.long SYMBOL_NAME(sys_ni_syscall) /* old lock syscall holder */
.long SYMBOL_NAME(sys_ioctl)
.long SYMBOL_NAME(sys_fcntl) /* 55 */
@@ -703,19 +837,19 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_sigaction)
.long SYMBOL_NAME(sys_sgetmask)
.long SYMBOL_NAME(sys_ssetmask)
- .long SYMBOL_NAME(sys_setreuid) /* 70 */
- .long SYMBOL_NAME(sys_setregid)
+ .long SYMBOL_NAME(sys_setreuid16) /* 70 */
+ .long SYMBOL_NAME(sys_setregid16)
.long SYMBOL_NAME(sys_sigsuspend)
.long SYMBOL_NAME(sys_sigpending)
.long SYMBOL_NAME(sys_sethostname)
.long SYMBOL_NAME(sys_setrlimit) /* 75 */
- .long SYMBOL_NAME(sys_getrlimit)
+ .long SYMBOL_NAME(sys_old_getrlimit)
.long SYMBOL_NAME(sys_getrusage)
.long SYMBOL_NAME(sys_gettimeofday)
.long SYMBOL_NAME(sys_settimeofday)
- .long SYMBOL_NAME(sys_getgroups) /* 80 */
- .long SYMBOL_NAME(sys_setgroups)
- .long SYMBOL_NAME(sys_ni_syscall) /* old_select */
+ .long SYMBOL_NAME(sys_getgroups16) /* 80 */
+ .long SYMBOL_NAME(sys_setgroups16)
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_oldselect */
.long SYMBOL_NAME(sys_symlink)
.long SYMBOL_NAME(sys_lstat)
.long SYMBOL_NAME(sys_readlink) /* 85 */
@@ -723,18 +857,18 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_swapon)
.long SYMBOL_NAME(sys_reboot)
.long SYMBOL_NAME(old_readdir)
- .long SYMBOL_NAME(sys_mmap) /* 90 */
+ .long SYMBOL_NAME(old_mmap) /* 90 */
.long SYMBOL_NAME(sys_munmap)
.long SYMBOL_NAME(sys_truncate)
.long SYMBOL_NAME(sys_ftruncate)
.long SYMBOL_NAME(sys_fchmod)
- .long SYMBOL_NAME(sys_fchown) /* 95 */
+ .long SYMBOL_NAME(sys_fchown16) /* 95 */
.long SYMBOL_NAME(sys_getpriority)
.long SYMBOL_NAME(sys_setpriority)
.long SYMBOL_NAME(sys_ni_syscall) /* old profil syscall holder */
.long SYMBOL_NAME(sys_statfs)
.long SYMBOL_NAME(sys_fstatfs) /* 100 */
- .long SYMBOL_NAME(sys_ni_syscall) /* ioperm */
+ .long SYMBOL_NAME(sys_ni_syscall) /* ioperm */
.long SYMBOL_NAME(sys_socketcall)
.long SYMBOL_NAME(sys_syslog)
.long SYMBOL_NAME(sys_setitimer)
@@ -771,8 +905,8 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_sysfs) /* 135 */
.long SYMBOL_NAME(sys_personality)
.long SYMBOL_NAME(sys_ni_syscall) /* for afs_syscall */
- .long SYMBOL_NAME(sys_setfsuid)
- .long SYMBOL_NAME(sys_setfsgid)
+ .long SYMBOL_NAME(sys_setfsuid16)
+ .long SYMBOL_NAME(sys_setfsgid16)
.long SYMBOL_NAME(sys_llseek) /* 140 */
.long SYMBOL_NAME(sys_getdents)
.long SYMBOL_NAME(sys_select)
@@ -797,14 +931,14 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_sched_rr_get_interval)
.long SYMBOL_NAME(sys_nanosleep)
.long SYMBOL_NAME(sys_mremap)
- .long SYMBOL_NAME(sys_setresuid)
- .long SYMBOL_NAME(sys_getresuid) /* 165 */
- .long SYMBOL_NAME(sys_ni_syscall) /* vm86 */
+ .long SYMBOL_NAME(sys_setresuid16)
+ .long SYMBOL_NAME(sys_getresuid16) /* 165 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* vm86 */
.long SYMBOL_NAME(sys_query_module)
.long SYMBOL_NAME(sys_poll)
.long SYMBOL_NAME(sys_nfsservctl)
- .long SYMBOL_NAME(sys_setresgid) /* 170 */
- .long SYMBOL_NAME(sys_getresgid)
+ .long SYMBOL_NAME(sys_setresgid16) /* 170 */
+ .long SYMBOL_NAME(sys_getresgid16)
.long SYMBOL_NAME(sys_prctl)
.long SYMBOL_NAME(sys_rt_sigreturn)
.long SYMBOL_NAME(sys_rt_sigaction)
@@ -815,15 +949,42 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_rt_sigsuspend)
.long SYMBOL_NAME(sys_pread) /* 180 */
.long SYMBOL_NAME(sys_pwrite)
- .long SYMBOL_NAME(sys_chown)
+ .long SYMBOL_NAME(sys_chown16)
.long SYMBOL_NAME(sys_getcwd)
.long SYMBOL_NAME(sys_capget)
.long SYMBOL_NAME(sys_capset) /* 185 */
.long SYMBOL_NAME(sys_sigaltstack)
.long SYMBOL_NAME(sys_sendfile)
- .long SYMBOL_NAME(sys_ni_syscall) /* streams1 */
- .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* streams1 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */
.long SYMBOL_NAME(sys_vfork) /* 190 */
+ .long SYMBOL_NAME(sys_getrlimit)
+ .long SYMBOL_NAME(sys_mmap2)
+ .long SYMBOL_NAME(sys_truncate64)
+ .long SYMBOL_NAME(sys_ftruncate64)
+ .long SYMBOL_NAME(sys_stat64) /* 195 */
+ .long SYMBOL_NAME(sys_lstat64)
+ .long SYMBOL_NAME(sys_fstat64)
+ .long SYMBOL_NAME(sys_lchown)
+ .long SYMBOL_NAME(sys_getuid)
+ .long SYMBOL_NAME(sys_getgid) /* 200 */
+ .long SYMBOL_NAME(sys_geteuid)
+ .long SYMBOL_NAME(sys_getegid)
+ .long SYMBOL_NAME(sys_setreuid)
+ .long SYMBOL_NAME(sys_setregid)
+ .long SYMBOL_NAME(sys_getgroups) /* 205 */
+ .long SYMBOL_NAME(sys_setgroups)
+ .long SYMBOL_NAME(sys_fchown)
+ .long SYMBOL_NAME(sys_setresuid)
+ .long SYMBOL_NAME(sys_getresuid)
+ .long SYMBOL_NAME(sys_setresgid) /* 210 */
+ .long SYMBOL_NAME(sys_getresgid)
+ .long SYMBOL_NAME(sys_chown)
+ .long SYMBOL_NAME(sys_setuid)
+ .long SYMBOL_NAME(sys_setgid)
+ .long SYMBOL_NAME(sys_setfsuid) /* 215 */
+ .long SYMBOL_NAME(sys_setfsgid)
+ .long SYMBOL_NAME(sys_pivot_root)
/*
* NOTE!! This doesn't have to be exact - we just have
@@ -831,7 +992,7 @@ ENTRY(sys_call_table)
* entries. Don't panic if you notice that this hasn't
* been shrunk every time we add a new system call.
*/
- .rept NR_syscalls-190
+ .rept NR_syscalls-217
.long SYMBOL_NAME(sys_ni_syscall)
.endr
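
The table edits above retarget the original uid/gid slots (setuid, getuid, chown, ...) at new *16 wrappers and add direct 32-bit variants at slots 198-216, following the kernel-wide move to 32-bit IDs during 2.3.x. A hedged sketch of the narrowing an old-ABI wrapper must perform when a 32-bit id is reported to a 16-bit caller (the overflow convention shown is an assumption for illustration):

    #include <stdio.h>

    typedef unsigned short old_uid_t;   /* 16-bit ABI type */
    typedef unsigned int   new_uid_t;   /* 32-bit ABI type */

    /* ids above 65534 cannot be expressed in the old ABI; hand back
     * the conventional overflow value instead (assumed 0xffff here). */
    static old_uid_t high2low_uid(new_uid_t uid)
    {
        return uid > 0xfffe ? (old_uid_t)-1 : (old_uid_t)uid;
    }

    int main(void)
    {
        printf("uid 1000  through the 16-bit ABI: %u\n", high2low_uid(1000));
        printf("uid 70000 through the 16-bit ABI: %u\n", high2low_uid(70000));
        return 0;
    }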
diff --git a/arch/sh/kernel/fpu.c b/arch/sh/kernel/fpu.c
new file mode 100644
index 000000000..335902c1d
--- /dev/null
+++ b/arch/sh/kernel/fpu.c
@@ -0,0 +1,266 @@
+/* $Id: fpu.c,v 1.27 2000/03/05 01:48:34 gniibe Exp $
+ *
+ * linux/arch/sh/kernel/fpu.c
+ *
+ * Save/restore floating point context for signal handlers.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ *
+ * FIXME! These routines can be optimized in the big-endian case.
+ */
+
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+
+void
+save_fpu(struct task_struct *tsk)
+{
+ asm volatile("sts.l $fpul, @-%0\n\t"
+ "sts.l $fpscr, @-%0\n\t"
+ "frchg\n\t"
+ "fmov.s $fr15, @-%0\n\t"
+ "fmov.s $fr14, @-%0\n\t"
+ "fmov.s $fr13, @-%0\n\t"
+ "fmov.s $fr12, @-%0\n\t"
+ "fmov.s $fr11, @-%0\n\t"
+ "fmov.s $fr10, @-%0\n\t"
+ "fmov.s $fr9, @-%0\n\t"
+ "fmov.s $fr8, @-%0\n\t"
+ "fmov.s $fr7, @-%0\n\t"
+ "fmov.s $fr6, @-%0\n\t"
+ "fmov.s $fr5, @-%0\n\t"
+ "fmov.s $fr4, @-%0\n\t"
+ "fmov.s $fr3, @-%0\n\t"
+ "fmov.s $fr2, @-%0\n\t"
+ "fmov.s $fr1, @-%0\n\t"
+ "fmov.s $fr0, @-%0\n\t"
+ "frchg\n\t"
+ "fmov.s $fr15, @-%0\n\t"
+ "fmov.s $fr14, @-%0\n\t"
+ "fmov.s $fr13, @-%0\n\t"
+ "fmov.s $fr12, @-%0\n\t"
+ "fmov.s $fr11, @-%0\n\t"
+ "fmov.s $fr10, @-%0\n\t"
+ "fmov.s $fr9, @-%0\n\t"
+ "fmov.s $fr8, @-%0\n\t"
+ "fmov.s $fr7, @-%0\n\t"
+ "fmov.s $fr6, @-%0\n\t"
+ "fmov.s $fr5, @-%0\n\t"
+ "fmov.s $fr4, @-%0\n\t"
+ "fmov.s $fr3, @-%0\n\t"
+ "fmov.s $fr2, @-%0\n\t"
+ "fmov.s $fr1, @-%0\n\t"
+ "fmov.s $fr0, @-%0"
+ : /* no output */
+ : "r" ((char *)(&tsk->thread.fpu.hard.status))
+ : "memory");
+
+ tsk->flags &= ~PF_USEDFPU;
+ release_fpu();
+}
+
+static void
+restore_fpu(struct task_struct *tsk)
+{
+ asm volatile("fmov.s @%0+, $fr0\n\t"
+ "fmov.s @%0+, $fr1\n\t"
+ "fmov.s @%0+, $fr2\n\t"
+ "fmov.s @%0+, $fr3\n\t"
+ "fmov.s @%0+, $fr4\n\t"
+ "fmov.s @%0+, $fr5\n\t"
+ "fmov.s @%0+, $fr6\n\t"
+ "fmov.s @%0+, $fr7\n\t"
+ "fmov.s @%0+, $fr8\n\t"
+ "fmov.s @%0+, $fr9\n\t"
+ "fmov.s @%0+, $fr10\n\t"
+ "fmov.s @%0+, $fr11\n\t"
+ "fmov.s @%0+, $fr12\n\t"
+ "fmov.s @%0+, $fr13\n\t"
+ "fmov.s @%0+, $fr14\n\t"
+ "fmov.s @%0+, $fr15\n\t"
+ "frchg\n\t"
+ "fmov.s @%0+, $fr0\n\t"
+ "fmov.s @%0+, $fr1\n\t"
+ "fmov.s @%0+, $fr2\n\t"
+ "fmov.s @%0+, $fr3\n\t"
+ "fmov.s @%0+, $fr4\n\t"
+ "fmov.s @%0+, $fr5\n\t"
+ "fmov.s @%0+, $fr6\n\t"
+ "fmov.s @%0+, $fr7\n\t"
+ "fmov.s @%0+, $fr8\n\t"
+ "fmov.s @%0+, $fr9\n\t"
+ "fmov.s @%0+, $fr10\n\t"
+ "fmov.s @%0+, $fr11\n\t"
+ "fmov.s @%0+, $fr12\n\t"
+ "fmov.s @%0+, $fr13\n\t"
+ "fmov.s @%0+, $fr14\n\t"
+ "fmov.s @%0+, $fr15\n\t"
+ "frchg\n\t"
+ "lds.l @%0+, $fpscr\n\t"
+ "lds.l @%0+, $fpul\n\t"
+ : /* no output */
+ : "r" (&tsk->thread.fpu)
+ : "memory");
+}
+
+/*
+ * Load the FPU with signalling NaNs.  The bit pattern we're using
+ * has the property that it represents a signaling NaN whether it is
+ * interpreted as single or as double precision.
+ */
+/* Double precision, NaNs as NaNs, rounding to nearest, no exceptions */
+#define FPU_DEFAULT 0x00080000
+
+void fpu_init(void)
+{
+ asm volatile("lds %0, $fpul\n\t"
+ "lds %1, $fpscr\n\t"
+ "fsts $fpul, $fr0\n\t"
+ "fsts $fpul, $fr1\n\t"
+ "fsts $fpul, $fr2\n\t"
+ "fsts $fpul, $fr3\n\t"
+ "fsts $fpul, $fr4\n\t"
+ "fsts $fpul, $fr5\n\t"
+ "fsts $fpul, $fr6\n\t"
+ "fsts $fpul, $fr7\n\t"
+ "fsts $fpul, $fr8\n\t"
+ "fsts $fpul, $fr9\n\t"
+ "fsts $fpul, $fr10\n\t"
+ "fsts $fpul, $fr11\n\t"
+ "fsts $fpul, $fr12\n\t"
+ "fsts $fpul, $fr13\n\t"
+ "fsts $fpul, $fr14\n\t"
+ "fsts $fpul, $fr15\n\t"
+ "frchg\n\t"
+ "fsts $fpul, $fr0\n\t"
+ "fsts $fpul, $fr1\n\t"
+ "fsts $fpul, $fr2\n\t"
+ "fsts $fpul, $fr3\n\t"
+ "fsts $fpul, $fr4\n\t"
+ "fsts $fpul, $fr5\n\t"
+ "fsts $fpul, $fr6\n\t"
+ "fsts $fpul, $fr7\n\t"
+ "fsts $fpul, $fr8\n\t"
+ "fsts $fpul, $fr9\n\t"
+ "fsts $fpul, $fr10\n\t"
+ "fsts $fpul, $fr11\n\t"
+ "fsts $fpul, $fr12\n\t"
+ "fsts $fpul, $fr13\n\t"
+ "fsts $fpul, $fr14\n\t"
+ "fsts $fpul, $fr15\n\t"
+ "frchg"
+ : /* no output */
+ : "r" (0), "r" (FPU_DEFAULT));
+}
+
+asmlinkage void
+do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ struct task_struct *tsk = current;
+
+ regs.syscall_nr = -1;
+ regs.pc += 2;
+
+ grab_fpu();
+ save_fpu(tsk);
+ tsk->thread.trap_no = 11;
+ tsk->thread.error_code = 0;
+ force_sig(SIGFPE, tsk);
+}
+
+asmlinkage void
+do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
+ unsigned long r7, struct pt_regs regs)
+{
+ struct task_struct *tsk = current;
+
+ regs.syscall_nr = -1;
+
+ if (!user_mode(&regs)) {
+ if (tsk != &init_task) {
+ unlazy_fpu(tsk);
+ }
+ tsk = &init_task;
+ if (tsk->flags & PF_USEDFPU)
+ BUG();
+ }
+
+ grab_fpu();
+ if (tsk->used_math) {
+ /* Using the FPU again. */
+ restore_fpu(tsk);
+ } else {
+ /* First time FPU user. */
+ fpu_init();
+ tsk->used_math = 1;
+ }
+ tsk->flags |= PF_USEDFPU;
+ release_fpu();
+}
+
+/*
+ * Change current FD flag to set FD flag back to exception
+ */
+asmlinkage void
+fpu_prepare_fd(unsigned long sr, unsigned long r5, unsigned long r6,
+ unsigned long r7, struct pt_regs regs)
+{
+ __cli();
+ if (!user_mode(&regs)) {
+ if (init_task.flags & PF_USEDFPU)
+ grab_fpu();
+ else {
+ if (!(sr & SR_FD)) {
+ release_fpu();
+ BUG();
+ }
+ }
+ return;
+ }
+
+ if (sr & SR_FD) { /* Kernel doesn't grab FPU */
+ if (current->flags & PF_USEDFPU)
+ grab_fpu();
+ else {
+ if (init_task.flags & PF_USEDFPU) {
+ init_task.flags &= ~PF_USEDFPU;
+ BUG();
+ }
+ }
+ } else {
+ if (init_task.flags & PF_USEDFPU)
+ save_fpu(&init_task);
+ else {
+ release_fpu();
+ BUG();
+ }
+ }
+}
+
+/* Short cut for the FPU exception */
+asmlinkage void
+enable_fpu_in_danger(void)
+{
+ struct task_struct *tsk = current;
+
+ if (tsk != &init_task)
+ unlazy_fpu(tsk);
+
+ tsk = &init_task;
+ if (tsk->used_math) {
+ /* Using the FPU again. */
+ restore_fpu(tsk);
+ } else {
+ /* First time FPU user. */
+ fpu_init();
+ tsk->used_math = 1;
+ }
+ tsk->flags |= PF_USEDFPU;
+}
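
fpu.c (and the process.c changes further down) call grab_fpu(), release_fpu(), unlazy_fpu() and clear_fpu(), which this patch defines elsewhere, presumably in a header. The sketch below models what the callers imply those helpers do — toggle the SR.FD disable bit and flush lazy state — and is an inference from this diff, not the header's actual code:

    #include <stdio.h>

    #define SR_FD      0x00008000UL  /* FPU-disable bit in the status register */
    #define PF_USEDFPU 0x00100000UL

    struct task_sketch {
        unsigned long flags;
        int used_math;
    };

    /* grab_fpu() would clear SR.FD so FP instructions execute;
     * release_fpu() would set SR.FD so the next FP instruction traps
     * into do_fpu_state_restore().  Here they only narrate. */
    static void grab_fpu(void)    { printf("SR.FD <- 0 (FPU usable)\n"); }
    static void release_fpu(void) { printf("SR.FD <- 1 (FPU traps)\n"); }

    /* unlazy_fpu(): flush live FPU state before it can go stale. */
    static void unlazy_fpu(struct task_sketch *tsk)
    {
        if (tsk->flags & PF_USEDFPU) {
            printf("save_fpu(tsk)\n");   /* save_fpu() also drops the flag */
            tsk->flags &= ~PF_USEDFPU;
            release_fpu();
        }
    }

    /* clear_fpu(): discard FPU state outright, as flush_thread() wants. */
    static void clear_fpu(struct task_sketch *tsk)
    {
        tsk->flags &= ~PF_USEDFPU;
        release_fpu();
    }

    int main(void)
    {
        struct task_sketch t = { PF_USEDFPU, 1 };

        grab_fpu();
        unlazy_fpu(&t);
        clear_fpu(&t);
        return 0;
    }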
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
index f6378927e..3f938557a 100644
--- a/arch/sh/kernel/head.S
+++ b/arch/sh/kernel/head.S
@@ -1,8 +1,8 @@
-/* $Id: head.S,v 1.7 1999/10/27 09:41:42 gniibe Exp gniibe $
+/* $Id: head.S,v 1.16 2000/03/02 00:01:15 gniibe Exp $
*
* arch/sh/kernel/head.S
*
- * Copyright (C) 1999 Niibe Yutaka & Kaz Kojima
+ * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -34,49 +34,37 @@ ENTRY(empty_zero_page)
* Cache may or may not be initialized.
* Hardware (including on-chip modules) may or may not be initialized.
*
- * The register R4&R5 holds the address of the parameter block, which has
- * command-line data, etc.
- *
*/
ENTRY(_stext)
-#if defined(__SH4__)
- ! Initialize FPSCR
- /* GCC (as of 2.95.1) assumes FPU with double precision mode. */
- mov.l 7f,r0
- lds r0,fpscr
-#endif
! Initialize Status Register
- mov.l 1f,r0 ! MD=1, RB=0, BL=1
- ldc r0,sr
+ mov.l 1f, $r0 ! MD=1, RB=0, BL=1
+ ldc $r0, $sr
!
- mov.l 2f,r0
- mov r0,r15 ! Set initial r15 (stack pointer)
- ldc r0,r4_bank ! and stack base
+ mov.l 2f, $r0
+ mov $r0, $r15 ! Set initial r15 (stack pointer)
+ ldc $r0, $r4_bank ! and stack base
!
! Enable cache
- mov.l 6f,r0
- jsr @r0
+ mov.l 6f, $r0
+ jsr @$r0
nop
! Clear BSS area
- mov.l 3f,r1
- add #4,r1
- mov.l 4f,r2
- mov #0,r0
-9: cmp/hs r2,r1
+ mov.l 3f, $r1
+ add #4, $r1
+ mov.l 4f, $r2
+ mov #0, $r0
+9: cmp/hs $r2, $r1
bf/s 9b ! while (r1 < r2)
- mov.l r0,@-r2
+ mov.l $r0,@-$r2
! Start kernel
- mov.l 5f,r0
- jmp @r0
+ mov.l 5f, $r0
+ jmp @$r0
nop
.balign 4
-1: .long 0x50000000 ! MD=1, RB=0, BL=1
+1: .long 0x50000000 ! MD=1, RB=0, BL=1, FD=0
2: .long SYMBOL_NAME(stack)
3: .long SYMBOL_NAME(__bss_start)
4: .long SYMBOL_NAME(_end)
5: .long SYMBOL_NAME(start_kernel)
6: .long SYMBOL_NAME(cache_init)
-#if defined(__SH4__)
-7: .long 0x00080000
-#endif
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index e87972c73..a15352389 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -1,4 +1,4 @@
-/* $Id: irq.c,v 1.4 1999/10/11 13:12:14 gniibe Exp $
+/* $Id: irq.c,v 1.11 2000/02/29 11:03:40 gniibe Exp $
*
* linux/arch/sh/kernel/irq.c
*
@@ -31,7 +31,7 @@
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/smp.h>
-#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <linux/irq.h>
@@ -49,7 +49,8 @@ spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;
/*
* Controller mappings for all interrupt sources:
*/
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = { [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
+ { [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};
/*
* Special irq handlers.
@@ -112,9 +113,8 @@ int get_irq_list(char *buf)
p += sprintf(p, " %14s", irq_desc[i].handler->typename);
p += sprintf(p, " %s", action->name);
- for (action=action->next; action; action = action->next) {
+ for (action=action->next; action; action = action->next)
p += sprintf(p, ", %s", action->name);
- }
*p++ = '\n';
}
return p - buf;
@@ -248,7 +248,7 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
kstat.irqs[cpu][irq]++;
desc = irq_desc + irq;
spin_lock(&irq_controller_lock);
- irq_desc[irq].handler->ack(irq);
+ desc->handler->ack(irq);
/*
REPLAY is when Linux resends an IRQ that was dropped earlier
WAITING is used by probe to mark irqs that are being tested
@@ -298,21 +298,15 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
spin_unlock(&irq_controller_lock);
}
desc->status &= ~IRQ_INPROGRESS;
- if (!(desc->status & IRQ_DISABLED)){
- irq_desc[irq].handler->end(irq);
- }
+ if (!(desc->status & IRQ_DISABLED))
+ desc->handler->end(irq);
spin_unlock(&irq_controller_lock);
- /*
- * This should be conditional: we should really get
- * a return code from the irq handler to tell us
- * whether the handler wants us to do software bottom
- * half handling or not..
- */
- if (1) {
- if (bh_active & bh_mask)
- do_bottom_half();
- }
+#if 1
+ __sti();
+#endif
+ if (softirq_state[cpu].active&softirq_state[cpu].mask)
+ do_softirq();
return 1;
}
@@ -347,7 +341,7 @@ int request_irq(unsigned int irq,
kfree(action);
return retval;
}
-
+
void free_irq(unsigned int irq, void *dev_id)
{
struct irqaction **p;
@@ -373,10 +367,6 @@ void free_irq(unsigned int irq, void *dev_id)
irq_desc[irq].handler->shutdown(irq);
}
spin_unlock_irqrestore(&irq_controller_lock,flags);
-
- /* Wait to make sure it's not being used on another CPU */
- while (irq_desc[irq].status & IRQ_INPROGRESS)
- barrier();
kfree(action);
return;
}
@@ -398,6 +388,7 @@ unsigned long probe_irq_on(void)
{
unsigned int i;
unsigned long delay;
+ unsigned long val;
/*
* first, enable any unassigned irqs
@@ -421,6 +412,7 @@ unsigned long probe_irq_on(void)
/*
* Now filter out any obviously spurious interrupts
*/
+ val = 0;
spin_lock_irq(&irq_controller_lock);
for (i=0; i<NR_IRQS; i++) {
unsigned int status = irq_desc[i].status;
@@ -433,19 +425,19 @@ unsigned long probe_irq_on(void)
irq_desc[i].status = status & ~IRQ_AUTODETECT;
irq_desc[i].handler->shutdown(i);
}
+
+ if (i < 32)
+ val |= 1 << i;
}
spin_unlock_irq(&irq_controller_lock);
- return 0x12345678;
+ return val;
}
-int probe_irq_off(unsigned long unused)
+int probe_irq_off(unsigned long val)
{
int i, irq_found, nr_irqs;
- if (unused != 0x12345678)
- printk("Bad IRQ probe from %lx\n", (&unused)[-1]);
-
nr_irqs = 0;
irq_found = 0;
spin_lock_irq(&irq_controller_lock);
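
do_IRQ() now exits through the softirq machinery instead of the old global bottom-half pair (bh_active & bh_mask): each CPU gets a softirq_state with active and mask words, checked on the way out of every interrupt. A compilable sketch of that exit check, with an illustrative field layout:

    #include <stdio.h>

    struct softirq_state_sketch {
        unsigned int active;   /* softirqs raised */
        unsigned int mask;     /* softirqs currently allowed */
    };

    static struct softirq_state_sketch softirq_state[1];

    static void do_softirq_sketch(int cpu)
    {
        unsigned int pending =
            softirq_state[cpu].active & softirq_state[cpu].mask;

        softirq_state[cpu].active &= ~pending;
        printf("servicing softirqs 0x%x on cpu %d\n", pending, cpu);
    }

    int main(void)
    {
        int cpu = 0;

        softirq_state[cpu].mask = 0x3;
        softirq_state[cpu].active = 0x1;   /* e.g. a timer softirq raised */

        /* The tail of the new do_IRQ(): */
        if (softirq_state[cpu].active & softirq_state[cpu].mask)
            do_softirq_sketch(cpu);
        return 0;
    }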
diff --git a/arch/sh/kernel/irq_imask.c b/arch/sh/kernel/irq_imask.c
new file mode 100644
index 000000000..a3cf78b5f
--- /dev/null
+++ b/arch/sh/kernel/irq_imask.c
@@ -0,0 +1,106 @@
+/* $Id: irq_imask.c,v 1.2 2000/02/11 04:57:40 gniibe Exp $
+ *
+ * linux/arch/sh/kernel/irq_imask.c
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * Simple interrupt handling using IMASK of SR register.
+ *
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+
+#include <linux/spinlock.h>
+#include <linux/cache.h>
+#include <linux/irq.h>
+
+/* Bitmap of IRQ masked */
+static unsigned long imask_mask = 0x7fff;
+static int interrupt_priority = 0;
+
+static void enable_imask_irq(unsigned int irq);
+static void disable_imask_irq(unsigned int irq);
+static void shutdown_imask_irq(unsigned int irq);
+static void mask_and_ack_imask(unsigned int);
+static void end_imask_irq(unsigned int irq);
+
+#define IMASK_PRIORITY 15
+
+static unsigned int startup_imask_irq(unsigned int irq)
+{
+ enable_imask_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static struct hw_interrupt_type imask_irq_type = {
+ "Interrupt using IMASK of SR register",
+ startup_imask_irq,
+ shutdown_imask_irq,
+ enable_imask_irq,
+ disable_imask_irq,
+ mask_and_ack_imask,
+ end_imask_irq
+};
+
+void disable_imask_irq(unsigned int irq)
+{
+ unsigned long __dummy;
+
+ clear_bit(irq, &imask_mask);
+ if (interrupt_priority < IMASK_PRIORITY - irq)
+ interrupt_priority = IMASK_PRIORITY - irq;
+
+ asm volatile("stc sr,%0\n\t"
+ "and %1,%0\n\t"
+ "or %2,%0\n\t"
+ "ldc %0,sr"
+ : "=&r" (__dummy)
+ : "r" (0xffffff0f), "r" (interrupt_priority << 4));
+}
+
+static void enable_imask_irq(unsigned int irq)
+{
+ unsigned long __dummy;
+
+ set_bit(irq, &imask_mask);
+ interrupt_priority = IMASK_PRIORITY - ffz(imask_mask);
+
+ asm volatile("stc sr,%0\n\t"
+ "and %1,%0\n\t"
+ "or %2,%0\n\t"
+ "ldc %0,sr"
+ : "=&r" (__dummy)
+ : "r" (0xffffff0f), "r" (interrupt_priority << 4));
+}
+
+static void mask_and_ack_imask(unsigned int irq)
+{
+ disable_imask_irq(irq);
+}
+
+static void end_imask_irq(unsigned int irq)
+{
+ enable_imask_irq(irq);
+}
+
+static void shutdown_imask_irq(unsigned int irq)
+{
+ disable_imask_irq(irq);
+}
+
+void make_imask_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+ irq_desc[irq].handler = &imask_irq_type;
+ enable_irq(irq);
+}
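
The imask scheme needs no interrupt-controller hardware at all: masking IRQ n simply raises the four-bit IMASK field (SR bits 4-7) to priority 15-n, which blocks that source and everything weaker. For the CF enabler above, make_imask_irq(14) followed by disable_imask_irq(14) yields interrupt_priority = 15 - 14 = 1, hence SR.IMASK = 1. A worked example of the SR update:

    #include <stdio.h>

    #define IMASK_PRIORITY 15

    /* The read-modify-write that the inline asm in irq_imask.c
     * performs on the status register. */
    static unsigned long set_imask(unsigned long sr, int priority)
    {
        sr &= 0xffffff0f;                      /* clear IMASK (bits 4-7) */
        sr |= (unsigned long)priority << 4;    /* install the new level */
        return sr;
    }

    int main(void)
    {
        unsigned long sr = 0x40000000;         /* SR with IMASK = 0 */
        int irq = 14;
        int priority = IMASK_PRIORITY - irq;   /* disable_imask_irq(14) -> 1 */

        printf("SR after masking IRQ %d: %#010lx (IMASK=%d)\n",
               irq, set_imask(sr, priority), priority);
        return 0;
    }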
diff --git a/arch/sh/kernel/irq_onchip.c b/arch/sh/kernel/irq_onchip.c
index cd28d2a59..10c48fd38 100644
--- a/arch/sh/kernel/irq_onchip.c
+++ b/arch/sh/kernel/irq_onchip.c
@@ -1,4 +1,4 @@
-/* $Id: irq_onchip.c,v 1.5 1999/10/28 02:18:33 gniibe Exp $
+/* $Id: irq_onchip.c,v 1.7 2000-01-09 15:55:55+09 gniibe Exp $
*
* linux/arch/sh/kernel/irq_onchip.c
*
@@ -143,7 +143,18 @@ static void end_onChip_irq(unsigned int irq)
*/
#define INTC_IRR0 0xa4000004UL
-#define INTC_IPRC 0xa4000016UL
+#define INTC_IRR1 0xa4000006UL
+#define INTC_IRR2 0xa4000008UL
+
+#define INTC_ICR0 0xfffffee0
+#define INTC_ICR1 0xa4000010
+#define INTC_ICR2 0xa4000012
+#define INTC_INTER 0xa4000014
+#define INTC_IPRA 0xfffffee2
+#define INTC_IPRB 0xfffffee4
+#define INTC_IPRC 0xa4000016
+#define INTC_IPRD 0xa4000018
+#define INTC_IPRE 0xa400001a
#define IRQ0_IRQ 32
#define IRQ1_IRQ 33
@@ -248,6 +259,26 @@ void __init init_IRQ(void)
}
#ifdef CONFIG_CPU_SUBTYPE_SH7709
+
+ /*
+ * Initialize the Interrupt Controller (INTC)
+ * registers to their power on values
+ */
+
+ ctrl_outb(0, INTC_IRR0);
+ ctrl_outb(0, INTC_IRR1);
+ ctrl_outb(0, INTC_IRR2);
+
+ ctrl_outw(0, INTC_ICR0);
+ ctrl_outw(0, INTC_ICR1);
+ ctrl_outw(0, INTC_ICR2);
+ ctrl_outw(0, INTC_INTER);
+ ctrl_outw(0, INTC_IPRA);
+ ctrl_outw(0, INTC_IPRB);
+ ctrl_outw(0, INTC_IPRC);
+ ctrl_outw(0, INTC_IPRD);
+ ctrl_outw(0, INTC_IPRE);
+
for (i = IRQ0_IRQ; i < NR_IRQS; i++) {
irq_desc[i].handler = &onChip2_irq_type;
}
@@ -263,8 +294,5 @@ void __init init_IRQ(void)
set_ipr_data(IRQ3_IRQ, IRQ3_IRP_OFFSET, IRQ3_PRIORITY);
set_ipr_data(IRQ4_IRQ, IRQ4_IRP_OFFSET, IRQ4_PRIORITY);
set_ipr_data(IRQ5_IRQ, IRQ5_IRP_OFFSET, IRQ5_PRIORITY);
-
- ctrl_inb(INTC_IRR0);
- ctrl_outb(0, INTC_IRR0);
#endif /* CONFIG_CPU_SUBTYPE_SH7709 */
}
diff --git a/arch/sh/kernel/pci-sh.c b/arch/sh/kernel/pci-sh.c
new file mode 100644
index 000000000..3613596f7
--- /dev/null
+++ b/arch/sh/kernel/pci-sh.c
@@ -0,0 +1,12 @@
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/errno.h>
+
+unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
+ unsigned long start, unsigned long size)
+{
+ return start;
+}
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 5d2a5696c..2ca91cb40 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -1,10 +1,10 @@
-/* $Id: process.c,v 1.8 1999/10/31 13:19:16 gniibe Exp $
+/* $Id: process.c,v 1.28 2000/03/05 02:16:15 gniibe Exp $
*
* linux/arch/sh/kernel/process.c
*
* Copyright (C) 1995 Linus Torvalds
*
- * SuperH version: Copyright (C) 1999 Niibe Yutaka & Kaz Kojima
+ * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
*/
/*
@@ -42,10 +42,6 @@
#include <linux/irq.h>
-#if defined(__SH4__)
-struct task_struct *last_task_used_math = NULL;
-#endif
-
static int hlt_counter=0;
#define HARD_IDLE_TIMEOUT (HZ / 3)
@@ -140,25 +136,25 @@ void free_task_struct(struct task_struct *p)
*/
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{ /* Don't use this in BL=1(cli). Or else, CPU resets! */
- register unsigned long __sc0 __asm__ ("r0") = __NR_clone;
- register unsigned long __sc4 __asm__ ("r4") = (long) flags | CLONE_VM;
- register unsigned long __sc5 __asm__ ("r5") = 0;
- register unsigned long __sc8 __asm__ ("r8") = (long) arg;
- register unsigned long __sc9 __asm__ ("r9") = (long) fn;
- __asm__ __volatile__(
- "trapa #0\n\t" /* Linux/SH system call */
- "tst #0xff,r0\n\t" /* child or parent? */
+ register unsigned long __sc0 __asm__ ("$r0") = __NR_clone;
+ register unsigned long __sc4 __asm__ ("$r4") = (long) flags | CLONE_VM;
+ register unsigned long __sc5 __asm__ ("$r5") = 0;
+ register unsigned long __sc8 __asm__ ("$r8") = (long) arg;
+ register unsigned long __sc9 __asm__ ("$r9") = (long) fn;
+
+ __asm__("trapa #0\n\t" /* Linux/SH system call */
+ "tst #0xff, $r0\n\t" /* child or parent? */
"bf 1f\n\t" /* parent - jump */
- "jsr @r9\n\t" /* call fn */
- " mov r8,r4\n\t" /* push argument */
- "mov r0,r4\n\t" /* return value to arg of exit */
- "mov %2,r0\n\t" /* exit */
+ "jsr @$r9\n\t" /* call fn */
+ " mov $r8, $r4\n\t" /* push argument */
+ "mov $r0, $r4\n\t" /* return value to arg of exit */
+ "mov %2, $r0\n\t" /* exit */
"trapa #0\n"
"1:"
- :"=z" (__sc0)
- :"0" (__sc0), "i" (__NR_exit),
- "r" (__sc4), "r" (__sc5), "r" (__sc8), "r" (__sc9)
- :"memory");
+ : "=z" (__sc0)
+ : "0" (__sc0), "i" (__NR_exit),
+ "r" (__sc4), "r" (__sc5), "r" (__sc8), "r" (__sc9)
+ : "memory");
return __sc0;
}
@@ -167,18 +163,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
*/
void exit_thread(void)
{
-#if defined(__sh3__)
- /* nothing to do ... */
-#elif defined(__SH4__)
-#if 0 /* for the time being... */
- /* Forget lazy fpu state */
- if (last_task_used_math == current) {
- set_status_register (SR_FD, 0);
- write_system_register (fpscr, FPSCR_PR);
- last_task_used_math = NULL;
- }
-#endif
-#endif
+ /* Nothing to do. */
}
void flush_thread(void)
@@ -187,14 +172,11 @@ void flush_thread(void)
/* do nothing */
/* Possibly, set clear debug registers */
#elif defined(__SH4__)
-#if 0 /* for the time being... */
- /* Forget lazy fpu state */
- if (last_task_used_math == current) {
- set_status_register (SR_FD, 0);
- write_system_register (fpscr, FPSCR_PR);
- last_task_used_math = NULL;
- }
-#endif
+ struct task_struct *tsk = current;
+
+ /* Forget lazy FPU state */
+ clear_fpu(tsk);
+ tsk->used_math = 0;
#endif
}
@@ -204,18 +186,22 @@ void release_thread(struct task_struct *dead_task)
}
/* Fill in the fpu structure for a core dump.. */
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
#if defined(__SH4__)
-#if 0 /* for the time being... */
- /* We store the FPU info in the task->thread area. */
- if (! (regs->sr & SR_FD)) {
- memcpy (r, &current->thread.fpu, sizeof (*r));
- return 1;
- }
-#endif
-#endif
+ int fpvalid;
+ struct task_struct *tsk = current;
+
+ fpvalid = tsk->used_math;
+ if (fpvalid) {
+ unlazy_fpu(tsk);
+ memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
+ }
+
+ return fpvalid;
+#else
return 0; /* Task didn't use the fpu at all. */
+#endif
}
asmlinkage void ret_from_fork(void);
@@ -224,21 +210,17 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
struct task_struct *p, struct pt_regs *regs)
{
struct pt_regs *childregs;
+ struct task_struct *tsk = current;
childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long) p)) - 1;
- *childregs = *regs;
+ struct_cpy(childregs, regs);
#if defined(__SH4__)
-#if 0 /* for the time being... */
- if (last_task_used_math == current) {
- set_status_register (SR_FD, 0);
- sh4_save_fp (p);
+ if (tsk != &init_task) {
+ unlazy_fpu(tsk);
+ struct_cpy(&p->thread.fpu, &current->thread.fpu);
+ p->used_math = tsk->used_math;
}
- /* New tasks loose permission to use the fpu. This accelerates context
- switching for most programs since they don't use the fpu. */
- p->thread.sr = (read_control_register (sr) &~ SR_MD) | SR_FD;
- childregs->sr |= SR_FD;
-#endif
#endif
if (user_mode(regs)) {
childregs->sp = usp;
@@ -246,6 +228,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
childregs->sp = (unsigned long)p+2*PAGE_SIZE;
}
childregs->regs[0] = 0; /* Set return value for child */
+ childregs->sr |= SR_FD; /* Invalidate FPU flag */
p->thread.sp = (unsigned long) childregs;
p->thread.pc = (unsigned long) ret_from_fork;
@@ -258,7 +241,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
*/
void dump_thread(struct pt_regs * regs, struct user * dump)
{
-/* changed the size calculations - should hopefully work better. lbt */
dump->magic = CMAGIC;
dump->start_code = current->mm->start_code;
dump->start_data = current->mm->start_data;
@@ -271,11 +253,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
dump->regs = *regs;
-#if 0 /* defined(__SH4__) */
- /* FPU */
- memcpy (&dump->regs[EF_SIZE/4], &current->thread.fpu,
- sizeof (current->thread.fpu));
-#endif
+ dump->u_fpvalid = dump_fpu(regs, &dump->fpu);
}
/*
@@ -284,11 +262,15 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
*/
void __switch_to(struct task_struct *prev, struct task_struct *next)
{
+#if defined(__SH4__)
+ if (prev != &init_task)
+ unlazy_fpu(prev);
+#endif
/*
* Restore the kernel stack onto kernel mode register
* k4 (r4_bank1)
*/
- asm volatile("ldc %0,r4_bank"
+ asm volatile("ldc %0, $r4_bank"
: /* no output */
:"r" ((unsigned long)next+8192));
}
@@ -341,6 +323,7 @@ asmlinkage int sys_execve(char *ufilename, char **uargv,
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
+
error = do_execve(filename, uargv, uenvp, &regs);
if (error == 0)
current->flags &= ~PF_DTRACE;
@@ -349,3 +332,41 @@ out:
unlock_kernel();
return error;
}
+
+/*
+ * These bracket the sleeping functions..
+ */
+extern void scheduling_functions_start_here(void);
+extern void scheduling_functions_end_here(void);
+#define first_sched ((unsigned long) scheduling_functions_start_here)
+#define last_sched ((unsigned long) scheduling_functions_end_here)
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long schedule_frame;
+ unsigned long pc;
+
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ /*
+ * The same comment as on the Alpha applies here, too ...
+ */
+ pc = thread_saved_pc(&p->thread);
+ if (pc >= (unsigned long) interruptible_sleep_on && pc < (unsigned long) add_timer) {
+ schedule_frame = ((unsigned long *)(long)p->thread.sp)[1];
+ return (unsigned long)((unsigned long *)schedule_frame)[1];
+ }
+ return pc;
+}
+
+asmlinkage void print_syscall(int x)
+{
+ unsigned long flags, sr;
+ asm("stc $sr, %0": "=r" (sr));
+ save_and_cli(flags);
+ printk("%c: %c %c, %c: SYSCALL\n", (x&63)+32,
+ (current->flags&PF_USEDFPU)?'C':' ',
+ (init_task.flags&PF_USEDFPU)?'K':' ', (sr&SR_FD)?' ':'F');
+ restore_flags(flags);
+}
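
kernel_thread() above wraps the clone syscall in inline assembly: the parent sees the child pid, while the child falls through to fn and then traps into exit. The same shape can be tried from userspace with glibc's clone() wrapper, a rough analogue of the kernel-side call (stack size and flags are illustrative):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>

    static int child_fn(void *arg)
    {
        printf("child running, arg=%ld\n", (long)arg);
        return 0;   /* equivalent of the child's trapa into exit */
    }

    int main(void)
    {
        size_t stksz = 64 * 1024;
        char *stack = malloc(stksz);
        int pid;

        if (!stack)
            return 1;
        /* The stack grows downward, so hand clone() its top. */
        pid = clone(child_fn, stack + stksz, CLONE_VM | SIGCHLD, (void *)42L);
        if (pid < 0)
            return 1;
        waitpid(pid, NULL, 0);
        free(stack);
        return 0;
    }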
diff --git a/arch/sh/kernel/semaphore.c b/arch/sh/kernel/semaphore.c
index b9f565dd8..c958745b5 100644
--- a/arch/sh/kernel/semaphore.c
+++ b/arch/sh/kernel/semaphore.c
@@ -8,6 +8,8 @@
*/
#include <linux/sched.h>
+#include <linux/wait.h>
+#include <asm/semaphore.h>
#include <asm/semaphore-helper.h>
/*
@@ -131,3 +133,162 @@ int __down_trylock(struct semaphore * sem)
{
return waking_non_zero_trylock(sem);
}
+
+/* Called when someone has done an up that transitioned from
+ * negative to non-negative, meaning that the lock has been
+ * granted to whoever owned the bias.
+ */
+struct rw_semaphore *rwsem_wake_readers(struct rw_semaphore *sem)
+{
+ if (xchg(&sem->read_bias_granted, 1))
+ BUG();
+ wake_up(&sem->wait);
+ return sem;
+}
+
+struct rw_semaphore *rwsem_wake_writer(struct rw_semaphore *sem)
+{
+ if (xchg(&sem->write_bias_granted, 1))
+ BUG();
+ wake_up(&sem->write_bias_wait);
+ return sem;
+}
+
+struct rw_semaphore * __rwsem_wake(struct rw_semaphore *sem)
+{
+ if (atomic_read(&sem->count) == 0)
+ return rwsem_wake_writer(sem);
+ else
+ return rwsem_wake_readers(sem);
+}
+
+struct rw_semaphore *down_read_failed_biased(struct rw_semaphore *sem)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+
+ add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */
+
+ for (;;) {
+ if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0))
+ break;
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ if (!sem->read_bias_granted)
+ schedule();
+ }
+
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+
+ return sem;
+}
+
+struct rw_semaphore *down_write_failed_biased(struct rw_semaphore *sem)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+
+ add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */
+
+ for (;;) {
+ if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0))
+ break;
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
+ if (!sem->write_bias_granted)
+ schedule();
+ }
+
+ remove_wait_queue(&sem->write_bias_wait, &wait);
+ tsk->state = TASK_RUNNING;
+
+ /* if the lock is currently unbiased, awaken the sleepers
+ * FIXME: this wakes up the readers early in a bit of a
+ * stampede -> bad!
+ */
+ if (atomic_read(&sem->count) >= 0)
+ wake_up(&sem->wait);
+
+ return sem;
+}
+
+/* Wait for the lock to become unbiased. Readers
+ * are non-exclusive. =)
+ */
+struct rw_semaphore *down_read_failed(struct rw_semaphore *sem)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+
+ __up_read(sem); /* this takes care of granting the lock */
+
+ add_wait_queue(&sem->wait, &wait);
+
+ while (atomic_read(&sem->count) < 0) {
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&sem->count) >= 0)
+ break;
+ schedule();
+ }
+
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+
+ return sem;
+}
+
+/* Wait for the lock to become unbiased. Since we're
+ * a writer, we'll make ourselves exclusive.
+ */
+struct rw_semaphore *down_write_failed(struct rw_semaphore *sem)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+
+ __up_write(sem); /* this takes care of granting the lock */
+
+ add_wait_queue_exclusive(&sem->wait, &wait);
+
+ while (atomic_read(&sem->count) < 0) {
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
+ if (atomic_read(&sem->count) >= 0)
+ break; /* we must attempt to acquire or bias the lock */
+ schedule();
+ }
+
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+
+ return sem;
+}
+
+struct rw_semaphore *__down_read(struct rw_semaphore *sem, int carry)
+{
+ if (carry) {
+ int saved, new;
+
+ do {
+ down_read_failed(sem);
+ saved = atomic_read(&sem->count);
+ if ((new = atomic_dec_return(&sem->count)) >= 0)
+ return sem;
+ } while (!(new < 0 && saved >= 0));
+ }
+
+ return down_read_failed_biased(sem);
+}
+
+struct rw_semaphore *__down_write(struct rw_semaphore *sem, int carry)
+{
+ if (carry) {
+ int saved, new;
+
+ do {
+ down_write_failed(sem);
+ saved = atomic_read(&sem->count);
+ if ((new = atomic_sub_return(RW_LOCK_BIAS, &sem->count) ) == 0)
+ return sem;
+ } while (!(new < 0 && saved >= 0));
+ }
+
+ return down_write_failed_biased(sem);
+}
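
The rw-semaphore slow paths added above implement the classic biased-count scheme: the counter starts at RW_LOCK_BIAS, each reader takes 1 and a writer takes the entire bias, so zero means one exclusive writer and a negative value means contention. A small arithmetic demonstration (the bias value shown is the conventional one and is stated as an assumption for SH):

    #include <stdio.h>

    #define RW_LOCK_BIAS 0x01000000

    int main(void)
    {
        int count = RW_LOCK_BIAS;   /* unlocked */

        count -= 1;                 /* down_read(): one reader holds it */
        printf("one reader: count=%#x (still positive, readers ok)\n", count);
        count += 1;                 /* up_read() */

        count -= RW_LOCK_BIAS;      /* down_write(): takes the whole bias */
        printf("one writer: count=%#x (zero means exclusive)\n", count);

        count -= 1;                 /* a late reader pushes it negative */
        printf("contended:  count=%#x (negative: sleep in down_*_failed)\n",
               count);
        return 0;
    }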
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index f97e66585..154283571 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -1,4 +1,4 @@
-/* $Id: setup.c,v 1.7 1999/10/23 01:34:50 gniibe Exp gniibe $
+/* $Id: setup.c,v 1.20 2000/03/05 02:44:41 gniibe Exp $
*
* linux/arch/sh/kernel/setup.c
*
@@ -51,6 +51,7 @@ extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
extern int rd_image_start; /* starting block # of image */
#endif
+extern void fpu_init(void);
extern int root_mountflags;
extern int _text, _etext, _edata, _end;
@@ -196,82 +197,82 @@ void __init setup_arch(char **cmdline_p)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
/*
- * partially used pages are not usable - thus
- * we are rounding upwards:
- */
- start_pfn = PFN_UP(__pa(&_end)-__MEMORY_START);
-
- /*
* Find the highest page frame number we have available
*/
- max_pfn = PFN_DOWN(__pa(memory_end)-__MEMORY_START);
+ max_pfn = PFN_DOWN(__pa(memory_end));
/*
* Determine low and high memory ranges:
*/
max_low_pfn = max_pfn;
+ /*
+ * Partially used pages are not usable - thus
+ * we are rounding upwards:
+ */
+ start_pfn = PFN_UP(__pa(&_end));
/*
- * Initialize the boot-time allocator (with low memory only):
- */
- bootmap_size = init_bootmem(start_pfn, max_low_pfn, __MEMORY_START);
-
- /*
- * FIXME: what about high memory?
+ * Find a proper area for the bootmem bitmap. After this
+ * bootstrap step all allocations (until the page allocator
+ * is intact) must be done via bootmem_alloc().
*/
- ram_resources[1].end = PFN_PHYS(max_low_pfn) + __MEMORY_START;
+ bootmap_size = init_bootmem_node(0, start_pfn,
+ __MEMORY_START>>PAGE_SHIFT,
+ max_low_pfn);
/*
* Register fully available low RAM pages with the bootmem allocator.
*/
{
- unsigned long curr_pfn, last_pfn, size;
+ unsigned long curr_pfn, last_pfn, pages;
/*
* We are rounding up the start address of usable memory:
*/
- curr_pfn = PFN_UP(0);
+ curr_pfn = PFN_UP(__MEMORY_START);
/*
* ... and at the end of the usable range downwards:
*/
- last_pfn = PFN_DOWN(memory_end-__MEMORY_START);
+ last_pfn = PFN_DOWN(__pa(memory_end));
if (last_pfn > max_low_pfn)
last_pfn = max_low_pfn;
- size = last_pfn - curr_pfn;
- free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
+ pages = last_pfn - curr_pfn;
+ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
}
+
/*
* Reserve the kernel text and
- * Reserve the bootmem bitmap itself as well. We do this in two
- * steps (first step was init_bootmem()) because this catches
- * the (very unlikely) case of us accidentally initializing the
- * bootmem allocator with an invalid RAM area.
+ * Reserve the bootmem bitmap. We do this in two steps (first step
+ * was init_bootmem()), because this catches the (definitely buggy)
+ * case of us accidentally initializing the bootmem allocator with
+ * an invalid RAM area.
*/
- reserve_bootmem(PAGE_SIZE, PFN_PHYS(start_pfn) + bootmap_size);
+ reserve_bootmem(__MEMORY_START+PAGE_SIZE, (PFN_PHYS(start_pfn) +
+ bootmap_size + PAGE_SIZE-1) - __MEMORY_START);
/*
* reserve physical page 0 - it's a special BIOS page on many boxes,
* enabling clean reboots, SMP operation, laptop functions.
*/
- reserve_bootmem(0, PAGE_SIZE);
+ reserve_bootmem(__MEMORY_START, PAGE_SIZE);
#ifdef CONFIG_BLK_DEV_INITRD
- if (LOADER_TYPE) {
+ if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
- reserve_bootmem(INITRD_START, INITRD_SIZE);
- initrd_start =
- INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
- initrd_end = initrd_start+INITRD_SIZE;
+ reserve_bootmem(INITRD_START+__MEMORY_START, INITRD_SIZE);
+ initrd_start =
+ INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
+ initrd_end = initrd_start + INITRD_SIZE;
} else {
- printk("initrd extends beyond end of memory "
- "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
- INITRD_START + INITRD_SIZE,
- max_low_pfn << PAGE_SHIFT);
- initrd_start = 0;
- }
- }
+ printk("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ INITRD_START + INITRD_SIZE,
+ max_low_pfn << PAGE_SHIFT);
+ initrd_start = 0;
+ }
+ }
#endif
#if 0
@@ -298,6 +299,14 @@ void __init setup_arch(char **cmdline_p)
conswitchp = &dummy_con;
#endif
#endif
+
+#if defined(__SH4__)
+ init_task.used_math = 1;
+ init_task.flags |= PF_USEDFPU;
+ grab_fpu();
+ fpu_init();
+#endif
+ paging_init();
}
/*
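
The reworked setup_arch() consistently feeds physical page-frame numbers to the bootmem allocator instead of numbers pre-offset by __MEMORY_START. A worked example of the PFN arithmetic with assumed board values (not those of any particular SH machine):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long memory_start = 0x0c000000; /* __MEMORY_START (assumed) */
        unsigned long kernel_end   = 0x0c300000; /* __pa(&_end)    (assumed) */
        unsigned long memory_end   = 0x0d000000; /* 16 MB of RAM   (assumed) */

        unsigned long start_pfn   = PFN_UP(kernel_end);
        unsigned long max_low_pfn = PFN_DOWN(memory_end);
        unsigned long curr_pfn    = PFN_UP(memory_start);

        printf("bootmem manages PFNs %#lx..%#lx; first page free for\n"
               "allocation after the kernel image is PFN %#lx\n",
               curr_pfn, max_low_pfn, start_pfn);
        return 0;
    }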
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index 7c9fbbf00..0c24acf73 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -1,4 +1,4 @@
-/* $Id: signal.c,v 1.10 1999/09/27 23:25:44 gniibe Exp $
+/* $Id: signal.c,v 1.16 2000/01/29 11:31:31 gniibe Exp gniibe $
*
* linux/arch/sh/kernel/signal.c
*
@@ -54,7 +54,7 @@ sys_sigsuspend(old_sigset_t mask,
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
- if (do_signal(&regs,&saveset))
+ if (do_signal(&regs, &saveset))
return -EINTR;
}
}
@@ -73,7 +73,6 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
if (copy_from_user(&newset, unewset, sizeof(newset)))
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
-
spin_lock_irq(&current->sigmask_lock);
saveset = current->blocked;
current->blocked = newset;
@@ -188,6 +187,7 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
+
if (__get_user(set.sig[0], &frame->sc.oldmask)
|| (_NSIG_WORDS > 1
&& __copy_from_user(&set.sig[1], &frame->extramask,
@@ -195,6 +195,7 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
+
spin_lock_irq(&current->sigmask_lock);
current->blocked = set;
recalc_sigpending(current);
@@ -220,6 +221,7 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
+
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
@@ -228,7 +230,7 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
current->blocked = set;
recalc_sigpending(current);
spin_unlock_irq(&current->sigmask_lock);
-
+
if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &r0))
goto badframe;
@@ -317,7 +319,7 @@ static void setup_frame(int sig, struct k_sigaction *ka,
if (ka->sa.sa_flags & SA_RESTORER) {
regs->pr = (unsigned long) ka->sa.sa_restorer;
} else {
- /* This is ; mov #__NR_sigreturn,r0 ; trapa #0 */
+ /* This is : mov #__NR_sigreturn,r0 ; trapa #0 */
#ifdef __LITTLE_ENDIAN__
unsigned long code = 0xc300e000 | (__NR_sigreturn);
#else
@@ -390,11 +392,11 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
if (ka->sa.sa_flags & SA_RESTORER) {
regs->pr = (unsigned long) ka->sa.sa_restorer;
} else {
- /* This is ; mov #__NR_sigreturn,r0 ; trapa #0 */
+ /* This is : mov #__NR_rt_sigreturn,r0 ; trapa #0 */
#ifdef __LITTLE_ENDIAN__
- unsigned long code = 0xc300e000 | (__NR_sigreturn);
+ unsigned long code = 0xc300e000 | (__NR_rt_sigreturn);
#else
- unsigned long code = 0xe000c300 | (__NR_sigreturn << 16);
+ unsigned long code = 0xe000c300 | (__NR_rt_sigreturn << 16);
#endif
regs->pr = (unsigned long) frame->retcode;
@@ -485,6 +487,15 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
siginfo_t info;
struct k_sigaction *ka;
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return 1;
+
if (!oldset)
oldset = &current->blocked;
@@ -580,6 +591,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
/* NOTREACHED */
}
}
+
/* Whee! Actually deliver the signal. */
handle_signal(signr, ka, &info, oldset, regs);
return 1;
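
The rt-frame fix above matters because the retcode word encodes two 16-bit SH instructions: on little-endian SH, 0xc300e000 | nr packs "mov #nr,r0" (0xe0nn) in the low half and "trapa #0" (0xc300) in the high half. A worked example; the syscall numbers used are the i386-derived values this port appears to follow and are stated as an assumption:

    #include <stdio.h>

    int main(void)
    {
        unsigned int nr_sigreturn = 119, nr_rt_sigreturn = 173; /* assumed */
        unsigned int old_code = 0xc300e000 | nr_sigreturn;    /* wrong on rt frames */
        unsigned int new_code = 0xc300e000 | nr_rt_sigreturn; /* the fix */

        printf("sigreturn    trampoline: %#010x\n", old_code);
        printf("rt_sigreturn trampoline: %#010x\n", new_code);
        printf("low  half (mov #nr,r0):  %#06x\n", new_code & 0xffff);
        printf("high half (trapa #0):    %#06x\n", new_code >> 16);
        return 0;
    }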
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 1b708e5a9..0b3d5fc2e 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -28,7 +28,9 @@
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way Unix traditionally does this, though.
*/
-asmlinkage int sys_pipe(unsigned long * fildes)
+asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
{
int fd[2];
int error;
@@ -37,46 +39,62 @@ asmlinkage int sys_pipe(unsigned long * fildes)
error = do_pipe(fd);
unlock_kernel();
if (!error) {
- if (copy_to_user(fildes, fd, 2*sizeof(int)))
- error = -EFAULT;
+ regs.regs[1] = fd[1];
+ return fd[0];
}
return error;
}
-asmlinkage unsigned long
-sys_mmap(unsigned long addr, unsigned long len, unsigned long prot,
- unsigned long flags, int fd, unsigned long off)
+static inline long
+do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
+ unsigned long flags, int fd, unsigned long pgoff)
{
- int error = -EFAULT;
+ int error = -EBADF;
struct file *file = NULL;
- down(&current->mm->mmap_sem);
- lock_kernel();
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
if (!(flags & MAP_ANONYMOUS)) {
- error = -EBADF;
file = fget(fd);
if (!file)
goto out;
}
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- error = do_mmap(file, addr, len, prot, flags, off);
- if (file)
- fput(file);
-out:
+ down(&current->mm->mmap_sem);
+ lock_kernel();
+
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
unlock_kernel();
up(&current->mm->mmap_sem);
+ if (file)
+ fput(file);
+out:
return error;
}
+asmlinkage int old_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ int fd, unsigned long off)
+{
+ if (off & ~PAGE_MASK)
+ return -EINVAL;
+ return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
+}
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
+}
+
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls..
*
* This is really horribly ugly.
*/
-asmlinkage int sys_ipc (uint call, int first, int second,
- int third, void *ptr, long fifth)
+asmlinkage int sys_ipc(uint call, int first, int second,
+ int third, void *ptr, long fifth)
{
int version, ret;
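
Note that sys_pipe() above now returns both descriptors in registers — fd[0] as the ordinary return value (r0) and fd[1] planted in the trap frame's saved r1 — so no copy_to_user() is needed and the libc stub reassembles the pair. A compilable model of that convention, with a stand-in for pt_regs:

    #include <stdio.h>

    struct fake_pt_regs { unsigned long regs[16]; };

    static int sys_pipe_model(struct fake_pt_regs *regs)
    {
        int fd[2] = { 3, 4 };     /* pretend do_pipe() allocated these */

        regs->regs[1] = fd[1];    /* second descriptor rides home in r1 */
        return fd[0];             /* first descriptor is the return value */
    }

    int main(void)
    {
        struct fake_pt_regs regs = {{ 0 }};
        int r0 = sys_pipe_model(&regs);

        /* The libc stub's view after the trap returns: */
        int fildes[2] = { r0, (int)regs.regs[1] };

        printf("pipe() -> read fd %d, write fd %d\n", fildes[0], fildes[1]);
        return 0;
    }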
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index 49a765f83..fad3a8145 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -1,4 +1,4 @@
-/* $Id: time.c,v 1.7 1999/11/06 02:00:37 gniibe Exp $
+/* $Id: time.c,v 1.20 2000/02/28 12:42:51 gniibe Exp $
*
* linux/arch/sh/kernel/time.c
*
@@ -8,8 +8,6 @@
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
*/
-#include <linux/config.h>
-
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -43,10 +41,10 @@
#define TMU0_TCNT 0xfffffe98 /* Long access */
#define TMU0_TCR 0xfffffe9c /* Word access */
-#define INTERVAL 37500 /* (1000000*CLOCK_MHZ/HZ/2) ??? for CqREEK */
-#if 0 /* Takeshi's board */
-#define INTERVAL 83333
-#endif
+#define FRQCR 0xffffff80
+
+#define RTC_IRQ 22
+#define RTC_IPR_OFFSET 0
/* SH-3 RTC */
#define R64CNT 0xfffffec0
@@ -74,7 +72,10 @@
#define TMU0_TCNT 0xffd8000c /* Long access */
#define TMU0_TCR 0xffd80010 /* Word access */
-#define INTERVAL 83333
+#define FRQCR 0xffc00000
+
+#define RTC_IRQ 22
+#define RTC_IPR_OFFSET 0
/* SH-4 RTC */
#define R64CNT 0xffc80000
@@ -145,11 +146,10 @@ void do_settimeofday(struct timeval *tv)
static int set_rtc_time(unsigned long nowtime)
{
-#ifdef CONFIG_SH_CPU_RTC
int retval = 0;
int real_seconds, real_minutes, cmos_minutes;
- ctrl_outb(2, RCR2); /* reset pre-scaler & stop RTC */
+ ctrl_outb(0x02, RCR2); /* reset pre-scaler & stop RTC */
cmos_minutes = ctrl_inb(RMINCNT);
BCD_TO_BIN(cmos_minutes);
@@ -178,13 +178,9 @@ static int set_rtc_time(unsigned long nowtime)
retval = -1;
}
- ctrl_outb(2, RCR2); /* start RTC */
+ ctrl_outb(0x01, RCR2); /* start RTC */
return retval;
-#else
- /* XXX should support other clock devices? */
- return -1;
-#endif
}
/* last time the RTC clock got updated */
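For reference, the RTC count registers hold binary-coded decimal, which is why each read above is followed by BCD_TO_BIN(). A quick worked conversion:

    /* A raw RMINCNT of 0x45 means 45 minutes: */
    v = 0x45;
    v = (v & 15) + (v >> 4) * 10;	/* the classic BCD_TO_BIN, == 45 */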
@@ -197,7 +193,6 @@ static long last_rtc_update = 0;
static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
do_timer(regs);
-
#ifdef TAKESHI
{
unsigned long what_is_this=0xa4000124;
@@ -248,9 +243,7 @@ static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
* locally disabled. -arca
*/
write_lock(&xtime_lock);
-
do_timer_interrupt(irq, NULL, regs);
-
write_unlock(&xtime_lock);
}
@@ -287,11 +280,10 @@ static inline unsigned long mktime(unsigned int year, unsigned int mon,
static unsigned long get_rtc_time(void)
{
-#ifdef CONFIG_SH_CPU_RTC
unsigned int sec, min, hr, wk, day, mon, yr, yr100;
again:
- ctrl_outb(1, RCR1); /* clear CF bit */
+ ctrl_outb(0x01, RCR1); /* clear CF bit */
do {
sec = ctrl_inb(RSECCNT);
min = ctrl_inb(RMINCNT);
@@ -321,7 +313,7 @@ static unsigned long get_rtc_time(void)
hr > 23 || min > 59 || sec > 59) {
printk(KERN_ERR
"SH RTC: invalid value, resetting to 1 Jan 2000\n");
- ctrl_outb(2, RCR2); /* reset, stop */
+ ctrl_outb(0x02, RCR2); /* reset, stop */
ctrl_outb(0, RSECCNT);
ctrl_outb(0, RMINCNT);
ctrl_outb(0, RHRCNT);
@@ -333,36 +325,114 @@ static unsigned long get_rtc_time(void)
#else
ctrl_outb(0, RYRCNT);
#endif
- ctrl_outb(1, RCR2); /* start */
+ ctrl_outb(0x01, RCR2); /* start */
goto again;
}
return mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
+}
+
+static __init unsigned int get_cpu_mhz(void)
+{
+ unsigned int count;
+ unsigned long __dummy;
+
+ sti();
+ do {} while (ctrl_inb(R64CNT) != 0);
+ ctrl_outb(0x11, RCR1);
+ asm volatile(
+ "1:\t"
+ "tst %1,%1\n\t"
+ "bt/s 1b\n\t"
+ " add #1,%0"
+ : "=&r"(count), "=&z" (__dummy)
+ : "0" (0), "1" (0));
+ cli();
+ /*
+ * SH-3:
+ * CPU clock = 4 stages * loop
+ * tst rm,rm if id ex
+ * bt/s 1b if id ex
+ * add #1,rd if id ex
+ * (if) pipeline stall
+ * tst rm,rm if id ex
+ * ....
+ *
+ *
+ * SH-4:
+ * CPU clock = 6 stages * loop
+ * I don't know why.
+ * ....
+ */
+#if defined(__SH4__)
+ return count*6;
#else
- /* XXX should support other clock devices? */
- return 0;
+ return count*4;
#endif
}
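The calibration thus counts loop iterations between two RTC carry events, a window of roughly one second, then scales by the cycles each loop costs. A worked example on SH-3:

    /* If the loop ran 15,000,000 times in the ~1 s window: */
    cpu_clock = 15000000 * 4;	/* 4 cycles per loop on SH-3 */
    				/* == 60000000, printed as 60.00MHz */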
+static void rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ ctrl_outb(0x01, RCR1); /* clear CF; also drops the CIE bit set in get_cpu_mhz() */
+ regs->regs[0] = 1;     /* lands in r0 of the interrupted context,
+                           ending the calibration spin in get_cpu_mhz() */
+}
+
static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, 0, "timer", NULL, NULL};
+static struct irqaction irq1 = { rtc_interrupt, SA_INTERRUPT, 0, "rtc", NULL, NULL};
void __init time_init(void)
{
+ unsigned int cpu_clock, master_clock, module_clock;
+ unsigned short ifc, pfc;
+ unsigned long interval;
+#if defined(__sh3__)
+ static int ifc_table[] = { 1, 2, 4, 1, 3, 1, 1, 1 };
+ static int pfc_table[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
+#elif defined(__SH4__)
+ static int ifc_table[] = { 1, 2, 3, 4, 6, 8, 1, 1 };
+ static int pfc_table[] = { 2, 3, 4, 6, 8, 2, 2, 2 };
+#endif
+
xtime.tv_sec = get_rtc_time();
xtime.tv_usec = 0;
- set_ipr_data(TIMER_IRQ, TIMER_IRP_OFFSET, TIMER_PRIORITY);
+ set_ipr_data(TIMER_IRQ, TIMER_IPR_OFFSET, TIMER_PRIORITY);
setup_irq(TIMER_IRQ, &irq0);
+ set_ipr_data(RTC_IRQ, RTC_IPR_OFFSET, TIMER_PRIORITY);
+ setup_irq(RTC_IRQ, &irq1);
- /* Start TMU0 */
- ctrl_outb(TMU_TOCR_INIT,TMU_TOCR);
- ctrl_outw(TMU0_TCR_INIT,TMU0_TCR);
- ctrl_outl(INTERVAL,TMU0_TCOR);
- ctrl_outl(INTERVAL,TMU0_TCNT);
- ctrl_outb(TMU_TSTR_INIT,TMU_TSTR);
+ /* Check how fast it is.. */
+ cpu_clock = get_cpu_mhz();
+ disable_irq(RTC_IRQ);
-#if 0
- /* Start RTC */
- asm volatile("");
+ printk("CPU clock: %d.%02dMHz\n",
+ (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
+#if defined(__sh3__)
+ {
+ unsigned short tmp;
+ tmp = (ctrl_inw(FRQCR) & 0x000c) >> 2;
+ tmp |= (ctrl_inw(FRQCR) & 0x4000) >> 12;
+ ifc = ifc_table[tmp & 0x0007];
+ tmp = ctrl_inw(FRQCR) & 0x0003;
+ tmp |= (ctrl_inw(FRQCR) & 0x2000) >> 11;
+ pfc = pfc_table[tmp & 0x0007];
+ }
+#elif defined(__SH4__)
+ ifc = ifc_table[(ctrl_inw(FRQCR)>> 6) & 0x0007];
+ pfc = pfc_table[ctrl_inw(FRQCR) & 0x0007];
#endif
+ master_clock = cpu_clock * ifc;
+ module_clock = master_clock/pfc;
+ printk("Module clock: %d.%02dMHz\n",
+ (module_clock/1000000), (module_clock % 1000000)/10000);
+ interval = (module_clock/400);
+
+ printk("Interval = %ld\n", interval);
+
+ /* Start TMU0 */
+ ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
+ ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
+ ctrl_outl(interval, TMU0_TCOR);
+ ctrl_outl(interval, TMU0_TCNT);
+ ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
}
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 8a9b3e1f9..98431cb36 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -1,4 +1,4 @@
-/* $Id: traps.c,v 1.3 1999/09/21 14:37:19 gniibe Exp $
+/* $Id: traps.c,v 1.5 2000/02/27 08:27:55 gniibe Exp $
*
* linux/arch/sh/traps.c
*
@@ -26,6 +26,7 @@
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
+#include <asm/processor.h>
static inline void console_verbose(void)
{
@@ -40,7 +41,7 @@ asmlinkage void do_##name(unsigned long r4, unsigned long r5, \
{ \
unsigned long error_code; \
\
- asm volatile("stc r2_bank,%0": "=r" (error_code)); \
+ asm volatile("stc $r2_bank, %0": "=r" (error_code)); \
sti(); \
regs.syscall_nr = -1; \
tsk->thread.error_code = error_code; \
@@ -99,7 +100,7 @@ asmlinkage void do_exception_error (unsigned long r4, unsigned long r5,
struct pt_regs regs)
{
long ex;
- asm volatile("stc r2_bank,%0" : "=r" (ex));
+ asm volatile("stc $r2_bank, %0" : "=r" (ex));
die_if_kernel("exception", &regs, ex);
}
@@ -117,8 +118,22 @@ void __init trap_init(void)
   (or P2, virtual "fixed" address space).
   It definitely should not be in physical address space. */
- asm volatile("ldc %0,vbr"
+ asm volatile("ldc %0, $vbr"
: /* no output */
: "r" (&vbr_base)
: "memory");
}
+
+void dump_stack(void)
+{
+ unsigned long *start;
+ unsigned long *end;
+ unsigned long *p;
+
+ asm("mov $r15, %0" : "=r" (start)); /* current stack pointer */
+ asm("stc $r4_bank, %0" : "=r" (end)); /* kernel stack end, kept in banked r4 */
+
+ printk("%08lx:%08lx\n", (unsigned long)start, (unsigned long)end);
+ for (p = start; p < end; p++)
+  printk("%08lx\n", *p);
+}
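dump_stack() prints the live range from the stack pointer up to the kernel-stack end, one word per line. A hedged usage sketch; the fault-path caller shown is illustrative only and not part of this patch:

    /* e.g. from a kernel-mode fault path: */
    if (!user_mode(&regs)) {
    	dump_stack();		/* show the kernel stack */
    	do_exit(SIGSEGV);
    }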