Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/Makefile       27
-rw-r--r--  arch/sh/kernel/entry.S       683
-rw-r--r--  arch/sh/kernel/head.S         69
-rw-r--r--  arch/sh/kernel/init_task.c    23
-rw-r--r--  arch/sh/kernel/irq.c         485
-rw-r--r--  arch/sh/kernel/irq_onchip.c  168
-rw-r--r--  arch/sh/kernel/process.c     303
-rw-r--r--  arch/sh/kernel/ptrace.c      476
-rw-r--r--  arch/sh/kernel/semaphore.c   133
-rw-r--r--  arch/sh/kernel/setup.c       188
-rw-r--r--  arch/sh/kernel/sh_ksyms.c     48
-rw-r--r--  arch/sh/kernel/signal.c      597
-rw-r--r--  arch/sh/kernel/sys_sh.c      249
-rw-r--r--  arch/sh/kernel/test-img.c     69
-rw-r--r--  arch/sh/kernel/time.c        224
-rw-r--r--  arch/sh/kernel/traps.c       127
16 files changed, 3869 insertions, 0 deletions
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
new file mode 100644
index 000000000..0a2abf858
--- /dev/null
+++ b/arch/sh/kernel/Makefile
@@ -0,0 +1,27 @@
+#
+# Makefile for the Linux/SuperH kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+
+.S.o:
+ $(CC) -D__ASSEMBLY__ $(AFLAGS) -traditional -c $< -o $*.o
+
+O_TARGET := kernel.o
+O_OBJS := process.o signal.o entry.o traps.o irq.o irq_onchip.o \
+ ptrace.o setup.o time.o sys_sh.o test-img.o semaphore.o
+OX_OBJS := sh_ksyms.o
+MX_OBJS :=
+
+all: kernel.o head.o init_task.o
+
+entry.o: entry.S
+
+head.o: head.S
+ $(CC) -D__ASSEMBLY__ $(AFLAGS) -traditional -c $*.S -o $*.o
+
+clean:
+
+include $(TOPDIR)/Rules.make
diff --git a/arch/sh/kernel/entry.S b/arch/sh/kernel/entry.S
new file mode 100644
index 000000000..7fca58b30
--- /dev/null
+++ b/arch/sh/kernel/entry.S
@@ -0,0 +1,683 @@
+/* $Id$
+ *
+ * linux/arch/sh/entry.S
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+
+! NOTE:
+! GNU as (as of 2.9.1) changes bf/s into bt/s and bra when the branch
+! target is too far away, but that causes an illegal slot exception.
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * Stack layout in 'ret_from_syscall':
+ * ptrace needs to have all regs on the stack.
+ * if the order here is changed, it needs to be
+ * updated in process.c:copy_thread, signal.c:do_signal,
+ * ptrace.c and ptrace.h
+ *
+ * syscall #
+ * r0
+ * ...
+ * r15
+ * gbr
+ * mach
+ * macl
+ * pr
+ * ssr
+ * spc
+ *
+ */
+
+/*
+ * these are offsets into the task-struct.
+ */
+state = 0
+flags = 4
+sigpending = 8
+addr_limit = 12
+need_resched = 20
+
+PF_TRACESYS = 0x20
+
+ENOSYS = 38
+
+TRA = 0xffffffd0
+EXPEVT = 0xffffffd4
+INTEVT = 0xffffffd8
+
+/* Offsets to the stack */
+SYSCALL_NR = 0
+R0 = 4
+R15 = 64
+
+#define k0 r0
+#define k1 r1
+#define k2 r2
+#define k3 r3
+
+#define kernel_sp r4 /* r4_bank1 */
+#define ksp r4_bank /* r4_bank1 */
+#define k_ex_code r2_bank /* r2_bank1 */
+
+/* Kernel mode register usage:
+ k0 scratch
+ k1 scratch
+ k2 scratch (Exception code)
+ k3 scratch (Return address)
+ k4 Stack base = current+8192
+ k5 reserved
+ k6 reserved
+ k7 reserved
+*/
+
+!
+! TLB Miss / Initial Page write exception handling
+! _and_
+! TLB hits, but the access violates the protection.
+! It can be a valid access, such as stack growth and/or copy-on-write.
+!
+!
+! Find the pmd/pte entry and load the TLB.
+! If it's not found, raise an address error (SEGV).
+!
+! Although this could be written in assembly language (and it'd be faster),
+! this first version relies heavily on the C implementation.
+!
+MMU_TEA = 0xfffffffc ! TLB Exception Address Register
+
+#define DO_FAULT(write) \
+ mov #MMU_TEA,r0; \
+ mov.l @r0,r6; \
+ /* STI */ \
+ mov.l 3f,r1; \
+ stc sr,r0; \
+ and r1,r0; \
+ ldc r0,sr; \
+ /* */ \
+ mov r15,r4; \
+ mov.l 2f,r0; \
+ jmp @r0; \
+ mov #write,r5;
+
+ .balign 4
+tlb_protection_violation_load:
+tlb_miss_load:
+ mov #-1,r0
+ mov.l r0,@r15 ! syscall nr = -1
+ DO_FAULT(0)
+
+ .balign 4
+tlb_protection_violation_store:
+tlb_miss_store:
+initial_page_write:
+ mov #-1,r0
+ mov.l r0,@r15 ! syscall nr = -1
+ DO_FAULT(1)
+
+ .balign 4
+2: .long SYMBOL_NAME(do_page_fault)
+3: .long 0xefffffff ! BL=0
+
+
+ .balign 4
+error: mov #-1,r0
+ ! STI
+ mov.l 2f,r1
+ stc sr,r0
+ and r1,r0
+ ldc r0,sr
+ !
+ mov.l r0,@r15 ! syscall nr = -1
+ mov.l 1f,r1
+ jmp @r1
+ nop
+ .balign 4
+1: .long SYMBOL_NAME(do_exception_error)
+
+reschedule:
+ mova SYMBOL_NAME(ret_from_syscall),r0
+ mov.l 1f,r1
+ jmp @r1
+ lds r0,pr
+ .balign 4
+1: .long SYMBOL_NAME(schedule)
+
+badsys: mov #-ENOSYS,r0
+ bra SYMBOL_NAME(ret_from_syscall)
+ mov.l r0,@(R0,r15)
+
+signal_return:
+ ! We can reach here from an interrupt handler,
+ ! so we need to unblock interrupts.
+ mov.l 1f,r1
+ stc sr,r0
+ and r1,r0
+ ldc r0,sr
+ !
+ mov r15,r4
+ mov #0,r5
+ mov.l 2f,r1
+ mova restore_all,r0
+ jmp @r1
+ lds r0,pr
+ .balign 4
+1: .long 0xefffffff ! BL=0
+2: .long SYMBOL_NAME(do_signal)
+
+!
+!
+!
+ENTRY(ret_from_fork)
+ bra SYMBOL_NAME(ret_from_syscall)
+ add #4,r15 ! pop down bogus r0
+
+!
+! The immediate value of "trapa" indicates the number of arguments
+! placed on the stack.
+!
+system_call:
+ mov #TRA,r2
+ mov.l @r2,r8
+ ! STI
+ mov.l 2f,r1
+ stc sr,r2
+ and r1,r2
+ ldc r2,sr
+ !
+ mov.l __n_sys,r1
+ cmp/ge r1,r0
+ bt badsys
+ !
+ stc ksp,r1 !
+ mov.l __tsk_flags,r0 !
+ add r0,r1 !
+ mov.l @r1,r0 ! Is it trace?
+ tst #PF_TRACESYS,r0
+ bt 6f
+ ! Trace system call
+ mov #-ENOSYS,r1
+ mov.l r1,@(R0,r15)
+ mov.l 3f,r1
+ jsr @r1
+ nop
+ mova 4f,r0
+ bra 7f
+ lds r0,pr
+ !
+6: mova 1f,r0
+ lds r0,pr
+ ! Build the stack frame if TRA > 0
+7: cmp/pl r8
+ bf 9f
+ shll2 r8 ! x4
+ mov #R15,r0
+ mov.l @(r0,r15),r0 ! get original stack
+8: add #-4,r8
+ mov.l @(r0,r8),r1
+ mov.l r1,@-r15
+ cmp/pl r8
+ bt 8b
+ !
+9: mov.l @(SYSCALL_NR,r15),r0
+ shll2 r0 ! x4
+ mov.l __sct,r1
+ add r1,r0
+ mov.l @r0,r1
+ jmp @r1
+ nop
+ .balign 4
+4: mov.l r0,@(R0,r15) ! save the return value
+ mov.l 3f,r1
+ mova SYMBOL_NAME(ret_from_syscall),r0
+ jmp @r1
+ lds r0,pr
+ .balign 4
+3: .long SYMBOL_NAME(syscall_trace)
+2: .long 0xefffffff ! BL=0
+1: mov.l r0,@(R0,r15) ! save the return value
+ /* fall through */
+
+ENTRY(ret_from_syscall)
+ENTRY(ret_from_irq)
+ mov.l __bh_mask,r0
+ mov.l @r0,r1
+ mov.l __bh_active,r0
+ mov.l @r0,r2
+ tst r2,r1
+ bt ret_with_reschedule
+handle_bottom_half:
+ mov.l __dbh,r0
+ jsr @r0
+ nop
+ret_with_reschedule:
+ stc ksp,r1
+ mov.l __minus8192,r0
+ add r0,r1
+ mov.l @(need_resched,r1),r0
+ tst #0xff,r0
+ bf reschedule
+ mov.l @(sigpending,r1),r0
+ tst #0xff,r0
+ bf signal_return
+ !
+ .balign 4
+restore_all:
+ add #4,r15 ! skip syscall number
+ mov.l @r15+,r0
+ mov.l @r15+,r1
+ mov.l @r15+,r2
+ mov.l @r15+,r3
+ mov.l @r15+,r4
+ mov.l @r15+,r5
+ mov.l @r15+,r6
+ mov.l @r15+,r7
+ stc sr,r14
+ mov.l __blrb_flags,r9 ! BL =1, RB=1
+ or r9,r14
+ ldc r14,sr ! here, change the register bank
+ mov.l @r15+,r8
+ mov.l @r15+,r9
+ mov.l @r15+,r10
+ mov.l @r15+,r11
+ mov.l @r15+,r12
+ mov.l @r15+,r13
+ mov.l @r15+,r14
+ mov.l @r15+,k0
+ ldc.l @r15+,gbr
+ lds.l @r15+,mach
+ lds.l @r15+,macl
+ lds.l @r15+,pr
+ ldc.l @r15+,ssr
+ ldc.l @r15+,spc
+ mov k0,r15
+ rte
+ nop
+
+ .balign 4
+__n_sys: .long NR_syscalls
+__sct: .long SYMBOL_NAME(sys_call_table)
+__bh_mask: .long SYMBOL_NAME(bh_mask)
+__bh_active: .long SYMBOL_NAME(bh_active)
+__dbh: .long SYMBOL_NAME(do_bottom_half)
+__blrb_flags: .long 0x30000000
+__minus8192: .long -8192 ! offset from stackbase to tsk
+__tsk_flags: .long flags-8192 ! offset from stackbase to tsk->flags
+
+
+! Exception Vector Base
+!
+! Should be aligned page boundary.
+!
+ .balign 4096,0,4096
+ENTRY(vbr_base)
+ .long 0
+!
+ .balign 256,0,256
+general_exception:
+ mov #EXPEVT,k2
+ mov.l 2f,k3
+ bra handle_exception
+ mov.l @k2,k2
+ .balign 4
+2: .long SYMBOL_NAME(ret_from_syscall)
+!
+!
+ .balign 1024,0,1024
+tlb_miss:
+ mov #EXPEVT,k2
+ mov.l 3f,k3
+ bra handle_exception
+ mov.l @k2,k2
+!
+ .balign 512,0,512
+interrupt:
+ mov #INTEVT,k2
+ mov.l 4f,k3
+ bra handle_exception
+ mov.l @k2,k2
+
+ .balign 4
+3: .long SYMBOL_NAME(ret_from_syscall)
+4: .long SYMBOL_NAME(ret_from_irq)
+
+!
+!
+handle_exception:
+ ! Using k0, k1 for scratch registers (r0_bank1 and r1_bank1),
+ ! save all registers onto stack.
+ !
+ mov.l 2f,k1
+ stc ssr,k0 ! from kernel space?
+ shll k0 ! Check MD bit (bit30)
+ shll k0
+ bt/s 1f ! it's from kernel to kernel transition
+ mov r15,k0 ! save original stack to k0 anyway
+ mov kernel_sp,r15 ! change to kernel stack
+1: stc.l spc,@-r15 ! save control registers
+ stc.l ssr,@-r15
+ sts.l pr,@-r15
+ !
+ lds k3,pr ! Set the return address to pr
+ !
+ sts.l macl,@-r15
+ sts.l mach,@-r15
+ stc.l gbr,@-r15
+ mov.l k0,@-r15 ! save original stack and general registers
+ mov.l r14,@-r15
+ !
+ stc sr,r14 ! back to normal register bank, and
+ and k1,r14 ! ..
+ ldc r14,sr ! ...changed here.
+ !
+ mov.l r13,@-r15
+ mov.l r12,@-r15
+ mov.l r11,@-r15
+ mov.l r10,@-r15
+ mov.l r9,@-r15
+ mov.l r8,@-r15
+ mov.l r7,@-r15
+ mov.l r6,@-r15
+ mov.l r5,@-r15
+ mov.l r4,@-r15
+ mov.l r3,@-r15
+ mov.l r2,@-r15
+ mov.l r1,@-r15
+ mov.l r0,@-r15
+ mov.l r0,@-r15 ! push r0 again (for syscall number)
+ ! Then dispatch to the handler, according to the exception code.
+ stc k_ex_code,r1
+ shlr2 r1
+ shlr r1
+ mov.l 1f,r0
+ add r1,r0
+ mov.l @r0,r0
+ jmp @r0
+ mov.l @r15,r0 ! recovering r0..
+ .balign 4
+1: .long SYMBOL_NAME(exception_handling_table)
+2: .long 0xdfffffff ! RB=0, BL=1
+
+.data
+ENTRY(exception_handling_table)
+ .long 0
+ .long 0
+ .long tlb_miss_load
+ .long tlb_miss_store
+ .long initial_page_write
+ .long tlb_protection_violation_load
+ .long tlb_protection_violation_store
+ .long error ! address_error_load (filled by trap_init)
+ .long error ! address_error_store (filled by trap_init)
+ .long 0
+ .long 0
+ .long system_call ! Unconditional Trap
+ .long error ! reserved_instruction (filled by trap_init)
+ .long error ! illegal_slot_instruction (filled by trap_init)
+ENTRY(nmi_slot)
+ .long error ! Not implemented yet
+ENTRY(user_break_point_trap)
+ .long error ! Not implemented yet
+ENTRY(interrupt_table)
+ ! external hardware
+ .long SYMBOL_NAME(do_IRQ) ! 0000
+ .long SYMBOL_NAME(do_IRQ) ! 0001
+ .long SYMBOL_NAME(do_IRQ) ! 0010
+ .long SYMBOL_NAME(do_IRQ) ! 0011
+ .long SYMBOL_NAME(do_IRQ) ! 0100
+ .long SYMBOL_NAME(do_IRQ) ! 0101
+ .long SYMBOL_NAME(do_IRQ) ! 0110
+ .long SYMBOL_NAME(do_IRQ) ! 0111
+ .long SYMBOL_NAME(do_IRQ) ! 1000
+ .long SYMBOL_NAME(do_IRQ) ! 1001
+ .long SYMBOL_NAME(do_IRQ) ! 1010
+ .long SYMBOL_NAME(do_IRQ) ! 1011
+ .long SYMBOL_NAME(do_IRQ) ! 1100
+ .long SYMBOL_NAME(do_IRQ) ! 1101
+ .long SYMBOL_NAME(do_IRQ) ! 1110
+ .long 0
+ ! Internal hardware
+ .long SYMBOL_NAME(do_IRQ) ! TMU0 tuni0
+ .long SYMBOL_NAME(do_IRQ) ! TMU1 tuni1
+ .long SYMBOL_NAME(do_IRQ) ! TMU2 tuni2
+ .long SYMBOL_NAME(do_IRQ) ! ticpi2
+ .long SYMBOL_NAME(do_IRQ) ! RTC ati
+ .long SYMBOL_NAME(do_IRQ) ! pri
+ .long SYMBOL_NAME(do_IRQ) ! cui
+ .long SYMBOL_NAME(do_IRQ) ! SCI eri
+ .long SYMBOL_NAME(do_IRQ) ! rxi
+ .long SYMBOL_NAME(do_IRQ) ! txi
+ .long SYMBOL_NAME(do_IRQ) ! tei
+ .long SYMBOL_NAME(do_IRQ) ! WDT iti
+ .long SYMBOL_NAME(do_IRQ) ! REF rcmi
+ .long SYMBOL_NAME(do_IRQ) ! rovi
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+
+ENTRY(sys_call_table)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 0 - old "setup()" system call*/
+ .long SYMBOL_NAME(sys_exit)
+ .long SYMBOL_NAME(sys_fork)
+ .long SYMBOL_NAME(sys_read)
+ .long SYMBOL_NAME(sys_write)
+ .long SYMBOL_NAME(sys_open) /* 5 */
+ .long SYMBOL_NAME(sys_close)
+ .long SYMBOL_NAME(sys_waitpid)
+ .long SYMBOL_NAME(sys_creat)
+ .long SYMBOL_NAME(sys_link)
+ .long SYMBOL_NAME(sys_unlink) /* 10 */
+ .long SYMBOL_NAME(sys_execve)
+ .long SYMBOL_NAME(sys_chdir)
+ .long SYMBOL_NAME(sys_time)
+ .long SYMBOL_NAME(sys_mknod)
+ .long SYMBOL_NAME(sys_chmod) /* 15 */
+ .long SYMBOL_NAME(sys_lchown)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old break syscall holder */
+ .long SYMBOL_NAME(sys_stat)
+ .long SYMBOL_NAME(sys_lseek)
+ .long SYMBOL_NAME(sys_getpid) /* 20 */
+ .long SYMBOL_NAME(sys_mount)
+ .long SYMBOL_NAME(sys_oldumount)
+ .long SYMBOL_NAME(sys_setuid)
+ .long SYMBOL_NAME(sys_getuid)
+ .long SYMBOL_NAME(sys_stime) /* 25 */
+ .long SYMBOL_NAME(sys_ptrace)
+ .long SYMBOL_NAME(sys_alarm)
+ .long SYMBOL_NAME(sys_fstat)
+ .long SYMBOL_NAME(sys_pause)
+ .long SYMBOL_NAME(sys_utime) /* 30 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old stty syscall holder */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old gtty syscall holder */
+ .long SYMBOL_NAME(sys_access)
+ .long SYMBOL_NAME(sys_nice)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 35 */ /* old ftime syscall holder */
+ .long SYMBOL_NAME(sys_sync)
+ .long SYMBOL_NAME(sys_kill)
+ .long SYMBOL_NAME(sys_rename)
+ .long SYMBOL_NAME(sys_mkdir)
+ .long SYMBOL_NAME(sys_rmdir) /* 40 */
+ .long SYMBOL_NAME(sys_dup)
+ .long SYMBOL_NAME(sys_pipe)
+ .long SYMBOL_NAME(sys_times)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old prof syscall holder */
+ .long SYMBOL_NAME(sys_brk) /* 45 */
+ .long SYMBOL_NAME(sys_setgid)
+ .long SYMBOL_NAME(sys_getgid)
+ .long SYMBOL_NAME(sys_signal)
+ .long SYMBOL_NAME(sys_geteuid)
+ .long SYMBOL_NAME(sys_getegid) /* 50 */
+ .long SYMBOL_NAME(sys_acct)
+ .long SYMBOL_NAME(sys_umount) /* recycled never used phys() */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old lock syscall holder */
+ .long SYMBOL_NAME(sys_ioctl)
+ .long SYMBOL_NAME(sys_fcntl) /* 55 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old mpx syscall holder */
+ .long SYMBOL_NAME(sys_setpgid)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old ulimit syscall holder */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_olduname */
+ .long SYMBOL_NAME(sys_umask) /* 60 */
+ .long SYMBOL_NAME(sys_chroot)
+ .long SYMBOL_NAME(sys_ustat)
+ .long SYMBOL_NAME(sys_dup2)
+ .long SYMBOL_NAME(sys_getppid)
+ .long SYMBOL_NAME(sys_getpgrp) /* 65 */
+ .long SYMBOL_NAME(sys_setsid)
+ .long SYMBOL_NAME(sys_sigaction)
+ .long SYMBOL_NAME(sys_sgetmask)
+ .long SYMBOL_NAME(sys_ssetmask)
+ .long SYMBOL_NAME(sys_setreuid) /* 70 */
+ .long SYMBOL_NAME(sys_setregid)
+ .long SYMBOL_NAME(sys_sigsuspend)
+ .long SYMBOL_NAME(sys_sigpending)
+ .long SYMBOL_NAME(sys_sethostname)
+ .long SYMBOL_NAME(sys_setrlimit) /* 75 */
+ .long SYMBOL_NAME(sys_getrlimit)
+ .long SYMBOL_NAME(sys_getrusage)
+ .long SYMBOL_NAME(sys_gettimeofday)
+ .long SYMBOL_NAME(sys_settimeofday)
+ .long SYMBOL_NAME(sys_getgroups) /* 80 */
+ .long SYMBOL_NAME(sys_setgroups)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old_select */
+ .long SYMBOL_NAME(sys_symlink)
+ .long SYMBOL_NAME(sys_lstat)
+ .long SYMBOL_NAME(sys_readlink) /* 85 */
+ .long SYMBOL_NAME(sys_uselib)
+ .long SYMBOL_NAME(sys_swapon)
+ .long SYMBOL_NAME(sys_reboot)
+ .long SYMBOL_NAME(old_readdir)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old_mmap */ /* 90 */
+ .long SYMBOL_NAME(sys_munmap)
+ .long SYMBOL_NAME(sys_truncate)
+ .long SYMBOL_NAME(sys_ftruncate)
+ .long SYMBOL_NAME(sys_fchmod)
+ .long SYMBOL_NAME(sys_fchown) /* 95 */
+ .long SYMBOL_NAME(sys_getpriority)
+ .long SYMBOL_NAME(sys_setpriority)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old profil syscall holder */
+ .long SYMBOL_NAME(sys_statfs)
+ .long SYMBOL_NAME(sys_fstatfs) /* 100 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* ioperm */
+ .long SYMBOL_NAME(sys_socketcall)
+ .long SYMBOL_NAME(sys_syslog)
+ .long SYMBOL_NAME(sys_setitimer)
+ .long SYMBOL_NAME(sys_getitimer) /* 105 */
+ .long SYMBOL_NAME(sys_newstat)
+ .long SYMBOL_NAME(sys_newlstat)
+ .long SYMBOL_NAME(sys_newfstat)
+ .long SYMBOL_NAME(sys_uname)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 110 */ /* iopl */
+ .long SYMBOL_NAME(sys_vhangup)
+ .long SYMBOL_NAME(sys_ni_syscall) /* idle */
+ .long SYMBOL_NAME(sys_ni_syscall) /* vm86old */
+ .long SYMBOL_NAME(sys_wait4)
+ .long SYMBOL_NAME(sys_swapoff) /* 115 */
+ .long SYMBOL_NAME(sys_sysinfo)
+ .long SYMBOL_NAME(sys_ipc)
+ .long SYMBOL_NAME(sys_fsync)
+ .long SYMBOL_NAME(sys_sigreturn)
+ .long SYMBOL_NAME(sys_clone) /* 120 */
+ .long SYMBOL_NAME(sys_setdomainname)
+ .long SYMBOL_NAME(sys_newuname)
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_modify_ldt */
+ .long SYMBOL_NAME(sys_adjtimex)
+ .long SYMBOL_NAME(sys_mprotect) /* 125 */
+ .long SYMBOL_NAME(sys_sigprocmask)
+ .long SYMBOL_NAME(sys_create_module)
+ .long SYMBOL_NAME(sys_init_module)
+ .long SYMBOL_NAME(sys_delete_module)
+ .long SYMBOL_NAME(sys_get_kernel_syms) /* 130 */
+ .long SYMBOL_NAME(sys_quotactl)
+ .long SYMBOL_NAME(sys_getpgid)
+ .long SYMBOL_NAME(sys_fchdir)
+ .long SYMBOL_NAME(sys_bdflush)
+ .long SYMBOL_NAME(sys_sysfs) /* 135 */
+ .long SYMBOL_NAME(sys_personality)
+ .long SYMBOL_NAME(sys_ni_syscall) /* for afs_syscall */
+ .long SYMBOL_NAME(sys_setfsuid)
+ .long SYMBOL_NAME(sys_setfsgid)
+ .long SYMBOL_NAME(sys_llseek) /* 140 */
+ .long SYMBOL_NAME(sys_getdents)
+ .long SYMBOL_NAME(sys_select)
+ .long SYMBOL_NAME(sys_flock)
+ .long SYMBOL_NAME(sys_msync)
+ .long SYMBOL_NAME(sys_readv) /* 145 */
+ .long SYMBOL_NAME(sys_writev)
+ .long SYMBOL_NAME(sys_getsid)
+ .long SYMBOL_NAME(sys_fdatasync)
+ .long SYMBOL_NAME(sys_sysctl)
+ .long SYMBOL_NAME(sys_mlock) /* 150 */
+ .long SYMBOL_NAME(sys_munlock)
+ .long SYMBOL_NAME(sys_mlockall)
+ .long SYMBOL_NAME(sys_munlockall)
+ .long SYMBOL_NAME(sys_sched_setparam)
+ .long SYMBOL_NAME(sys_sched_getparam) /* 155 */
+ .long SYMBOL_NAME(sys_sched_setscheduler)
+ .long SYMBOL_NAME(sys_sched_getscheduler)
+ .long SYMBOL_NAME(sys_sched_yield)
+ .long SYMBOL_NAME(sys_sched_get_priority_max)
+ .long SYMBOL_NAME(sys_sched_get_priority_min) /* 160 */
+ .long SYMBOL_NAME(sys_sched_rr_get_interval)
+ .long SYMBOL_NAME(sys_nanosleep)
+ .long SYMBOL_NAME(sys_mremap)
+ .long SYMBOL_NAME(sys_setresuid)
+ .long SYMBOL_NAME(sys_getresuid) /* 165 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* vm86 */
+ .long SYMBOL_NAME(sys_query_module)
+ .long SYMBOL_NAME(sys_poll)
+ .long SYMBOL_NAME(sys_nfsservctl)
+ .long SYMBOL_NAME(sys_setresgid) /* 170 */
+ .long SYMBOL_NAME(sys_getresgid)
+ .long SYMBOL_NAME(sys_prctl)
+ .long SYMBOL_NAME(sys_rt_sigreturn)
+ .long SYMBOL_NAME(sys_rt_sigaction)
+ .long SYMBOL_NAME(sys_rt_sigprocmask) /* 175 */
+ .long SYMBOL_NAME(sys_rt_sigpending)
+ .long SYMBOL_NAME(sys_rt_sigtimedwait)
+ .long SYMBOL_NAME(sys_rt_sigqueueinfo)
+ .long SYMBOL_NAME(sys_rt_sigsuspend)
+ .long SYMBOL_NAME(sys_pread) /* 180 */
+ .long SYMBOL_NAME(sys_pwrite)
+ .long SYMBOL_NAME(sys_chown)
+ .long SYMBOL_NAME(sys_getcwd)
+ .long SYMBOL_NAME(sys_capget)
+ .long SYMBOL_NAME(sys_capset) /* 185 */
+ .long SYMBOL_NAME(sys_sigaltstack)
+ .long SYMBOL_NAME(sys_sendfile)
+ .long SYMBOL_NAME(sys_ni_syscall) /* streams1 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */
+ .long SYMBOL_NAME(sys_vfork) /* 190 */
+
+ /*
+ * NOTE!! This doesn't have to be exact - we just have
+ * to make sure we have _enough_ of the "sys_ni_syscall"
+ * entries. Don't panic if you notice that this hasn't
+ * been shrunk every time we add a new system call.
+ */
+ .rept NR_syscalls-190
+ .long SYMBOL_NAME(sys_ni_syscall)
+ .endr
+
+/* End of entry.S */
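
The frame that handle_exception builds, documented at the top of entry.S, can
be pictured as a C struct. This is only an illustrative sketch (the struct and
field names are hypothetical, not the kernel's pt_regs definition); the
offsets match SYSCALL_NR = 0, R0 = 4 and R15 = 64 above:

	struct sh_frame_sketch {
		long syscall_nr;	/* SYSCALL_NR, offset 0 */
		long r[16];		/* r0..r15; r15 holds the saved stack pointer */
		long gbr;
		long mach;
		long macl;
		long pr;
		long ssr;		/* saved status register */
		long spc;		/* saved program counter */
	};

restore_all walks this layout from low offsets upward with post-increment
loads before executing rte.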
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
new file mode 100644
index 000000000..ed466ba38
--- /dev/null
+++ b/arch/sh/kernel/head.S
@@ -0,0 +1,69 @@
+/* $Id$
+ *
+ * arch/sh/kernel/head.S
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Head.S contains the SH exception handlers and startup code.
+ */
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/linkage.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_CPU_SH3
+/* The following values are assumed to be small enough to fit in an immediate. */
+#define CCR 0xffffffec /* Address of Cache Control Register */
+#define CACHE_INIT 0x00000009 /* 8k-byte cache, flush, enable */
+#elif CONFIG_CPU_SH4
+/* Should fill here. */
+#endif
+
+ENTRY(_stext)
+ ! Switch to register bank 0
+ stc sr,r1 !
+ mov.l 1f,r0 ! RB=0, BL=1
+ and r1,r0
+ ldc r0,sr
+ ! Enable cache
+#ifdef CONFIG_CPU_SH3
+ mov #CCR,r1
+ mov.l @r1,r0
+ cmp/eq #1,r0 ! If it's enabled already, don't flush it
+ bt/s 8f
+ mov #CACHE_INIT,r0
+ mov.l r0,@r1
+#elif CONFIG_CPU_SH4
+ ! Should fill here.
+#endif
+8:
+ !
+ mov.l 2f,r0
+ mov r0,r15 ! Set initial r15 (stack pointer)
+ ldc r0,r4_bank ! and stack base
+ ! Clear BSS area
+ mov.l 3f,r1
+ mov.l 4f,r2
+ mov #0,r0
+9: mov.l r0,@r1
+ cmp/hs r2,r1
+ bf/s 9b
+ add #4,r1
+ ! Start kernel
+ mov.l 5f,r0
+ jmp @r0
+ nop
+
+ .balign 4
+1: .long 0xdfffffff ! RB=0, BL=1
+2: .long SYMBOL_NAME(stack)
+3: .long SYMBOL_NAME(__bss_start)
+4: .long SYMBOL_NAME(_end)
+5: .long SYMBOL_NAME(start_kernel)
+
+.data
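
The BSS-clearing loop at label 9: above corresponds roughly to the following C
(a sketch for clarity, not part of the patch). Note the store happens before
the test, exactly as in the asm, so the word at _end is also written:

	extern unsigned long __bss_start[], _end[];

	static void clear_bss(void)
	{
		unsigned long *p = __bss_start;

		do
			*p = 0;		/* store first... */
		while (p++ < _end);	/* ...then test, like cmp/hs after mov.l */
	}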
diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c
new file mode 100644
index 000000000..aacd8f304
--- /dev/null
+++ b/arch/sh/kernel/init_task.c
@@ -0,0 +1,23 @@
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+static struct vm_area_struct init_mmap = INIT_MMAP;
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS;
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+/*
+ * Initial task structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union task_union init_task_union
+ __attribute__((__section__(".data.init_task"))) =
+ { INIT_TASK(init_task_union.task) };
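
The 8192-byte alignment is what lets the kernel recover "current" from any
kernel-mode stack address by masking; it is the same trick entry.S plays with
its __minus8192 constant (stack base = current + 8192). A sketch, with a
hypothetical helper name:

	static inline struct task_struct *sketch_current(unsigned long sp)
	{
		/* the two pages of the task_union start on an 8192-byte boundary */
		return (struct task_struct *)(sp & ~8191UL);
	}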
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
new file mode 100644
index 000000000..f75af5003
--- /dev/null
+++ b/arch/sh/kernel/irq.c
@@ -0,0 +1,485 @@
+/*
+ * linux/arch/sh/kernel/irq.c
+ *
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ *
+ * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ */
+
+/*
+ * IRQs are in fact implemented a bit like signal handlers for the kernel.
+ * Naturally it's not a 1:1 relation, but there are similarities.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/malloc.h>
+#include <linux/random.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+#include <asm/smp.h>
+#include <asm/pgtable.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <linux/irq.h>
+
+
+unsigned int local_bh_count[NR_CPUS];
+unsigned int local_irq_count[NR_CPUS];
+
+/*
+ * Micro-access to controllers is serialized over the whole
+ * system. We never hold this lock when we call the actual
+ * IRQ handler.
+ */
+spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;
+/*
+ * Controller mappings for all interrupt sources:
+ */
+irq_desc_t irq_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};
+
+/*
+ * Special irq handlers.
+ */
+
+void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
+
+/*
+ * Generic, controller-independent functions:
+ */
+
+int get_irq_list(char *buf)
+{
+ int i, j;
+ struct irqaction * action;
+ char *p = buf;
+
+ p += sprintf(p, " ");
+ for (j=0; j<smp_num_cpus; j++)
+ p += sprintf(p, "CPU%d ",j);
+ *p++ = '\n';
+
+ for (i = 0 ; i < NR_IRQS ; i++) {
+ action = irq_desc[i].action;
+ if (!action)
+ continue;
+ p += sprintf(p, "%3d: ",i);
+ p += sprintf(p, "%10u ", kstat_irqs(i));
+ p += sprintf(p, " %14s", irq_desc[i].handler->typename);
+ p += sprintf(p, " %s", action->name);
+
+ for (action=action->next; action; action = action->next) {
+ p += sprintf(p, ", %s", action->name);
+ }
+ *p++ = '\n';
+ }
+ return p - buf;
+}
+
+/*
+ * This should really return information about whether
+ * we should do bottom half handling etc. Right now we
+ * end up _always_ checking the bottom half, which is a
+ * waste of time and is not what some drivers would
+ * prefer.
+ */
+int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
+{
+ int status;
+ int cpu = smp_processor_id();
+
+ irq_enter(cpu, irq);
+
+ status = 1; /* Force the "do bottom halves" bit */
+
+ if (!(action->flags & SA_INTERRUPT))
+ __sti();
+
+ do {
+ status |= action->flags;
+ action->handler(irq, action->dev_id, regs);
+ action = action->next;
+ } while (action);
+ if (status & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+ __cli();
+
+ irq_exit(cpu, irq);
+
+ return status;
+}
+
+/*
+ * Generic enable/disable code: this just calls
+ * down into the PIC-specific version for the actual
+ * hardware disable after having gotten the irq
+ * controller lock.
+ */
+void disable_irq_nosync(unsigned int irq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ if (!irq_desc[irq].depth++) {
+ irq_desc[irq].status |= IRQ_DISABLED;
+ irq_desc[irq].handler->disable(irq);
+ }
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+
+/*
+ * Synchronous version of the above, making sure the IRQ is
+ * no longer running on any other CPU.
+ */
+void disable_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+
+ if (!local_irq_count[smp_processor_id()]) {
+ do {
+ barrier();
+ } while (irq_desc[irq].status & IRQ_INPROGRESS);
+ }
+}
+
+void enable_irq(unsigned int irq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ switch (irq_desc[irq].depth) {
+ case 1: {
+ unsigned int status = irq_desc[irq].status & ~IRQ_DISABLED;
+ irq_desc[irq].status = status;
+ if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+ irq_desc[irq].status = status | IRQ_REPLAY;
+ hw_resend_irq(irq_desc[irq].handler,irq);
+ }
+ irq_desc[irq].handler->enable(irq);
+ /* fall-through */
+ }
+ default:
+ irq_desc[irq].depth--;
+ break;
+ case 0:
+ printk("enable_irq() unbalanced from %p\n",
+ __builtin_return_address(0));
+ }
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+
+/*
+ * do_IRQ handles all normal device IRQ's.
+ */
+asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ /*
+ * We ack quickly, we don't want the irq controller
+ * thinking we're snobs just because some other CPU has
+ * disabled global interrupts (we have already done the
+ * INT_ACK cycles, it's too late to try to pretend to the
+ * controller that we aren't taking the interrupt).
+ *
+ * 0 return value means that this irq is already being
+ * handled by some other CPU. (or is disabled)
+ */
+ int irq;
+ int cpu = smp_processor_id();
+ irq_desc_t *desc;
+ struct irqaction * action;
+ unsigned int status;
+
+ /* Get IRQ number */
+ asm volatile("stc r2_bank,%0\n\t"
+ "shlr2 %0\n\t"
+ "shlr2 %0\n\t"
+ "shlr %0\n\t"
+ "add #-16,%0\n\t"
+ :"=z" (irq));
+
+ kstat.irqs[cpu][irq]++;
+ desc = irq_desc + irq;
+ spin_lock(&irq_controller_lock);
+ irq_desc[irq].handler->ack(irq);
+ /*
+ REPLAY is when Linux resends an IRQ that was dropped earlier
+ WAITING is used by probe to mark irqs that are being tested
+ */
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
+ status |= IRQ_PENDING; /* we _want_ to handle it */
+
+ /*
+ * If the IRQ is disabled for whatever reason, we cannot
+ * use the action we have.
+ */
+ action = NULL;
+ if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
+ action = desc->action;
+ status &= ~IRQ_PENDING; /* we commit to handling */
+ status |= IRQ_INPROGRESS; /* we are handling it */
+ }
+ desc->status = status;
+ spin_unlock(&irq_controller_lock);
+
+ /*
+ * If there is no IRQ handler or it was disabled, exit early.
+ * Since we set PENDING, if another processor is handling
+ * a different instance of this same irq, the other processor
+ * will take care of it.
+ */
+ if (!action)
+ return 1;
+
+ /*
+ * Edge triggered interrupts need to remember
+ * pending events.
+ * This applies to any hw interrupts that allow a second
+ * instance of the same irq to arrive while we are in do_IRQ
+ * or in the handler. But the code here only handles the _second_
+ * instance of the irq, not the third or fourth. So it is mostly
+ * useful for irq hardware that does not mask cleanly in an
+ * SMP environment.
+ */
+ for (;;) {
+ handle_IRQ_event(irq, &regs, action);
+ spin_lock(&irq_controller_lock);
+
+ if (!(desc->status & IRQ_PENDING))
+ break;
+ desc->status &= ~IRQ_PENDING;
+ spin_unlock(&irq_controller_lock);
+ }
+ desc->status &= ~IRQ_INPROGRESS;
+ if (!(desc->status & IRQ_DISABLED)){
+ irq_desc[irq].handler->end(irq);
+ }
+ spin_unlock(&irq_controller_lock);
+
+ /*
+ * This should be conditional: we should really get
+ * a return code from the irq handler to tell us
+ * whether the handler wants us to do software bottom
+ * half handling or not..
+ */
+ if (1) {
+ if (bh_active & bh_mask)
+ do_bottom_half();
+ }
+ return 1;
+}
+
+int request_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags,
+ const char * devname,
+ void *dev_id)
+{
+ int retval;
+ struct irqaction * action;
+
+ if (irq >= NR_IRQS)
+ return -EINVAL;
+ if (!handler)
+ return -EINVAL;
+
+ action = (struct irqaction *)
+ kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = irqflags;
+ action->mask = 0;
+ action->name = devname;
+ action->next = NULL;
+ action->dev_id = dev_id;
+
+ retval = setup_irq(irq, action);
+ if (retval)
+ kfree(action);
+ return retval;
+}
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+ struct irqaction **p;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS)
+ return;
+
+ spin_lock_irqsave(&irq_controller_lock,flags);
+ p = &irq_desc[irq].action;
+ for (;;) {
+ struct irqaction * action = *p;
+ if (action) {
+ struct irqaction **pp = p;
+ p = &action->next;
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found it - now remove it from the list of entries */
+ *pp = action->next;
+ if (irq_desc[irq].action)
+ break;
+ irq_desc[irq].status |= IRQ_DISABLED;
+ irq_desc[irq].handler->shutdown(irq);
+ break;
+ }
+ printk("Trying to free free IRQ%d\n",irq);
+ break;
+ }
+ spin_unlock_irqrestore(&irq_controller_lock,flags);
+}
+
+/*
+ * IRQ autodetection code..
+ *
+ * This depends on the fact that any interrupt that
+ * comes in on to an unassigned handler will get stuck
+ * with "IRQ_WAITING" cleared and the interrupt
+ * disabled.
+ */
+unsigned long probe_irq_on(void)
+{
+ unsigned int i;
+ unsigned long delay;
+
+ /*
+ * first, enable any unassigned irqs
+ */
+ spin_lock_irq(&irq_controller_lock);
+ for (i = NR_IRQS-1; i > 0; i--) {
+ if (!irq_desc[i].action) {
+ irq_desc[i].status |= IRQ_AUTODETECT | IRQ_WAITING;
+ if(irq_desc[i].handler->startup(i))
+ irq_desc[i].status |= IRQ_PENDING;
+ }
+ }
+ spin_unlock_irq(&irq_controller_lock);
+
+ /*
+ * Wait for spurious interrupts to trigger
+ */
+ for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
+ /* about 100ms delay */ synchronize_irq();
+
+ /*
+ * Now filter out any obviously spurious interrupts
+ */
+ spin_lock_irq(&irq_controller_lock);
+ for (i=0; i<NR_IRQS; i++) {
+ unsigned int status = irq_desc[i].status;
+
+ if (!(status & IRQ_AUTODETECT))
+ continue;
+
+ /* It triggered already - consider it spurious. */
+ if (!(status & IRQ_WAITING)) {
+ irq_desc[i].status = status & ~IRQ_AUTODETECT;
+ irq_desc[i].handler->shutdown(i);
+ }
+ }
+ spin_unlock_irq(&irq_controller_lock);
+
+ return 0x12345678;
+}
+
+int probe_irq_off(unsigned long unused)
+{
+ int i, irq_found, nr_irqs;
+
+ if (unused != 0x12345678)
+ printk("Bad IRQ probe from %lx\n", (&unused)[-1]);
+
+ nr_irqs = 0;
+ irq_found = 0;
+ spin_lock_irq(&irq_controller_lock);
+ for (i=0; i<NR_IRQS; i++) {
+ unsigned int status = irq_desc[i].status;
+
+ if (!(status & IRQ_AUTODETECT))
+ continue;
+
+ if (!(status & IRQ_WAITING)) {
+ if (!nr_irqs)
+ irq_found = i;
+ nr_irqs++;
+ }
+ irq_desc[i].status = status & ~IRQ_AUTODETECT;
+ irq_desc[i].handler->shutdown(i);
+ }
+ spin_unlock_irq(&irq_controller_lock);
+
+ if (nr_irqs > 1)
+ irq_found = -irq_found;
+ return irq_found;
+}
+
+int setup_irq(unsigned int irq, struct irqaction * new)
+{
+ int shared = 0;
+ struct irqaction *old, **p;
+ unsigned long flags;
+
+ /*
+ * Some drivers like serial.c use request_irq() heavily,
+ * so we have to be careful not to interfere with a
+ * running system.
+ */
+ if (new->flags & SA_SAMPLE_RANDOM) {
+ /*
+ * This function might sleep, we want to call it first,
+ * outside of the atomic block.
+ * Yes, this might clear the entropy pool if the wrong
+ * driver is attempted to be loaded, without actually
+ * installing a new handler, but is this really a problem?
+ * Only the sysadmin is able to do this.
+ */
+ rand_initialize_irq(irq);
+ }
+
+ /*
+ * The following block of code has to be executed atomically
+ */
+ spin_lock_irqsave(&irq_controller_lock,flags);
+ p = &irq_desc[irq].action;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ)) {
+ spin_unlock_irqrestore(&irq_controller_lock,flags);
+ return -EBUSY;
+ }
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ *p = new;
+
+ if (!shared) {
+ irq_desc[irq].depth = 0;
+ irq_desc[irq].status &= ~IRQ_DISABLED;
+ irq_desc[irq].handler->startup(irq);
+ }
+ spin_unlock_irqrestore(&irq_controller_lock,flags);
+ return 0;
+}
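
Typical use of the autodetection interface above, from a driver's probe
routine (a sketch; the device poke is hypothetical and hardware-specific):

	static int probe_device_irq(void)
	{
		unsigned long cookie;
		int irq;

		cookie = probe_irq_on();	/* enable all unassigned IRQs */
		/* ... program the device to raise its interrupt here ... */
		irq = probe_irq_off(cookie);	/* >0: the IRQ, 0: none, <0: several fired */
		return irq;
	}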
diff --git a/arch/sh/kernel/irq_onchip.c b/arch/sh/kernel/irq_onchip.c
new file mode 100644
index 000000000..2eae049e5
--- /dev/null
+++ b/arch/sh/kernel/irq_onchip.c
@@ -0,0 +1,168 @@
+/*
+ * linux/arch/sh/kernel/irq_onchip.c
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * Interrupt handling for on-chip supporting modules (TMU, RTC, etc.).
+ *
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/malloc.h>
+#include <linux/random.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/smp.h>
+#include <asm/pgtable.h>
+#include <asm/delay.h>
+
+#include <linux/irq.h>
+
+
+/*
+ * SH (non-)specific no-controller code
+ */
+
+static void enable_none(unsigned int irq) { }
+static unsigned int startup_none(unsigned int irq) { return 0; }
+static void disable_none(unsigned int irq) { }
+static void ack_none(unsigned int irq)
+{
+}
+
+/* startup is the same as "enable", shutdown is the same as "disable" */
+#define shutdown_none disable_none
+#define end_none enable_none
+
+struct hw_interrupt_type no_irq_type = {
+ "none",
+ startup_none,
+ shutdown_none,
+ enable_none,
+ disable_none,
+ ack_none,
+ end_none
+};
+
+struct ipr_data {
+ int offset;
+ int priority;
+};
+static struct ipr_data ipr_data[NR_IRQS-TIMER_IRQ];
+
+void set_ipr_data(unsigned int irq, int offset, int priority)
+{
+ ipr_data[irq-TIMER_IRQ].offset = offset;
+ ipr_data[irq-TIMER_IRQ].priority = priority;
+}
+
+static void enable_onChip_irq(unsigned int irq);
+void disable_onChip_irq(unsigned int irq);
+
+/* shutdown is the same as "disable" */
+#define shutdown_onChip_irq disable_onChip_irq
+
+static void mask_and_ack_onChip(unsigned int);
+static void end_onChip_irq(unsigned int irq);
+
+static unsigned int startup_onChip_irq(unsigned int irq)
+{
+ enable_onChip_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static struct hw_interrupt_type onChip_irq_type = {
+ "On-Chip Supporting Module",
+ startup_onChip_irq,
+ shutdown_onChip_irq,
+ enable_onChip_irq,
+ disable_onChip_irq,
+ mask_and_ack_onChip,
+ end_onChip_irq
+};
+
+/*
+ * These have to be protected by the irq controller spinlock
+ * before being called.
+ *
+ *
+ * IPRA 15-12 11-8 7-4 3-0
+ * IPRB 15-12 11-8 7-4 3-0
+ * IPRC 15-12 11-8 7-4 3-0
+ *
+ */
+#define INTC_IPR 0xfffffee2UL /* Word access */
+
+void disable_onChip_irq(unsigned int irq)
+{
+ /* Set priority in IPR to 0 */
+ int offset = ipr_data[irq-TIMER_IRQ].offset;
+ unsigned long intc_ipr_address = INTC_IPR + offset/16;
+ unsigned short mask = 0xffff ^ (0xf << (offset%16));
+ unsigned long __dummy;
+
+ asm volatile("mov.w @%1,%0\n\t"
+ "and %2,%0\n\t"
+ "mov.w %0,@%1"
+ : "=&z" (__dummy)
+ : "r" (intc_ipr_address), "r" (mask)
+ : "memory" );
+}
+
+static void enable_onChip_irq(unsigned int irq)
+{
+ /* Set priority in IPR back to original value */
+ int offset = ipr_data[irq-TIMER_IRQ].offset;
+ int priority = ipr_data[irq-TIMER_IRQ].priority;
+ unsigned long intc_ipr_address = INTC_IPR + offset/16;
+ unsigned short value = (priority << (offset%16));
+ unsigned long __dummy;
+
+ asm volatile("mov.w @%1,%0\n\t"
+ "or %2,%0\n\t"
+ "mov.w %0,@%1"
+ : "=&z" (__dummy)
+ : "r" (intc_ipr_address), "r" (value)
+ : "memory" );
+}
+
+void make_onChip_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+ irq_desc[irq].handler = &onChip_irq_type;
+ enable_irq(irq);
+}
+
+static void mask_and_ack_onChip(unsigned int irq)
+{
+ disable_onChip_irq(irq);
+ sti();
+}
+
+static void end_onChip_irq(unsigned int irq)
+{
+ enable_onChip_irq(irq);
+ cli();
+}
+
+void __init init_IRQ(void)
+{
+ int i;
+
+ for (i = TIMER_IRQ; i < NR_IRQS; i++) {
+ irq_desc[i].handler = &onChip_irq_type;
+ }
+}
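
The arithmetic shared by enable_onChip_irq() and disable_onChip_irq() packs
both the register selector and the field position into the single "offset"
value. A sketch of the decoding (helper names are illustrative only):

	#define INTC_IPR 0xfffffee2UL

	static unsigned long ipr_word_address(int offset)
	{
		return INTC_IPR + offset / 16;	/* which 16-bit IPR word */
	}

	static unsigned short ipr_field_mask(int offset)
	{
		return 0xf << (offset % 16);	/* which 4-bit priority field in it */
	}

disable_onChip_irq() clears that field with the inverted mask; enable ORs the
saved priority back in at the same position.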
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
new file mode 100644
index 000000000..744da694b
--- /dev/null
+++ b/arch/sh/kernel/process.c
@@ -0,0 +1,303 @@
+/*
+ * linux/arch/sh/kernel/process.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#define __KERNEL_SYSCALLS__
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <linux/vmalloc.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/unistd.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/elf.h>
+
+#include <linux/irq.h>
+
+static int hlt_counter=0;
+
+#define HARD_IDLE_TIMEOUT (HZ / 3)
+
+void disable_hlt(void)
+{
+ hlt_counter++;
+}
+
+void enable_hlt(void)
+{
+ hlt_counter--;
+}
+
+/*
+ * The idle loop on a uniprocessor SuperH.
+ */
+void cpu_idle(void *unused)
+{
+ /* endless idle loop with no priority at all */
+ init_idle();
+ current->priority = 0;
+ current->counter = -100;
+
+ while (1) {
+ while (!current->need_resched) {
+ if (hlt_counter)
+ continue;
+ __sti();
+ asm volatile("sleep" : : : "memory");
+ }
+ schedule();
+ check_pgt_cache();
+ }
+}
+
+void machine_restart(char * __unused)
+{ /* Need to set MMU_TTB?? */
+}
+
+void machine_halt(void)
+{
+}
+
+void machine_power_off(void)
+{
+}
+
+void show_regs(struct pt_regs * regs)
+{
+ printk("\n");
+ printk("PC: [<%08lx>]", regs->pc);
+ printk(" SP: %08lx", regs->u_regs[UREG_SP]);
+ printk(" SR: %08lx\n", regs->sr);
+ printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
+ regs->u_regs[0],regs->u_regs[1],
+ regs->u_regs[2],regs->u_regs[3]);
+ printk("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
+ regs->u_regs[4],regs->u_regs[5],
+ regs->u_regs[6],regs->u_regs[7]);
+ printk("R8 : %08lx R9 : %08lx R10: %08lx R11: %08lx\n",
+ regs->u_regs[8],regs->u_regs[9],
+ regs->u_regs[10],regs->u_regs[11]);
+ printk("R12: %08lx R13: %08lx R14: %08lx\n",
+ regs->u_regs[12],regs->u_regs[13],
+ regs->u_regs[14]);
+ printk("MACH: %08lx MACL: %08lx GBR: %08lx PR: %08lx",
+ regs->mach, regs->macl, regs->gbr, regs->pr);
+}
+
+struct task_struct * alloc_task_struct(void)
+{
+ /* Get two pages */
+ return (struct task_struct *) __get_free_pages(GFP_KERNEL,1);
+}
+
+void free_task_struct(struct task_struct *p)
+{
+ free_pages((unsigned long) p, 1);
+}
+
+/*
+ * Create a kernel thread
+ */
+
+/*
+ * This is the mechanism for creating a new kernel thread.
+ *
+ * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
+ * who haven't done an "execve()") should use this: it will work within
+ * a system call from a "real" process, but the process memory space will
+ * not be freed until both the parent and the child have exited.
+ */
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{ /* Don't use this with BL=1 (cli), or else the CPU resets! */
+ register unsigned long __sc0 __asm__ ("r0") = __NR_clone;
+ register unsigned long __sc4 __asm__ ("r4") = (long) flags | CLONE_VM;
+ register unsigned long __sc5 __asm__ ("r5") = 0;
+ register unsigned long __sc8 __asm__ ("r8") = (long) arg;
+ register unsigned long __sc9 __asm__ ("r9") = (long) fn;
+ __asm__ __volatile__(
+ "trapa #0\n\t" /* Linux/SH system call */
+ "tst #0xff,r0\n\t" /* child or parent? */
+ "bf 1f\n\t" /* parent - jump */
+ "jsr @r9\n\t" /* call fn */
+ " mov r8,r4\n\t" /* push argument */
+ "mov r0,r4\n\t" /* return value to arg of exit */
+ "mov %2,r0\n\t" /* exit */
+ "trapa #0\n"
+ "1:"
+ :"=z" (__sc0)
+ :"0" (__sc0), "i" (__NR_exit),
+ "r" (__sc4), "r" (__sc5), "r" (__sc8), "r" (__sc9)
+ :"memory");
+ return __sc0;
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+ /* nothing to do ... */
+}
+
+void flush_thread(void)
+{
+ /* do nothing */
+ /* Possibly, set clear debug registers */
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+ /* do nothing */
+}
+
+/* Fill in the fpu structure for a core dump.. */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
+{
+ return 0; /* Task didn't use the fpu at all. */
+}
+
+asmlinkage void ret_from_fork(void);
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+ struct task_struct *p, struct pt_regs *regs)
+{
+ struct pt_regs *childregs;
+
+ childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long) p)) - 1;
+
+ *childregs = *regs;
+ if (user_mode(regs)) {
+ childregs->u_regs[UREG_SP] = usp;
+ } else {
+ childregs->u_regs[UREG_SP] = (unsigned long)p+2*PAGE_SIZE;
+ }
+ childregs->u_regs[0] = 0; /* Set return value for child */
+
+ p->thread.sp = (unsigned long) childregs;
+ p->thread.pc = (unsigned long) ret_from_fork;
+ if (p->mm)
+ p->mm->context = NO_CONTEXT;
+
+ return 0;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+/* changed the size calculations - should hopefully work better. lbt */
+ dump->magic = CMAGIC;
+ dump->start_code = 0;
+ dump->start_stack = regs->u_regs[UREG_SP] & ~(PAGE_SIZE - 1);
+ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
+ dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
+ dump->u_dsize -= dump->u_tsize;
+ dump->u_ssize = 0;
+ /* Debug registers will come here. */
+
+ if (dump->start_stack < TASK_SIZE)
+ dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
+
+ dump->regs = *regs;
+}
+
+/*
+ * switch_to(x,y) should switch tasks from x to y.
+ *
+ */
+void __switch_to(struct task_struct *prev, struct task_struct *next)
+{
+ /*
+ * Restore the kernel stack onto kernel mode register
+ * k4 (r4_bank1)
+ */
+ asm volatile("ldc %0,r4_bank"
+ : /* no output */
+ :"r" ((unsigned long)next+8192));
+}
+
+asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ return do_fork(SIGCHLD, regs.u_regs[UREG_SP], &regs);
+}
+
+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ if (!newsp)
+ newsp = regs.u_regs[UREG_SP];
+ return do_fork(clone_flags, newsp, &regs);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
+ regs.u_regs[UREG_SP], &regs);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int sys_execve(char *ufilename, char **uargv,
+ char **uenvp, unsigned long r7,
+ struct pt_regs regs)
+{
+ int error;
+ char *filename;
+
+ lock_kernel();
+ filename = getname(ufilename);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename, uargv, uenvp, &regs);
+ if (error == 0)
+ current->flags &= ~PF_DTRACE;
+ putname(filename);
+out:
+ unlock_kernel();
+ return error;
+}
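
A minimal caller of kernel_thread() above (a sketch; the thread body and names
are hypothetical). Per the comment on kernel_thread(), it must not be invoked
with interrupts blocked (BL=1), or the trapa resets the CPU:

	static int noise_thread(void *arg)
	{
		printk("kernel thread started, arg=%p\n", arg);
		return 0;		/* return value becomes the exit code */
	}

	static void spawn_noise_thread(void)
	{
		kernel_thread(noise_thread, NULL, 0);
	}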
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
new file mode 100644
index 000000000..2d69b5b7c
--- /dev/null
+++ b/arch/sh/kernel/ptrace.c
@@ -0,0 +1,476 @@
+/*
+ * Surely this doesn't work... (we need to design ptrace for SuperH)
+ * linux/arch/sh/kernel/ptrace.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+
+/*
+ * Does not yet catch signals sent when the child dies;
+ * that is handled in exit.c or in signal.c.
+ */
+
+/* determines which flags the user has access to. */
+/* 1 = access 0 = no access */
+#define FLAG_MASK 0x00044dd5
+
+/* sets the trap flag. */
+#define TRAP_FLAG 0x100
+
+/*
+ * Offset of eflags on child stack..
+ */
+#define EFL_OFFSET ((EFL-2)*4-sizeof(struct pt_regs))
+
+/*
+ * this routine will get a word off the process's privileged stack.
+ * the offset is how far from the base addr as stored in the TSS.
+ * this routine assumes that all the privileged stacks are in our
+ * data space.
+ */
+static inline int get_stack_long(struct task_struct *task, int offset)
+{
+ unsigned char *stack;
+
+ stack = (unsigned char *)task->thread.sp;
+ stack += offset;
+ return (*((int *)stack));
+}
+
+/*
+ * this routine will put a word on the process's privileged stack.
+ * the offset is how far from the base addr as stored in the TSS.
+ * this routine assumes that all the privileged stacks are in our
+ * data space.
+ */
+static inline int put_stack_long(struct task_struct *task, int offset,
+ unsigned long data)
+{
+ unsigned char * stack;
+
+ stack = (unsigned char *) task->thread.sp;
+ stack += offset;
+ *(unsigned long *) stack = data;
+ return 0;
+}
+
+static int putreg(struct task_struct *child,
+ unsigned long regno, unsigned long value)
+{
+#if 0
+ switch (regno >> 2) {
+ case ORIG_EAX:
+ return -EIO;
+ case FS:
+ if (value && (value & 3) != 3)
+ return -EIO;
+ child->thread.fs = value;
+ return 0;
+ case GS:
+ if (value && (value & 3) != 3)
+ return -EIO;
+ child->thread.gs = value;
+ return 0;
+ case DS:
+ case ES:
+ if (value && (value & 3) != 3)
+ return -EIO;
+ value &= 0xffff;
+ break;
+ case SS:
+ case CS:
+ if ((value & 3) != 3)
+ return -EIO;
+ value &= 0xffff;
+ break;
+ case EFL:
+ value &= FLAG_MASK;
+ value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK;
+ }
+ if (regno > GS*4)
+ regno -= 2*4;
+ put_stack_long(child, regno - sizeof(struct pt_regs), value);
+#endif
+ return 0;
+}
+
+static unsigned long getreg(struct task_struct *child,
+ unsigned long regno)
+{
+ unsigned long retval = ~0UL;
+
+#if 0
+ switch (regno >> 2) {
+ case FS:
+ retval = child->thread.fs;
+ break;
+ case GS:
+ retval = child->thread.gs;
+ break;
+ case DS:
+ case ES:
+ case SS:
+ case CS:
+ retval = 0xffff;
+ /* fall through */
+ default:
+ if (regno > GS*4)
+ regno -= 2*4;
+ regno = regno - sizeof(struct pt_regs);
+ retval &= get_stack_long(child, regno);
+ }
+#endif
+ return retval;
+}
+
+asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
+{
+ struct task_struct *child;
+ struct user * dummy = NULL;
+ unsigned long flags;
+ int i, ret;
+
+ lock_kernel();
+ ret = -EPERM;
+ if (request == PTRACE_TRACEME) {
+ /* are we already being traced? */
+ if (current->flags & PF_PTRACED)
+ goto out;
+ /* set the ptrace bit in the process flags. */
+ current->flags |= PF_PTRACED;
+ ret = 0;
+ goto out;
+ }
+ ret = -ESRCH;
+ read_lock(&tasklist_lock);
+ child = find_task_by_pid(pid);
+ read_unlock(&tasklist_lock); /* FIXME!!! */
+ if (!child)
+ goto out;
+ ret = -EPERM;
+ if (pid == 1) /* you may not mess with init */
+ goto out;
+ if (request == PTRACE_ATTACH) {
+ if (child == current)
+ goto out;
+ if ((!child->dumpable ||
+ (current->uid != child->euid) ||
+ (current->uid != child->suid) ||
+ (current->uid != child->uid) ||
+ (current->gid != child->egid) ||
+ (current->gid != child->sgid) ||
+ (!cap_issubset(child->cap_permitted, current->cap_permitted)) ||
+ (current->gid != child->gid)) && !capable(CAP_SYS_PTRACE))
+ goto out;
+ /* the same process cannot be attached many times */
+ if (child->flags & PF_PTRACED)
+ goto out;
+ child->flags |= PF_PTRACED;
+
+ write_lock_irqsave(&tasklist_lock, flags);
+ if (child->p_pptr != current) {
+ REMOVE_LINKS(child);
+ child->p_pptr = current;
+ SET_LINKS(child);
+ }
+ write_unlock_irqrestore(&tasklist_lock, flags);
+
+ send_sig(SIGSTOP, child, 1);
+ ret = 0;
+ goto out;
+ }
+ ret = -ESRCH;
+ if (!(child->flags & PF_PTRACED))
+ goto out;
+ if (child->state != TASK_STOPPED) {
+ if (request != PTRACE_KILL)
+ goto out;
+ }
+ if (child->p_pptr != current)
+ goto out;
+
+ switch (request) {
+ /* when I and D space are separate, these will need to be fixed. */
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA: {
+ unsigned long tmp;
+ int copied;
+
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ ret = -EIO;
+ if (copied != sizeof(tmp))
+ goto out;
+ ret = put_user(tmp,(unsigned long *) data);
+ goto out;
+ }
+
+ /* read the word at location addr in the USER area. */
+ case PTRACE_PEEKUSR: {
+ unsigned long tmp;
+
+ ret = -EIO;
+ if ((addr & 3) || addr < 0 ||
+ addr > sizeof(struct user) - 3)
+ goto out;
+
+ tmp = 0; /* Default return condition */
+ if(addr < 17*sizeof(long))
+ tmp = getreg(child, addr);
+#if 0
+ if(addr >= (long) &dummy->u_debugreg[0] &&
+ addr <= (long) &dummy->u_debugreg[7]){
+ addr -= (long) &dummy->u_debugreg[0];
+ addr = addr >> 2;
+ tmp = child->thread.debugreg[addr];
+ };
+#endif
+ ret = put_user(tmp,(unsigned long *) data);
+ goto out;
+ }
+
+ /* when I and D space are separate, this will have to be fixed. */
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ ret = 0;
+ if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
+ goto out;
+ ret = -EIO;
+ goto out;
+
+ case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+ ret = -EIO;
+ if ((addr & 3) || addr < 0 ||
+ addr > sizeof(struct user) - 3)
+ goto out;
+
+ if (addr < 17*sizeof(long)) {
+ ret = putreg(child, addr, data);
+ goto out;
+ }
+
+ /* We need to be very careful here. We implicitly
+ want to modify a portion of the task_struct, and we
+ have to be selective about what portions we allow someone
+ to modify. */
+#if 0
+ if(addr >= (long) &dummy->u_debugreg[0] &&
+ addr <= (long) &dummy->u_debugreg[7]){
+
+ if(addr == (long) &dummy->u_debugreg[4]) return -EIO;
+ if(addr == (long) &dummy->u_debugreg[5]) return -EIO;
+ if(addr < (long) &dummy->u_debugreg[4] &&
+ ((unsigned long) data) >= TASK_SIZE-3) return -EIO;
+
+ ret = -EIO;
+ if(addr == (long) &dummy->u_debugreg[7]) {
+ data &= ~DR_CONTROL_RESERVED;
+ for(i=0; i<4; i++)
+ if ((0x5f54 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
+ goto out;
+ };
+
+ addr -= (long) &dummy->u_debugreg;
+ addr = addr >> 2;
+ child->thread.debugreg[addr] = data;
+ ret = 0;
+ goto out;
+ };
+#endif
+ ret = -EIO;
+ goto out;
+
+ case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+ case PTRACE_CONT: { /* restart after signal. */
+ long tmp;
+
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ goto out;
+ if (request == PTRACE_SYSCALL)
+ child->flags |= PF_TRACESYS;
+ else
+ child->flags &= ~PF_TRACESYS;
+ child->exit_code = data;
+ /* make sure the single step bit is not set. */
+#if 0
+ tmp = get_stack_long(child, EFL_OFFSET) & ~TRAP_FLAG;
+ put_stack_long(child, EFL_OFFSET,tmp);
+#endif
+ wake_up_process(child);
+ ret = 0;
+ goto out;
+ }
+
+/*
+ * Make the child exit. The best I can do is send it a SIGKILL;
+ * perhaps it should be put in the status that it wants to
+ * exit.
+ */
+ case PTRACE_KILL: {
+ long tmp;
+
+ ret = 0;
+ if (child->state == TASK_ZOMBIE) /* already dead */
+ goto out;
+ child->exit_code = SIGKILL;
+ /* make sure the single step bit is not set. */
+#if 0
+ tmp = get_stack_long(child, EFL_OFFSET) & ~TRAP_FLAG;
+ put_stack_long(child, EFL_OFFSET, tmp);
+#endif
+ wake_up_process(child);
+ goto out;
+ }
+
+ case PTRACE_SINGLESTEP: { /* set the trap flag. */
+ long tmp;
+
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ goto out;
+ child->flags &= ~PF_TRACESYS;
+ if ((child->flags & PF_DTRACE) == 0) {
+ /* Spurious delayed TF traps may occur */
+ child->flags |= PF_DTRACE;
+ }
+#if 0
+ tmp = get_stack_long(child, EFL_OFFSET) | TRAP_FLAG;
+ put_stack_long(child, EFL_OFFSET, tmp);
+#endif
+ child->exit_code = data;
+ /* give it a chance to run. */
+ wake_up_process(child);
+ ret = 0;
+ goto out;
+ }
+
+ case PTRACE_DETACH: { /* detach a process that was attached. */
+ long tmp;
+
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ goto out;
+ child->flags &= ~(PF_PTRACED|PF_TRACESYS);
+ child->exit_code = data;
+ write_lock_irqsave(&tasklist_lock, flags);
+ REMOVE_LINKS(child);
+ child->p_pptr = child->p_opptr;
+ SET_LINKS(child);
+ write_unlock_irqrestore(&tasklist_lock, flags);
+ /* make sure the single step bit is not set. */
+#if 0
+ tmp = get_stack_long(child, EFL_OFFSET) & ~TRAP_FLAG;
+ put_stack_long(child, EFL_OFFSET, tmp);
+#endif
+ wake_up_process(child);
+ ret = 0;
+ goto out;
+ }
+#if 0
+ case PTRACE_GETREGS: { /* Get all gp regs from the child. */
+ if (!access_ok(VERIFY_WRITE, (unsigned *)data,
+ 17*sizeof(long)))
+ {
+ ret = -EIO;
+ goto out;
+ }
+ for ( i = 0; i < 17*sizeof(long); i += sizeof(long) )
+ {
+ __put_user(getreg(child, i),(unsigned long *) data);
+ data += sizeof(long);
+ }
+ ret = 0;
+ goto out;
+ };
+
+ case PTRACE_SETREGS: { /* Set all gp regs in the child. */
+ unsigned long tmp;
+ if (!access_ok(VERIFY_READ, (unsigned *)data,
+ 17*sizeof(long)))
+ {
+ ret = -EIO;
+ goto out;
+ }
+ for ( i = 0; i < 17*sizeof(long); i += sizeof(long) )
+ {
+ __get_user(tmp, (unsigned long *) data);
+ putreg(child, i, tmp);
+ data += sizeof(long);
+ }
+ ret = 0;
+ goto out;
+ };
+
+ case PTRACE_GETFPREGS: { /* Get the child FPU state. */
+ if (!access_ok(VERIFY_WRITE, (unsigned *)data,
+ sizeof(struct user_i387_struct)))
+ {
+ ret = -EIO;
+ goto out;
+ }
+ ret = 0;
+ if ( !child->used_math ) {
+ /* Simulate an empty FPU. */
+ child->thread.i387.hard.cwd = 0xffff037f;
+ child->thread.i387.hard.swd = 0xffff0000;
+ child->thread.i387.hard.twd = 0xffffffff;
+ }
+ __copy_to_user((void *)data, &child->thread.i387.hard,
+ sizeof(struct user_i387_struct));
+ goto out;
+ };
+
+ case PTRACE_SETFPREGS: { /* Set the child FPU state. */
+ if (!access_ok(VERIFY_READ, (unsigned *)data,
+ sizeof(struct user_i387_struct)))
+ {
+ ret = -EIO;
+ goto out;
+ }
+ child->used_math = 1;
+ __copy_from_user(&child->thread.i387.hard, (void *)data,
+ sizeof(struct user_i387_struct));
+ ret = 0;
+ goto out;
+ };
+#endif
+ default:
+ ret = -EIO;
+ goto out;
+ }
+out:
+ unlock_kernel();
+ return ret;
+}
+
+asmlinkage void syscall_trace(void)
+{
+ if ((current->flags & (PF_PTRACED|PF_TRACESYS))
+ != (PF_PTRACED|PF_TRACESYS))
+ return;
+ current->exit_code = SIGTRAP;
+ current->state = TASK_STOPPED;
+ notify_parent(current, SIGCHLD);
+ schedule();
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
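+
+/*
+ * For illustration only (not part of this file): a userspace tracer
+ * drives the stops reported by syscall_trace() roughly as follows,
+ * assuming the usual ptrace(2) interface:
+ *
+ *	ptrace(PTRACE_ATTACH, pid, 0, 0);
+ *	waitpid(pid, &status, 0);
+ *	while (WIFSTOPPED(status)) {
+ *		ptrace(PTRACE_SYSCALL, pid, 0, 0);
+ *		waitpid(pid, &status, 0);
+ *	}
+ */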
diff --git a/arch/sh/kernel/semaphore.c b/arch/sh/kernel/semaphore.c
new file mode 100644
index 000000000..b9f565dd8
--- /dev/null
+++ b/arch/sh/kernel/semaphore.c
@@ -0,0 +1,133 @@
+/*
+ * Taken straight from the Alpha implementation;
+ * it may well not work correctly here yet.
+ */
+/*
+ * Generic semaphore code. Buyer beware. Do your own
+ * specific changes in <asm/semaphore-helper.h>
+ */
+
+#include <linux/sched.h>
+#include <asm/semaphore-helper.h>
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to sleep, while the "waking" variable is
+ * incremented when the "up()" code goes to wake up waiting
+ * processes.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * waking_non_zero() (from asm/semaphore.h) must execute
+ * atomically.
+ *
+ * When __up() is called, the count was negative before
+ * incrementing it, and we need to wake up somebody.
+ *
+ * This routine adds one to the count of processes that need to
+ * wake up and exit. ALL waiting processes actually wake up but
+ * only the one that gets to the "waking" field first will gate
+ * through and acquire the semaphore. The others will go back
+ * to sleep.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
+void __up(struct semaphore *sem)
+{
+ wake_one_more(sem);
+ wake_up(&sem->wait);
+}
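+
+/*
+ * A minimal sketch (an assumption for illustration; the real helper
+ * lives in <asm/semaphore-helper.h>) of how waking_non_zero() lets
+ * exactly one waiter gate through per __up():
+ *
+ *	static inline int waking_non_zero(struct semaphore *sem)
+ *	{
+ *		unsigned long flags;
+ *		int ret = 0;
+ *
+ *		spin_lock_irqsave(&semaphore_wake_lock, flags);
+ *		if (sem->waking > 0) {
+ *			sem->waking--;
+ *			ret = 1;
+ *		}
+ *		spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ *		return ret;
+ *	}
+ */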
+
+/*
+ * Perform the "down" function. Return zero for semaphore acquired,
+ * return negative if signalled out of the function.
+ *
+ * If called from __down, the return is ignored and the wait loop is
+ * not interruptible. This means that a task waiting on a semaphore
+ * using "down()" cannot be killed until someone does an "up()" on
+ * the semaphore.
+ *
+ * If called from __down_interruptible, the return value gets checked
+ * upon return. If the return value is negative then the task continues
+ * with the negative value in the return register (it can be tested by
+ * the caller).
+ *
+ * Either form may be used in conjunction with "up()".
+ *
+ */
+
+#define DOWN_VAR \
+ struct task_struct *tsk = current; \
+ wait_queue_t wait; \
+ init_waitqueue_entry(&wait, tsk);
+
+#define DOWN_HEAD(task_state) \
+ tsk->state = (task_state); \
+ add_wait_queue(&sem->wait, &wait); \
+ \
+ /* \
+ * Ok, we're set up. sem->count is known to be less than zero \
+ * so we must wait. \
+ * \
+ * We can let go the lock for purposes of waiting. \
+ * We re-acquire it after awaking so as to protect \
+ * all semaphore operations. \
+ * \
+ * If "up()" is called before we call waking_non_zero() then \
+ * we will catch it right away. If it is called later then \
+ * we will have to go through a wakeup cycle to catch it. \
+ * \
+ * Multiple waiters contend for the semaphore lock to see \
+ * who gets to gate through and who has to wait some more. \
+ */ \
+ for (;;) {
+
+#define DOWN_TAIL(task_state) \
+ tsk->state = (task_state); \
+ } \
+ tsk->state = TASK_RUNNING; \
+ remove_wait_queue(&sem->wait, &wait);
+
+void __down(struct semaphore * sem)
+{
+ DOWN_VAR
+ DOWN_HEAD(TASK_UNINTERRUPTIBLE)
+ if (waking_non_zero(sem))
+ break;
+ schedule();
+ DOWN_TAIL(TASK_UNINTERRUPTIBLE)
+}
+
+int __down_interruptible(struct semaphore * sem)
+{
+ int ret = 0;
+ DOWN_VAR
+ DOWN_HEAD(TASK_INTERRUPTIBLE)
+
+ ret = waking_non_zero_interruptible(sem, tsk);
+ if (ret)
+ {
+ if (ret == 1)
+ /* ret != 0 only if we get interrupted -arca */
+ ret = 0;
+ break;
+ }
+ schedule();
+ DOWN_TAIL(TASK_INTERRUPTIBLE)
+ return ret;
+}
+
+int __down_trylock(struct semaphore * sem)
+{
+ return waking_non_zero_trylock(sem);
+}
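+
+/*
+ * Typical usage of the slow paths above, via the inline fast paths in
+ * <asm/semaphore.h> (illustrative; DECLARE_MUTEX is assumed to exist
+ * in this tree):
+ *
+ *	static DECLARE_MUTEX(my_sem);
+ *
+ *	down(&my_sem);		... falls into __down() on contention
+ *	... critical section ...
+ *	up(&my_sem);		... falls into __up() if someone waits
+ */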
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
new file mode 100644
index 000000000..6714fd96d
--- /dev/null
+++ b/arch/sh/kernel/setup.c
@@ -0,0 +1,188 @@
+/*
+ * linux/arch/sh/kernel/setup.c
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ */
+
+/*
+ * This file handles the architecture-dependent parts of initialization
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#ifdef CONFIG_BLK_DEV_RAM
+#include <linux/blk.h>
+#endif
+#include <asm/processor.h>
+#include <linux/console.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+
+/*
+ * Machine setup..
+ */
+
+struct sh_cpuinfo boot_cpu_data = { 0, 0, 0, 0, };
+extern int _text, _etext, _edata, _end, _stext, __bss_start;
+
+#ifdef CONFIG_BLK_DEV_RAM
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start; /* starting block # of image */
+#endif
+
+extern int root_mountflags;
+
+#define COMMAND_LINE_SIZE 1024
+static char command_line[COMMAND_LINE_SIZE] = { 0, };
+char saved_command_line[COMMAND_LINE_SIZE];
+
+extern unsigned char *root_fs_image;
+
+struct resource standard_io_resources[] = {
+ { "dma1", 0x00, 0x1f },
+ { "pic1", 0x20, 0x3f },
+ { "timer", 0x40, 0x5f },
+ { "keyboard", 0x60, 0x6f },
+ { "dma page reg", 0x80, 0x8f },
+ { "pic2", 0xa0, 0xbf },
+ { "dma2", 0xc0, 0xdf },
+ { "fpu", 0xf0, 0xff }
+};
+
+#define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))
+
+
+/* System RAM - interrupted by the 640kB-1M hole */
+#define code_resource (ram_resources[3])
+#define data_resource (ram_resources[4])
+static struct resource ram_resources[] = {
+ { "System RAM", 0x000000, 0x09ffff, IORESOURCE_BUSY },
+ { "System RAM", 0x100000, 0x100000, IORESOURCE_BUSY },
+ { "Video RAM area", 0x0a0000, 0x0bffff },
+ { "Kernel code", 0x100000, 0 },
+ { "Kernel data", 0, 0 }
+};
+
+/* System ROM resources */
+#define MAXROMS 6
+static struct resource rom_resources[MAXROMS] = {
+ { "System ROM", 0xF0000, 0xFFFFF, IORESOURCE_BUSY },
+ { "Video ROM", 0xc0000, 0xc7fff }
+};
+
+
+void __init setup_arch(char **cmdline_p,
+ unsigned long * memory_start_p,
+ unsigned long * memory_end_p)
+{
+ *cmdline_p = command_line;
+ *memory_start_p = (unsigned long) &_end;
+ *memory_end_p = 0x8c400000; /* For my board. */
+ ram_resources[1].end = *memory_end_p-1;
+
+ init_mm.start_code = (unsigned long)&_stext;
+ init_mm.end_code = (unsigned long) &_etext;
+ init_mm.end_data = (unsigned long) &_edata;
+ init_mm.brk = (unsigned long) &_end;
+
+ code_resource.start = virt_to_bus(&_text);
+ code_resource.end = virt_to_bus(&_etext)-1;
+ data_resource.start = virt_to_bus(&_etext);
+ data_resource.end = virt_to_bus(&_edata)-1;
+
+ ROOT_DEV = MKDEV(FLOPPY_MAJOR, 0);
+
+ initrd_below_start_ok = 1;
+ initrd_start = (long)&root_fs_image;
+ initrd_end = (long)&__bss_start;
+ mount_initrd = 1;
+
+#if 0
+ /* Request the standard RAM and ROM resources - they eat up PCI memory space */
+ request_resource(&iomem_resource, ram_resources+0);
+ request_resource(&iomem_resource, ram_resources+1);
+ request_resource(&iomem_resource, ram_resources+2);
+ request_resource(ram_resources+1, &code_resource);
+ request_resource(ram_resources+1, &data_resource);
+#endif
+
+#if 0
+ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
+ request_resource(&ioport_resource, standard_io_resources+i);
+#endif
+
+#if 0
+ rd_image_start = (long)root_fs_image;
+ rd_prompt = 0;
+ rd_doload = 1;
+#endif
+
+#if 0
+ ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV);
+
+#ifdef CONFIG_BLK_DEV_RAM
+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
+#endif
+
+ if (!MOUNT_ROOT_RDONLY)
+ root_mountflags &= ~MS_RDONLY;
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+#if 0
+ if (LOADER_TYPE) {
+ initrd_start = INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
+ initrd_end = initrd_start+INITRD_SIZE;
+ if (initrd_end > memory_end) {
+ printk("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ initrd_end,memory_end);
+ initrd_start = 0;
+ }
+ }
+#endif
+
+#endif
+}
+
+/*
+ * Get CPU information for use by the procfs.
+ */
+
+int get_cpuinfo(char *buffer)
+{
+ char *p = buffer;
+
+#ifdef CONFIG_CPU_SH3
+ p += sprintf(p,"cpu family\t: SH3\n"
+ "cache size\t: 8K-byte\n");
+#elif defined(CONFIG_CPU_SH4)
+ p += sprintf(p,"cpu family\t: SH4\n"
+ "cache size\t: ??K-byte\n");
+#endif
+ p += sprintf(p, "bogomips\t: %lu.%02lu\n\n",
+ (loops_per_sec+2500)/500000,
+ ((loops_per_sec+2500)/5000) % 100);
+
+ return p - buffer;
+}
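+
+/*
+ * Illustrative /proc/cpuinfo output of the above (assuming
+ * CONFIG_CPU_SH3 and loops_per_sec == 7372800):
+ *
+ *	cpu family	: SH3
+ *	cache size	: 8K-byte
+ *	bogomips	: 14.75
+ */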
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
new file mode 100644
index 000000000..2b1b9ea2e
--- /dev/null
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -0,0 +1,48 @@
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/mca.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/hardirq.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+
+extern void dump_thread(struct pt_regs *, struct user *);
+extern int dump_fpu(elf_fpregset_t *);
+
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
+extern struct drive_info_struct drive_info;
+EXPORT_SYMBOL(drive_info);
+#endif
+
+/* platform dependent support */
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(local_bh_count);
+EXPORT_SYMBOL(local_irq_count);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(kernel_thread);
+
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_partial_copy);
+
+EXPORT_SYMBOL(strtok);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strstr);
+
+#ifdef CONFIG_VT
+EXPORT_SYMBOL(screen_info);
+#endif
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
new file mode 100644
index 000000000..66fa36c85
--- /dev/null
+++ b/arch/sh/kernel/signal.c
@@ -0,0 +1,597 @@
+/*
+ * linux/arch/sh/kernel/signal.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
+ *
+ * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ *
+ */
+
+#include <linux/config.h>
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+
+#define DEBUG_SIG 0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
+ int options, unsigned long *ru);
+asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int
+sys_sigsuspend(old_sigset_t mask,
+ unsigned long r5, unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ sigset_t saveset;
+
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ regs.u_regs[0] = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&regs,&saveset))
+ return -EINTR;
+ }
+}
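+
+/*
+ * For illustration (not part of this file): the classic race-free wait
+ * pattern this syscall supports from userspace:
+ *
+ *	sigset_t block, old;
+ *	sigemptyset(&block);
+ *	sigaddset(&block, SIGUSR1);
+ *	sigprocmask(SIG_BLOCK, &block, &old);
+ *	while (!flag_set_by_handler)
+ *		sigsuspend(&old);	... atomically unblock and sleep
+ *	sigprocmask(SIG_SETMASK, &old, NULL);
+ */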
+
+asmlinkage int
+sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ regs.u_regs[0] = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&regs, &saveset))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction *act,
+ struct old_sigaction *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(const stack_t *uss, stack_t *uoss,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ return do_sigaltstack(uss, uoss, regs.u_regs[UREG_SP]);
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+struct sigframe
+{
+ struct sigcontext sc;
+ /* FPU should come here: SH-3 has no FPU */
+ unsigned long extramask[_NSIG_WORDS-1];
+ char retcode[4];
+};
+
+struct rt_sigframe
+{
+ struct siginfo *pinfo;
+ void *puc;
+ struct siginfo info;
+ struct ucontext uc;
+ /* FPU should come here: SH-3 has no FPU */
+ char retcode[4];
+};
+
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, int *r0_p)
+{
+ unsigned int err = 0;
+
+#define COPY(x) err |= __get_user(regs->x, &sc->x)
+ COPY(u_regs[1]);
+ COPY(u_regs[2]); COPY(u_regs[3]);
+ COPY(u_regs[4]); COPY(u_regs[5]);
+ COPY(u_regs[6]); COPY(u_regs[7]);
+ COPY(u_regs[8]); COPY(u_regs[9]);
+ COPY(u_regs[10]); COPY(u_regs[11]);
+ COPY(u_regs[12]); COPY(u_regs[13]);
+ COPY(u_regs[14]); COPY(u_regs[15]);
+ COPY(gbr); COPY(mach);
+ COPY(macl); COPY(pr);
+ COPY(sr); COPY(pc);
+#undef COPY
+
+ regs->syscall_nr = -1; /* disable syscall checks */
+ err |= __get_user(*r0_p, &sc->u_regs[0]);
+
+ return err;
+}
+
+asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ struct sigframe *frame = (struct sigframe *)regs.u_regs[UREG_SP];
+ sigset_t set;
+ int r0;
+
+ if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__get_user(set.sig[0], &frame->sc.oldmask)
+ || (_NSIG_WORDS > 1
+ && __copy_from_user(&set.sig[1], &frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sigmask_lock);
+ current->blocked = set;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ if (restore_sigcontext(&regs, &frame->sc, &r0))
+ goto badframe;
+ return r0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ struct rt_sigframe *frame = (struct rt_sigframe *)regs.u_regs[UREG_SP];
+ sigset_t set;
+ stack_t st;
+ int r0;
+
+ if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sigmask_lock);
+ current->blocked = set;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &r0))
+ goto badframe;
+
+ if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+ goto badframe;
+ /* It is more difficult to avoid calling this function than to
+ call it and ignore errors. */
+ do_sigaltstack(&st, NULL, regs.u_regs[UREG_SP]);
+
+ return r0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static int
+setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+ unsigned long mask)
+{
+ int err = 0;
+
+#define COPY(x) err |= __put_user(regs->x, &sc->x)
+ COPY(u_regs[0]); COPY(u_regs[1]);
+ COPY(u_regs[2]); COPY(u_regs[3]);
+ COPY(u_regs[4]); COPY(u_regs[5]);
+ COPY(u_regs[6]); COPY(u_regs[7]);
+ COPY(u_regs[8]); COPY(u_regs[9]);
+ COPY(u_regs[10]); COPY(u_regs[11]);
+ COPY(u_regs[12]); COPY(u_regs[13]);
+ COPY(u_regs[14]); COPY(u_regs[15]);
+ COPY(gbr); COPY(mach);
+ COPY(macl); COPY(pr);
+ COPY(sr); COPY(pc);
+#undef COPY
+
+ /* non-iBCS2 extensions.. */
+ err |= __put_user(mask, &sc->oldmask);
+
+ return err;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void *
+get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
+{
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ return (void *)((sp - frame_size) & -8ul);
+}
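+
+/*
+ * Example of the rounding above: with sp == 0x7ffffff5 and
+ * frame_size == 0x1c, sp - frame_size == 0x7fffffd9, and masking
+ * with -8ul (~7ul) rounds it down to the 8-byte boundary 0x7fffffd8.
+ */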
+
+static void setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct sigframe *frame;
+ int err = 0;
+ int signal;
+
+ frame = get_sigframe(ka, regs->u_regs[UREG_SP], sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ signal = current->exec_domain
+ && current->exec_domain->signal_invmap
+ && sig < 32
+ ? current->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
+
+ if (_NSIG_WORDS > 1) {
+ err |= __copy_to_user(frame->extramask, &set->sig[1],
+ sizeof(frame->extramask));
+ }
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ regs->pr = (unsigned long) ka->sa.sa_restorer;
+ } else {
+		/* This is: mov #__NR_sigreturn,r0 ; trapa #0 */
+#ifdef CONFIG_LITTLE_ENDIAN
+ unsigned long code = 0x00c300e0 | (__NR_sigreturn << 8);
+#else
+ unsigned long code = 0xe000c300 | (__NR_sigreturn << 16);
+#endif
+
+ regs->pr = (unsigned long) frame->retcode;
+ err |= __put_user(code, (long *)(frame->retcode+0));
+ }
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up registers for signal handler */
+ regs->u_regs[UREG_SP] = (unsigned long) frame;
+ regs->u_regs[4] = signal; /* Arg for signal handler */
+ regs->pc = (unsigned long) ka->sa.sa_handler;
+
+ set_fs(USER_DS);
+
+#if DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
+ current->comm, current->pid, frame, regs->pc, regs->pr);
+#endif
+
+ return;
+
+give_sigsegv:
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
+}
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ int err = 0;
+ int signal;
+
+ frame = get_sigframe(ka, regs->u_regs[UREG_SP], sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ signal = current->exec_domain
+ && current->exec_domain->signal_invmap
+ && sig < 32
+ ? current->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= __put_user(&frame->info, &frame->pinfo);
+ err |= __put_user(&frame->uc, &frame->puc);
+ err |= __copy_to_user(&frame->info, info, sizeof(*info));
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->u_regs[UREG_SP]),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext,
+ regs, set->sig[0]);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ regs->pr = (unsigned long) ka->sa.sa_restorer;
+ } else {
+		/* This is: mov #__NR_rt_sigreturn,r0 ; trapa #0
+		   (the rt frame must be torn down by sys_rt_sigreturn) */
+#ifdef CONFIG_LITTLE_ENDIAN
+		unsigned long code = 0x00c300e0 | (__NR_rt_sigreturn << 8);
+#else
+		unsigned long code = 0xe000c300 | (__NR_rt_sigreturn << 16);
+#endif
+
+ regs->pr = (unsigned long) frame->retcode;
+ err |= __put_user(code, (long *)(frame->retcode+0));
+ }
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up registers for signal handler */
+ regs->u_regs[UREG_SP] = (unsigned long) frame;
+ regs->u_regs[4] = signal; /* Arg for signal handler */
+ regs->pc = (unsigned long) ka->sa.sa_handler;
+
+ set_fs(USER_DS);
+
+#if DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
+ current->comm, current->pid, frame, regs->pc, regs->pr);
+#endif
+
+ return;
+
+give_sigsegv:
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+{
+ /* Are we from a system call? */
+ if (regs->syscall_nr >= 0) {
+ /* If so, check system call restarting.. */
+ switch (regs->u_regs[0]) {
+ case -ERESTARTNOHAND:
+ regs->u_regs[0] = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->u_regs[0] = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->u_regs[0] = regs->syscall_nr;
+			regs->pc -= 2;	/* back up one 16-bit insn to re-execute the trapa */
+ }
+ }
+
+ /* Set up the stack frame */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ setup_frame(sig, ka, oldset, regs);
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sigmask_lock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+ }
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init, even with SIGKILL, even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+int do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+ siginfo_t info;
+ struct k_sigaction *ka;
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ for (;;) {
+ unsigned long signr;
+
+ spin_lock_irq(&current->sigmask_lock);
+ signr = dequeue_signal(&current->blocked, &info);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ if (!signr)
+ break;
+
+ if ((current->flags & PF_PTRACED) && signr != SIGKILL) {
+ /* Let the debugger run. */
+ current->exit_code = signr;
+ current->state = TASK_STOPPED;
+ notify_parent(current, SIGCHLD);
+ schedule();
+
+ /* We're back. Did the debugger cancel the sig? */
+ if (!(signr = current->exit_code))
+ continue;
+ current->exit_code = 0;
+
+ /* The debugger continued. Ignore SIGSTOP. */
+ if (signr == SIGSTOP)
+ continue;
+
+ /* Update the siginfo structure. Is this good? */
+ if (signr != info.si_signo) {
+ info.si_signo = signr;
+ info.si_errno = 0;
+ info.si_code = SI_USER;
+ info.si_pid = current->p_pptr->pid;
+ info.si_uid = current->p_pptr->uid;
+ }
+
+ /* If the (new) signal is now blocked, requeue it. */
+ if (sigismember(&current->blocked, signr)) {
+ send_sig_info(signr, &info, current);
+ continue;
+ }
+ }
+
+ ka = &current->sig->action[signr-1];
+ if (ka->sa.sa_handler == SIG_IGN) {
+ if (signr != SIGCHLD)
+ continue;
+ /* Check for SIGCHLD: it's special. */
+ while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
+ /* nothing */;
+ continue;
+ }
+
+ if (ka->sa.sa_handler == SIG_DFL) {
+ int exit_code = signr;
+
+ /* Init gets no signals it doesn't want. */
+ if (current->pid == 1)
+ continue;
+
+ switch (signr) {
+ case SIGCONT: case SIGCHLD: case SIGWINCH:
+ continue;
+
+ case SIGTSTP: case SIGTTIN: case SIGTTOU:
+ if (is_orphaned_pgrp(current->pgrp))
+ continue;
+ /* FALLTHRU */
+
+ case SIGSTOP:
+ current->state = TASK_STOPPED;
+ current->exit_code = signr;
+ if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
+ notify_parent(current, SIGCHLD);
+ schedule();
+ continue;
+
+ case SIGQUIT: case SIGILL: case SIGTRAP:
+ case SIGABRT: case SIGFPE: case SIGSEGV:
+ if (do_coredump(signr, regs))
+ exit_code |= 0x80;
+ /* FALLTHRU */
+
+ default:
+ lock_kernel();
+ sigaddset(&current->signal, signr);
+ recalc_sigpending(current);
+ current->flags |= PF_SIGNALED;
+ do_exit(exit_code);
+ /* NOTREACHED */
+ }
+ }
+
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, ka, &info, oldset, regs);
+ return 1;
+ }
+
+ /* Did we come from a system call? */
+ if (regs->syscall_nr >= 0) {
+ /* Restart the system call - no handlers present */
+ if (regs->u_regs[0] == -ERESTARTNOHAND ||
+ regs->u_regs[0] == -ERESTARTSYS ||
+ regs->u_regs[0] == -ERESTARTNOINTR) {
+ regs->u_regs[0] = regs->syscall_nr;
+			regs->pc -= 2;	/* re-execute the trapa instruction */
+ }
+ }
+ return 0;
+}
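+
+/*
+ * For illustration (not part of this file): a handler installed with
+ * SA_RESTART exercises the -ERESTARTSYS path in handle_signal() above:
+ *
+ *	struct sigaction sa;
+ *	sa.sa_handler = handler;
+ *	sigemptyset(&sa.sa_mask);
+ *	sa.sa_flags = SA_RESTART;	... read() etc. resume afterwards
+ *	sigaction(SIGALRM, &sa, NULL);
+ */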
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
new file mode 100644
index 000000000..6999cff81
--- /dev/null
+++ b/arch/sh/kernel/sys_sh.c
@@ -0,0 +1,249 @@
+/*
+ * linux/arch/sh/kernel/sys_sh.c
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/SuperH
+ * platform.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+
+#include <asm/uaccess.h>
+#include <asm/ipc.h>
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way Unix traditionally does this, though.
+ */
+asmlinkage int sys_pipe(unsigned long * fildes)
+{
+ int fd[2];
+ int error;
+
+ lock_kernel();
+ error = do_pipe(fd);
+ unlock_kernel();
+ if (!error) {
+ if (copy_to_user(fildes, fd, 2*sizeof(int)))
+ error = -EFAULT;
+ }
+ return error;
+}
+
+/*
+ * Perform the select(nd, in, out, ex, tv) and mmap() system
+ * calls. Linux/i386 didn't use to be able to handle more than
+ * 4 system call parameters, so these system calls used a memory
+ * block for parameter passing..
+ */
+
+struct mmap_arg_struct {
+ unsigned long addr;
+ unsigned long len;
+ unsigned long prot;
+ unsigned long flags;
+ unsigned long fd;
+ unsigned long offset;
+};
+
+asmlinkage int old_mmap(struct mmap_arg_struct *arg)
+{
+ int error = -EFAULT;
+ struct file * file = NULL;
+ struct mmap_arg_struct a;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+
+ down(&current->mm->mmap_sem);
+ lock_kernel();
+ if (!(a.flags & MAP_ANONYMOUS)) {
+ error = -EBADF;
+ file = fget(a.fd);
+ if (!file)
+ goto out;
+ }
+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ error = do_mmap(file, a.addr, a.len, a.prot, a.flags, a.offset);
+ if (file)
+ fput(file);
+out:
+ unlock_kernel();
+ up(&current->mm->mmap_sem);
+ return error;
+}
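+
+/*
+ * For illustration (not part of this file): an old libc invokes this
+ * entry point by packing all six arguments into a memory block:
+ *
+ *	struct mmap_arg_struct a = { addr, len, prot, flags, fd, off };
+ *	ret = syscall(__NR_mmap, &a);
+ */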
+
+extern asmlinkage int sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
+
+struct sel_arg_struct {
+ unsigned long n;
+ fd_set *inp, *outp, *exp;
+ struct timeval *tvp;
+};
+
+asmlinkage int old_select(struct sel_arg_struct *arg)
+{
+ struct sel_arg_struct a;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+ /* sys_select() does the appropriate kernel locking */
+ return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls..
+ *
+ * This is really horribly ugly.
+ */
+asmlinkage int sys_ipc (uint call, int first, int second,
+ int third, void *ptr, long fifth)
+{
+ int version, ret;
+
+ version = call >> 16; /* hack for backward compatibility */
+ call &= 0xffff;
+
+ if (call <= SEMCTL)
+ switch (call) {
+ case SEMOP:
+ return sys_semop (first, (struct sembuf *)ptr, second);
+ case SEMGET:
+ return sys_semget (first, second, third);
+ case SEMCTL: {
+ union semun fourth;
+ if (!ptr)
+ return -EINVAL;
+ if (get_user(fourth.__pad, (void **) ptr))
+ return -EFAULT;
+ return sys_semctl (first, second, third, fourth);
+ }
+ default:
+ return -EINVAL;
+ }
+
+ if (call <= MSGCTL)
+ switch (call) {
+ case MSGSND:
+ return sys_msgsnd (first, (struct msgbuf *) ptr,
+ second, third);
+ case MSGRCV:
+ switch (version) {
+ case 0: {
+ struct ipc_kludge tmp;
+ if (!ptr)
+ return -EINVAL;
+
+ if (copy_from_user(&tmp,
+ (struct ipc_kludge *) ptr,
+ sizeof (tmp)))
+ return -EFAULT;
+ return sys_msgrcv (first, tmp.msgp, second,
+ tmp.msgtyp, third);
+ }
+ default:
+ return sys_msgrcv (first,
+ (struct msgbuf *) ptr,
+ second, fifth, third);
+ }
+ case MSGGET:
+ return sys_msgget ((key_t) first, second);
+ case MSGCTL:
+ return sys_msgctl (first, second,
+ (struct msqid_ds *) ptr);
+ default:
+ return -EINVAL;
+ }
+ if (call <= SHMCTL)
+ switch (call) {
+ case SHMAT:
+ switch (version) {
+ default: {
+ ulong raddr;
+ ret = sys_shmat (first, (char *) ptr,
+ second, &raddr);
+ if (ret)
+ return ret;
+ return put_user (raddr, (ulong *) third);
+ }
+ case 1: /* iBCS2 emulator entry point */
+ if (!segment_eq(get_fs(), get_ds()))
+ return -EINVAL;
+ return sys_shmat (first, (char *) ptr,
+ second, (ulong *) third);
+ }
+ case SHMDT:
+ return sys_shmdt ((char *)ptr);
+ case SHMGET:
+ return sys_shmget (first, second, third);
+ case SHMCTL:
+ return sys_shmctl (first, second,
+ (struct shmid_ds *) ptr);
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
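+
+/*
+ * For illustration (not part of this file): a libc-side wrapper
+ * funnels each SysV call through this single entry point, e.g.:
+ *
+ *	int semop(int semid, struct sembuf *sops, unsigned nsops)
+ *	{
+ *		return syscall(__NR_ipc, SEMOP, semid, nsops, 0, sops, 0);
+ *	}
+ */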
+
+/*
+ * Old cruft
+ */
+asmlinkage int sys_uname(struct old_utsname * name)
+{
+ int err;
+ if (!name)
+ return -EFAULT;
+ down(&uts_sem);
+ err=copy_to_user(name, &system_utsname, sizeof (*name));
+ up(&uts_sem);
+ return err?-EFAULT:0;
+}
+
+asmlinkage int sys_olduname(struct oldold_utsname * name)
+{
+ int error;
+
+ if (!name)
+ return -EFAULT;
+ if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
+ return -EFAULT;
+
+ down(&uts_sem);
+
+ error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
+ error |= __put_user(0,name->sysname+__OLD_UTS_LEN);
+ error |= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
+ error |= __put_user(0,name->nodename+__OLD_UTS_LEN);
+ error |= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
+ error |= __put_user(0,name->release+__OLD_UTS_LEN);
+ error |= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
+ error |= __put_user(0,name->version+__OLD_UTS_LEN);
+ error |= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
+ error |= __put_user(0,name->machine+__OLD_UTS_LEN);
+
+ up(&uts_sem);
+
+ error = error ? -EFAULT : 0;
+
+ return error;
+}
+
+asmlinkage int sys_pause(void)
+{
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ return -ERESTARTNOHAND;
+}
diff --git a/arch/sh/kernel/test-img.c b/arch/sh/kernel/test-img.c
new file mode 100644
index 000000000..daade9f6d
--- /dev/null
+++ b/arch/sh/kernel/test-img.c
@@ -0,0 +1,69 @@
+unsigned char root_fs_image[]
+__attribute__((__section__(".data.disk_image")))
+= {
+0x1f,0x8b,0x08,0x08,0x5d,0xd5,0xc7,0x37,0x00,0x03,0x72,0x2e,0x62,0x69,0x6e,0x00,
+0xed,0xdc,0x3f,0x6c,0x1b,0x55,0x1c,0xc0,0xf1,0xdf,0xf9,0xdc,0x04,0x27,0x69,0xb1,
+0x93,0x14,0x10,0x48,0x91,0xd3,0x02,0x4d,0x8a,0xb8,0xd4,0x21,0x8a,0x09,0x02,0x02,
+0xb5,0x4a,0xab,0x52,0x65,0x69,0x11,0x03,0x42,0xc2,0xb1,0x8f,0xc4,0x92,0xe3,0x03,
+0x9f,0x8d,0xca,0x14,0xd8,0x88,0x2a,0xa6,0x0e,0x88,0xa9,0x20,0xb1,0x87,0x8d,0xa5,
+0x5b,0x86,0xcc,0x90,0x78,0x77,0xd4,0x60,0x75,0xa9,0x40,0xe2,0xdf,0xd0,0x42,0x78,
+0x77,0xef,0x9c,0x38,0x24,0x72,0x49,0x20,0xc9,0x70,0xdf,0x8f,0xf2,0xf3,0xd9,0x77,
+0xbf,0xf3,0xbb,0x67,0xbf,0xdf,0xf9,0x4f,0xf4,0x2c,0x02,0x20,0xac,0xe2,0x2a,0x5e,
+0x53,0x61,0xaa,0x18,0x0e,0xd6,0x19,0xad,0x09,0x49,0x1d,0x5e,0x5e,0x7d,0x75,0x39,
+0xfd,0x6c,0x6d,0x39,0x6d,0x48,0xbf,0x5c,0xfd,0xc9,0xf0,0xf3,0x56,0xd5,0x3a,0x99,
+0xba,0xf7,0xd0,0x76,0x8a,0x53,0x5f,0xc4,0xdf,0xcd,0x24,0x56,0x6e,0x9e,0x59,0xb9,
+0x30,0x3e,0x73,0x3b,0xf7,0x3f,0x76,0x01,0xc0,0x3e,0x79,0x75,0x1f,0x55,0x71,0x4c,
+0x74,0xfd,0x47,0x8f,0xf6,0x70,0x00,0x1c,0xa2,0x8d,0x8d,0x49,0x6f,0xf1,0xc9,0x06,
+0x00,0x00,0x08,0x8d,0xe6,0xfb,0x00,0xef,0x73,0x7c,0x33,0x0e,0xf3,0xfd,0xc7,0xbd,
+0xd7,0xc5,0xff,0xd0,0x31,0x5a,0x5b,0x4e,0xf7,0x05,0xa1,0xb7,0x1c,0x93,0x48,0x4b,
+0x5e,0xe7,0x61,0x1e,0x14,0x80,0x50,0xf0,0xcf,0x3f,0xe7,0x76,0x3b,0xff,0x45,0xe4,
+0x89,0x96,0xbc,0x47,0x54,0xc4,0x54,0x74,0xa9,0xe8,0x56,0xd1,0xa3,0xe2,0xb8,0x8a,
+0x13,0x2a,0x1e,0x15,0xfd,0xfd,0x68,0x42,0x45,0xaf,0x8a,0xbe,0xbd,0xb6,0xaf,0xce,
+0x7f,0x7f,0xaa,0x76,0xef,0x07,0xd1,0x6c,0xbf,0xf5,0xfc,0xd7,0xbf,0xf7,0xae,0x6d,
+0x32,0xda,0x6c,0x6b,0xb6,0x7f,0x56,0x9d,0x77,0x4f,0x05,0xb1,0x5b,0xfb,0x27,0x0f,
+0xa8,0xfd,0x6f,0x06,0xf5,0xf2,0xfe,0x8e,0xfe,0xff,0x63,0xaf,0xff,0xf0,0xc5,0x54,
+0xdb,0xfe,0x7f,0x7a,0xeb,0xf2,0x15,0x53,0xe4,0xe6,0xaa,0x7e,0xed,0x19,0x0b,0xda,
+0xbf,0x75,0xd9,0xd8,0xd6,0xff,0xc7,0xf6,0xdf,0x7c,0xdb,0xf6,0x37,0xbe,0xd6,0x63,
+0x6a,0xe7,0xe3,0xbf,0x7d,0x2f,0xcb,0x1a,0x29,0x16,0x4a,0xd5,0xeb,0xe5,0x7d,0x7c,
+0x73,0xde,0xae,0x7d,0xaf,0x8f,0x3d,0x2a,0xc3,0xda,0xbc,0x1e,0x51,0x6d,0xe9,0x31,
+0xde,0xaf,0x8e,0xac,0xe8,0xb8,0x95,0xe7,0xde,0x77,0xaa,0xa5,0xbc,0x1e,0xf3,0x3d,
+0x62,0x4a,0xde,0xfe,0xc8,0x1f,0xfb,0x3d,0xea,0x49,0x71,0xa7,0x0b,0x25,0x6f,0xfc,
+0xdf,0x36,0x3b,0x65,0xdf,0x07,0x08,0xe0,0x48,0xe8,0xd7,0xb2,0xad,0xfa,0xff,0xd5,
+0xd4,0xf5,0x0f,0x20,0x24,0xf8,0xa7,0x1f,0x10,0x5e,0xd4,0x3f,0x10,0x5e,0xd4,0x3f,
+0x10,0x5e,0xd4,0x3f,0x10,0x5e,0xd4,0x3f,0x10,0x5e,0xd4,0x3f,0x10,0x5e,0xd4,0x3f,
+0x10,0x5e,0xd4,0x3f,0x10,0x5e,0xd4,0x3f,0x10,0x5e,0xd4,0x3f,0x10,0x5e,0xd4,0x3f,
+0x10,0x4a,0x7a,0x4e,0xcf,0xce,0xf9,0x3f,0xde,0xbc,0xb6,0xbb,0x66,0xa7,0xe4,0x9c,
+0x92,0xeb,0x14,0xed,0xa3,0x3d,0x48,0x00,0x07,0x42,0xcf,0xe3,0xdb,0x59,0xff,0xde,
+0x7c,0xd6,0xbb,0x66,0x54,0x0a,0xa5,0x42,0xe5,0x68,0x8f,0x10,0xc0,0x41,0x99,0xbf,
+0x70,0xe5,0x0d,0x23,0xd2,0x32,0x43,0x38,0x22,0x67,0xc5,0x9f,0x32,0x1c,0xff,0x4a,
+0x2d,0xc7,0xd4,0xd5,0x75,0x7f,0xfd,0x98,0x24,0xd5,0xb6,0x21,0x89,0xf9,0x53,0xe1,
+0x83,0x1d,0xe2,0x41,0x18,0xd3,0x3a,0xfc,0x9f,0x11,0x34,0x74,0x78,0xb7,0x07,0x83,
+0xd8,0xd4,0xe1,0x27,0x67,0xd6,0x8d,0x46,0x7c,0xa4,0x51,0x8f,0xd6,0x3a,0x4a,0xbf,
+0x2c,0xc9,0x7b,0xa7,0x3f,0x33,0x16,0xcc,0x5a,0xb4,0x61,0xd6,0xa3,0x4b,0xe2,0xdc,
+0x91,0xee,0xd2,0xef,0x22,0x89,0xa7,0x55,0xbc,0x38,0xd2,0x98,0xff,0xb9,0x1e,0xf1,
+0xb2,0xa6,0xcd,0xf3,0x89,0x85,0xce,0x75,0xa3,0xf6,0x78,0xe3,0xa4,0x97,0x27,0xb1,
+0xc5,0xbf,0x24,0x76,0x6a,0x68,0xa1,0x7b,0xa5,0x6f,0x4d,0x3e,0x34,0x52,0xe9,0x1b,
+0x0f,0xf2,0xa7,0x7f,0x34,0xea,0xcf,0x2c,0xc9,0xe2,0x1f,0x6b,0x6a,0xfb,0xf7,0x27,
+0xd6,0x0d,0xab,0xd7,0xbe,0xb3,0x26,0x03,0x89,0x86,0x0c,0xf4,0xd6,0x33,0x03,0x7d,
+0x4b,0xf2,0x43,0xd7,0xba,0x21,0xb1,0x5a,0xac,0x71,0xdc,0xbb,0x17,0x2f,0x4f,0xed,
+0x7b,0xe6,0xc6,0x83,0xc5,0xdf,0xbc,0xf5,0xaa,0xcd,0x97,0xe5,0x9d,0xcf,0xe7,0x55,
+0xbf,0x2a,0xf2,0xdd,0x93,0x1b,0xea,0xf6,0xb5,0x6b,0xb3,0x05,0x37,0xa9,0xfe,0xae,
+0x56,0x3f,0xb0,0xcb,0x97,0x06,0xbd,0xe9,0xda,0x32,0x39,0xd9,0x25,0xae,0x33,0x67,
+0x57,0x66,0x0b,0xa5,0x99,0x64,0xb5,0x54,0x75,0xab,0xd9,0xa2,0x65,0x59,0xde,0xc6,
+0x4b,0x76,0xb1,0xe8,0x24,0xdf,0x76,0xca,0xc5,0xbc,0x97,0x7c,0x31,0x93,0x79,0x29,
+0x39,0x74,0x71,0xea,0xad,0xe1,0xa4,0x3d,0x93,0x73,0x9f,0x1f,0xb5,0x26,0x52,0xd6,
+0xf8,0x78,0x32,0x35,0x31,0x31,0x71,0xee,0x85,0xd4,0x58,0x72,0xc8,0x5f,0x9d,0xb2,
+0x52,0xd6,0x68,0xb2,0x6c,0x17,0xed,0xac,0x6b,0x0f,0x8b,0x58,0xee,0xc7,0x73,0x95,
+0xec,0xb4,0x5a,0x56,0xca,0x7a,0x39,0xdb,0xbc,0x56,0xb1,0xaf,0x57,0xc4,0x2a,0x3b,
+0xf9,0x6c,0x25,0x2b,0x96,0xbe,0xcc,0x55,0x9c,0xb2,0xab,0x6e,0xe8,0xc5,0xb4,0xab,
+0x2e,0x72,0xce,0xdc,0x9c,0x5d,0xda,0xd3,0xe9,0xfb,0xa9,0xe0,0xf9,0xeb,0xf0,0xfb,
+0x2f,0xe2,0xc5,0xb7,0x2d,0xdb,0x9b,0x9f,0x14,0x07,0x83,0xbc,0x88,0x7e,0x9e,0x0c,
+0x15,0xf2,0xea,0x2e,0x79,0xc3,0x41,0x9e,0xa9,0xc7,0x81,0xd1,0x3a,0x16,0x64,0x6b,
+0x1c,0xc9,0xc8,0xd6,0xb8,0x69,0x9b,0x37,0xfe,0x2f,0xf3,0x5e,0x11,0xfd,0x93,0x0d,
+0x0f,0x6b,0xf7,0xbc,0x6c,0x9b,0x1e,0xef,0xe7,0xa5,0x77,0xc9,0x4b,0xe8,0xfb,0xda,
+0x5c,0xfd,0xa5,0xba,0x78,0x73,0x97,0x3c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x87,0xe8,0x6f,0x20,0x01,0xec,0xc5,0x00,0x00,0x01,0x00,
+};
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
new file mode 100644
index 000000000..6f4598a7e
--- /dev/null
+++ b/arch/sh/kernel/time.c
@@ -0,0 +1,224 @@
+/*
+ * linux/arch/sh/kernel/time.c
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * Some code taken from i386 version.
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/delay.h>
+
+#include <linux/timex.h>
+#include <linux/irq.h>
+
+#define TMU_TOCR 0xfffffe90 /* Byte access */
+#define TMU_TSTR 0xfffffe92 /* Byte access */
+
+#define TMU0_TCOR 0xfffffe94 /* Long access */
+#define TMU0_TCNT 0xfffffe98 /* Long access */
+#define TMU0_TCR 0xfffffe9c /* Word access */
+
+#define TMU_TOCR_INIT 0x00
+#define TMU0_TCR_INIT 0x0020
+#define TMU_TSTR_INIT 1
+
+#define CLOCK_MHZ (60/4)
+#define INTERVAL 37500 /* (1000000*CLOCK_MHZ/HZ/2) ??? */
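+
+/*
+ * One consistent reading of INTERVAL (an assumption; the formula
+ * above is marked "???"): with the 15 MHz peripheral clock and the
+ * TMU's default /4 prescaler, 15000000/4/HZ == 37500 counts per
+ * tick for HZ == 100.
+ */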
+
+extern rwlock_t xtime_lock;
+#define TICK_SIZE tick
+
+void do_gettimeofday(struct timeval *tv)
+{
+ extern volatile unsigned long lost_ticks;
+ unsigned long flags;
+ unsigned long usec, sec;
+
+ read_lock_irqsave(&xtime_lock, flags);
+ usec = 0;
+ {
+ unsigned long lost = lost_ticks;
+ if (lost)
+ usec += lost * (1000000 / HZ);
+ }
+ sec = xtime.tv_sec;
+ usec += xtime.tv_usec;
+ read_unlock_irqrestore(&xtime_lock, flags);
+
+ while (usec >= 1000000) {
+ usec -= 1000000;
+ sec++;
+ }
+
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+}
+
+void do_settimeofday(struct timeval *tv)
+{
+ write_lock_irq(&xtime_lock);
+ xtime = *tv;
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+ write_unlock_irq(&xtime_lock);
+}
+
+/* XXX: not implemented yet. */
+static int set_rtc_time(unsigned long nowtime)
+{
+ int retval = -1;
+
+ return retval;
+}
+
+/* last time the RTC clock got updated */
+static long last_rtc_update = 0;
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ do_timer(regs);
+
+#if 0
+ if (!user_mode(regs))
+ sh_do_profile(regs->pc);
+#endif
+
+ /*
+ * If we have an externally synchronized Linux clock, then update
+ * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+ * called as close as possible to 500 ms before the new second starts.
+ */
+ if ((time_status & STA_UNSYNC) == 0 &&
+ xtime.tv_sec > last_rtc_update + 660 &&
+ xtime.tv_usec >= 500000 - ((unsigned) tick) / 2 &&
+ xtime.tv_usec <= 500000 + ((unsigned) tick) / 2) {
+ if (set_rtc_time(xtime.tv_sec) == 0)
+ last_rtc_update = xtime.tv_sec;
+ else
+ last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
+ }
+}
+
+/*
+ * This is the same as the above, except we _also_ save the current
+ * Time Stamp Counter value at the time of the timer interrupt, so that
+ * we later on can estimate the time of day more exactly.
+ */
+static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned long __dummy;
+
+ /* Clear UNF bit */
+ asm volatile("mov.w %1,%0\n\t"
+ "and %2,%0\n\t"
+ "mov.w %0,%1"
+ : "=&z" (__dummy)
+ : "m" (__m(TMU0_TCR)), "r" (~0x100));
+
+ /*
+ * Here we are in the timer irq handler. We just have irqs locally
+ * disabled but we don't know if the timer_bh is running on the other
+	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
+	 * the irq version of write_lock because, as just said, we have
+	 * irqs locally disabled. -arca
+ */
+ write_lock(&xtime_lock);
+
+ do_timer_interrupt(irq, NULL, regs);
+
+ write_unlock(&xtime_lock);
+}
+
+/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
+ * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
+ * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
+ *
+ * [For the Julian calendar (which was used in Russia before 1917,
+ * Britain & colonies before 1752, anywhere else before 1582,
+ * and is still in use by some communities) leave out the
+ * -year/100+year/400 terms, and add 10.]
+ *
+ * This algorithm was first published by Gauss (I think).
+ *
+ * WARNING: this function will overflow on 2106-02-07 06:28:16 on
+ * machines where long is 32-bit! (However, as time_t is signed, we
+ * will already get problems at other places on 2038-01-19 03:14:08)
+ */
+static inline unsigned long mktime(unsigned int year, unsigned int mon,
+ unsigned int day, unsigned int hour,
+ unsigned int min, unsigned int sec)
+{
+ if (0 >= (int) (mon -= 2)) { /* 1..12 -> 11,12,1..10 */
+ mon += 12; /* Puts Feb last since it has leap day */
+ year -= 1;
+ }
+ return (((
+ (unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day) +
+ year*365 - 719499
+ )*24 + hour /* now have hours */
+ )*60 + min /* now have minutes */
+ )*60 + sec; /* finally seconds */
+}
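+
+/*
+ * Worked check of the formula above: mktime(1970, 1, 1, 0, 0, 0)
+ * shifts to mon == 11, year == 1969, so the day expression is
+ * (492 - 19 + 4 + 336 + 1) + 1969*365 - 719499 == 0, i.e. the epoch.
+ */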
+
+static unsigned long get_rtc_time(void)
+{
+/* XXX not implemented yet */
+ return 0;
+}
+
+static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, 0, "timer", NULL, NULL};
+
+void __init time_init(void)
+{
+ unsigned long __dummy;
+
+ xtime.tv_sec = get_rtc_time();
+ xtime.tv_usec = 0;
+
+ set_ipr_data(TIMER_IRQ, TIMER_IRP_OFFSET, TIMER_PRIORITY);
+ setup_irq(TIMER_IRQ, &irq0);
+
+ /* Start TMU0 */
+ asm volatile("mov %1,%0\n\t"
+ "mov.b %0,%2 ! external clock input\n\t"
+ "mov %3,%0\n\t"
+ "mov.w %0,%4 ! enable timer0 interrupt\n\t"
+ "mov.l %5,%6\n\t"
+ "mov.l %5,%7\n\t"
+ "mov %8,%0\n\t"
+ "mov.b %0,%9"
+ : "=&z" (__dummy)
+ : "i" (TMU_TOCR_INIT), "m" (__m(TMU_TOCR)),
+ "i" (TMU0_TCR_INIT), "m" (__m(TMU0_TCR)),
+ "r" (INTERVAL), "m" (__m(TMU0_TCOR)), "m" (__m(TMU0_TCNT)),
+ "i" (TMU_TSTR_INIT), "m" (__m(TMU_TSTR)));
+#if 0
+ /* Start RTC */
+ asm volatile("");
+#endif
+}
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
new file mode 100644
index 000000000..3d3cba23c
--- /dev/null
+++ b/arch/sh/kernel/traps.c
@@ -0,0 +1,127 @@
+/*
+ * linux/arch/sh/kernel/traps.c
+ *
+ * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ */
+
+/*
+ * 'Traps.c' handles hardware traps and faults after we have saved some
+ * state in 'entry.S'.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+
+static inline void console_verbose(void)
+{
+ extern int console_loglevel;
+ console_loglevel = 15;
+}
+
+#define DO_ERROR(trapnr, signr, str, name, tsk) \
+asmlinkage void do_##name(unsigned long r4, unsigned long r5, \
+ unsigned long r6, unsigned long r7, \
+ struct pt_regs regs) \
+{ \
+ unsigned long error_code; \
+ \
+ asm volatile("stc r2_bank,%0": "=r" (error_code)); \
+ sti(); \
+ regs.syscall_nr = -1; \
+ tsk->thread.error_code = error_code; \
+ tsk->thread.trap_no = trapnr; \
+ force_sig(signr, tsk); \
+ die_if_no_fixup(str,&regs,error_code); \
+}
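+
+/*
+ * For example, DO_ERROR(12, SIGILL, "reserved instruction",
+ * reserved_inst, current) below expands to do_reserved_inst(), which
+ * reads the error code from r2_bank, forces SIGILL on current, and
+ * dies unless an exception-table fixup rewrites regs.pc.
+ */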
+
+/*
+ * These constants are for searching for possible module text
+ * segments. VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is
+ * a guess of how much space is likely to be vmalloced.
+ */
+#define VMALLOC_OFFSET (8*1024*1024)
+#define MODULE_RANGE (8*1024*1024)
+
+static void show_registers(struct pt_regs *regs)
+{
+	/* Not implemented yet. */
+}
+
+spinlock_t die_lock;
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ printk("%s: %04lx\n", str, err & 0xffff);
+ show_registers(regs);
+ spin_unlock_irq(&die_lock);
+ do_exit(SIGSEGV);
+}
+
+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+{
+ if (!user_mode(regs))
+ die(str, regs, err);
+}
+
+static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
+{
+ if (!user_mode(regs))
+ {
+ unsigned long fixup;
+ fixup = search_exception_table(regs->pc);
+ if (fixup) {
+ regs->pc = fixup;
+ return;
+ }
+ die(str, regs, err);
+ }
+}
+
+DO_ERROR( 7, SIGSEGV, "address error (load)", address_error_load, current)
+DO_ERROR( 8, SIGSEGV, "address error (store)", address_error_store, current)
+DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
+DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
+
+asmlinkage void do_exception_error (unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ long ex;
+ asm volatile("stc r2_bank,%0" : "=r" (ex));
+ die_if_kernel("exception", &regs, ex);
+}
+
+void __init trap_init(void)
+{
+ extern void *vbr_base;
+ extern void *exception_handling_table[14];
+
+ exception_handling_table[7] = (void *)do_address_error_load;
+ exception_handling_table[8] = (void *)do_address_error_store;
+ exception_handling_table[12] = (void *)do_reserved_inst;
+ exception_handling_table[13] = (void *)do_illegal_slot_inst;
+
+	/* NOTE: The VBR value should be in P1
+	   (or P2, the virtual "fixed" address space).
+	   It definitely should not be a physical address. */
+
+ asm volatile("ldc %0,vbr"
+ : /* no output */
+ : "r" (&vbr_base)
+ : "memory");
+}